file_name
large_stringlengths
4
140
prefix
large_stringlengths
0
39k
suffix
large_stringlengths
0
36.1k
middle
large_stringlengths
0
29.4k
fim_type
large_stringclasses
4 values
process_node.go
package workflow import ( "context" "encoding/json" "fmt" "strings" "time" "github.com/fsamin/go-dump" "github.com/go-gorp/gorp" "github.com/ovh/cds/engine/api/cache" "github.com/ovh/cds/engine/api/observability" "github.com/ovh/cds/engine/api/repositoriesmanager" "github.com/ovh/cds/sdk" "github.com/ovh/cds/sdk/log" ) func processNodeTriggers(ctx context.Context, db gorp.SqlExecutor, store cache.Store, proj *sdk.Project, wr *sdk.WorkflowRun, mapNodes map[int64]*sdk.Node, parentNodeRun []*sdk.WorkflowNodeRun, node *sdk.Node, parentSubNumber int) (*ProcessorReport, error) { report := new(ProcessorReport) for j := range node.Triggers { t := &node.Triggers[j] var abortTrigger bool if previousRunArray, ok := wr.WorkflowNodeRuns[t.ChildNode.ID]; ok { for _, previousRun := range previousRunArray { if int(previousRun.SubNumber) == parentSubNumber { abortTrigger = true break } } } if !abortTrigger { //Keep the subnumber of the previous node in the graph r1, _, errPwnr := processNodeRun(ctx, db, store, proj, wr, mapNodes, &t.ChildNode, int(parentSubNumber), parentNodeRun, nil, nil) if errPwnr != nil { log.Error("processWorkflowRun> Unable to process node ID=%d: %s", t.ChildNode.ID, errPwnr) AddWorkflowRunInfo(wr, true, sdk.SpawnMsg{ ID: sdk.MsgWorkflowError.ID, Args: []interface{}{errPwnr.Error()}, }) } _, _ = report.Merge(r1, nil) continue } } return report, nil } func processNodeRun(ctx context.Context, db gorp.SqlExecutor, store cache.Store, proj *sdk.Project, wr *sdk.WorkflowRun, mapNodes map[int64]*sdk.Node, n *sdk.Node, subNumber int, parentNodeRuns []*sdk.WorkflowNodeRun, hookEvent *sdk.WorkflowNodeRunHookEvent, manual *sdk.WorkflowNodeRunManual) (*ProcessorReport, bool, error) { report := new(ProcessorReport) exist, errN := nodeRunExist(db, wr.ID, n.ID, wr.Number, subNumber) if errN != nil { return nil, false, sdk.WrapError(errN, "processNodeRun> unable to check if node run exist") } if exist { return nil, false, nil } var end func() ctx, end = 
observability.Span(ctx, "workflow.processNodeRun", observability.Tag(observability.TagWorkflow, wr.Workflow.Name), observability.Tag(observability.TagWorkflowRun, wr.Number), observability.Tag(observability.TagWorkflowNode, n.Name), ) defer end() // Keep old model behaviour on fork and join // Send manual event to join and fork children when it was a manual run and when fork and join don't have run condition if manual == nil && len(parentNodeRuns) == 1 && parentNodeRuns[0].Manual != nil { n := wr.Workflow.WorkflowData.NodeByID(parentNodeRuns[0].WorkflowNodeID) // If fork or JOIN and No run conditions if (n.Type == sdk.NodeTypeJoin || n.Type == sdk.NodeTypeFork) && (n.Context == nil || (n.Context.Conditions.LuaScript == "" && len(n.Context.Conditions.PlainConditions) == 0)) { manual = parentNodeRuns[0].Manual } } switch n.Type { case sdk.NodeTypeFork, sdk.NodeTypePipeline, sdk.NodeTypeJoin: r1, conditionOK, errT := processNode(ctx, db, store, proj, wr, mapNodes, n, subNumber, parentNodeRuns, hookEvent, manual) if errT != nil { return nil, false, sdk.WrapError(errT, "Unable to processNode") } report.Merge(r1, nil) // nolint return report, conditionOK, nil case sdk.NodeTypeOutGoingHook: r1, conditionOK, errO := processNodeOutGoingHook(ctx, db, store, proj, wr, mapNodes, parentNodeRuns, n, subNumber) if errO != nil { return nil, false, sdk.WrapError(errO, "Unable to processNodeOutGoingHook") } report.Merge(r1, nil) // nolint return report, conditionOK, nil } return nil, false, nil } func processNode(ctx context.Context, db gorp.SqlExecutor, store cache.Store, proj *sdk.Project, wr *sdk.WorkflowRun, mapNodes map[int64]*sdk.Node, n *sdk.Node, subNumber int, parents []*sdk.WorkflowNodeRun, hookEvent *sdk.WorkflowNodeRunHookEvent, manual *sdk.WorkflowNodeRunManual) (*ProcessorReport, bool, error) { report := new(ProcessorReport) //TODO: Check user for manual done but check permission also for automatic trigger and hooks (with system to authenticate a webhook) if n.Context 
== nil { n.Context = &sdk.NodeContext{} } if n.Context.PipelineID == 0 && n.Type == sdk.NodeTypePipeline { return nil, false, sdk.ErrPipelineNotFound } var runPayload map[string]string var errPayload error runPayload, errPayload = n.Context.DefaultPayloadToMap() if errPayload != nil { return nil, false, sdk.WrapError(errPayload, "Default payload is malformatted") } isDefaultPayload := true // For node with pipeline var stages []sdk.Stage var pip sdk.Pipeline if n.Context.PipelineID > 0 { var has bool pip, has = wr.Workflow.Pipelines[n.Context.PipelineID] if !has { return nil, false, fmt.Errorf("pipeline %d not found in workflow", n.Context.PipelineID) } stages = make([]sdk.Stage, len(pip.Stages)) copy(stages, pip.Stages) //If the pipeline has parameter but none are defined on context, use the defaults if len(pip.Parameter) > 0 && len(n.Context.DefaultPipelineParameters) == 0 { n.Context.DefaultPipelineParameters = pip.Parameter } } // Create run run := &sdk.WorkflowNodeRun{ WorkflowID: wr.WorkflowID, LastModified: time.Now(), Start: time.Now(), Number: wr.Number, SubNumber: int64(subNumber), WorkflowRunID: wr.ID, WorkflowNodeID: n.ID, WorkflowNodeName: n.Name, Status: string(sdk.StatusWaiting), Stages: stages, Header: wr.Header, } if run.SubNumber >= wr.LastSubNumber { wr.LastSubNumber = run.SubNumber } if n.Context.ApplicationID != 0 { run.ApplicationID = n.Context.ApplicationID } parentsIDs := make([]int64, len(parents)) for i := range parents { parentsIDs[i] = parents[i].ID } parentStatus := sdk.StatusSuccess.String() run.SourceNodeRuns = parentsIDs if parents != nil { for _, p := range parents { for _, v := range wr.WorkflowNodeRuns { for _, run := range v { if p.ID == run.ID { if run.Status == sdk.StatusFail.String() || run.Status == sdk.StatusStopped.String() { parentStatus = run.Status } } } } } //Merge the payloads from all the sources _, next := observability.Span(ctx, "workflow.processNode.mergePayload") for _, r := range parents { e := 
dump.NewDefaultEncoder() e.Formatters = []dump.KeyFormatterFunc{dump.WithDefaultLowerCaseFormatter()} e.ExtraFields.DetailedMap = false e.ExtraFields.DetailedStruct = false e.ExtraFields.Len = false e.ExtraFields.Type = false m1, errm1 := e.ToStringMap(r.Payload) if errm1 != nil { AddWorkflowRunInfo(wr, true, sdk.SpawnMsg{ ID: sdk.MsgWorkflowError.ID, Args: []interface{}{errm1.Error()}, }) log.Error("processNode> Unable to compute hook payload: %v", errm1) } if isDefaultPayload { // Check if we try to merge for the first time so try to merge the default payload with the first parent run found // if it is the default payload then we have to take the previous git values runPayload = sdk.ParametersMapMerge(runPayload, m1) isDefaultPayload = false } else { runPayload = sdk.ParametersMapMerge(runPayload, m1, sdk.MapMergeOptions.ExcludeGitParams) } } run.Payload = runPayload run.PipelineParameters = sdk.ParametersMerge(pip.Parameter, n.Context.DefaultPipelineParameters) // Take first value in pipeline parameter list if no default value is set for i := range run.PipelineParameters { if run.PipelineParameters[i].Type == sdk.ListParameter && strings.Contains(run.PipelineParameters[i].Value, ";") { run.PipelineParameters[i].Value = strings.Split(run.PipelineParameters[i].Value, ";")[0] } } next() } run.HookEvent = hookEvent if hookEvent != nil { runPayload = sdk.ParametersMapMerge(runPayload, hookEvent.Payload) run.Payload = runPayload run.PipelineParameters = sdk.ParametersMerge(pip.Parameter, n.Context.DefaultPipelineParameters) } run.BuildParameters = append(run.BuildParameters, sdk.Parameter{ Name: "cds.node", Type: sdk.StringParameter, Value: run.WorkflowNodeName, }) run.Manual = manual if manual != nil { payloadStr, err := json.Marshal(manual.Payload) if err != nil { log.Error("processNode> Unable to marshal payload: %v", err) } run.BuildParameters = append(run.BuildParameters, sdk.Parameter{ Name: "payload", Type: sdk.TextParameter, Value: string(payloadStr), }) e := 
dump.NewDefaultEncoder() e.Formatters = []dump.KeyFormatterFunc{dump.WithDefaultLowerCaseFormatter()} e.ExtraFields.DetailedMap = false e.ExtraFields.DetailedStruct = false e.ExtraFields.Len = false e.ExtraFields.Type = false m1, errm1 := e.ToStringMap(manual.Payload) if errm1 != nil { return report, false, sdk.WrapError(errm1, "processNode> Unable to compute payload") } runPayload = sdk.ParametersMapMerge(runPayload, m1) run.Payload = runPayload run.PipelineParameters = sdk.ParametersMerge(n.Context.DefaultPipelineParameters, manual.PipelineParameters) run.BuildParameters = append(run.BuildParameters, sdk.Parameter{ Name: "cds.triggered_by.email", Type: sdk.StringParameter, Value: manual.User.Email, }, sdk.Parameter{ Name: "cds.triggered_by.fullname", Type: sdk.StringParameter, Value: manual.User.Fullname, }, sdk.Parameter{ Name: "cds.triggered_by.username", Type: sdk.StringParameter, Value: manual.User.Username, }, sdk.Parameter{ Name: "cds.manual", Type: sdk.StringParameter, Value: "true", }) } else { run.BuildParameters = append(run.BuildParameters, sdk.Parameter{ Name: "cds.manual", Type: sdk.StringParameter, Value: "false", }) } cdsStatusParam := sdk.Parameter{ Name: "cds.status", Type: sdk.StringParameter, Value: parentStatus, } run.BuildParameters = sdk.ParametersFromMap( sdk.ParametersMapMerge( sdk.ParametersToMap(run.BuildParameters), sdk.ParametersToMap([]sdk.Parameter{cdsStatusParam}), sdk.MapMergeOptions.ExcludeGitParams, ), ) // Process parameters for the jobs runContext := nodeRunContext{} if n.Context.PipelineID != 0 { runContext.Pipeline = wr.Workflow.Pipelines[n.Context.PipelineID] } if n.Context.ApplicationID != 0 { runContext.Application = wr.Workflow.Applications[n.Context.ApplicationID] } if n.Context.EnvironmentID != 0 { runContext.Environment = wr.Workflow.Environments[n.Context.EnvironmentID] } if n.Context.ProjectIntegrationID != 0 { runContext.ProjectIntegration = wr.Workflow.ProjectIntegrations[n.Context.ProjectIntegrationID] } 
jobParams, errParam := getNodeRunBuildParameters(ctx, proj, wr, run, runContext) if errParam != nil { AddWorkflowRunInfo(wr, true, sdk.SpawnMsg{ ID: sdk.MsgWorkflowError.ID, Args: []interface{}{errParam.Error()}, }) // if there an error -> display it in workflowRunInfo and not stop the launch log.Error("processNode> getNodeRunBuildParameters failed. Project:%s [#%d.%d]%s.%d with payload %v err:%v", proj.Name, wr.Number, subNumber, wr.Workflow.Name, n.ID, run.Payload, errParam) } run.BuildParameters = append(run.BuildParameters, jobParams...) // Inherit parameter from parent job if len(parentsIDs) > 0 { _, next := observability.Span(ctx, "workflow.getParentParameters") parentsParams, errPP := getParentParameters(wr, parents, runPayload) next() if errPP != nil { return nil, false, sdk.WrapError(errPP, "processNode> getParentParameters failed") } mapBuildParams := sdk.ParametersToMap(run.BuildParameters) mapParentParams := sdk.ParametersToMap(parentsParams) run.BuildParameters = sdk.ParametersFromMap(sdk.ParametersMapMerge(mapBuildParams, mapParentParams, sdk.MapMergeOptions.ExcludeGitParams)) } //Parse job params to get the VCS infos currentGitValues := map[string]string{} for _, param := range jobParams { switch param.Name { case tagGitHash, tagGitBranch, tagGitTag, tagGitAuthor, tagGitMessage, tagGitRepository, tagGitURL, tagGitHTTPURL: currentGitValues[param.Name] = param.Value } } //Parse job params to get the VCS infos previousGitValues := map[string]string{} for _, param := range run.BuildParameters { switch param.Name { case tagGitHash, tagGitBranch, tagGitTag, tagGitAuthor, tagGitMessage, tagGitRepository, tagGitURL, tagGitHTTPURL: previousGitValues[param.Name] = param.Value } } isRoot := n.ID == wr.Workflow.WorkflowData.Node.ID gitValues := currentGitValues if previousGitValues[tagGitURL] == currentGitValues[tagGitURL] || previousGitValues[tagGitHTTPURL] == currentGitValues[tagGitHTTPURL] { gitValues = previousGitValues } var vcsInfos vcsInfos var app 
sdk.Application if n.Context.ApplicationID != 0 { app = wr.Workflow.Applications[n.Context.ApplicationID] } var errVcs error vcsServer := repositoriesmanager.GetProjectVCSServer(proj, app.VCSServer) vcsInfos, errVcs = getVCSInfos(ctx, db, store, vcsServer, gitValues, app.Name, app.VCSServer, app.RepositoryFullname, !isRoot, previousGitValues[tagGitRepository]) if errVcs != nil { if strings.Contains(errVcs.Error(), "branch has been deleted") { AddWorkflowRunInfo(wr, true, sdk.SpawnMsg{ ID: sdk.MsgWorkflowRunBranchDeleted.ID, Args: []interface{}{vcsInfos.Branch}, }) } else { AddWorkflowRunInfo(wr, true, sdk.SpawnMsg{ ID: sdk.MsgWorkflowError.ID, Args: []interface{}{errVcs.Error()}, }) } if isRoot { return nil, false, sdk.WrapError(errVcs, "processNode> Cannot get VCSInfos") } return nil, true, nil } // only if it's the root pipeline, we put the git... in the build parameters // this allow user to write some run conditions with .git.var on the root pipeline if isRoot { setValuesGitInBuildParameters(run, vcsInfos) } // Check Run Conditions if hookEvent != nil { hooks := wr.Workflow.WorkflowData.GetHooks() hook, ok := hooks[hookEvent.WorkflowNodeHookUUID] if !ok { return nil, false, sdk.WrapError(sdk.ErrNoHook, "Unable to find hook %s", hookEvent.WorkflowNodeHookUUID) } // Check conditions var params = run.BuildParameters // Define specific destination parameters dest := mapNodes[hook.NodeID] if dest == nil { return nil, false, sdk.WrapError(sdk.ErrWorkflowNodeNotFound, "Unable to find node %d", hook.NodeID) } if !checkNodeRunCondition(wr, dest.Context.Conditions, params) { log.Debug("Avoid trigger workflow from hook %s", hook.UUID) return nil, false, nil } } else { if !checkNodeRunCondition(wr, n.Context.Conditions, run.BuildParameters) { log.Debug("Condition failed %d/%d %+v", wr.ID, n.ID, run.BuildParameters) return nil, false, nil } } if !isRoot { setValuesGitInBuildParameters(run, vcsInfos) } // Tag VCS infos : add in tag only if it does not exist if 
!wr.TagExists(tagGitRepository) { wr.Tag(tagGitRepository, run.VCSRepository) if run.VCSBranch != "" && run.VCSTag == "" { wr.Tag(tagGitBranch, run.VCSBranch) } if run.VCSTag != "" { wr.Tag(tagGitTag, run.VCSTag) } if len(run.VCSHash) >= 7 { wr.Tag(tagGitHash, run.VCSHash[:7]) } else { wr.Tag(tagGitHash, run.VCSHash) } wr.Tag(tagGitAuthor, vcsInfos.Author) } // Add env tag if n.Context.EnvironmentID != 0 { wr.Tag(tagEnvironment, wr.Workflow.Environments[n.Context.EnvironmentID].Name) } for _, info := range wr.Infos { if info.IsError && info.SubNumber == wr.LastSubNumber { run.Status = string(sdk.StatusFail) run.Done = time.Now() break } } if err := insertWorkflowNodeRun(db, run); err != nil { return nil, false, sdk.WrapError(err, "unable to insert run (node id : %d, node name : %s, subnumber : %d)", run.WorkflowNodeID, run.WorkflowNodeName, run.SubNumber) } wr.LastExecution = time.Now() buildParameters := sdk.ParametersToMap(run.BuildParameters) _, okUI := buildParameters["cds.ui.pipeline.run"] _, okID := buildParameters["cds.node.id"] if !okUI || !okID { if !okUI { uiRunURL := fmt.Sprintf("%s/project/%s/workflow/%s/run/%s/node/%d?name=%s", baseUIURL, buildParameters["cds.project"], buildParameters["cds.workflow"], buildParameters["cds.run.number"], run.ID, buildParameters["cds.workflow"]) sdk.AddParameter(&run.BuildParameters, "cds.ui.pipeline.run", sdk.StringParameter, uiRunURL) } if !okID { sdk.AddParameter(&run.BuildParameters, "cds.node.id", sdk.StringParameter, fmt.Sprintf("%d", run.ID)) } if err := UpdateNodeRunBuildParameters(db, run.ID, run.BuildParameters); err != nil
} report.Add(*run) //Update workflow run if wr.WorkflowNodeRuns == nil { wr.WorkflowNodeRuns = make(map[int64][]sdk.WorkflowNodeRun) } wr.WorkflowNodeRuns[run.WorkflowNodeID] = append(wr.WorkflowNodeRuns[run.WorkflowNodeID], *run) wr.LastSubNumber = MaxSubNumber(wr.WorkflowNodeRuns) if err := UpdateWorkflowRun(ctx, db, wr); err != nil { return nil, false, sdk.WrapError(err, "unable to update workflow run") } //Check the context.mutex to know if we are allowed to run it if n.Context.Mutex { //Check if there are builing workflownoderun with the same workflow_node_name for the same workflow mutexQuery := `select count(1) from workflow_node_run join workflow_run on workflow_run.id = workflow_node_run.workflow_run_id join workflow on workflow.id = workflow_run.workflow_id where workflow.id = $1 and workflow_node_run.id <> $2 and workflow_node_run.workflow_node_name = $3 and workflow_node_run.status = $4` nbMutex, err := db.SelectInt(mutexQuery, n.WorkflowID, run.ID, n.Name, string(sdk.StatusBuilding)) if err != nil { return nil, false, sdk.WrapError(err, "unable to check mutexes") } if nbMutex > 0 { log.Debug("Noderun %s processed but not executed because of mutex", n.Name) AddWorkflowRunInfo(wr, false, sdk.SpawnMsg{ ID: sdk.MsgWorkflowNodeMutex.ID, Args: []interface{}{n.Name}, }) if err := UpdateWorkflowRun(ctx, db, wr); err != nil { return nil, false, sdk.WrapError(err, "unable to update workflow run") } //Mutex is locked. exit without error return report, false, nil } //Mutex is free, continue } //Execute the node run ! r1, err := execute(ctx, db, store, proj, run, runContext) if err != nil { return nil, false, sdk.WrapError(err, "unable to execute workflow run") } _, _ = report.Merge(r1, nil) return report, true, nil }
{ return nil, false, sdk.WrapError(err, "unable to update workflow node run build parameters") }
conditional_block
process_node.go
package workflow import ( "context" "encoding/json" "fmt" "strings" "time" "github.com/fsamin/go-dump" "github.com/go-gorp/gorp" "github.com/ovh/cds/engine/api/cache" "github.com/ovh/cds/engine/api/observability" "github.com/ovh/cds/engine/api/repositoriesmanager" "github.com/ovh/cds/sdk" "github.com/ovh/cds/sdk/log" ) func processNodeTriggers(ctx context.Context, db gorp.SqlExecutor, store cache.Store, proj *sdk.Project, wr *sdk.WorkflowRun, mapNodes map[int64]*sdk.Node, parentNodeRun []*sdk.WorkflowNodeRun, node *sdk.Node, parentSubNumber int) (*ProcessorReport, error) { report := new(ProcessorReport) for j := range node.Triggers { t := &node.Triggers[j] var abortTrigger bool if previousRunArray, ok := wr.WorkflowNodeRuns[t.ChildNode.ID]; ok { for _, previousRun := range previousRunArray { if int(previousRun.SubNumber) == parentSubNumber { abortTrigger = true break } } } if !abortTrigger { //Keep the subnumber of the previous node in the graph r1, _, errPwnr := processNodeRun(ctx, db, store, proj, wr, mapNodes, &t.ChildNode, int(parentSubNumber), parentNodeRun, nil, nil) if errPwnr != nil { log.Error("processWorkflowRun> Unable to process node ID=%d: %s", t.ChildNode.ID, errPwnr) AddWorkflowRunInfo(wr, true, sdk.SpawnMsg{ ID: sdk.MsgWorkflowError.ID, Args: []interface{}{errPwnr.Error()}, }) } _, _ = report.Merge(r1, nil) continue } } return report, nil } func processNodeRun(ctx context.Context, db gorp.SqlExecutor, store cache.Store, proj *sdk.Project, wr *sdk.WorkflowRun, mapNodes map[int64]*sdk.Node, n *sdk.Node, subNumber int, parentNodeRuns []*sdk.WorkflowNodeRun, hookEvent *sdk.WorkflowNodeRunHookEvent, manual *sdk.WorkflowNodeRunManual) (*ProcessorReport, bool, error)
func processNode(ctx context.Context, db gorp.SqlExecutor, store cache.Store, proj *sdk.Project, wr *sdk.WorkflowRun, mapNodes map[int64]*sdk.Node, n *sdk.Node, subNumber int, parents []*sdk.WorkflowNodeRun, hookEvent *sdk.WorkflowNodeRunHookEvent, manual *sdk.WorkflowNodeRunManual) (*ProcessorReport, bool, error) { report := new(ProcessorReport) //TODO: Check user for manual done but check permission also for automatic trigger and hooks (with system to authenticate a webhook) if n.Context == nil { n.Context = &sdk.NodeContext{} } if n.Context.PipelineID == 0 && n.Type == sdk.NodeTypePipeline { return nil, false, sdk.ErrPipelineNotFound } var runPayload map[string]string var errPayload error runPayload, errPayload = n.Context.DefaultPayloadToMap() if errPayload != nil { return nil, false, sdk.WrapError(errPayload, "Default payload is malformatted") } isDefaultPayload := true // For node with pipeline var stages []sdk.Stage var pip sdk.Pipeline if n.Context.PipelineID > 0 { var has bool pip, has = wr.Workflow.Pipelines[n.Context.PipelineID] if !has { return nil, false, fmt.Errorf("pipeline %d not found in workflow", n.Context.PipelineID) } stages = make([]sdk.Stage, len(pip.Stages)) copy(stages, pip.Stages) //If the pipeline has parameter but none are defined on context, use the defaults if len(pip.Parameter) > 0 && len(n.Context.DefaultPipelineParameters) == 0 { n.Context.DefaultPipelineParameters = pip.Parameter } } // Create run run := &sdk.WorkflowNodeRun{ WorkflowID: wr.WorkflowID, LastModified: time.Now(), Start: time.Now(), Number: wr.Number, SubNumber: int64(subNumber), WorkflowRunID: wr.ID, WorkflowNodeID: n.ID, WorkflowNodeName: n.Name, Status: string(sdk.StatusWaiting), Stages: stages, Header: wr.Header, } if run.SubNumber >= wr.LastSubNumber { wr.LastSubNumber = run.SubNumber } if n.Context.ApplicationID != 0 { run.ApplicationID = n.Context.ApplicationID } parentsIDs := make([]int64, len(parents)) for i := range parents { parentsIDs[i] = parents[i].ID } 
parentStatus := sdk.StatusSuccess.String() run.SourceNodeRuns = parentsIDs if parents != nil { for _, p := range parents { for _, v := range wr.WorkflowNodeRuns { for _, run := range v { if p.ID == run.ID { if run.Status == sdk.StatusFail.String() || run.Status == sdk.StatusStopped.String() { parentStatus = run.Status } } } } } //Merge the payloads from all the sources _, next := observability.Span(ctx, "workflow.processNode.mergePayload") for _, r := range parents { e := dump.NewDefaultEncoder() e.Formatters = []dump.KeyFormatterFunc{dump.WithDefaultLowerCaseFormatter()} e.ExtraFields.DetailedMap = false e.ExtraFields.DetailedStruct = false e.ExtraFields.Len = false e.ExtraFields.Type = false m1, errm1 := e.ToStringMap(r.Payload) if errm1 != nil { AddWorkflowRunInfo(wr, true, sdk.SpawnMsg{ ID: sdk.MsgWorkflowError.ID, Args: []interface{}{errm1.Error()}, }) log.Error("processNode> Unable to compute hook payload: %v", errm1) } if isDefaultPayload { // Check if we try to merge for the first time so try to merge the default payload with the first parent run found // if it is the default payload then we have to take the previous git values runPayload = sdk.ParametersMapMerge(runPayload, m1) isDefaultPayload = false } else { runPayload = sdk.ParametersMapMerge(runPayload, m1, sdk.MapMergeOptions.ExcludeGitParams) } } run.Payload = runPayload run.PipelineParameters = sdk.ParametersMerge(pip.Parameter, n.Context.DefaultPipelineParameters) // Take first value in pipeline parameter list if no default value is set for i := range run.PipelineParameters { if run.PipelineParameters[i].Type == sdk.ListParameter && strings.Contains(run.PipelineParameters[i].Value, ";") { run.PipelineParameters[i].Value = strings.Split(run.PipelineParameters[i].Value, ";")[0] } } next() } run.HookEvent = hookEvent if hookEvent != nil { runPayload = sdk.ParametersMapMerge(runPayload, hookEvent.Payload) run.Payload = runPayload run.PipelineParameters = sdk.ParametersMerge(pip.Parameter, 
n.Context.DefaultPipelineParameters) } run.BuildParameters = append(run.BuildParameters, sdk.Parameter{ Name: "cds.node", Type: sdk.StringParameter, Value: run.WorkflowNodeName, }) run.Manual = manual if manual != nil { payloadStr, err := json.Marshal(manual.Payload) if err != nil { log.Error("processNode> Unable to marshal payload: %v", err) } run.BuildParameters = append(run.BuildParameters, sdk.Parameter{ Name: "payload", Type: sdk.TextParameter, Value: string(payloadStr), }) e := dump.NewDefaultEncoder() e.Formatters = []dump.KeyFormatterFunc{dump.WithDefaultLowerCaseFormatter()} e.ExtraFields.DetailedMap = false e.ExtraFields.DetailedStruct = false e.ExtraFields.Len = false e.ExtraFields.Type = false m1, errm1 := e.ToStringMap(manual.Payload) if errm1 != nil { return report, false, sdk.WrapError(errm1, "processNode> Unable to compute payload") } runPayload = sdk.ParametersMapMerge(runPayload, m1) run.Payload = runPayload run.PipelineParameters = sdk.ParametersMerge(n.Context.DefaultPipelineParameters, manual.PipelineParameters) run.BuildParameters = append(run.BuildParameters, sdk.Parameter{ Name: "cds.triggered_by.email", Type: sdk.StringParameter, Value: manual.User.Email, }, sdk.Parameter{ Name: "cds.triggered_by.fullname", Type: sdk.StringParameter, Value: manual.User.Fullname, }, sdk.Parameter{ Name: "cds.triggered_by.username", Type: sdk.StringParameter, Value: manual.User.Username, }, sdk.Parameter{ Name: "cds.manual", Type: sdk.StringParameter, Value: "true", }) } else { run.BuildParameters = append(run.BuildParameters, sdk.Parameter{ Name: "cds.manual", Type: sdk.StringParameter, Value: "false", }) } cdsStatusParam := sdk.Parameter{ Name: "cds.status", Type: sdk.StringParameter, Value: parentStatus, } run.BuildParameters = sdk.ParametersFromMap( sdk.ParametersMapMerge( sdk.ParametersToMap(run.BuildParameters), sdk.ParametersToMap([]sdk.Parameter{cdsStatusParam}), sdk.MapMergeOptions.ExcludeGitParams, ), ) // Process parameters for the jobs runContext 
:= nodeRunContext{} if n.Context.PipelineID != 0 { runContext.Pipeline = wr.Workflow.Pipelines[n.Context.PipelineID] } if n.Context.ApplicationID != 0 { runContext.Application = wr.Workflow.Applications[n.Context.ApplicationID] } if n.Context.EnvironmentID != 0 { runContext.Environment = wr.Workflow.Environments[n.Context.EnvironmentID] } if n.Context.ProjectIntegrationID != 0 { runContext.ProjectIntegration = wr.Workflow.ProjectIntegrations[n.Context.ProjectIntegrationID] } jobParams, errParam := getNodeRunBuildParameters(ctx, proj, wr, run, runContext) if errParam != nil { AddWorkflowRunInfo(wr, true, sdk.SpawnMsg{ ID: sdk.MsgWorkflowError.ID, Args: []interface{}{errParam.Error()}, }) // if there an error -> display it in workflowRunInfo and not stop the launch log.Error("processNode> getNodeRunBuildParameters failed. Project:%s [#%d.%d]%s.%d with payload %v err:%v", proj.Name, wr.Number, subNumber, wr.Workflow.Name, n.ID, run.Payload, errParam) } run.BuildParameters = append(run.BuildParameters, jobParams...) 
// Inherit parameter from parent job if len(parentsIDs) > 0 { _, next := observability.Span(ctx, "workflow.getParentParameters") parentsParams, errPP := getParentParameters(wr, parents, runPayload) next() if errPP != nil { return nil, false, sdk.WrapError(errPP, "processNode> getParentParameters failed") } mapBuildParams := sdk.ParametersToMap(run.BuildParameters) mapParentParams := sdk.ParametersToMap(parentsParams) run.BuildParameters = sdk.ParametersFromMap(sdk.ParametersMapMerge(mapBuildParams, mapParentParams, sdk.MapMergeOptions.ExcludeGitParams)) } //Parse job params to get the VCS infos currentGitValues := map[string]string{} for _, param := range jobParams { switch param.Name { case tagGitHash, tagGitBranch, tagGitTag, tagGitAuthor, tagGitMessage, tagGitRepository, tagGitURL, tagGitHTTPURL: currentGitValues[param.Name] = param.Value } } //Parse job params to get the VCS infos previousGitValues := map[string]string{} for _, param := range run.BuildParameters { switch param.Name { case tagGitHash, tagGitBranch, tagGitTag, tagGitAuthor, tagGitMessage, tagGitRepository, tagGitURL, tagGitHTTPURL: previousGitValues[param.Name] = param.Value } } isRoot := n.ID == wr.Workflow.WorkflowData.Node.ID gitValues := currentGitValues if previousGitValues[tagGitURL] == currentGitValues[tagGitURL] || previousGitValues[tagGitHTTPURL] == currentGitValues[tagGitHTTPURL] { gitValues = previousGitValues } var vcsInfos vcsInfos var app sdk.Application if n.Context.ApplicationID != 0 { app = wr.Workflow.Applications[n.Context.ApplicationID] } var errVcs error vcsServer := repositoriesmanager.GetProjectVCSServer(proj, app.VCSServer) vcsInfos, errVcs = getVCSInfos(ctx, db, store, vcsServer, gitValues, app.Name, app.VCSServer, app.RepositoryFullname, !isRoot, previousGitValues[tagGitRepository]) if errVcs != nil { if strings.Contains(errVcs.Error(), "branch has been deleted") { AddWorkflowRunInfo(wr, true, sdk.SpawnMsg{ ID: sdk.MsgWorkflowRunBranchDeleted.ID, Args: 
[]interface{}{vcsInfos.Branch}, }) } else { AddWorkflowRunInfo(wr, true, sdk.SpawnMsg{ ID: sdk.MsgWorkflowError.ID, Args: []interface{}{errVcs.Error()}, }) } if isRoot { return nil, false, sdk.WrapError(errVcs, "processNode> Cannot get VCSInfos") } return nil, true, nil } // only if it's the root pipeline, we put the git... in the build parameters // this allow user to write some run conditions with .git.var on the root pipeline if isRoot { setValuesGitInBuildParameters(run, vcsInfos) } // Check Run Conditions if hookEvent != nil { hooks := wr.Workflow.WorkflowData.GetHooks() hook, ok := hooks[hookEvent.WorkflowNodeHookUUID] if !ok { return nil, false, sdk.WrapError(sdk.ErrNoHook, "Unable to find hook %s", hookEvent.WorkflowNodeHookUUID) } // Check conditions var params = run.BuildParameters // Define specific destination parameters dest := mapNodes[hook.NodeID] if dest == nil { return nil, false, sdk.WrapError(sdk.ErrWorkflowNodeNotFound, "Unable to find node %d", hook.NodeID) } if !checkNodeRunCondition(wr, dest.Context.Conditions, params) { log.Debug("Avoid trigger workflow from hook %s", hook.UUID) return nil, false, nil } } else { if !checkNodeRunCondition(wr, n.Context.Conditions, run.BuildParameters) { log.Debug("Condition failed %d/%d %+v", wr.ID, n.ID, run.BuildParameters) return nil, false, nil } } if !isRoot { setValuesGitInBuildParameters(run, vcsInfos) } // Tag VCS infos : add in tag only if it does not exist if !wr.TagExists(tagGitRepository) { wr.Tag(tagGitRepository, run.VCSRepository) if run.VCSBranch != "" && run.VCSTag == "" { wr.Tag(tagGitBranch, run.VCSBranch) } if run.VCSTag != "" { wr.Tag(tagGitTag, run.VCSTag) } if len(run.VCSHash) >= 7 { wr.Tag(tagGitHash, run.VCSHash[:7]) } else { wr.Tag(tagGitHash, run.VCSHash) } wr.Tag(tagGitAuthor, vcsInfos.Author) } // Add env tag if n.Context.EnvironmentID != 0 { wr.Tag(tagEnvironment, wr.Workflow.Environments[n.Context.EnvironmentID].Name) } for _, info := range wr.Infos { if info.IsError && 
info.SubNumber == wr.LastSubNumber { run.Status = string(sdk.StatusFail) run.Done = time.Now() break } } if err := insertWorkflowNodeRun(db, run); err != nil { return nil, false, sdk.WrapError(err, "unable to insert run (node id : %d, node name : %s, subnumber : %d)", run.WorkflowNodeID, run.WorkflowNodeName, run.SubNumber) } wr.LastExecution = time.Now() buildParameters := sdk.ParametersToMap(run.BuildParameters) _, okUI := buildParameters["cds.ui.pipeline.run"] _, okID := buildParameters["cds.node.id"] if !okUI || !okID { if !okUI { uiRunURL := fmt.Sprintf("%s/project/%s/workflow/%s/run/%s/node/%d?name=%s", baseUIURL, buildParameters["cds.project"], buildParameters["cds.workflow"], buildParameters["cds.run.number"], run.ID, buildParameters["cds.workflow"]) sdk.AddParameter(&run.BuildParameters, "cds.ui.pipeline.run", sdk.StringParameter, uiRunURL) } if !okID { sdk.AddParameter(&run.BuildParameters, "cds.node.id", sdk.StringParameter, fmt.Sprintf("%d", run.ID)) } if err := UpdateNodeRunBuildParameters(db, run.ID, run.BuildParameters); err != nil { return nil, false, sdk.WrapError(err, "unable to update workflow node run build parameters") } } report.Add(*run) //Update workflow run if wr.WorkflowNodeRuns == nil { wr.WorkflowNodeRuns = make(map[int64][]sdk.WorkflowNodeRun) } wr.WorkflowNodeRuns[run.WorkflowNodeID] = append(wr.WorkflowNodeRuns[run.WorkflowNodeID], *run) wr.LastSubNumber = MaxSubNumber(wr.WorkflowNodeRuns) if err := UpdateWorkflowRun(ctx, db, wr); err != nil { return nil, false, sdk.WrapError(err, "unable to update workflow run") } //Check the context.mutex to know if we are allowed to run it if n.Context.Mutex { //Check if there are builing workflownoderun with the same workflow_node_name for the same workflow mutexQuery := `select count(1) from workflow_node_run join workflow_run on workflow_run.id = workflow_node_run.workflow_run_id join workflow on workflow.id = workflow_run.workflow_id where workflow.id = $1 and workflow_node_run.id <> $2 and 
workflow_node_run.workflow_node_name = $3 and workflow_node_run.status = $4` nbMutex, err := db.SelectInt(mutexQuery, n.WorkflowID, run.ID, n.Name, string(sdk.StatusBuilding)) if err != nil { return nil, false, sdk.WrapError(err, "unable to check mutexes") } if nbMutex > 0 { log.Debug("Noderun %s processed but not executed because of mutex", n.Name) AddWorkflowRunInfo(wr, false, sdk.SpawnMsg{ ID: sdk.MsgWorkflowNodeMutex.ID, Args: []interface{}{n.Name}, }) if err := UpdateWorkflowRun(ctx, db, wr); err != nil { return nil, false, sdk.WrapError(err, "unable to update workflow run") } //Mutex is locked. exit without error return report, false, nil } //Mutex is free, continue } //Execute the node run ! r1, err := execute(ctx, db, store, proj, run, runContext) if err != nil { return nil, false, sdk.WrapError(err, "unable to execute workflow run") } _, _ = report.Merge(r1, nil) return report, true, nil }
{ report := new(ProcessorReport) exist, errN := nodeRunExist(db, wr.ID, n.ID, wr.Number, subNumber) if errN != nil { return nil, false, sdk.WrapError(errN, "processNodeRun> unable to check if node run exist") } if exist { return nil, false, nil } var end func() ctx, end = observability.Span(ctx, "workflow.processNodeRun", observability.Tag(observability.TagWorkflow, wr.Workflow.Name), observability.Tag(observability.TagWorkflowRun, wr.Number), observability.Tag(observability.TagWorkflowNode, n.Name), ) defer end() // Keep old model behaviour on fork and join // Send manual event to join and fork children when it was a manual run and when fork and join don't have run condition if manual == nil && len(parentNodeRuns) == 1 && parentNodeRuns[0].Manual != nil { n := wr.Workflow.WorkflowData.NodeByID(parentNodeRuns[0].WorkflowNodeID) // If fork or JOIN and No run conditions if (n.Type == sdk.NodeTypeJoin || n.Type == sdk.NodeTypeFork) && (n.Context == nil || (n.Context.Conditions.LuaScript == "" && len(n.Context.Conditions.PlainConditions) == 0)) { manual = parentNodeRuns[0].Manual } } switch n.Type { case sdk.NodeTypeFork, sdk.NodeTypePipeline, sdk.NodeTypeJoin: r1, conditionOK, errT := processNode(ctx, db, store, proj, wr, mapNodes, n, subNumber, parentNodeRuns, hookEvent, manual) if errT != nil { return nil, false, sdk.WrapError(errT, "Unable to processNode") } report.Merge(r1, nil) // nolint return report, conditionOK, nil case sdk.NodeTypeOutGoingHook: r1, conditionOK, errO := processNodeOutGoingHook(ctx, db, store, proj, wr, mapNodes, parentNodeRuns, n, subNumber) if errO != nil { return nil, false, sdk.WrapError(errO, "Unable to processNodeOutGoingHook") } report.Merge(r1, nil) // nolint return report, conditionOK, nil } return nil, false, nil }
identifier_body
process_node.go
package workflow import ( "context" "encoding/json" "fmt" "strings" "time" "github.com/fsamin/go-dump" "github.com/go-gorp/gorp" "github.com/ovh/cds/engine/api/cache" "github.com/ovh/cds/engine/api/observability" "github.com/ovh/cds/engine/api/repositoriesmanager" "github.com/ovh/cds/sdk" "github.com/ovh/cds/sdk/log" ) func processNodeTriggers(ctx context.Context, db gorp.SqlExecutor, store cache.Store, proj *sdk.Project, wr *sdk.WorkflowRun, mapNodes map[int64]*sdk.Node, parentNodeRun []*sdk.WorkflowNodeRun, node *sdk.Node, parentSubNumber int) (*ProcessorReport, error) { report := new(ProcessorReport) for j := range node.Triggers { t := &node.Triggers[j] var abortTrigger bool if previousRunArray, ok := wr.WorkflowNodeRuns[t.ChildNode.ID]; ok { for _, previousRun := range previousRunArray { if int(previousRun.SubNumber) == parentSubNumber { abortTrigger = true break } } } if !abortTrigger { //Keep the subnumber of the previous node in the graph r1, _, errPwnr := processNodeRun(ctx, db, store, proj, wr, mapNodes, &t.ChildNode, int(parentSubNumber), parentNodeRun, nil, nil) if errPwnr != nil { log.Error("processWorkflowRun> Unable to process node ID=%d: %s", t.ChildNode.ID, errPwnr) AddWorkflowRunInfo(wr, true, sdk.SpawnMsg{ ID: sdk.MsgWorkflowError.ID, Args: []interface{}{errPwnr.Error()}, }) } _, _ = report.Merge(r1, nil) continue } } return report, nil } func
(ctx context.Context, db gorp.SqlExecutor, store cache.Store, proj *sdk.Project, wr *sdk.WorkflowRun, mapNodes map[int64]*sdk.Node, n *sdk.Node, subNumber int, parentNodeRuns []*sdk.WorkflowNodeRun, hookEvent *sdk.WorkflowNodeRunHookEvent, manual *sdk.WorkflowNodeRunManual) (*ProcessorReport, bool, error) { report := new(ProcessorReport) exist, errN := nodeRunExist(db, wr.ID, n.ID, wr.Number, subNumber) if errN != nil { return nil, false, sdk.WrapError(errN, "processNodeRun> unable to check if node run exist") } if exist { return nil, false, nil } var end func() ctx, end = observability.Span(ctx, "workflow.processNodeRun", observability.Tag(observability.TagWorkflow, wr.Workflow.Name), observability.Tag(observability.TagWorkflowRun, wr.Number), observability.Tag(observability.TagWorkflowNode, n.Name), ) defer end() // Keep old model behaviour on fork and join // Send manual event to join and fork children when it was a manual run and when fork and join don't have run condition if manual == nil && len(parentNodeRuns) == 1 && parentNodeRuns[0].Manual != nil { n := wr.Workflow.WorkflowData.NodeByID(parentNodeRuns[0].WorkflowNodeID) // If fork or JOIN and No run conditions if (n.Type == sdk.NodeTypeJoin || n.Type == sdk.NodeTypeFork) && (n.Context == nil || (n.Context.Conditions.LuaScript == "" && len(n.Context.Conditions.PlainConditions) == 0)) { manual = parentNodeRuns[0].Manual } } switch n.Type { case sdk.NodeTypeFork, sdk.NodeTypePipeline, sdk.NodeTypeJoin: r1, conditionOK, errT := processNode(ctx, db, store, proj, wr, mapNodes, n, subNumber, parentNodeRuns, hookEvent, manual) if errT != nil { return nil, false, sdk.WrapError(errT, "Unable to processNode") } report.Merge(r1, nil) // nolint return report, conditionOK, nil case sdk.NodeTypeOutGoingHook: r1, conditionOK, errO := processNodeOutGoingHook(ctx, db, store, proj, wr, mapNodes, parentNodeRuns, n, subNumber) if errO != nil { return nil, false, sdk.WrapError(errO, "Unable to processNodeOutGoingHook") } 
report.Merge(r1, nil) // nolint return report, conditionOK, nil } return nil, false, nil } func processNode(ctx context.Context, db gorp.SqlExecutor, store cache.Store, proj *sdk.Project, wr *sdk.WorkflowRun, mapNodes map[int64]*sdk.Node, n *sdk.Node, subNumber int, parents []*sdk.WorkflowNodeRun, hookEvent *sdk.WorkflowNodeRunHookEvent, manual *sdk.WorkflowNodeRunManual) (*ProcessorReport, bool, error) { report := new(ProcessorReport) //TODO: Check user for manual done but check permission also for automatic trigger and hooks (with system to authenticate a webhook) if n.Context == nil { n.Context = &sdk.NodeContext{} } if n.Context.PipelineID == 0 && n.Type == sdk.NodeTypePipeline { return nil, false, sdk.ErrPipelineNotFound } var runPayload map[string]string var errPayload error runPayload, errPayload = n.Context.DefaultPayloadToMap() if errPayload != nil { return nil, false, sdk.WrapError(errPayload, "Default payload is malformatted") } isDefaultPayload := true // For node with pipeline var stages []sdk.Stage var pip sdk.Pipeline if n.Context.PipelineID > 0 { var has bool pip, has = wr.Workflow.Pipelines[n.Context.PipelineID] if !has { return nil, false, fmt.Errorf("pipeline %d not found in workflow", n.Context.PipelineID) } stages = make([]sdk.Stage, len(pip.Stages)) copy(stages, pip.Stages) //If the pipeline has parameter but none are defined on context, use the defaults if len(pip.Parameter) > 0 && len(n.Context.DefaultPipelineParameters) == 0 { n.Context.DefaultPipelineParameters = pip.Parameter } } // Create run run := &sdk.WorkflowNodeRun{ WorkflowID: wr.WorkflowID, LastModified: time.Now(), Start: time.Now(), Number: wr.Number, SubNumber: int64(subNumber), WorkflowRunID: wr.ID, WorkflowNodeID: n.ID, WorkflowNodeName: n.Name, Status: string(sdk.StatusWaiting), Stages: stages, Header: wr.Header, } if run.SubNumber >= wr.LastSubNumber { wr.LastSubNumber = run.SubNumber } if n.Context.ApplicationID != 0 { run.ApplicationID = n.Context.ApplicationID } 
parentsIDs := make([]int64, len(parents)) for i := range parents { parentsIDs[i] = parents[i].ID } parentStatus := sdk.StatusSuccess.String() run.SourceNodeRuns = parentsIDs if parents != nil { for _, p := range parents { for _, v := range wr.WorkflowNodeRuns { for _, run := range v { if p.ID == run.ID { if run.Status == sdk.StatusFail.String() || run.Status == sdk.StatusStopped.String() { parentStatus = run.Status } } } } } //Merge the payloads from all the sources _, next := observability.Span(ctx, "workflow.processNode.mergePayload") for _, r := range parents { e := dump.NewDefaultEncoder() e.Formatters = []dump.KeyFormatterFunc{dump.WithDefaultLowerCaseFormatter()} e.ExtraFields.DetailedMap = false e.ExtraFields.DetailedStruct = false e.ExtraFields.Len = false e.ExtraFields.Type = false m1, errm1 := e.ToStringMap(r.Payload) if errm1 != nil { AddWorkflowRunInfo(wr, true, sdk.SpawnMsg{ ID: sdk.MsgWorkflowError.ID, Args: []interface{}{errm1.Error()}, }) log.Error("processNode> Unable to compute hook payload: %v", errm1) } if isDefaultPayload { // Check if we try to merge for the first time so try to merge the default payload with the first parent run found // if it is the default payload then we have to take the previous git values runPayload = sdk.ParametersMapMerge(runPayload, m1) isDefaultPayload = false } else { runPayload = sdk.ParametersMapMerge(runPayload, m1, sdk.MapMergeOptions.ExcludeGitParams) } } run.Payload = runPayload run.PipelineParameters = sdk.ParametersMerge(pip.Parameter, n.Context.DefaultPipelineParameters) // Take first value in pipeline parameter list if no default value is set for i := range run.PipelineParameters { if run.PipelineParameters[i].Type == sdk.ListParameter && strings.Contains(run.PipelineParameters[i].Value, ";") { run.PipelineParameters[i].Value = strings.Split(run.PipelineParameters[i].Value, ";")[0] } } next() } run.HookEvent = hookEvent if hookEvent != nil { runPayload = sdk.ParametersMapMerge(runPayload, 
hookEvent.Payload) run.Payload = runPayload run.PipelineParameters = sdk.ParametersMerge(pip.Parameter, n.Context.DefaultPipelineParameters) } run.BuildParameters = append(run.BuildParameters, sdk.Parameter{ Name: "cds.node", Type: sdk.StringParameter, Value: run.WorkflowNodeName, }) run.Manual = manual if manual != nil { payloadStr, err := json.Marshal(manual.Payload) if err != nil { log.Error("processNode> Unable to marshal payload: %v", err) } run.BuildParameters = append(run.BuildParameters, sdk.Parameter{ Name: "payload", Type: sdk.TextParameter, Value: string(payloadStr), }) e := dump.NewDefaultEncoder() e.Formatters = []dump.KeyFormatterFunc{dump.WithDefaultLowerCaseFormatter()} e.ExtraFields.DetailedMap = false e.ExtraFields.DetailedStruct = false e.ExtraFields.Len = false e.ExtraFields.Type = false m1, errm1 := e.ToStringMap(manual.Payload) if errm1 != nil { return report, false, sdk.WrapError(errm1, "processNode> Unable to compute payload") } runPayload = sdk.ParametersMapMerge(runPayload, m1) run.Payload = runPayload run.PipelineParameters = sdk.ParametersMerge(n.Context.DefaultPipelineParameters, manual.PipelineParameters) run.BuildParameters = append(run.BuildParameters, sdk.Parameter{ Name: "cds.triggered_by.email", Type: sdk.StringParameter, Value: manual.User.Email, }, sdk.Parameter{ Name: "cds.triggered_by.fullname", Type: sdk.StringParameter, Value: manual.User.Fullname, }, sdk.Parameter{ Name: "cds.triggered_by.username", Type: sdk.StringParameter, Value: manual.User.Username, }, sdk.Parameter{ Name: "cds.manual", Type: sdk.StringParameter, Value: "true", }) } else { run.BuildParameters = append(run.BuildParameters, sdk.Parameter{ Name: "cds.manual", Type: sdk.StringParameter, Value: "false", }) } cdsStatusParam := sdk.Parameter{ Name: "cds.status", Type: sdk.StringParameter, Value: parentStatus, } run.BuildParameters = sdk.ParametersFromMap( sdk.ParametersMapMerge( sdk.ParametersToMap(run.BuildParameters), 
sdk.ParametersToMap([]sdk.Parameter{cdsStatusParam}), sdk.MapMergeOptions.ExcludeGitParams, ), ) // Process parameters for the jobs runContext := nodeRunContext{} if n.Context.PipelineID != 0 { runContext.Pipeline = wr.Workflow.Pipelines[n.Context.PipelineID] } if n.Context.ApplicationID != 0 { runContext.Application = wr.Workflow.Applications[n.Context.ApplicationID] } if n.Context.EnvironmentID != 0 { runContext.Environment = wr.Workflow.Environments[n.Context.EnvironmentID] } if n.Context.ProjectIntegrationID != 0 { runContext.ProjectIntegration = wr.Workflow.ProjectIntegrations[n.Context.ProjectIntegrationID] } jobParams, errParam := getNodeRunBuildParameters(ctx, proj, wr, run, runContext) if errParam != nil { AddWorkflowRunInfo(wr, true, sdk.SpawnMsg{ ID: sdk.MsgWorkflowError.ID, Args: []interface{}{errParam.Error()}, }) // if there an error -> display it in workflowRunInfo and not stop the launch log.Error("processNode> getNodeRunBuildParameters failed. Project:%s [#%d.%d]%s.%d with payload %v err:%v", proj.Name, wr.Number, subNumber, wr.Workflow.Name, n.ID, run.Payload, errParam) } run.BuildParameters = append(run.BuildParameters, jobParams...) 
// Inherit parameter from parent job if len(parentsIDs) > 0 { _, next := observability.Span(ctx, "workflow.getParentParameters") parentsParams, errPP := getParentParameters(wr, parents, runPayload) next() if errPP != nil { return nil, false, sdk.WrapError(errPP, "processNode> getParentParameters failed") } mapBuildParams := sdk.ParametersToMap(run.BuildParameters) mapParentParams := sdk.ParametersToMap(parentsParams) run.BuildParameters = sdk.ParametersFromMap(sdk.ParametersMapMerge(mapBuildParams, mapParentParams, sdk.MapMergeOptions.ExcludeGitParams)) } //Parse job params to get the VCS infos currentGitValues := map[string]string{} for _, param := range jobParams { switch param.Name { case tagGitHash, tagGitBranch, tagGitTag, tagGitAuthor, tagGitMessage, tagGitRepository, tagGitURL, tagGitHTTPURL: currentGitValues[param.Name] = param.Value } } //Parse job params to get the VCS infos previousGitValues := map[string]string{} for _, param := range run.BuildParameters { switch param.Name { case tagGitHash, tagGitBranch, tagGitTag, tagGitAuthor, tagGitMessage, tagGitRepository, tagGitURL, tagGitHTTPURL: previousGitValues[param.Name] = param.Value } } isRoot := n.ID == wr.Workflow.WorkflowData.Node.ID gitValues := currentGitValues if previousGitValues[tagGitURL] == currentGitValues[tagGitURL] || previousGitValues[tagGitHTTPURL] == currentGitValues[tagGitHTTPURL] { gitValues = previousGitValues } var vcsInfos vcsInfos var app sdk.Application if n.Context.ApplicationID != 0 { app = wr.Workflow.Applications[n.Context.ApplicationID] } var errVcs error vcsServer := repositoriesmanager.GetProjectVCSServer(proj, app.VCSServer) vcsInfos, errVcs = getVCSInfos(ctx, db, store, vcsServer, gitValues, app.Name, app.VCSServer, app.RepositoryFullname, !isRoot, previousGitValues[tagGitRepository]) if errVcs != nil { if strings.Contains(errVcs.Error(), "branch has been deleted") { AddWorkflowRunInfo(wr, true, sdk.SpawnMsg{ ID: sdk.MsgWorkflowRunBranchDeleted.ID, Args: 
[]interface{}{vcsInfos.Branch}, }) } else { AddWorkflowRunInfo(wr, true, sdk.SpawnMsg{ ID: sdk.MsgWorkflowError.ID, Args: []interface{}{errVcs.Error()}, }) } if isRoot { return nil, false, sdk.WrapError(errVcs, "processNode> Cannot get VCSInfos") } return nil, true, nil } // only if it's the root pipeline, we put the git... in the build parameters // this allow user to write some run conditions with .git.var on the root pipeline if isRoot { setValuesGitInBuildParameters(run, vcsInfos) } // Check Run Conditions if hookEvent != nil { hooks := wr.Workflow.WorkflowData.GetHooks() hook, ok := hooks[hookEvent.WorkflowNodeHookUUID] if !ok { return nil, false, sdk.WrapError(sdk.ErrNoHook, "Unable to find hook %s", hookEvent.WorkflowNodeHookUUID) } // Check conditions var params = run.BuildParameters // Define specific destination parameters dest := mapNodes[hook.NodeID] if dest == nil { return nil, false, sdk.WrapError(sdk.ErrWorkflowNodeNotFound, "Unable to find node %d", hook.NodeID) } if !checkNodeRunCondition(wr, dest.Context.Conditions, params) { log.Debug("Avoid trigger workflow from hook %s", hook.UUID) return nil, false, nil } } else { if !checkNodeRunCondition(wr, n.Context.Conditions, run.BuildParameters) { log.Debug("Condition failed %d/%d %+v", wr.ID, n.ID, run.BuildParameters) return nil, false, nil } } if !isRoot { setValuesGitInBuildParameters(run, vcsInfos) } // Tag VCS infos : add in tag only if it does not exist if !wr.TagExists(tagGitRepository) { wr.Tag(tagGitRepository, run.VCSRepository) if run.VCSBranch != "" && run.VCSTag == "" { wr.Tag(tagGitBranch, run.VCSBranch) } if run.VCSTag != "" { wr.Tag(tagGitTag, run.VCSTag) } if len(run.VCSHash) >= 7 { wr.Tag(tagGitHash, run.VCSHash[:7]) } else { wr.Tag(tagGitHash, run.VCSHash) } wr.Tag(tagGitAuthor, vcsInfos.Author) } // Add env tag if n.Context.EnvironmentID != 0 { wr.Tag(tagEnvironment, wr.Workflow.Environments[n.Context.EnvironmentID].Name) } for _, info := range wr.Infos { if info.IsError && 
info.SubNumber == wr.LastSubNumber { run.Status = string(sdk.StatusFail) run.Done = time.Now() break } } if err := insertWorkflowNodeRun(db, run); err != nil { return nil, false, sdk.WrapError(err, "unable to insert run (node id : %d, node name : %s, subnumber : %d)", run.WorkflowNodeID, run.WorkflowNodeName, run.SubNumber) } wr.LastExecution = time.Now() buildParameters := sdk.ParametersToMap(run.BuildParameters) _, okUI := buildParameters["cds.ui.pipeline.run"] _, okID := buildParameters["cds.node.id"] if !okUI || !okID { if !okUI { uiRunURL := fmt.Sprintf("%s/project/%s/workflow/%s/run/%s/node/%d?name=%s", baseUIURL, buildParameters["cds.project"], buildParameters["cds.workflow"], buildParameters["cds.run.number"], run.ID, buildParameters["cds.workflow"]) sdk.AddParameter(&run.BuildParameters, "cds.ui.pipeline.run", sdk.StringParameter, uiRunURL) } if !okID { sdk.AddParameter(&run.BuildParameters, "cds.node.id", sdk.StringParameter, fmt.Sprintf("%d", run.ID)) } if err := UpdateNodeRunBuildParameters(db, run.ID, run.BuildParameters); err != nil { return nil, false, sdk.WrapError(err, "unable to update workflow node run build parameters") } } report.Add(*run) //Update workflow run if wr.WorkflowNodeRuns == nil { wr.WorkflowNodeRuns = make(map[int64][]sdk.WorkflowNodeRun) } wr.WorkflowNodeRuns[run.WorkflowNodeID] = append(wr.WorkflowNodeRuns[run.WorkflowNodeID], *run) wr.LastSubNumber = MaxSubNumber(wr.WorkflowNodeRuns) if err := UpdateWorkflowRun(ctx, db, wr); err != nil { return nil, false, sdk.WrapError(err, "unable to update workflow run") } //Check the context.mutex to know if we are allowed to run it if n.Context.Mutex { //Check if there are builing workflownoderun with the same workflow_node_name for the same workflow mutexQuery := `select count(1) from workflow_node_run join workflow_run on workflow_run.id = workflow_node_run.workflow_run_id join workflow on workflow.id = workflow_run.workflow_id where workflow.id = $1 and workflow_node_run.id <> $2 and 
workflow_node_run.workflow_node_name = $3 and workflow_node_run.status = $4` nbMutex, err := db.SelectInt(mutexQuery, n.WorkflowID, run.ID, n.Name, string(sdk.StatusBuilding)) if err != nil { return nil, false, sdk.WrapError(err, "unable to check mutexes") } if nbMutex > 0 { log.Debug("Noderun %s processed but not executed because of mutex", n.Name) AddWorkflowRunInfo(wr, false, sdk.SpawnMsg{ ID: sdk.MsgWorkflowNodeMutex.ID, Args: []interface{}{n.Name}, }) if err := UpdateWorkflowRun(ctx, db, wr); err != nil { return nil, false, sdk.WrapError(err, "unable to update workflow run") } //Mutex is locked. exit without error return report, false, nil } //Mutex is free, continue } //Execute the node run ! r1, err := execute(ctx, db, store, proj, run, runContext) if err != nil { return nil, false, sdk.WrapError(err, "unable to execute workflow run") } _, _ = report.Merge(r1, nil) return report, true, nil }
processNodeRun
identifier_name
process_node.go
package workflow import ( "context" "encoding/json" "fmt" "strings" "time" "github.com/fsamin/go-dump" "github.com/go-gorp/gorp" "github.com/ovh/cds/engine/api/cache" "github.com/ovh/cds/engine/api/observability" "github.com/ovh/cds/engine/api/repositoriesmanager" "github.com/ovh/cds/sdk" "github.com/ovh/cds/sdk/log" ) func processNodeTriggers(ctx context.Context, db gorp.SqlExecutor, store cache.Store, proj *sdk.Project, wr *sdk.WorkflowRun, mapNodes map[int64]*sdk.Node, parentNodeRun []*sdk.WorkflowNodeRun, node *sdk.Node, parentSubNumber int) (*ProcessorReport, error) { report := new(ProcessorReport) for j := range node.Triggers { t := &node.Triggers[j] var abortTrigger bool if previousRunArray, ok := wr.WorkflowNodeRuns[t.ChildNode.ID]; ok { for _, previousRun := range previousRunArray { if int(previousRun.SubNumber) == parentSubNumber { abortTrigger = true break } } } if !abortTrigger { //Keep the subnumber of the previous node in the graph r1, _, errPwnr := processNodeRun(ctx, db, store, proj, wr, mapNodes, &t.ChildNode, int(parentSubNumber), parentNodeRun, nil, nil) if errPwnr != nil { log.Error("processWorkflowRun> Unable to process node ID=%d: %s", t.ChildNode.ID, errPwnr) AddWorkflowRunInfo(wr, true, sdk.SpawnMsg{ ID: sdk.MsgWorkflowError.ID, Args: []interface{}{errPwnr.Error()}, }) } _, _ = report.Merge(r1, nil) continue } } return report, nil } func processNodeRun(ctx context.Context, db gorp.SqlExecutor, store cache.Store, proj *sdk.Project, wr *sdk.WorkflowRun, mapNodes map[int64]*sdk.Node, n *sdk.Node, subNumber int, parentNodeRuns []*sdk.WorkflowNodeRun, hookEvent *sdk.WorkflowNodeRunHookEvent, manual *sdk.WorkflowNodeRunManual) (*ProcessorReport, bool, error) { report := new(ProcessorReport) exist, errN := nodeRunExist(db, wr.ID, n.ID, wr.Number, subNumber) if errN != nil { return nil, false, sdk.WrapError(errN, "processNodeRun> unable to check if node run exist") } if exist { return nil, false, nil } var end func() ctx, end = 
observability.Span(ctx, "workflow.processNodeRun", observability.Tag(observability.TagWorkflow, wr.Workflow.Name), observability.Tag(observability.TagWorkflowRun, wr.Number), observability.Tag(observability.TagWorkflowNode, n.Name), ) defer end() // Keep old model behaviour on fork and join // Send manual event to join and fork children when it was a manual run and when fork and join don't have run condition if manual == nil && len(parentNodeRuns) == 1 && parentNodeRuns[0].Manual != nil { n := wr.Workflow.WorkflowData.NodeByID(parentNodeRuns[0].WorkflowNodeID) // If fork or JOIN and No run conditions
if (n.Type == sdk.NodeTypeJoin || n.Type == sdk.NodeTypeFork) && (n.Context == nil || (n.Context.Conditions.LuaScript == "" && len(n.Context.Conditions.PlainConditions) == 0)) { manual = parentNodeRuns[0].Manual } } switch n.Type { case sdk.NodeTypeFork, sdk.NodeTypePipeline, sdk.NodeTypeJoin: r1, conditionOK, errT := processNode(ctx, db, store, proj, wr, mapNodes, n, subNumber, parentNodeRuns, hookEvent, manual) if errT != nil { return nil, false, sdk.WrapError(errT, "Unable to processNode") } report.Merge(r1, nil) // nolint return report, conditionOK, nil case sdk.NodeTypeOutGoingHook: r1, conditionOK, errO := processNodeOutGoingHook(ctx, db, store, proj, wr, mapNodes, parentNodeRuns, n, subNumber) if errO != nil { return nil, false, sdk.WrapError(errO, "Unable to processNodeOutGoingHook") } report.Merge(r1, nil) // nolint return report, conditionOK, nil } return nil, false, nil } func processNode(ctx context.Context, db gorp.SqlExecutor, store cache.Store, proj *sdk.Project, wr *sdk.WorkflowRun, mapNodes map[int64]*sdk.Node, n *sdk.Node, subNumber int, parents []*sdk.WorkflowNodeRun, hookEvent *sdk.WorkflowNodeRunHookEvent, manual *sdk.WorkflowNodeRunManual) (*ProcessorReport, bool, error) { report := new(ProcessorReport) //TODO: Check user for manual done but check permission also for automatic trigger and hooks (with system to authenticate a webhook) if n.Context == nil { n.Context = &sdk.NodeContext{} } if n.Context.PipelineID == 0 && n.Type == sdk.NodeTypePipeline { return nil, false, sdk.ErrPipelineNotFound } var runPayload map[string]string var errPayload error runPayload, errPayload = n.Context.DefaultPayloadToMap() if errPayload != nil { return nil, false, sdk.WrapError(errPayload, "Default payload is malformatted") } isDefaultPayload := true // For node with pipeline var stages []sdk.Stage var pip sdk.Pipeline if n.Context.PipelineID > 0 { var has bool pip, has = wr.Workflow.Pipelines[n.Context.PipelineID] if !has { return nil, false, 
fmt.Errorf("pipeline %d not found in workflow", n.Context.PipelineID) } stages = make([]sdk.Stage, len(pip.Stages)) copy(stages, pip.Stages) //If the pipeline has parameter but none are defined on context, use the defaults if len(pip.Parameter) > 0 && len(n.Context.DefaultPipelineParameters) == 0 { n.Context.DefaultPipelineParameters = pip.Parameter } } // Create run run := &sdk.WorkflowNodeRun{ WorkflowID: wr.WorkflowID, LastModified: time.Now(), Start: time.Now(), Number: wr.Number, SubNumber: int64(subNumber), WorkflowRunID: wr.ID, WorkflowNodeID: n.ID, WorkflowNodeName: n.Name, Status: string(sdk.StatusWaiting), Stages: stages, Header: wr.Header, } if run.SubNumber >= wr.LastSubNumber { wr.LastSubNumber = run.SubNumber } if n.Context.ApplicationID != 0 { run.ApplicationID = n.Context.ApplicationID } parentsIDs := make([]int64, len(parents)) for i := range parents { parentsIDs[i] = parents[i].ID } parentStatus := sdk.StatusSuccess.String() run.SourceNodeRuns = parentsIDs if parents != nil { for _, p := range parents { for _, v := range wr.WorkflowNodeRuns { for _, run := range v { if p.ID == run.ID { if run.Status == sdk.StatusFail.String() || run.Status == sdk.StatusStopped.String() { parentStatus = run.Status } } } } } //Merge the payloads from all the sources _, next := observability.Span(ctx, "workflow.processNode.mergePayload") for _, r := range parents { e := dump.NewDefaultEncoder() e.Formatters = []dump.KeyFormatterFunc{dump.WithDefaultLowerCaseFormatter()} e.ExtraFields.DetailedMap = false e.ExtraFields.DetailedStruct = false e.ExtraFields.Len = false e.ExtraFields.Type = false m1, errm1 := e.ToStringMap(r.Payload) if errm1 != nil { AddWorkflowRunInfo(wr, true, sdk.SpawnMsg{ ID: sdk.MsgWorkflowError.ID, Args: []interface{}{errm1.Error()}, }) log.Error("processNode> Unable to compute hook payload: %v", errm1) } if isDefaultPayload { // Check if we try to merge for the first time so try to merge the default payload with the first parent run found // if it 
is the default payload then we have to take the previous git values runPayload = sdk.ParametersMapMerge(runPayload, m1) isDefaultPayload = false } else { runPayload = sdk.ParametersMapMerge(runPayload, m1, sdk.MapMergeOptions.ExcludeGitParams) } } run.Payload = runPayload run.PipelineParameters = sdk.ParametersMerge(pip.Parameter, n.Context.DefaultPipelineParameters) // Take first value in pipeline parameter list if no default value is set for i := range run.PipelineParameters { if run.PipelineParameters[i].Type == sdk.ListParameter && strings.Contains(run.PipelineParameters[i].Value, ";") { run.PipelineParameters[i].Value = strings.Split(run.PipelineParameters[i].Value, ";")[0] } } next() } run.HookEvent = hookEvent if hookEvent != nil { runPayload = sdk.ParametersMapMerge(runPayload, hookEvent.Payload) run.Payload = runPayload run.PipelineParameters = sdk.ParametersMerge(pip.Parameter, n.Context.DefaultPipelineParameters) } run.BuildParameters = append(run.BuildParameters, sdk.Parameter{ Name: "cds.node", Type: sdk.StringParameter, Value: run.WorkflowNodeName, }) run.Manual = manual if manual != nil { payloadStr, err := json.Marshal(manual.Payload) if err != nil { log.Error("processNode> Unable to marshal payload: %v", err) } run.BuildParameters = append(run.BuildParameters, sdk.Parameter{ Name: "payload", Type: sdk.TextParameter, Value: string(payloadStr), }) e := dump.NewDefaultEncoder() e.Formatters = []dump.KeyFormatterFunc{dump.WithDefaultLowerCaseFormatter()} e.ExtraFields.DetailedMap = false e.ExtraFields.DetailedStruct = false e.ExtraFields.Len = false e.ExtraFields.Type = false m1, errm1 := e.ToStringMap(manual.Payload) if errm1 != nil { return report, false, sdk.WrapError(errm1, "processNode> Unable to compute payload") } runPayload = sdk.ParametersMapMerge(runPayload, m1) run.Payload = runPayload run.PipelineParameters = sdk.ParametersMerge(n.Context.DefaultPipelineParameters, manual.PipelineParameters) run.BuildParameters = append(run.BuildParameters, 
sdk.Parameter{ Name: "cds.triggered_by.email", Type: sdk.StringParameter, Value: manual.User.Email, }, sdk.Parameter{ Name: "cds.triggered_by.fullname", Type: sdk.StringParameter, Value: manual.User.Fullname, }, sdk.Parameter{ Name: "cds.triggered_by.username", Type: sdk.StringParameter, Value: manual.User.Username, }, sdk.Parameter{ Name: "cds.manual", Type: sdk.StringParameter, Value: "true", }) } else { run.BuildParameters = append(run.BuildParameters, sdk.Parameter{ Name: "cds.manual", Type: sdk.StringParameter, Value: "false", }) } cdsStatusParam := sdk.Parameter{ Name: "cds.status", Type: sdk.StringParameter, Value: parentStatus, } run.BuildParameters = sdk.ParametersFromMap( sdk.ParametersMapMerge( sdk.ParametersToMap(run.BuildParameters), sdk.ParametersToMap([]sdk.Parameter{cdsStatusParam}), sdk.MapMergeOptions.ExcludeGitParams, ), ) // Process parameters for the jobs runContext := nodeRunContext{} if n.Context.PipelineID != 0 { runContext.Pipeline = wr.Workflow.Pipelines[n.Context.PipelineID] } if n.Context.ApplicationID != 0 { runContext.Application = wr.Workflow.Applications[n.Context.ApplicationID] } if n.Context.EnvironmentID != 0 { runContext.Environment = wr.Workflow.Environments[n.Context.EnvironmentID] } if n.Context.ProjectIntegrationID != 0 { runContext.ProjectIntegration = wr.Workflow.ProjectIntegrations[n.Context.ProjectIntegrationID] } jobParams, errParam := getNodeRunBuildParameters(ctx, proj, wr, run, runContext) if errParam != nil { AddWorkflowRunInfo(wr, true, sdk.SpawnMsg{ ID: sdk.MsgWorkflowError.ID, Args: []interface{}{errParam.Error()}, }) // if there an error -> display it in workflowRunInfo and not stop the launch log.Error("processNode> getNodeRunBuildParameters failed. Project:%s [#%d.%d]%s.%d with payload %v err:%v", proj.Name, wr.Number, subNumber, wr.Workflow.Name, n.ID, run.Payload, errParam) } run.BuildParameters = append(run.BuildParameters, jobParams...) 
// Inherit parameter from parent job if len(parentsIDs) > 0 { _, next := observability.Span(ctx, "workflow.getParentParameters") parentsParams, errPP := getParentParameters(wr, parents, runPayload) next() if errPP != nil { return nil, false, sdk.WrapError(errPP, "processNode> getParentParameters failed") } mapBuildParams := sdk.ParametersToMap(run.BuildParameters) mapParentParams := sdk.ParametersToMap(parentsParams) run.BuildParameters = sdk.ParametersFromMap(sdk.ParametersMapMerge(mapBuildParams, mapParentParams, sdk.MapMergeOptions.ExcludeGitParams)) } //Parse job params to get the VCS infos currentGitValues := map[string]string{} for _, param := range jobParams { switch param.Name { case tagGitHash, tagGitBranch, tagGitTag, tagGitAuthor, tagGitMessage, tagGitRepository, tagGitURL, tagGitHTTPURL: currentGitValues[param.Name] = param.Value } } //Parse job params to get the VCS infos previousGitValues := map[string]string{} for _, param := range run.BuildParameters { switch param.Name { case tagGitHash, tagGitBranch, tagGitTag, tagGitAuthor, tagGitMessage, tagGitRepository, tagGitURL, tagGitHTTPURL: previousGitValues[param.Name] = param.Value } } isRoot := n.ID == wr.Workflow.WorkflowData.Node.ID gitValues := currentGitValues if previousGitValues[tagGitURL] == currentGitValues[tagGitURL] || previousGitValues[tagGitHTTPURL] == currentGitValues[tagGitHTTPURL] { gitValues = previousGitValues } var vcsInfos vcsInfos var app sdk.Application if n.Context.ApplicationID != 0 { app = wr.Workflow.Applications[n.Context.ApplicationID] } var errVcs error vcsServer := repositoriesmanager.GetProjectVCSServer(proj, app.VCSServer) vcsInfos, errVcs = getVCSInfos(ctx, db, store, vcsServer, gitValues, app.Name, app.VCSServer, app.RepositoryFullname, !isRoot, previousGitValues[tagGitRepository]) if errVcs != nil { if strings.Contains(errVcs.Error(), "branch has been deleted") { AddWorkflowRunInfo(wr, true, sdk.SpawnMsg{ ID: sdk.MsgWorkflowRunBranchDeleted.ID, Args: 
[]interface{}{vcsInfos.Branch}, }) } else { AddWorkflowRunInfo(wr, true, sdk.SpawnMsg{ ID: sdk.MsgWorkflowError.ID, Args: []interface{}{errVcs.Error()}, }) } if isRoot { return nil, false, sdk.WrapError(errVcs, "processNode> Cannot get VCSInfos") } return nil, true, nil } // only if it's the root pipeline, we put the git... in the build parameters // this allow user to write some run conditions with .git.var on the root pipeline if isRoot { setValuesGitInBuildParameters(run, vcsInfos) } // Check Run Conditions if hookEvent != nil { hooks := wr.Workflow.WorkflowData.GetHooks() hook, ok := hooks[hookEvent.WorkflowNodeHookUUID] if !ok { return nil, false, sdk.WrapError(sdk.ErrNoHook, "Unable to find hook %s", hookEvent.WorkflowNodeHookUUID) } // Check conditions var params = run.BuildParameters // Define specific destination parameters dest := mapNodes[hook.NodeID] if dest == nil { return nil, false, sdk.WrapError(sdk.ErrWorkflowNodeNotFound, "Unable to find node %d", hook.NodeID) } if !checkNodeRunCondition(wr, dest.Context.Conditions, params) { log.Debug("Avoid trigger workflow from hook %s", hook.UUID) return nil, false, nil } } else { if !checkNodeRunCondition(wr, n.Context.Conditions, run.BuildParameters) { log.Debug("Condition failed %d/%d %+v", wr.ID, n.ID, run.BuildParameters) return nil, false, nil } } if !isRoot { setValuesGitInBuildParameters(run, vcsInfos) } // Tag VCS infos : add in tag only if it does not exist if !wr.TagExists(tagGitRepository) { wr.Tag(tagGitRepository, run.VCSRepository) if run.VCSBranch != "" && run.VCSTag == "" { wr.Tag(tagGitBranch, run.VCSBranch) } if run.VCSTag != "" { wr.Tag(tagGitTag, run.VCSTag) } if len(run.VCSHash) >= 7 { wr.Tag(tagGitHash, run.VCSHash[:7]) } else { wr.Tag(tagGitHash, run.VCSHash) } wr.Tag(tagGitAuthor, vcsInfos.Author) } // Add env tag if n.Context.EnvironmentID != 0 { wr.Tag(tagEnvironment, wr.Workflow.Environments[n.Context.EnvironmentID].Name) } for _, info := range wr.Infos { if info.IsError && 
info.SubNumber == wr.LastSubNumber { run.Status = string(sdk.StatusFail) run.Done = time.Now() break } } if err := insertWorkflowNodeRun(db, run); err != nil { return nil, false, sdk.WrapError(err, "unable to insert run (node id : %d, node name : %s, subnumber : %d)", run.WorkflowNodeID, run.WorkflowNodeName, run.SubNumber) } wr.LastExecution = time.Now() buildParameters := sdk.ParametersToMap(run.BuildParameters) _, okUI := buildParameters["cds.ui.pipeline.run"] _, okID := buildParameters["cds.node.id"] if !okUI || !okID { if !okUI { uiRunURL := fmt.Sprintf("%s/project/%s/workflow/%s/run/%s/node/%d?name=%s", baseUIURL, buildParameters["cds.project"], buildParameters["cds.workflow"], buildParameters["cds.run.number"], run.ID, buildParameters["cds.workflow"]) sdk.AddParameter(&run.BuildParameters, "cds.ui.pipeline.run", sdk.StringParameter, uiRunURL) } if !okID { sdk.AddParameter(&run.BuildParameters, "cds.node.id", sdk.StringParameter, fmt.Sprintf("%d", run.ID)) } if err := UpdateNodeRunBuildParameters(db, run.ID, run.BuildParameters); err != nil { return nil, false, sdk.WrapError(err, "unable to update workflow node run build parameters") } } report.Add(*run) //Update workflow run if wr.WorkflowNodeRuns == nil { wr.WorkflowNodeRuns = make(map[int64][]sdk.WorkflowNodeRun) } wr.WorkflowNodeRuns[run.WorkflowNodeID] = append(wr.WorkflowNodeRuns[run.WorkflowNodeID], *run) wr.LastSubNumber = MaxSubNumber(wr.WorkflowNodeRuns) if err := UpdateWorkflowRun(ctx, db, wr); err != nil { return nil, false, sdk.WrapError(err, "unable to update workflow run") } //Check the context.mutex to know if we are allowed to run it if n.Context.Mutex { //Check if there are builing workflownoderun with the same workflow_node_name for the same workflow mutexQuery := `select count(1) from workflow_node_run join workflow_run on workflow_run.id = workflow_node_run.workflow_run_id join workflow on workflow.id = workflow_run.workflow_id where workflow.id = $1 and workflow_node_run.id <> $2 and 
workflow_node_run.workflow_node_name = $3 and workflow_node_run.status = $4` nbMutex, err := db.SelectInt(mutexQuery, n.WorkflowID, run.ID, n.Name, string(sdk.StatusBuilding)) if err != nil { return nil, false, sdk.WrapError(err, "unable to check mutexes") } if nbMutex > 0 { log.Debug("Noderun %s processed but not executed because of mutex", n.Name) AddWorkflowRunInfo(wr, false, sdk.SpawnMsg{ ID: sdk.MsgWorkflowNodeMutex.ID, Args: []interface{}{n.Name}, }) if err := UpdateWorkflowRun(ctx, db, wr); err != nil { return nil, false, sdk.WrapError(err, "unable to update workflow run") } //Mutex is locked. exit without error return report, false, nil } //Mutex is free, continue } //Execute the node run ! r1, err := execute(ctx, db, store, proj, run, runContext) if err != nil { return nil, false, sdk.WrapError(err, "unable to execute workflow run") } _, _ = report.Merge(r1, nil) return report, true, nil }
random_line_split
client.go
package cloudformation import ( "context" "encoding/json" "fmt" "regexp" "strings" "time" "github.com/alphagov/gsp/components/service-operator/internal/aws/sdk" "github.com/alphagov/gsp/components/service-operator/internal/object" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/cloudformation" "github.com/aws/aws-sdk-go/service/secretsmanager" goformation "github.com/awslabs/goformation/v4/cloudformation" goformationtags "github.com/awslabs/goformation/v4/cloudformation/tags" goformationrds "github.com/awslabs/goformation/v4/cloudformation/rds" goformationiam "github.com/awslabs/goformation/v4/cloudformation/iam" goformations3 "github.com/awslabs/goformation/v4/cloudformation/s3" goformationsqs "github.com/awslabs/goformation/v4/cloudformation/sqs" goformationecr "github.com/awslabs/goformation/v4/cloudformation/ecr" goformationsecretsmanager "github.com/awslabs/goformation/v4/cloudformation/secretsmanager" goformationelasticache "github.com/awslabs/goformation/v4/cloudformation/elasticache" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // Alias types from the various cloudformation packages so we can access // relevant parts via this package for convenience type State = cloudformation.Stack type StateEvent = cloudformation.StackEvent type Output = cloudformation.Output type DescribeStacksInput = cloudformation.DescribeStacksInput type CreateStackInput = cloudformation.CreateStackInput type UpdateStackInput = cloudformation.UpdateStackInput type DeleteStackInput = cloudformation.DeleteStackInput type DescribeStackEventsInput = cloudformation.DescribeStackEventsInput type DescribeStacksOutput = cloudformation.DescribeStacksOutput type GetSecretValueInput = secretsmanager.GetSecretValueInput type Parameter = cloudformation.Parameter type Template = goformation.Template type Tag = goformationtags.Tag type AWSRDSDBCluster = goformationrds.DBCluster type AWSRDSDBInstance = goformationrds.DBInstance type 
AWSRDSDBClusterParameterGroup = goformationrds.DBClusterParameterGroup type AWSRDSDBParameterGroup = goformationrds.DBParameterGroup type AWSIAMPolicy = goformationiam.Policy type AWSIAMRole = goformationiam.Role type AWSS3Bucket = goformations3.Bucket type AWSSecretsManagerSecret = goformationsecretsmanager.Secret type AWSSecretsManagerSecretTargetAttachment = goformationsecretsmanager.SecretTargetAttachment type AWSSQSQueue = goformationsqs.Queue type GenerateSecretString = goformationsecretsmanager.Secret_GenerateSecretString type AWSECRRepository = goformationecr.Repository type AWSECRRepository_LifecyclePolicy = goformationecr.Repository_LifecyclePolicy type AWSElastiCacheReplicationGroup = goformationelasticache.ReplicationGroup var NewTemplate = goformation.NewTemplate var Join = goformation.Join var GetAtt = goformation.GetAtt var Ref = goformation.Ref var Sub = goformation.Sub const CreateInProgress = cloudformation.StackStatusCreateInProgress const DeleteInProgress = cloudformation.StackStatusDeleteInProgress const UpdateInProgress = cloudformation.StackStatusUpdateInProgress const ReviewInProgress = cloudformation.StackStatusReviewInProgress const CreateComplete = cloudformation.StackStatusCreateComplete const DeleteComplete = cloudformation.StackStatusDeleteComplete const UpdateComplete = cloudformation.StackStatusUpdateComplete const CreateFailed = cloudformation.StackStatusCreateFailed const DeleteFailed = cloudformation.StackStatusDeleteFailed const RollbackFailed = cloudformation.StackStatusRollbackFailed const RollbackInProgress = cloudformation.StackStatusRollbackInProgress const UpdateRollbackFailed = cloudformation.StackStatusUpdateRollbackFailed const RollbackComplete = cloudformation.StackStatusRollbackComplete const UpdateRollbackInProgress = cloudformation.StackStatusRollbackInProgress const UpdateRollbackComplete = cloudformation.StackStatusUpdateRollbackComplete const UpdateRollbackCompleteCleanupInProgress = 
cloudformation.StackStatusUpdateRollbackCompleteCleanupInProgress var ( // capabilities required by cloudformation capabilities = []*string{ aws.String("CAPABILITY_NAMED_IAM"), } // ErrStackNotFound returned when stack does not exist, or has been deleted ErrStackNotFound = fmt.Errorf("STACK_NOT_FOUND") // NoUpdatesErrMatch is string to match in error from aws to detect if nothing to update NoUpdatesErrMatch = "No updates" // NoExistErrMatch is a string to match if stack does not exist NoExistErrMatch = "does not exist" ) // Outputs is used as a more friendly version of cloudformation.Output type Outputs map[string]string // Client performs cloudformation operations on objects that implement the Stack interface type Client struct { // ClusterName is used to prefix any generated names to avoid clashes ClusterName string // Client is the AWS SDK Client implementation to use Client sdk.Client // PollingInterval is the duration between calls to check state when waiting for apply/destroy to complete PollingInterval time.Duration } // Apply reconciles the state of the remote cloudformation stack and blocks // until the stack is no longer in an creating/applying state or a ctx timeout is hit // Calls should be retried if DeadlineExceeded errors are hit // Returns any outputs on successful apply. // Will update stack with current status func (r *Client) Apply(ctx context.Context, stack Stack, params ...*Parameter) (Outputs, error) { // always update stack status defer r.updateStatus(stack) // check if exists exists, err := r.exists(ctx, stack) if err != nil { return nil, err } if !exists { err := r.create(ctx, stack, params...) if err != nil { return nil, err } } _, err = r.waitUntilCompleteState(ctx, stack) if err != nil { return nil, err } err = r.update(ctx, stack, params...) 
if err != nil { return nil, err } state, err := r.waitUntilCompleteState(ctx, stack) if err != nil { return nil, err } return r.resolveOutputs(ctx, state.Outputs) } // validateParams checks for any unset template parameters func (r *Client) validateTemplateParams(t *Template, params []*Parameter) error { missing := map[string]interface{}{} // copy all wanted params into missing for k, v := range t.Parameters { missing[k] = v } // remove items from missing list as found for wantedKey := range t.Parameters { for _, param := range params { if param.ParameterKey == nil { continue } // phew found it if *param.ParameterKey == wantedKey { delete(missing, wantedKey) break } } } // if any left, then we have an issue if len(missing) > 0 { keys := []string{} for k := range missing { keys = append(keys, k) } keysCSV := strings.Join(keys, ",") return fmt.Errorf("missing required input parameters: [%s]", keysCSV) } return nil } // create initiates a cloudformation create passing in the given params func (r *Client) create(ctx context.Context, stack Stack, params ...*Parameter) error { // fetch and validate template t, err := stack.GetStackTemplate() if err != nil { return err } err = r.validateTemplateParams(t, params) if err != nil { return err } yaml, err := t.YAML() if err != nil { return err } stackPolicy, err := getStackPolicy(stack) if err != nil { return err } _, err = r.Client.CreateStackWithContext(ctx, &CreateStackInput{ Capabilities: capabilities, TemplateBody: aws.String(string(yaml)), StackName: aws.String(stack.GetStackName()), StackPolicyBody: stackPolicy, Parameters: params, }) if err != nil { return err } return nil } // Update the stack and wait for update to complete. 
func (r *Client) update(ctx context.Context, stack Stack, params ...*Parameter) error { // fetch and validate template params t, err := stack.GetStackTemplate() if err != nil { return err } err = r.validateTemplateParams(t, params) if err != nil { return err } yaml, err := t.YAML() if err != nil { return err } stackPolicy, err := getStackPolicy(stack) if err != nil { return err } _, err = r.Client.UpdateStackWithContext(ctx, &UpdateStackInput{ Capabilities: capabilities, TemplateBody: aws.String(string(yaml)), StackName: aws.String(stack.GetStackName()), StackPolicyBody: stackPolicy, Parameters: params, }) if err != nil && !IsNoUpdateError(err) { return err } return nil } // Destroy will attempt to deprovision the cloudformation stack and block until complete or the ctx Deadline expires // Calls should be retried if DeadlineExceeded errors are hit // Will update stack with current status func (r *Client) Destroy(ctx context.Context, stack Stack) error { // always update stack status defer r.updateStatus(stack) // fetch current state state, err := r.get(ctx, stack) if err == ErrStackNotFound { // resource is already deleted (or never existsed) // so we're done here return nil } else if err != nil { // failed to get stack status return err } if *state.StackStatus == DeleteComplete { // resource already deleted return nil } // trigger a delete unless we're already in a deleting state if *state.StackStatus != DeleteInProgress { _, err := r.Client.DeleteStackWithContext(ctx, &DeleteStackInput{ StackName: aws.String(stack.GetStackName()), }) if err != nil { return err } } _, err = r.waitUntilDestroyedState(ctx, stack) if err != nil { return err } return nil } // Outputs fetches the cloudformation outputs for the given stack // Returns ErrStackNotFound if stack does not exist func (r *Client) Outputs(ctx context.Context, stack Stack) (Outputs, error) { state, err := r.get(ctx, stack) if err != nil { return nil, err } return r.resolveOutputs(ctx, state.Outputs) } // 
resolveOutputs returns cloudformation outputs in a map format and resolves // any values that are stored in AWS Secrets Manager func (r *Client) resolveOutputs(ctx context.Context, list []*Output) (Outputs, error) { outputs := Outputs{} for _, item := range list { if item.OutputKey == nil || item.OutputValue == nil { continue } key := *item.OutputKey value := *item.OutputValue // we automatically resolve references to AWS Secrets Manager // secrets here, so that we are able to make use of encrypted // sensitive values in cloudformation templates if strings.HasPrefix(value, "{{resolve:secretsmanager:") { // extract ARN and key name from reference secretARNMatcher := regexp.MustCompile(`{{resolve:secretsmanager:(.*):SecretString:(.*)}}`) matches := secretARNMatcher.FindStringSubmatch(value) if len(matches) == 0 { return nil, fmt.Errorf("failed to extract ARN and key name from secretsmanager value: %s", secretARNMatcher) } arn := matches[1] subkey := matches[2] v, err := r.Client.GetSecretValueWithContext(ctx, &GetSecretValueInput{ SecretId: aws.String(arn), }) if err != nil { return nil, err } if v.SecretString == nil { return nil, fmt.Errorf("unexpected nil value in SecretString of %s", arn) } secrets := map[string]interface{}{} err = json.Unmarshal([]byte(*v.SecretString), &secrets) if err != nil { return nil, err } subval, haveSubkey := secrets[subkey] if !haveSubkey { return nil, fmt.Errorf("could not find subkey %s in SecretString of %s", subkey, arn) } subvalString, ok := subval.(string) if !ok { return nil, fmt.Errorf("subval at subkey %s in SecretString of %s is not a string", subkey, arn) } value = subvalString } outputs[key] = value } return outputs, nil } // get fetches the cloudformation stack state // Returns ErrStackNotFound if stack does not exist func (r *Client) get(ctx context.Context, stack Stack) (*State, error) { describeOutput, err := r.Client.DescribeStacksWithContext(ctx, &DescribeStacksInput{ StackName: aws.String(stack.GetStackName()), }) if 
err != nil { if IsNotFoundError(err) { return nil, ErrStackNotFound } return nil, err } if describeOutput == nil { return nil, fmt.Errorf("describeOutput was nil, potential issue with AWS Client") } if len(describeOutput.Stacks) == 0 { return nil, fmt.Errorf("describeOutput contained no Stacks, potential issue with AWS Client") } if len(describeOutput.Stacks) > 1 { return nil, fmt.Errorf("describeOutput contained multiple Stacks which is unexpected when calling with StackName, potential issue with AWS Client") } state := describeOutput.Stacks[0] if state.StackStatus == nil { return nil, fmt.Errorf("describeOutput contained a nil StackStatus, potential issue with AWS Client") } return state, nil } // update mutates the stack's status with current state, events and any // whitelisted outputs. ignores any errors encountered and just updates // whatever it can with the intension of getting as much info visible as // possible even under error conditions. func (r *Client) updateStatus(stack Stack) { // use a fresh context or we might not be able to update status after // deadline is hit, but this feels a little wrong ctx := context.Background() state, _ := r.get(ctx, stack) events, _ := r.events(ctx, stack) s := stack.GetStatus() // update aws specific state if state != nil { if state.StackId != nil { s.AWS.ID = *state.StackId } if state.StackName != nil { s.AWS.Name = *state.StackName } if state.StackStatus != nil { s.AWS.Status = *state.StackStatus } if state.StackStatusReason != nil { s.AWS.Reason = *state.StackStatusReason } } // add any event details if len(events) > 0 { s.AWS.Events = []object.AWSEvent{} for _, event := range events { reason := "-" if event.ResourceStatusReason != nil { reason = *event.ResourceStatusReason } s.AWS.Events = append(s.AWS.Events, object.AWSEvent{ Status: *event.ResourceStatus, Reason: reason, Time: &metav1.Time{Time: *event.Timestamp}, }) } } // update generic state switch s.AWS.Status { case DeleteFailed, CreateFailed, 
RollbackFailed, UpdateRollbackFailed, RollbackComplete, UpdateRollbackComplete: s.State = object.ErrorState case DeleteInProgress, DeleteComplete: s.State = object.DeletingState case CreateComplete, UpdateComplete: s.State = object.ReadyState default: s.State = object.ReconcilingState } // if object implements whitelisting of output keys, then update info if w, ok := stack.(StackOutputWhitelister); ok { if s.AWS.Info == nil { s.AWS.Info = map[string]string{} } outputs, _ := r.Outputs(ctx, stack) for _, whitelistedKey := range w.GetStackOutputWhitelist() { if val, ok := outputs[whitelistedKey]; ok { s.AWS.Info[whitelistedKey] = val } } } stack.SetStatus(s) } func (r *Client) events(ctx context.Context, stack Stack) ([]*StateEvent, error) { eventsOutput, err := r.Client.DescribeStackEventsWithContext(ctx, &DescribeStackEventsInput{ StackName: aws.String(stack.GetStackName()), }) if err != nil { return nil, err } if eventsOutput == nil { return []*StateEvent{}, nil } return eventsOutput.StackEvents, nil } // Exists checks if the stack has been provisioned func (r *Client) exists(ctx context.Context, stack Stack) (bool, error) { _, err := r.get(ctx, stack) if err == ErrStackNotFound { return false, nil } else if err != nil { return false, err } return true, nil } func (r *Client) waitUntilCompleteState(ctx context.Context, stack Stack) (*State, error) { return r.waitUntilState(ctx, stack, []string{ CreateComplete, UpdateComplete, UpdateRollbackComplete, RollbackComplete, }) } func (r *Client) waitUntilDestroyedState(ctx context.Context, stack Stack) (*State, error) { return r.waitUntilState(ctx, stack, []string{ DeleteComplete, }) } func (r *Client) waitUntilState(ctx context.Context, stack Stack, desiredStates []string) (*State, error) { for { select { case <-ctx.Done(): return nil, context.DeadlineExceeded default: state, err := r.get(ctx, stack) if IsNotFoundError(err) && in(DeleteComplete, desiredStates) { // If we are waiting for DeleteComplete state and the // 
stack has gone missing, consider this DeleteComplete return &State{}, nil } else if err != nil { return nil, err } if in(*state.StackStatus, desiredStates) { return state, nil } } time.Sleep(r.PollingInterval) } } func in(needle string, haystack []string) bool { for _, s := range haystack { if needle == s { return true } } return false } func IsNoUpdateError(err error) bool { if err == nil
return strings.Contains(err.Error(), NoUpdatesErrMatch) } func IsNotFoundError(err error) bool { if awsErr, ok := err.(awserr.Error); ok { if awsErr.Code() == "ResourceNotFoundException" { return true } else if awsErr.Code() == "ValidationError" && strings.Contains(awsErr.Message(), NoExistErrMatch) { return true } } return false } func getStackPolicy(stack Stack) (*string, error) { var stackPolicy *string = nil if stackPolicyProvider, ok := stack.(StackPolicyProvider); ok { policy, err := json.Marshal(stackPolicyProvider.GetStackPolicy()) if err != nil { return nil, err } stackPolicy = aws.String(string(policy)) } return stackPolicy, nil }
{ return false }
conditional_block
client.go
package cloudformation import ( "context" "encoding/json" "fmt" "regexp" "strings" "time" "github.com/alphagov/gsp/components/service-operator/internal/aws/sdk" "github.com/alphagov/gsp/components/service-operator/internal/object" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/cloudformation" "github.com/aws/aws-sdk-go/service/secretsmanager" goformation "github.com/awslabs/goformation/v4/cloudformation" goformationtags "github.com/awslabs/goformation/v4/cloudformation/tags" goformationrds "github.com/awslabs/goformation/v4/cloudformation/rds" goformationiam "github.com/awslabs/goformation/v4/cloudformation/iam" goformations3 "github.com/awslabs/goformation/v4/cloudformation/s3" goformationsqs "github.com/awslabs/goformation/v4/cloudformation/sqs" goformationecr "github.com/awslabs/goformation/v4/cloudformation/ecr" goformationsecretsmanager "github.com/awslabs/goformation/v4/cloudformation/secretsmanager" goformationelasticache "github.com/awslabs/goformation/v4/cloudformation/elasticache" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // Alias types from the various cloudformation packages so we can access // relevant parts via this package for convenience type State = cloudformation.Stack type StateEvent = cloudformation.StackEvent type Output = cloudformation.Output type DescribeStacksInput = cloudformation.DescribeStacksInput type CreateStackInput = cloudformation.CreateStackInput type UpdateStackInput = cloudformation.UpdateStackInput type DeleteStackInput = cloudformation.DeleteStackInput type DescribeStackEventsInput = cloudformation.DescribeStackEventsInput type DescribeStacksOutput = cloudformation.DescribeStacksOutput type GetSecretValueInput = secretsmanager.GetSecretValueInput type Parameter = cloudformation.Parameter type Template = goformation.Template type Tag = goformationtags.Tag type AWSRDSDBCluster = goformationrds.DBCluster type AWSRDSDBInstance = goformationrds.DBInstance type 
AWSRDSDBClusterParameterGroup = goformationrds.DBClusterParameterGroup type AWSRDSDBParameterGroup = goformationrds.DBParameterGroup type AWSIAMPolicy = goformationiam.Policy type AWSIAMRole = goformationiam.Role type AWSS3Bucket = goformations3.Bucket type AWSSecretsManagerSecret = goformationsecretsmanager.Secret type AWSSecretsManagerSecretTargetAttachment = goformationsecretsmanager.SecretTargetAttachment type AWSSQSQueue = goformationsqs.Queue type GenerateSecretString = goformationsecretsmanager.Secret_GenerateSecretString type AWSECRRepository = goformationecr.Repository type AWSECRRepository_LifecyclePolicy = goformationecr.Repository_LifecyclePolicy type AWSElastiCacheReplicationGroup = goformationelasticache.ReplicationGroup var NewTemplate = goformation.NewTemplate var Join = goformation.Join var GetAtt = goformation.GetAtt var Ref = goformation.Ref var Sub = goformation.Sub const CreateInProgress = cloudformation.StackStatusCreateInProgress const DeleteInProgress = cloudformation.StackStatusDeleteInProgress const UpdateInProgress = cloudformation.StackStatusUpdateInProgress const ReviewInProgress = cloudformation.StackStatusReviewInProgress const CreateComplete = cloudformation.StackStatusCreateComplete const DeleteComplete = cloudformation.StackStatusDeleteComplete const UpdateComplete = cloudformation.StackStatusUpdateComplete const CreateFailed = cloudformation.StackStatusCreateFailed const DeleteFailed = cloudformation.StackStatusDeleteFailed const RollbackFailed = cloudformation.StackStatusRollbackFailed const RollbackInProgress = cloudformation.StackStatusRollbackInProgress const UpdateRollbackFailed = cloudformation.StackStatusUpdateRollbackFailed const RollbackComplete = cloudformation.StackStatusRollbackComplete const UpdateRollbackInProgress = cloudformation.StackStatusRollbackInProgress const UpdateRollbackComplete = cloudformation.StackStatusUpdateRollbackComplete const UpdateRollbackCompleteCleanupInProgress = 
cloudformation.StackStatusUpdateRollbackCompleteCleanupInProgress var ( // capabilities required by cloudformation capabilities = []*string{ aws.String("CAPABILITY_NAMED_IAM"), } // ErrStackNotFound returned when stack does not exist, or has been deleted ErrStackNotFound = fmt.Errorf("STACK_NOT_FOUND") // NoUpdatesErrMatch is string to match in error from aws to detect if nothing to update NoUpdatesErrMatch = "No updates" // NoExistErrMatch is a string to match if stack does not exist NoExistErrMatch = "does not exist" ) // Outputs is used as a more friendly version of cloudformation.Output type Outputs map[string]string // Client performs cloudformation operations on objects that implement the Stack interface type Client struct { // ClusterName is used to prefix any generated names to avoid clashes ClusterName string // Client is the AWS SDK Client implementation to use Client sdk.Client // PollingInterval is the duration between calls to check state when waiting for apply/destroy to complete PollingInterval time.Duration } // Apply reconciles the state of the remote cloudformation stack and blocks // until the stack is no longer in an creating/applying state or a ctx timeout is hit // Calls should be retried if DeadlineExceeded errors are hit // Returns any outputs on successful apply. // Will update stack with current status func (r *Client) Apply(ctx context.Context, stack Stack, params ...*Parameter) (Outputs, error) { // always update stack status defer r.updateStatus(stack) // check if exists exists, err := r.exists(ctx, stack) if err != nil { return nil, err } if !exists { err := r.create(ctx, stack, params...) if err != nil { return nil, err } } _, err = r.waitUntilCompleteState(ctx, stack) if err != nil { return nil, err } err = r.update(ctx, stack, params...) 
if err != nil { return nil, err } state, err := r.waitUntilCompleteState(ctx, stack) if err != nil { return nil, err } return r.resolveOutputs(ctx, state.Outputs) } // validateParams checks for any unset template parameters func (r *Client) validateTemplateParams(t *Template, params []*Parameter) error { missing := map[string]interface{}{} // copy all wanted params into missing for k, v := range t.Parameters { missing[k] = v } // remove items from missing list as found for wantedKey := range t.Parameters { for _, param := range params { if param.ParameterKey == nil { continue } // phew found it if *param.ParameterKey == wantedKey { delete(missing, wantedKey) break } } } // if any left, then we have an issue if len(missing) > 0 { keys := []string{} for k := range missing { keys = append(keys, k) } keysCSV := strings.Join(keys, ",") return fmt.Errorf("missing required input parameters: [%s]", keysCSV) } return nil } // create initiates a cloudformation create passing in the given params func (r *Client) create(ctx context.Context, stack Stack, params ...*Parameter) error { // fetch and validate template t, err := stack.GetStackTemplate() if err != nil { return err } err = r.validateTemplateParams(t, params) if err != nil { return err } yaml, err := t.YAML() if err != nil { return err } stackPolicy, err := getStackPolicy(stack) if err != nil { return err } _, err = r.Client.CreateStackWithContext(ctx, &CreateStackInput{ Capabilities: capabilities, TemplateBody: aws.String(string(yaml)), StackName: aws.String(stack.GetStackName()), StackPolicyBody: stackPolicy, Parameters: params, }) if err != nil { return err } return nil } // Update the stack and wait for update to complete. 
func (r *Client) update(ctx context.Context, stack Stack, params ...*Parameter) error { // fetch and validate template params t, err := stack.GetStackTemplate() if err != nil { return err } err = r.validateTemplateParams(t, params) if err != nil { return err } yaml, err := t.YAML() if err != nil { return err } stackPolicy, err := getStackPolicy(stack) if err != nil { return err } _, err = r.Client.UpdateStackWithContext(ctx, &UpdateStackInput{ Capabilities: capabilities, TemplateBody: aws.String(string(yaml)), StackName: aws.String(stack.GetStackName()), StackPolicyBody: stackPolicy, Parameters: params, }) if err != nil && !IsNoUpdateError(err) { return err } return nil } // Destroy will attempt to deprovision the cloudformation stack and block until complete or the ctx Deadline expires // Calls should be retried if DeadlineExceeded errors are hit // Will update stack with current status func (r *Client) Destroy(ctx context.Context, stack Stack) error { // always update stack status defer r.updateStatus(stack) // fetch current state state, err := r.get(ctx, stack) if err == ErrStackNotFound { // resource is already deleted (or never existsed) // so we're done here return nil } else if err != nil { // failed to get stack status return err } if *state.StackStatus == DeleteComplete { // resource already deleted return nil } // trigger a delete unless we're already in a deleting state if *state.StackStatus != DeleteInProgress { _, err := r.Client.DeleteStackWithContext(ctx, &DeleteStackInput{ StackName: aws.String(stack.GetStackName()), }) if err != nil { return err } } _, err = r.waitUntilDestroyedState(ctx, stack) if err != nil { return err } return nil } // Outputs fetches the cloudformation outputs for the given stack // Returns ErrStackNotFound if stack does not exist func (r *Client)
(ctx context.Context, stack Stack) (Outputs, error) { state, err := r.get(ctx, stack) if err != nil { return nil, err } return r.resolveOutputs(ctx, state.Outputs) } // resolveOutputs returns cloudformation outputs in a map format and resolves // any values that are stored in AWS Secrets Manager func (r *Client) resolveOutputs(ctx context.Context, list []*Output) (Outputs, error) { outputs := Outputs{} for _, item := range list { if item.OutputKey == nil || item.OutputValue == nil { continue } key := *item.OutputKey value := *item.OutputValue // we automatically resolve references to AWS Secrets Manager // secrets here, so that we are able to make use of encrypted // sensitive values in cloudformation templates if strings.HasPrefix(value, "{{resolve:secretsmanager:") { // extract ARN and key name from reference secretARNMatcher := regexp.MustCompile(`{{resolve:secretsmanager:(.*):SecretString:(.*)}}`) matches := secretARNMatcher.FindStringSubmatch(value) if len(matches) == 0 { return nil, fmt.Errorf("failed to extract ARN and key name from secretsmanager value: %s", secretARNMatcher) } arn := matches[1] subkey := matches[2] v, err := r.Client.GetSecretValueWithContext(ctx, &GetSecretValueInput{ SecretId: aws.String(arn), }) if err != nil { return nil, err } if v.SecretString == nil { return nil, fmt.Errorf("unexpected nil value in SecretString of %s", arn) } secrets := map[string]interface{}{} err = json.Unmarshal([]byte(*v.SecretString), &secrets) if err != nil { return nil, err } subval, haveSubkey := secrets[subkey] if !haveSubkey { return nil, fmt.Errorf("could not find subkey %s in SecretString of %s", subkey, arn) } subvalString, ok := subval.(string) if !ok { return nil, fmt.Errorf("subval at subkey %s in SecretString of %s is not a string", subkey, arn) } value = subvalString } outputs[key] = value } return outputs, nil } // get fetches the cloudformation stack state // Returns ErrStackNotFound if stack does not exist func (r *Client) get(ctx 
context.Context, stack Stack) (*State, error) { describeOutput, err := r.Client.DescribeStacksWithContext(ctx, &DescribeStacksInput{ StackName: aws.String(stack.GetStackName()), }) if err != nil { if IsNotFoundError(err) { return nil, ErrStackNotFound } return nil, err } if describeOutput == nil { return nil, fmt.Errorf("describeOutput was nil, potential issue with AWS Client") } if len(describeOutput.Stacks) == 0 { return nil, fmt.Errorf("describeOutput contained no Stacks, potential issue with AWS Client") } if len(describeOutput.Stacks) > 1 { return nil, fmt.Errorf("describeOutput contained multiple Stacks which is unexpected when calling with StackName, potential issue with AWS Client") } state := describeOutput.Stacks[0] if state.StackStatus == nil { return nil, fmt.Errorf("describeOutput contained a nil StackStatus, potential issue with AWS Client") } return state, nil } // update mutates the stack's status with current state, events and any // whitelisted outputs. ignores any errors encountered and just updates // whatever it can with the intension of getting as much info visible as // possible even under error conditions. 
func (r *Client) updateStatus(stack Stack) { // use a fresh context or we might not be able to update status after // deadline is hit, but this feels a little wrong ctx := context.Background() state, _ := r.get(ctx, stack) events, _ := r.events(ctx, stack) s := stack.GetStatus() // update aws specific state if state != nil { if state.StackId != nil { s.AWS.ID = *state.StackId } if state.StackName != nil { s.AWS.Name = *state.StackName } if state.StackStatus != nil { s.AWS.Status = *state.StackStatus } if state.StackStatusReason != nil { s.AWS.Reason = *state.StackStatusReason } } // add any event details if len(events) > 0 { s.AWS.Events = []object.AWSEvent{} for _, event := range events { reason := "-" if event.ResourceStatusReason != nil { reason = *event.ResourceStatusReason } s.AWS.Events = append(s.AWS.Events, object.AWSEvent{ Status: *event.ResourceStatus, Reason: reason, Time: &metav1.Time{Time: *event.Timestamp}, }) } } // update generic state switch s.AWS.Status { case DeleteFailed, CreateFailed, RollbackFailed, UpdateRollbackFailed, RollbackComplete, UpdateRollbackComplete: s.State = object.ErrorState case DeleteInProgress, DeleteComplete: s.State = object.DeletingState case CreateComplete, UpdateComplete: s.State = object.ReadyState default: s.State = object.ReconcilingState } // if object implements whitelisting of output keys, then update info if w, ok := stack.(StackOutputWhitelister); ok { if s.AWS.Info == nil { s.AWS.Info = map[string]string{} } outputs, _ := r.Outputs(ctx, stack) for _, whitelistedKey := range w.GetStackOutputWhitelist() { if val, ok := outputs[whitelistedKey]; ok { s.AWS.Info[whitelistedKey] = val } } } stack.SetStatus(s) } func (r *Client) events(ctx context.Context, stack Stack) ([]*StateEvent, error) { eventsOutput, err := r.Client.DescribeStackEventsWithContext(ctx, &DescribeStackEventsInput{ StackName: aws.String(stack.GetStackName()), }) if err != nil { return nil, err } if eventsOutput == nil { return []*StateEvent{}, nil } 
return eventsOutput.StackEvents, nil } // Exists checks if the stack has been provisioned func (r *Client) exists(ctx context.Context, stack Stack) (bool, error) { _, err := r.get(ctx, stack) if err == ErrStackNotFound { return false, nil } else if err != nil { return false, err } return true, nil } func (r *Client) waitUntilCompleteState(ctx context.Context, stack Stack) (*State, error) { return r.waitUntilState(ctx, stack, []string{ CreateComplete, UpdateComplete, UpdateRollbackComplete, RollbackComplete, }) } func (r *Client) waitUntilDestroyedState(ctx context.Context, stack Stack) (*State, error) { return r.waitUntilState(ctx, stack, []string{ DeleteComplete, }) } func (r *Client) waitUntilState(ctx context.Context, stack Stack, desiredStates []string) (*State, error) { for { select { case <-ctx.Done(): return nil, context.DeadlineExceeded default: state, err := r.get(ctx, stack) if IsNotFoundError(err) && in(DeleteComplete, desiredStates) { // If we are waiting for DeleteComplete state and the // stack has gone missing, consider this DeleteComplete return &State{}, nil } else if err != nil { return nil, err } if in(*state.StackStatus, desiredStates) { return state, nil } } time.Sleep(r.PollingInterval) } } func in(needle string, haystack []string) bool { for _, s := range haystack { if needle == s { return true } } return false } func IsNoUpdateError(err error) bool { if err == nil { return false } return strings.Contains(err.Error(), NoUpdatesErrMatch) } func IsNotFoundError(err error) bool { if awsErr, ok := err.(awserr.Error); ok { if awsErr.Code() == "ResourceNotFoundException" { return true } else if awsErr.Code() == "ValidationError" && strings.Contains(awsErr.Message(), NoExistErrMatch) { return true } } return false } func getStackPolicy(stack Stack) (*string, error) { var stackPolicy *string = nil if stackPolicyProvider, ok := stack.(StackPolicyProvider); ok { policy, err := json.Marshal(stackPolicyProvider.GetStackPolicy()) if err != nil { return 
nil, err } stackPolicy = aws.String(string(policy)) } return stackPolicy, nil }
Outputs
identifier_name
client.go
package cloudformation import ( "context" "encoding/json" "fmt" "regexp" "strings" "time" "github.com/alphagov/gsp/components/service-operator/internal/aws/sdk" "github.com/alphagov/gsp/components/service-operator/internal/object" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/cloudformation" "github.com/aws/aws-sdk-go/service/secretsmanager" goformation "github.com/awslabs/goformation/v4/cloudformation" goformationtags "github.com/awslabs/goformation/v4/cloudformation/tags" goformationrds "github.com/awslabs/goformation/v4/cloudformation/rds" goformationiam "github.com/awslabs/goformation/v4/cloudformation/iam" goformations3 "github.com/awslabs/goformation/v4/cloudformation/s3" goformationsqs "github.com/awslabs/goformation/v4/cloudformation/sqs" goformationecr "github.com/awslabs/goformation/v4/cloudformation/ecr" goformationsecretsmanager "github.com/awslabs/goformation/v4/cloudformation/secretsmanager" goformationelasticache "github.com/awslabs/goformation/v4/cloudformation/elasticache" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // Alias types from the various cloudformation packages so we can access // relevant parts via this package for convenience type State = cloudformation.Stack type StateEvent = cloudformation.StackEvent type Output = cloudformation.Output type DescribeStacksInput = cloudformation.DescribeStacksInput type CreateStackInput = cloudformation.CreateStackInput type UpdateStackInput = cloudformation.UpdateStackInput type DeleteStackInput = cloudformation.DeleteStackInput type DescribeStackEventsInput = cloudformation.DescribeStackEventsInput type DescribeStacksOutput = cloudformation.DescribeStacksOutput type GetSecretValueInput = secretsmanager.GetSecretValueInput type Parameter = cloudformation.Parameter type Template = goformation.Template type Tag = goformationtags.Tag type AWSRDSDBCluster = goformationrds.DBCluster type AWSRDSDBInstance = goformationrds.DBInstance type 
AWSRDSDBClusterParameterGroup = goformationrds.DBClusterParameterGroup type AWSRDSDBParameterGroup = goformationrds.DBParameterGroup type AWSIAMPolicy = goformationiam.Policy type AWSIAMRole = goformationiam.Role type AWSS3Bucket = goformations3.Bucket type AWSSecretsManagerSecret = goformationsecretsmanager.Secret type AWSSecretsManagerSecretTargetAttachment = goformationsecretsmanager.SecretTargetAttachment type AWSSQSQueue = goformationsqs.Queue type GenerateSecretString = goformationsecretsmanager.Secret_GenerateSecretString type AWSECRRepository = goformationecr.Repository type AWSECRRepository_LifecyclePolicy = goformationecr.Repository_LifecyclePolicy type AWSElastiCacheReplicationGroup = goformationelasticache.ReplicationGroup var NewTemplate = goformation.NewTemplate var Join = goformation.Join var GetAtt = goformation.GetAtt var Ref = goformation.Ref var Sub = goformation.Sub const CreateInProgress = cloudformation.StackStatusCreateInProgress const DeleteInProgress = cloudformation.StackStatusDeleteInProgress const UpdateInProgress = cloudformation.StackStatusUpdateInProgress const ReviewInProgress = cloudformation.StackStatusReviewInProgress const CreateComplete = cloudformation.StackStatusCreateComplete const DeleteComplete = cloudformation.StackStatusDeleteComplete const UpdateComplete = cloudformation.StackStatusUpdateComplete const CreateFailed = cloudformation.StackStatusCreateFailed const DeleteFailed = cloudformation.StackStatusDeleteFailed const RollbackFailed = cloudformation.StackStatusRollbackFailed const RollbackInProgress = cloudformation.StackStatusRollbackInProgress const UpdateRollbackFailed = cloudformation.StackStatusUpdateRollbackFailed const RollbackComplete = cloudformation.StackStatusRollbackComplete const UpdateRollbackInProgress = cloudformation.StackStatusRollbackInProgress const UpdateRollbackComplete = cloudformation.StackStatusUpdateRollbackComplete const UpdateRollbackCompleteCleanupInProgress = 
cloudformation.StackStatusUpdateRollbackCompleteCleanupInProgress var ( // capabilities required by cloudformation capabilities = []*string{ aws.String("CAPABILITY_NAMED_IAM"), } // ErrStackNotFound returned when stack does not exist, or has been deleted ErrStackNotFound = fmt.Errorf("STACK_NOT_FOUND") // NoUpdatesErrMatch is string to match in error from aws to detect if nothing to update NoUpdatesErrMatch = "No updates" // NoExistErrMatch is a string to match if stack does not exist NoExistErrMatch = "does not exist" ) // Outputs is used as a more friendly version of cloudformation.Output type Outputs map[string]string // Client performs cloudformation operations on objects that implement the Stack interface type Client struct { // ClusterName is used to prefix any generated names to avoid clashes ClusterName string // Client is the AWS SDK Client implementation to use Client sdk.Client // PollingInterval is the duration between calls to check state when waiting for apply/destroy to complete PollingInterval time.Duration } // Apply reconciles the state of the remote cloudformation stack and blocks // until the stack is no longer in an creating/applying state or a ctx timeout is hit // Calls should be retried if DeadlineExceeded errors are hit // Returns any outputs on successful apply. // Will update stack with current status func (r *Client) Apply(ctx context.Context, stack Stack, params ...*Parameter) (Outputs, error) { // always update stack status defer r.updateStatus(stack) // check if exists exists, err := r.exists(ctx, stack) if err != nil { return nil, err } if !exists { err := r.create(ctx, stack, params...) if err != nil { return nil, err } } _, err = r.waitUntilCompleteState(ctx, stack) if err != nil { return nil, err } err = r.update(ctx, stack, params...) 
if err != nil { return nil, err } state, err := r.waitUntilCompleteState(ctx, stack) if err != nil { return nil, err } return r.resolveOutputs(ctx, state.Outputs) } // validateParams checks for any unset template parameters func (r *Client) validateTemplateParams(t *Template, params []*Parameter) error { missing := map[string]interface{}{} // copy all wanted params into missing for k, v := range t.Parameters { missing[k] = v } // remove items from missing list as found for wantedKey := range t.Parameters { for _, param := range params { if param.ParameterKey == nil { continue } // phew found it if *param.ParameterKey == wantedKey { delete(missing, wantedKey) break } } } // if any left, then we have an issue if len(missing) > 0 { keys := []string{} for k := range missing { keys = append(keys, k) } keysCSV := strings.Join(keys, ",") return fmt.Errorf("missing required input parameters: [%s]", keysCSV) } return nil } // create initiates a cloudformation create passing in the given params func (r *Client) create(ctx context.Context, stack Stack, params ...*Parameter) error { // fetch and validate template t, err := stack.GetStackTemplate() if err != nil { return err } err = r.validateTemplateParams(t, params) if err != nil { return err } yaml, err := t.YAML() if err != nil { return err } stackPolicy, err := getStackPolicy(stack) if err != nil { return err } _, err = r.Client.CreateStackWithContext(ctx, &CreateStackInput{ Capabilities: capabilities, TemplateBody: aws.String(string(yaml)), StackName: aws.String(stack.GetStackName()), StackPolicyBody: stackPolicy, Parameters: params, }) if err != nil { return err } return nil } // Update the stack and wait for update to complete. 
func (r *Client) update(ctx context.Context, stack Stack, params ...*Parameter) error { // fetch and validate template params t, err := stack.GetStackTemplate() if err != nil { return err } err = r.validateTemplateParams(t, params) if err != nil { return err } yaml, err := t.YAML() if err != nil { return err } stackPolicy, err := getStackPolicy(stack) if err != nil { return err } _, err = r.Client.UpdateStackWithContext(ctx, &UpdateStackInput{ Capabilities: capabilities, TemplateBody: aws.String(string(yaml)), StackName: aws.String(stack.GetStackName()), StackPolicyBody: stackPolicy, Parameters: params, }) if err != nil && !IsNoUpdateError(err) { return err } return nil } // Destroy will attempt to deprovision the cloudformation stack and block until complete or the ctx Deadline expires // Calls should be retried if DeadlineExceeded errors are hit // Will update stack with current status func (r *Client) Destroy(ctx context.Context, stack Stack) error { // always update stack status defer r.updateStatus(stack) // fetch current state state, err := r.get(ctx, stack) if err == ErrStackNotFound { // resource is already deleted (or never existsed) // so we're done here return nil } else if err != nil { // failed to get stack status return err } if *state.StackStatus == DeleteComplete { // resource already deleted return nil } // trigger a delete unless we're already in a deleting state if *state.StackStatus != DeleteInProgress { _, err := r.Client.DeleteStackWithContext(ctx, &DeleteStackInput{ StackName: aws.String(stack.GetStackName()), }) if err != nil { return err } } _, err = r.waitUntilDestroyedState(ctx, stack) if err != nil { return err } return nil } // Outputs fetches the cloudformation outputs for the given stack // Returns ErrStackNotFound if stack does not exist func (r *Client) Outputs(ctx context.Context, stack Stack) (Outputs, error) { state, err := r.get(ctx, stack) if err != nil { return nil, err } return r.resolveOutputs(ctx, state.Outputs) } // 
resolveOutputs returns cloudformation outputs in a map format and resolves // any values that are stored in AWS Secrets Manager func (r *Client) resolveOutputs(ctx context.Context, list []*Output) (Outputs, error) { outputs := Outputs{} for _, item := range list { if item.OutputKey == nil || item.OutputValue == nil { continue } key := *item.OutputKey value := *item.OutputValue // we automatically resolve references to AWS Secrets Manager // secrets here, so that we are able to make use of encrypted // sensitive values in cloudformation templates if strings.HasPrefix(value, "{{resolve:secretsmanager:") { // extract ARN and key name from reference secretARNMatcher := regexp.MustCompile(`{{resolve:secretsmanager:(.*):SecretString:(.*)}}`) matches := secretARNMatcher.FindStringSubmatch(value) if len(matches) == 0 { return nil, fmt.Errorf("failed to extract ARN and key name from secretsmanager value: %s", secretARNMatcher) } arn := matches[1] subkey := matches[2] v, err := r.Client.GetSecretValueWithContext(ctx, &GetSecretValueInput{ SecretId: aws.String(arn), }) if err != nil { return nil, err } if v.SecretString == nil { return nil, fmt.Errorf("unexpected nil value in SecretString of %s", arn) } secrets := map[string]interface{}{} err = json.Unmarshal([]byte(*v.SecretString), &secrets) if err != nil { return nil, err } subval, haveSubkey := secrets[subkey] if !haveSubkey { return nil, fmt.Errorf("could not find subkey %s in SecretString of %s", subkey, arn) } subvalString, ok := subval.(string) if !ok { return nil, fmt.Errorf("subval at subkey %s in SecretString of %s is not a string", subkey, arn) } value = subvalString } outputs[key] = value } return outputs, nil } // get fetches the cloudformation stack state // Returns ErrStackNotFound if stack does not exist func (r *Client) get(ctx context.Context, stack Stack) (*State, error) { describeOutput, err := r.Client.DescribeStacksWithContext(ctx, &DescribeStacksInput{ StackName: aws.String(stack.GetStackName()), }) if 
err != nil { if IsNotFoundError(err) { return nil, ErrStackNotFound } return nil, err } if describeOutput == nil { return nil, fmt.Errorf("describeOutput was nil, potential issue with AWS Client") } if len(describeOutput.Stacks) == 0 { return nil, fmt.Errorf("describeOutput contained no Stacks, potential issue with AWS Client") } if len(describeOutput.Stacks) > 1 { return nil, fmt.Errorf("describeOutput contained multiple Stacks which is unexpected when calling with StackName, potential issue with AWS Client") } state := describeOutput.Stacks[0] if state.StackStatus == nil { return nil, fmt.Errorf("describeOutput contained a nil StackStatus, potential issue with AWS Client") } return state, nil } // update mutates the stack's status with current state, events and any // whitelisted outputs. ignores any errors encountered and just updates // whatever it can with the intension of getting as much info visible as // possible even under error conditions. func (r *Client) updateStatus(stack Stack)
func (r *Client) events(ctx context.Context, stack Stack) ([]*StateEvent, error) { eventsOutput, err := r.Client.DescribeStackEventsWithContext(ctx, &DescribeStackEventsInput{ StackName: aws.String(stack.GetStackName()), }) if err != nil { return nil, err } if eventsOutput == nil { return []*StateEvent{}, nil } return eventsOutput.StackEvents, nil } // Exists checks if the stack has been provisioned func (r *Client) exists(ctx context.Context, stack Stack) (bool, error) { _, err := r.get(ctx, stack) if err == ErrStackNotFound { return false, nil } else if err != nil { return false, err } return true, nil } func (r *Client) waitUntilCompleteState(ctx context.Context, stack Stack) (*State, error) { return r.waitUntilState(ctx, stack, []string{ CreateComplete, UpdateComplete, UpdateRollbackComplete, RollbackComplete, }) } func (r *Client) waitUntilDestroyedState(ctx context.Context, stack Stack) (*State, error) { return r.waitUntilState(ctx, stack, []string{ DeleteComplete, }) } func (r *Client) waitUntilState(ctx context.Context, stack Stack, desiredStates []string) (*State, error) { for { select { case <-ctx.Done(): return nil, context.DeadlineExceeded default: state, err := r.get(ctx, stack) if IsNotFoundError(err) && in(DeleteComplete, desiredStates) { // If we are waiting for DeleteComplete state and the // stack has gone missing, consider this DeleteComplete return &State{}, nil } else if err != nil { return nil, err } if in(*state.StackStatus, desiredStates) { return state, nil } } time.Sleep(r.PollingInterval) } } func in(needle string, haystack []string) bool { for _, s := range haystack { if needle == s { return true } } return false } func IsNoUpdateError(err error) bool { if err == nil { return false } return strings.Contains(err.Error(), NoUpdatesErrMatch) } func IsNotFoundError(err error) bool { if awsErr, ok := err.(awserr.Error); ok { if awsErr.Code() == "ResourceNotFoundException" { return true } else if awsErr.Code() == "ValidationError" && 
strings.Contains(awsErr.Message(), NoExistErrMatch) { return true } } return false } func getStackPolicy(stack Stack) (*string, error) { var stackPolicy *string = nil if stackPolicyProvider, ok := stack.(StackPolicyProvider); ok { policy, err := json.Marshal(stackPolicyProvider.GetStackPolicy()) if err != nil { return nil, err } stackPolicy = aws.String(string(policy)) } return stackPolicy, nil }
{ // use a fresh context or we might not be able to update status after // deadline is hit, but this feels a little wrong ctx := context.Background() state, _ := r.get(ctx, stack) events, _ := r.events(ctx, stack) s := stack.GetStatus() // update aws specific state if state != nil { if state.StackId != nil { s.AWS.ID = *state.StackId } if state.StackName != nil { s.AWS.Name = *state.StackName } if state.StackStatus != nil { s.AWS.Status = *state.StackStatus } if state.StackStatusReason != nil { s.AWS.Reason = *state.StackStatusReason } } // add any event details if len(events) > 0 { s.AWS.Events = []object.AWSEvent{} for _, event := range events { reason := "-" if event.ResourceStatusReason != nil { reason = *event.ResourceStatusReason } s.AWS.Events = append(s.AWS.Events, object.AWSEvent{ Status: *event.ResourceStatus, Reason: reason, Time: &metav1.Time{Time: *event.Timestamp}, }) } } // update generic state switch s.AWS.Status { case DeleteFailed, CreateFailed, RollbackFailed, UpdateRollbackFailed, RollbackComplete, UpdateRollbackComplete: s.State = object.ErrorState case DeleteInProgress, DeleteComplete: s.State = object.DeletingState case CreateComplete, UpdateComplete: s.State = object.ReadyState default: s.State = object.ReconcilingState } // if object implements whitelisting of output keys, then update info if w, ok := stack.(StackOutputWhitelister); ok { if s.AWS.Info == nil { s.AWS.Info = map[string]string{} } outputs, _ := r.Outputs(ctx, stack) for _, whitelistedKey := range w.GetStackOutputWhitelist() { if val, ok := outputs[whitelistedKey]; ok { s.AWS.Info[whitelistedKey] = val } } } stack.SetStatus(s) }
identifier_body
client.go
package cloudformation import ( "context" "encoding/json" "fmt" "regexp" "strings" "time" "github.com/alphagov/gsp/components/service-operator/internal/aws/sdk" "github.com/alphagov/gsp/components/service-operator/internal/object" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/cloudformation" "github.com/aws/aws-sdk-go/service/secretsmanager" goformation "github.com/awslabs/goformation/v4/cloudformation" goformationtags "github.com/awslabs/goformation/v4/cloudformation/tags" goformationrds "github.com/awslabs/goformation/v4/cloudformation/rds" goformationiam "github.com/awslabs/goformation/v4/cloudformation/iam" goformations3 "github.com/awslabs/goformation/v4/cloudformation/s3" goformationsqs "github.com/awslabs/goformation/v4/cloudformation/sqs" goformationecr "github.com/awslabs/goformation/v4/cloudformation/ecr" goformationsecretsmanager "github.com/awslabs/goformation/v4/cloudformation/secretsmanager" goformationelasticache "github.com/awslabs/goformation/v4/cloudformation/elasticache" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // Alias types from the various cloudformation packages so we can access // relevant parts via this package for convenience type State = cloudformation.Stack type StateEvent = cloudformation.StackEvent type Output = cloudformation.Output type DescribeStacksInput = cloudformation.DescribeStacksInput type CreateStackInput = cloudformation.CreateStackInput type UpdateStackInput = cloudformation.UpdateStackInput type DeleteStackInput = cloudformation.DeleteStackInput type DescribeStackEventsInput = cloudformation.DescribeStackEventsInput type DescribeStacksOutput = cloudformation.DescribeStacksOutput type GetSecretValueInput = secretsmanager.GetSecretValueInput type Parameter = cloudformation.Parameter type Template = goformation.Template type Tag = goformationtags.Tag type AWSRDSDBCluster = goformationrds.DBCluster type AWSRDSDBInstance = goformationrds.DBInstance type 
AWSRDSDBClusterParameterGroup = goformationrds.DBClusterParameterGroup type AWSRDSDBParameterGroup = goformationrds.DBParameterGroup type AWSIAMPolicy = goformationiam.Policy type AWSIAMRole = goformationiam.Role type AWSS3Bucket = goformations3.Bucket type AWSSecretsManagerSecret = goformationsecretsmanager.Secret type AWSSecretsManagerSecretTargetAttachment = goformationsecretsmanager.SecretTargetAttachment type AWSSQSQueue = goformationsqs.Queue type GenerateSecretString = goformationsecretsmanager.Secret_GenerateSecretString type AWSECRRepository = goformationecr.Repository type AWSECRRepository_LifecyclePolicy = goformationecr.Repository_LifecyclePolicy type AWSElastiCacheReplicationGroup = goformationelasticache.ReplicationGroup var NewTemplate = goformation.NewTemplate var Join = goformation.Join var GetAtt = goformation.GetAtt var Ref = goformation.Ref var Sub = goformation.Sub const CreateInProgress = cloudformation.StackStatusCreateInProgress const DeleteInProgress = cloudformation.StackStatusDeleteInProgress const UpdateInProgress = cloudformation.StackStatusUpdateInProgress const ReviewInProgress = cloudformation.StackStatusReviewInProgress const CreateComplete = cloudformation.StackStatusCreateComplete const DeleteComplete = cloudformation.StackStatusDeleteComplete const UpdateComplete = cloudformation.StackStatusUpdateComplete const CreateFailed = cloudformation.StackStatusCreateFailed const DeleteFailed = cloudformation.StackStatusDeleteFailed const RollbackFailed = cloudformation.StackStatusRollbackFailed const RollbackInProgress = cloudformation.StackStatusRollbackInProgress const UpdateRollbackFailed = cloudformation.StackStatusUpdateRollbackFailed const RollbackComplete = cloudformation.StackStatusRollbackComplete const UpdateRollbackInProgress = cloudformation.StackStatusRollbackInProgress const UpdateRollbackComplete = cloudformation.StackStatusUpdateRollbackComplete const UpdateRollbackCompleteCleanupInProgress = 
cloudformation.StackStatusUpdateRollbackCompleteCleanupInProgress var ( // capabilities required by cloudformation capabilities = []*string{ aws.String("CAPABILITY_NAMED_IAM"), } // ErrStackNotFound returned when stack does not exist, or has been deleted ErrStackNotFound = fmt.Errorf("STACK_NOT_FOUND") // NoUpdatesErrMatch is string to match in error from aws to detect if nothing to update NoUpdatesErrMatch = "No updates" // NoExistErrMatch is a string to match if stack does not exist NoExistErrMatch = "does not exist" ) // Outputs is used as a more friendly version of cloudformation.Output type Outputs map[string]string // Client performs cloudformation operations on objects that implement the Stack interface type Client struct { // ClusterName is used to prefix any generated names to avoid clashes ClusterName string // Client is the AWS SDK Client implementation to use Client sdk.Client // PollingInterval is the duration between calls to check state when waiting for apply/destroy to complete PollingInterval time.Duration } // Apply reconciles the state of the remote cloudformation stack and blocks // until the stack is no longer in an creating/applying state or a ctx timeout is hit // Calls should be retried if DeadlineExceeded errors are hit // Returns any outputs on successful apply. // Will update stack with current status func (r *Client) Apply(ctx context.Context, stack Stack, params ...*Parameter) (Outputs, error) { // always update stack status defer r.updateStatus(stack) // check if exists exists, err := r.exists(ctx, stack) if err != nil { return nil, err } if !exists { err := r.create(ctx, stack, params...) if err != nil { return nil, err } } _, err = r.waitUntilCompleteState(ctx, stack) if err != nil { return nil, err } err = r.update(ctx, stack, params...) 
if err != nil { return nil, err } state, err := r.waitUntilCompleteState(ctx, stack) if err != nil { return nil, err } return r.resolveOutputs(ctx, state.Outputs) } // validateParams checks for any unset template parameters func (r *Client) validateTemplateParams(t *Template, params []*Parameter) error { missing := map[string]interface{}{} // copy all wanted params into missing for k, v := range t.Parameters { missing[k] = v } // remove items from missing list as found for wantedKey := range t.Parameters { for _, param := range params { if param.ParameterKey == nil { continue } // phew found it if *param.ParameterKey == wantedKey { delete(missing, wantedKey) break } } } // if any left, then we have an issue if len(missing) > 0 { keys := []string{} for k := range missing { keys = append(keys, k) } keysCSV := strings.Join(keys, ",") return fmt.Errorf("missing required input parameters: [%s]", keysCSV) } return nil } // create initiates a cloudformation create passing in the given params func (r *Client) create(ctx context.Context, stack Stack, params ...*Parameter) error { // fetch and validate template t, err := stack.GetStackTemplate() if err != nil { return err } err = r.validateTemplateParams(t, params) if err != nil { return err } yaml, err := t.YAML() if err != nil { return err } stackPolicy, err := getStackPolicy(stack) if err != nil { return err } _, err = r.Client.CreateStackWithContext(ctx, &CreateStackInput{ Capabilities: capabilities, TemplateBody: aws.String(string(yaml)), StackName: aws.String(stack.GetStackName()), StackPolicyBody: stackPolicy, Parameters: params, }) if err != nil { return err } return nil } // Update the stack and wait for update to complete. 
func (r *Client) update(ctx context.Context, stack Stack, params ...*Parameter) error { // fetch and validate template params t, err := stack.GetStackTemplate() if err != nil { return err } err = r.validateTemplateParams(t, params) if err != nil { return err } yaml, err := t.YAML() if err != nil { return err } stackPolicy, err := getStackPolicy(stack) if err != nil { return err } _, err = r.Client.UpdateStackWithContext(ctx, &UpdateStackInput{ Capabilities: capabilities, TemplateBody: aws.String(string(yaml)), StackName: aws.String(stack.GetStackName()), StackPolicyBody: stackPolicy, Parameters: params, }) if err != nil && !IsNoUpdateError(err) { return err } return nil } // Destroy will attempt to deprovision the cloudformation stack and block until complete or the ctx Deadline expires // Calls should be retried if DeadlineExceeded errors are hit // Will update stack with current status func (r *Client) Destroy(ctx context.Context, stack Stack) error { // always update stack status defer r.updateStatus(stack) // fetch current state state, err := r.get(ctx, stack) if err == ErrStackNotFound { // resource is already deleted (or never existsed) // so we're done here return nil } else if err != nil { // failed to get stack status return err } if *state.StackStatus == DeleteComplete { // resource already deleted return nil } // trigger a delete unless we're already in a deleting state if *state.StackStatus != DeleteInProgress { _, err := r.Client.DeleteStackWithContext(ctx, &DeleteStackInput{ StackName: aws.String(stack.GetStackName()), }) if err != nil { return err } } _, err = r.waitUntilDestroyedState(ctx, stack) if err != nil { return err } return nil } // Outputs fetches the cloudformation outputs for the given stack // Returns ErrStackNotFound if stack does not exist func (r *Client) Outputs(ctx context.Context, stack Stack) (Outputs, error) { state, err := r.get(ctx, stack) if err != nil { return nil, err } return r.resolveOutputs(ctx, state.Outputs) } // 
resolveOutputs returns cloudformation outputs in a map format and resolves // any values that are stored in AWS Secrets Manager func (r *Client) resolveOutputs(ctx context.Context, list []*Output) (Outputs, error) { outputs := Outputs{} for _, item := range list { if item.OutputKey == nil || item.OutputValue == nil { continue } key := *item.OutputKey value := *item.OutputValue // we automatically resolve references to AWS Secrets Manager // secrets here, so that we are able to make use of encrypted // sensitive values in cloudformation templates if strings.HasPrefix(value, "{{resolve:secretsmanager:") { // extract ARN and key name from reference secretARNMatcher := regexp.MustCompile(`{{resolve:secretsmanager:(.*):SecretString:(.*)}}`) matches := secretARNMatcher.FindStringSubmatch(value) if len(matches) == 0 { return nil, fmt.Errorf("failed to extract ARN and key name from secretsmanager value: %s", secretARNMatcher) } arn := matches[1] subkey := matches[2] v, err := r.Client.GetSecretValueWithContext(ctx, &GetSecretValueInput{ SecretId: aws.String(arn), }) if err != nil { return nil, err } if v.SecretString == nil {
if err != nil { return nil, err } subval, haveSubkey := secrets[subkey] if !haveSubkey { return nil, fmt.Errorf("could not find subkey %s in SecretString of %s", subkey, arn) } subvalString, ok := subval.(string) if !ok { return nil, fmt.Errorf("subval at subkey %s in SecretString of %s is not a string", subkey, arn) } value = subvalString } outputs[key] = value } return outputs, nil } // get fetches the cloudformation stack state // Returns ErrStackNotFound if stack does not exist func (r *Client) get(ctx context.Context, stack Stack) (*State, error) { describeOutput, err := r.Client.DescribeStacksWithContext(ctx, &DescribeStacksInput{ StackName: aws.String(stack.GetStackName()), }) if err != nil { if IsNotFoundError(err) { return nil, ErrStackNotFound } return nil, err } if describeOutput == nil { return nil, fmt.Errorf("describeOutput was nil, potential issue with AWS Client") } if len(describeOutput.Stacks) == 0 { return nil, fmt.Errorf("describeOutput contained no Stacks, potential issue with AWS Client") } if len(describeOutput.Stacks) > 1 { return nil, fmt.Errorf("describeOutput contained multiple Stacks which is unexpected when calling with StackName, potential issue with AWS Client") } state := describeOutput.Stacks[0] if state.StackStatus == nil { return nil, fmt.Errorf("describeOutput contained a nil StackStatus, potential issue with AWS Client") } return state, nil } // update mutates the stack's status with current state, events and any // whitelisted outputs. ignores any errors encountered and just updates // whatever it can with the intension of getting as much info visible as // possible even under error conditions. 
func (r *Client) updateStatus(stack Stack) { // use a fresh context or we might not be able to update status after // deadline is hit, but this feels a little wrong ctx := context.Background() state, _ := r.get(ctx, stack) events, _ := r.events(ctx, stack) s := stack.GetStatus() // update aws specific state if state != nil { if state.StackId != nil { s.AWS.ID = *state.StackId } if state.StackName != nil { s.AWS.Name = *state.StackName } if state.StackStatus != nil { s.AWS.Status = *state.StackStatus } if state.StackStatusReason != nil { s.AWS.Reason = *state.StackStatusReason } } // add any event details if len(events) > 0 { s.AWS.Events = []object.AWSEvent{} for _, event := range events { reason := "-" if event.ResourceStatusReason != nil { reason = *event.ResourceStatusReason } s.AWS.Events = append(s.AWS.Events, object.AWSEvent{ Status: *event.ResourceStatus, Reason: reason, Time: &metav1.Time{Time: *event.Timestamp}, }) } } // update generic state switch s.AWS.Status { case DeleteFailed, CreateFailed, RollbackFailed, UpdateRollbackFailed, RollbackComplete, UpdateRollbackComplete: s.State = object.ErrorState case DeleteInProgress, DeleteComplete: s.State = object.DeletingState case CreateComplete, UpdateComplete: s.State = object.ReadyState default: s.State = object.ReconcilingState } // if object implements whitelisting of output keys, then update info if w, ok := stack.(StackOutputWhitelister); ok { if s.AWS.Info == nil { s.AWS.Info = map[string]string{} } outputs, _ := r.Outputs(ctx, stack) for _, whitelistedKey := range w.GetStackOutputWhitelist() { if val, ok := outputs[whitelistedKey]; ok { s.AWS.Info[whitelistedKey] = val } } } stack.SetStatus(s) } func (r *Client) events(ctx context.Context, stack Stack) ([]*StateEvent, error) { eventsOutput, err := r.Client.DescribeStackEventsWithContext(ctx, &DescribeStackEventsInput{ StackName: aws.String(stack.GetStackName()), }) if err != nil { return nil, err } if eventsOutput == nil { return []*StateEvent{}, nil } 
return eventsOutput.StackEvents, nil } // Exists checks if the stack has been provisioned func (r *Client) exists(ctx context.Context, stack Stack) (bool, error) { _, err := r.get(ctx, stack) if err == ErrStackNotFound { return false, nil } else if err != nil { return false, err } return true, nil } func (r *Client) waitUntilCompleteState(ctx context.Context, stack Stack) (*State, error) { return r.waitUntilState(ctx, stack, []string{ CreateComplete, UpdateComplete, UpdateRollbackComplete, RollbackComplete, }) } func (r *Client) waitUntilDestroyedState(ctx context.Context, stack Stack) (*State, error) { return r.waitUntilState(ctx, stack, []string{ DeleteComplete, }) } func (r *Client) waitUntilState(ctx context.Context, stack Stack, desiredStates []string) (*State, error) { for { select { case <-ctx.Done(): return nil, context.DeadlineExceeded default: state, err := r.get(ctx, stack) if IsNotFoundError(err) && in(DeleteComplete, desiredStates) { // If we are waiting for DeleteComplete state and the // stack has gone missing, consider this DeleteComplete return &State{}, nil } else if err != nil { return nil, err } if in(*state.StackStatus, desiredStates) { return state, nil } } time.Sleep(r.PollingInterval) } } func in(needle string, haystack []string) bool { for _, s := range haystack { if needle == s { return true } } return false } func IsNoUpdateError(err error) bool { if err == nil { return false } return strings.Contains(err.Error(), NoUpdatesErrMatch) } func IsNotFoundError(err error) bool { if awsErr, ok := err.(awserr.Error); ok { if awsErr.Code() == "ResourceNotFoundException" { return true } else if awsErr.Code() == "ValidationError" && strings.Contains(awsErr.Message(), NoExistErrMatch) { return true } } return false } func getStackPolicy(stack Stack) (*string, error) { var stackPolicy *string = nil if stackPolicyProvider, ok := stack.(StackPolicyProvider); ok { policy, err := json.Marshal(stackPolicyProvider.GetStackPolicy()) if err != nil { return 
nil, err } stackPolicy = aws.String(string(policy)) } return stackPolicy, nil }
return nil, fmt.Errorf("unexpected nil value in SecretString of %s", arn) } secrets := map[string]interface{}{} err = json.Unmarshal([]byte(*v.SecretString), &secrets)
random_line_split
runtime.rs
//! An extension to start the tokio runtime at the appropriate time. use std::fmt::Debug; use std::sync::Arc; use std::time::Duration; use failure::Error; use futures::future::{self, Future}; use log::{trace, warn}; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use spirit::bodies::InnerBody; use spirit::extension::{Extensible, Extension}; use spirit::{Builder, Spirit}; use structdoc::StructDoc; use structopt::StructOpt; use tokio::runtime; /// A body run on tokio runtime. /// /// When specifying custom tokio runtime through the [`Runtime`](enum.Runtime.html) extension, this /// is the future to be run inside the runtime. pub type TokioBody = Box<dyn Future<Item = (), Error = Error> + Send>; /// An extension to initialize a tokio runtime as part of spirit. /// /// The [`FutureInstaller`] in this crate (and as a result pipelines with [`Fragment`]s like /// [`TcpListen`], [`UdpListen`]) use this to make sure they have a runtime to handle the sockets /// on. /// /// If you prefer to specify configuration of the runtime to use, instead of the default one, you /// can create an instance of this extension yourself and register it *before registering any socket /// pipelines*, which will take precedence and the sockets will use the one provided by you. You /// must register it using the [`with_singleton`] method. /// /// Similarly, if all the pipelines are registered within the [`run`] method (or generally, after /// building is done), you need to install this manually *before* doing [`run`]. /// /// Note that the provided closures are `FnMut` mostly because `Box<FnOnce>` doesn't work. They /// will be called just once, so you can use `Option<T>` inside and consume the value by /// `take.unwrap()`. /// /// # Runtime configuration /// /// You may have noticed the callbacks here don't have access to configuration. If you intend to /// configure eg. the number of threads from user configuration, use the [`ThreadPoolConfig`] /// instead. 
/// /// # Future compatibility /// /// More variants may be added into the enum at any time. Such change will not be considered a /// breaking change. /// /// # Examples /// /// ``` /// extern crate failure; /// extern crate serde; /// #[macro_use] /// extern crate serde_derive; /// extern crate spirit; /// extern crate spirit_tokio; /// extern crate tokio; /// /// use std::sync::Arc; /// /// use failure::Error; /// use spirit::prelude::*; /// use spirit_tokio::{HandleListener, TcpListen}; /// use spirit_tokio::runtime::Runtime; /// use tokio::prelude::*; /// /// #[derive(Default, Deserialize)] /// struct Config { /// #[serde(default)] /// listening_socket: Vec<TcpListen>, /// } /// /// impl Config { /// fn listener(&self) -> Vec<TcpListen> { /// self.listening_socket.clone() /// } /// } /// /// fn connection() -> impl Future<Item = (), Error = Error> { /// future::ok(()) // Just a dummy implementation /// } /// /// fn main() { /// Spirit::<Empty, Config>::new() /// // Uses the current thread runtime instead of the default threadpool. This'll create /// // smaller number of threads. /// .with_singleton(Runtime::CurrentThread(Box::new(|_| ()))) /// .with( /// Pipeline::new("listener") /// .extract_cfg(Config::listener) /// .transform(HandleListener(|_conn, _cfg: &_| connection())) /// ) /// .run(|spirit| { /// # let spirit = Arc::clone(spirit); /// # std::thread::spawn(move || spirit.terminate()); /// Ok(()) /// }); /// } /// ``` /// /// [`TcpListen`]: crate::TcpListen /// [`UdpListen`]: crate::UdpListen /// [`FutureInstaller`]: crate::installer::FutureInstaller /// [`Fragment`]: spirit::Fragment /// [`run`]: spirit::SpiritBuilder::run /// [`with_singleton`]: spirit::extension::Extension::with_singleton pub enum
{ /// Use the threadpool runtime. /// /// The threadpool runtime is the default (both in tokio and spirit). /// /// This allows you to modify the builder prior to starting it, specifying custom options like /// number of threads. ThreadPool(Box<dyn FnMut(&mut runtime::Builder) + Send>), /// Use the current thread runtime. /// /// If you prefer to run everything in a single thread, use this variant. The provided closure /// can modify the builder prior to starting it. CurrentThread(Box<dyn FnMut(&mut runtime::current_thread::Builder) + Send>), /// Use completely custom runtime. /// /// The provided closure should start the runtime and execute the provided future on it, /// blocking until the runtime becomes empty. /// /// This allows combining arbitrary runtimes that are not directly supported by either tokio or /// spirit. Custom(Box<dyn FnMut(TokioBody) -> Result<(), Error> + Send>), #[doc(hidden)] __NonExhaustive__, // TODO: Support loading this from configuration? But it won't be possible to modify at // runtime, will it? 
} impl Default for Runtime { fn default() -> Self { Runtime::ThreadPool(Box::new(|_| {})) } } impl Runtime { fn execute<O, C>(self, spirit: &Arc<Spirit<O, C>>, inner: InnerBody) -> Result<(), Error> where C: DeserializeOwned + Send + Sync + 'static, O: StructOpt + Send + Sync + 'static, { let spirit = Arc::clone(spirit); let fut = future::lazy(move || { inner.run().map_err(move |e| { spirit.terminate(); e }) }); match self { Runtime::ThreadPool(mut mod_builder) => { let mut builder = runtime::Builder::new(); mod_builder(&mut builder); let mut runtime = builder.build()?; runtime.block_on(fut)?; runtime.block_on_all(future::lazy(|| Ok(()))) } Runtime::CurrentThread(mut mod_builder) => { let mut builder = runtime::current_thread::Builder::new(); mod_builder(&mut builder); let mut runtime = builder.build()?; runtime.block_on(fut)?; runtime.run().map_err(Error::from) } Runtime::Custom(mut callback) => callback(Box::new(fut)), Runtime::__NonExhaustive__ => unreachable!(), } } } impl<E> Extension<E> for Runtime where E: Extensible<Ok = E>, E::Config: DeserializeOwned + Send + Sync + 'static, E::Opts: StructOpt + Send + Sync + 'static, { fn apply(self, ext: E) -> Result<E, Error> { trace!("Wrapping in tokio runtime"); ext.run_around(|spirit, inner| self.execute(spirit, inner)) } } /// A configuration extension for the Tokio Threadpool runtime. /// /// Using the [`extension`][ThreadPoolConfig::extension] or the /// [`postprocess_extension`][ThreadPoolConfig::postprocess_extension] provides the [`Runtime`] to /// the spirit application. However, this allows reading the parameters of the threadpool (mostly /// number of threads) from the configuration instead of hardcoding it into the application. /// /// # Panics /// /// If this is inserted after something already registered a [`Runtime`]. 
/// /// # Examples /// /// ```rust /// use serde::Deserialize; /// use spirit::prelude::*; /// use spirit_tokio::runtime::ThreadPoolConfig; /// /// #[derive(Debug, Default, Deserialize)] /// struct Cfg { /// #[serde(default)] // Allow empty configuration with default runtime /// threadpool: ThreadPoolConfig, /// } /// /// impl Cfg { /// fn threadpool(&self) -> ThreadPoolConfig { /// self.threadpool.clone() /// } /// } /// /// fn main() { /// Spirit::<Empty, Cfg>::new() /// .with(ThreadPoolConfig::extension(Cfg::threadpool)) /// .run(|_| { /// // This runs inside a configured runtime /// Ok(()) /// }); /// } /// ``` #[derive( Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize, StructDoc, Ord, PartialOrd, Hash, )] #[serde(rename_all = "kebab-case")] pub struct ThreadPoolConfig { /// Maximum number of asynchronous worker threads. /// /// These do most of the work. There's little reason to set it to more than number of CPUs, but /// it may make sense to set it lower. /// /// If not set, the application will start with number of CPUs available in the system. #[serde(skip_serializing_if = "Option::is_none")] pub async_threads: Option<usize>, /// Maximum number of blocking worker threads. /// /// These do tasks that take longer time. This includes file IO and CPU intensive tasks. /// /// If not set, defaults to 100. /// /// Often, the application doesn't start these threads as they might not always be needed. #[serde(skip_serializing_if = "Option::is_none")] pub blocking_threads: Option<usize>, #[serde( skip_serializing_if = "Option::is_none", serialize_with = "spirit::utils::serialize_opt_duration", deserialize_with = "spirit::utils::deserialize_opt_duration", default )] /// How long to keep an idle thread around. /// /// A thread will be shut down if it sits around idle for this long. The default (unset) is /// never to shut it down. /// /// Accepts human-parsable times, like „3days“ or „5s“. 
pub keep_alive: Option<Duration>, #[serde(skip)] _sentinel: (), } impl ThreadPoolConfig { /// The extension to be plugged in with [`with`]. /// /// See the [example](#examples). /// /// [`with`]: spirit::extension::Extension::with pub fn extension<O, C, F>(extract: F) -> impl Extension<Builder<O, C>> where F: Fn(&C) -> Self + Clone + Send + Sync + 'static, O: Debug + StructOpt + Send + Sync + 'static, C: DeserializeOwned + Send + Sync + 'static, { Self::postprocess_extension(extract, |_: &mut _| ()) } /// Similar to [`extension`][ThreadPoolConfig::extension], but allows further tweaking. /// /// This allows to tweak the [threadpool builder][runtime::Builder] after it was pre-configured /// by the configuration file. This might be desirable, for example, if the application also /// wants to install an [`after_start`][runtime::Builder::after_start] or set the stack size /// which either can't or don't make sense to configure by the user. pub fn postprocess_extension<O, C, F, P>(extract: F, post: P) -> impl Extension<Builder<O, C>> where F: Fn(&C) -> Self + Clone + Send + Sync + 'static, P: FnOnce(&mut runtime::Builder) + Send + 'static, O: Debug + StructOpt + Send + Sync + 'static, C: DeserializeOwned + Send + Sync + 'static, { let mut post = Some(post); |mut builder: Builder<O, C>| { assert!( builder.singleton::<Runtime>(), "Tokio Runtime already inserted" ); trace!("Inserting configurable tokio runtime"); builder .on_config({ let extract = extract.clone(); let mut first = None; move |_: &O, cfg: &Arc<C>| { let cfg = extract(cfg); if first.is_none() { first = Some(cfg); } else if first.as_ref() != Some(&cfg) { warn!("Tokio threadpool configuration can't be changed at runtime"); } } }) .run_around(|spirit, inner| { Runtime::ThreadPool({ let spirit = Arc::clone(spirit); Box::new(move |builder| { let cfg = extract(&spirit.config()); if let Some(threads) = cfg.async_threads { builder.core_threads(threads); } if let Some(threads) = cfg.blocking_threads { 
builder.blocking_threads(threads); } if let Some(alive) = cfg.keep_alive { builder.keep_alive(Some(alive)); } (post.take().unwrap())(builder) }) }) .execute(spirit, inner) }) } } }
Runtime
identifier_name
runtime.rs
//! An extension to start the tokio runtime at the appropriate time. use std::fmt::Debug; use std::sync::Arc; use std::time::Duration; use failure::Error; use futures::future::{self, Future}; use log::{trace, warn}; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use spirit::bodies::InnerBody; use spirit::extension::{Extensible, Extension}; use spirit::{Builder, Spirit}; use structdoc::StructDoc; use structopt::StructOpt; use tokio::runtime; /// A body run on tokio runtime. /// /// When specifying custom tokio runtime through the [`Runtime`](enum.Runtime.html) extension, this /// is the future to be run inside the runtime. pub type TokioBody = Box<dyn Future<Item = (), Error = Error> + Send>; /// An extension to initialize a tokio runtime as part of spirit. /// /// The [`FutureInstaller`] in this crate (and as a result pipelines with [`Fragment`]s like /// [`TcpListen`], [`UdpListen`]) use this to make sure they have a runtime to handle the sockets /// on. /// /// If you prefer to specify configuration of the runtime to use, instead of the default one, you /// can create an instance of this extension yourself and register it *before registering any socket /// pipelines*, which will take precedence and the sockets will use the one provided by you. You /// must register it using the [`with_singleton`] method. /// /// Similarly, if all the pipelines are registered within the [`run`] method (or generally, after /// building is done), you need to install this manually *before* doing [`run`]. /// /// Note that the provided closures are `FnMut` mostly because `Box<FnOnce>` doesn't work. They /// will be called just once, so you can use `Option<T>` inside and consume the value by /// `take.unwrap()`. /// /// # Runtime configuration /// /// You may have noticed the callbacks here don't have access to configuration. If you intend to /// configure eg. the number of threads from user configuration, use the [`ThreadPoolConfig`] /// instead. 
/// /// # Future compatibility /// /// More variants may be added into the enum at any time. Such change will not be considered a /// breaking change. /// /// # Examples /// /// ``` /// extern crate failure; /// extern crate serde; /// #[macro_use] /// extern crate serde_derive; /// extern crate spirit; /// extern crate spirit_tokio; /// extern crate tokio; /// /// use std::sync::Arc; /// /// use failure::Error; /// use spirit::prelude::*; /// use spirit_tokio::{HandleListener, TcpListen}; /// use spirit_tokio::runtime::Runtime; /// use tokio::prelude::*; /// /// #[derive(Default, Deserialize)] /// struct Config { /// #[serde(default)] /// listening_socket: Vec<TcpListen>, /// } /// /// impl Config { /// fn listener(&self) -> Vec<TcpListen> { /// self.listening_socket.clone() /// } /// } /// /// fn connection() -> impl Future<Item = (), Error = Error> { /// future::ok(()) // Just a dummy implementation /// } /// /// fn main() { /// Spirit::<Empty, Config>::new() /// // Uses the current thread runtime instead of the default threadpool. This'll create /// // smaller number of threads. /// .with_singleton(Runtime::CurrentThread(Box::new(|_| ()))) /// .with( /// Pipeline::new("listener") /// .extract_cfg(Config::listener) /// .transform(HandleListener(|_conn, _cfg: &_| connection())) /// ) /// .run(|spirit| { /// # let spirit = Arc::clone(spirit); /// # std::thread::spawn(move || spirit.terminate()); /// Ok(()) /// }); /// } /// ``` /// /// [`TcpListen`]: crate::TcpListen /// [`UdpListen`]: crate::UdpListen /// [`FutureInstaller`]: crate::installer::FutureInstaller /// [`Fragment`]: spirit::Fragment /// [`run`]: spirit::SpiritBuilder::run /// [`with_singleton`]: spirit::extension::Extension::with_singleton pub enum Runtime { /// Use the threadpool runtime. /// /// The threadpool runtime is the default (both in tokio and spirit). /// /// This allows you to modify the builder prior to starting it, specifying custom options like /// number of threads. 
ThreadPool(Box<dyn FnMut(&mut runtime::Builder) + Send>), /// Use the current thread runtime. /// /// If you prefer to run everything in a single thread, use this variant. The provided closure /// can modify the builder prior to starting it. CurrentThread(Box<dyn FnMut(&mut runtime::current_thread::Builder) + Send>), /// Use completely custom runtime. /// /// The provided closure should start the runtime and execute the provided future on it, /// blocking until the runtime becomes empty. /// /// This allows combining arbitrary runtimes that are not directly supported by either tokio or /// spirit. Custom(Box<dyn FnMut(TokioBody) -> Result<(), Error> + Send>), #[doc(hidden)] __NonExhaustive__, // TODO: Support loading this from configuration? But it won't be possible to modify at // runtime, will it? } impl Default for Runtime { fn default() -> Self { Runtime::ThreadPool(Box::new(|_| {})) } } impl Runtime { fn execute<O, C>(self, spirit: &Arc<Spirit<O, C>>, inner: InnerBody) -> Result<(), Error> where C: DeserializeOwned + Send + Sync + 'static, O: StructOpt + Send + Sync + 'static,
} impl<E> Extension<E> for Runtime where E: Extensible<Ok = E>, E::Config: DeserializeOwned + Send + Sync + 'static, E::Opts: StructOpt + Send + Sync + 'static, { fn apply(self, ext: E) -> Result<E, Error> { trace!("Wrapping in tokio runtime"); ext.run_around(|spirit, inner| self.execute(spirit, inner)) } } /// A configuration extension for the Tokio Threadpool runtime. /// /// Using the [`extension`][ThreadPoolConfig::extension] or the /// [`postprocess_extension`][ThreadPoolConfig::postprocess_extension] provides the [`Runtime`] to /// the spirit application. However, this allows reading the parameters of the threadpool (mostly /// number of threads) from the configuration instead of hardcoding it into the application. /// /// # Panics /// /// If this is inserted after something already registered a [`Runtime`]. /// /// # Examples /// /// ```rust /// use serde::Deserialize; /// use spirit::prelude::*; /// use spirit_tokio::runtime::ThreadPoolConfig; /// /// #[derive(Debug, Default, Deserialize)] /// struct Cfg { /// #[serde(default)] // Allow empty configuration with default runtime /// threadpool: ThreadPoolConfig, /// } /// /// impl Cfg { /// fn threadpool(&self) -> ThreadPoolConfig { /// self.threadpool.clone() /// } /// } /// /// fn main() { /// Spirit::<Empty, Cfg>::new() /// .with(ThreadPoolConfig::extension(Cfg::threadpool)) /// .run(|_| { /// // This runs inside a configured runtime /// Ok(()) /// }); /// } /// ``` #[derive( Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize, StructDoc, Ord, PartialOrd, Hash, )] #[serde(rename_all = "kebab-case")] pub struct ThreadPoolConfig { /// Maximum number of asynchronous worker threads. /// /// These do most of the work. There's little reason to set it to more than number of CPUs, but /// it may make sense to set it lower. /// /// If not set, the application will start with number of CPUs available in the system. 
#[serde(skip_serializing_if = "Option::is_none")] pub async_threads: Option<usize>, /// Maximum number of blocking worker threads. /// /// These do tasks that take longer time. This includes file IO and CPU intensive tasks. /// /// If not set, defaults to 100. /// /// Often, the application doesn't start these threads as they might not always be needed. #[serde(skip_serializing_if = "Option::is_none")] pub blocking_threads: Option<usize>, #[serde( skip_serializing_if = "Option::is_none", serialize_with = "spirit::utils::serialize_opt_duration", deserialize_with = "spirit::utils::deserialize_opt_duration", default )] /// How long to keep an idle thread around. /// /// A thread will be shut down if it sits around idle for this long. The default (unset) is /// never to shut it down. /// /// Accepts human-parsable times, like „3days“ or „5s“. pub keep_alive: Option<Duration>, #[serde(skip)] _sentinel: (), } impl ThreadPoolConfig { /// The extension to be plugged in with [`with`]. /// /// See the [example](#examples). /// /// [`with`]: spirit::extension::Extension::with pub fn extension<O, C, F>(extract: F) -> impl Extension<Builder<O, C>> where F: Fn(&C) -> Self + Clone + Send + Sync + 'static, O: Debug + StructOpt + Send + Sync + 'static, C: DeserializeOwned + Send + Sync + 'static, { Self::postprocess_extension(extract, |_: &mut _| ()) } /// Similar to [`extension`][ThreadPoolConfig::extension], but allows further tweaking. /// /// This allows to tweak the [threadpool builder][runtime::Builder] after it was pre-configured /// by the configuration file. This might be desirable, for example, if the application also /// wants to install an [`after_start`][runtime::Builder::after_start] or set the stack size /// which either can't or don't make sense to configure by the user. 
pub fn postprocess_extension<O, C, F, P>(extract: F, post: P) -> impl Extension<Builder<O, C>> where F: Fn(&C) -> Self + Clone + Send + Sync + 'static, P: FnOnce(&mut runtime::Builder) + Send + 'static, O: Debug + StructOpt + Send + Sync + 'static, C: DeserializeOwned + Send + Sync + 'static, { let mut post = Some(post); |mut builder: Builder<O, C>| { assert!( builder.singleton::<Runtime>(), "Tokio Runtime already inserted" ); trace!("Inserting configurable tokio runtime"); builder .on_config({ let extract = extract.clone(); let mut first = None; move |_: &O, cfg: &Arc<C>| { let cfg = extract(cfg); if first.is_none() { first = Some(cfg); } else if first.as_ref() != Some(&cfg) { warn!("Tokio threadpool configuration can't be changed at runtime"); } } }) .run_around(|spirit, inner| { Runtime::ThreadPool({ let spirit = Arc::clone(spirit); Box::new(move |builder| { let cfg = extract(&spirit.config()); if let Some(threads) = cfg.async_threads { builder.core_threads(threads); } if let Some(threads) = cfg.blocking_threads { builder.blocking_threads(threads); } if let Some(alive) = cfg.keep_alive { builder.keep_alive(Some(alive)); } (post.take().unwrap())(builder) }) }) .execute(spirit, inner) }) } } }
{ let spirit = Arc::clone(spirit); let fut = future::lazy(move || { inner.run().map_err(move |e| { spirit.terminate(); e }) }); match self { Runtime::ThreadPool(mut mod_builder) => { let mut builder = runtime::Builder::new(); mod_builder(&mut builder); let mut runtime = builder.build()?; runtime.block_on(fut)?; runtime.block_on_all(future::lazy(|| Ok(()))) } Runtime::CurrentThread(mut mod_builder) => { let mut builder = runtime::current_thread::Builder::new(); mod_builder(&mut builder); let mut runtime = builder.build()?; runtime.block_on(fut)?; runtime.run().map_err(Error::from) } Runtime::Custom(mut callback) => callback(Box::new(fut)), Runtime::__NonExhaustive__ => unreachable!(), } }
identifier_body
runtime.rs
//! An extension to start the tokio runtime at the appropriate time. use std::fmt::Debug; use std::sync::Arc; use std::time::Duration; use failure::Error; use futures::future::{self, Future}; use log::{trace, warn}; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use spirit::bodies::InnerBody; use spirit::extension::{Extensible, Extension}; use spirit::{Builder, Spirit}; use structdoc::StructDoc; use structopt::StructOpt; use tokio::runtime; /// A body run on tokio runtime. /// /// When specifying custom tokio runtime through the [`Runtime`](enum.Runtime.html) extension, this /// is the future to be run inside the runtime. pub type TokioBody = Box<dyn Future<Item = (), Error = Error> + Send>; /// An extension to initialize a tokio runtime as part of spirit. /// /// The [`FutureInstaller`] in this crate (and as a result pipelines with [`Fragment`]s like /// [`TcpListen`], [`UdpListen`]) use this to make sure they have a runtime to handle the sockets /// on. /// /// If you prefer to specify configuration of the runtime to use, instead of the default one, you /// can create an instance of this extension yourself and register it *before registering any socket /// pipelines*, which will take precedence and the sockets will use the one provided by you. You /// must register it using the [`with_singleton`] method. /// /// Similarly, if all the pipelines are registered within the [`run`] method (or generally, after /// building is done), you need to install this manually *before* doing [`run`]. /// /// Note that the provided closures are `FnMut` mostly because `Box<FnOnce>` doesn't work. They /// will be called just once, so you can use `Option<T>` inside and consume the value by /// `take.unwrap()`. /// /// # Runtime configuration /// /// You may have noticed the callbacks here don't have access to configuration. If you intend to /// configure eg. the number of threads from user configuration, use the [`ThreadPoolConfig`] /// instead. 
/// /// # Future compatibility /// /// More variants may be added into the enum at any time. Such change will not be considered a /// breaking change. /// /// # Examples /// /// ``` /// extern crate failure; /// extern crate serde; /// #[macro_use] /// extern crate serde_derive; /// extern crate spirit; /// extern crate spirit_tokio; /// extern crate tokio; /// /// use std::sync::Arc; /// /// use failure::Error; /// use spirit::prelude::*; /// use spirit_tokio::{HandleListener, TcpListen}; /// use spirit_tokio::runtime::Runtime; /// use tokio::prelude::*; /// /// #[derive(Default, Deserialize)] /// struct Config { /// #[serde(default)] /// listening_socket: Vec<TcpListen>, /// } /// /// impl Config { /// fn listener(&self) -> Vec<TcpListen> { /// self.listening_socket.clone() /// } /// } /// /// fn connection() -> impl Future<Item = (), Error = Error> { /// future::ok(()) // Just a dummy implementation /// } /// /// fn main() { /// Spirit::<Empty, Config>::new() /// // Uses the current thread runtime instead of the default threadpool. This'll create /// // smaller number of threads. /// .with_singleton(Runtime::CurrentThread(Box::new(|_| ()))) /// .with( /// Pipeline::new("listener") /// .extract_cfg(Config::listener) /// .transform(HandleListener(|_conn, _cfg: &_| connection())) /// ) /// .run(|spirit| { /// # let spirit = Arc::clone(spirit); /// # std::thread::spawn(move || spirit.terminate()); /// Ok(()) /// }); /// } /// ``` /// /// [`TcpListen`]: crate::TcpListen /// [`UdpListen`]: crate::UdpListen /// [`FutureInstaller`]: crate::installer::FutureInstaller /// [`Fragment`]: spirit::Fragment /// [`run`]: spirit::SpiritBuilder::run /// [`with_singleton`]: spirit::extension::Extension::with_singleton pub enum Runtime { /// Use the threadpool runtime. /// /// The threadpool runtime is the default (both in tokio and spirit). /// /// This allows you to modify the builder prior to starting it, specifying custom options like /// number of threads. 
ThreadPool(Box<dyn FnMut(&mut runtime::Builder) + Send>), /// Use the current thread runtime. /// /// If you prefer to run everything in a single thread, use this variant. The provided closure /// can modify the builder prior to starting it. CurrentThread(Box<dyn FnMut(&mut runtime::current_thread::Builder) + Send>), /// Use completely custom runtime. /// /// The provided closure should start the runtime and execute the provided future on it, /// blocking until the runtime becomes empty. /// /// This allows combining arbitrary runtimes that are not directly supported by either tokio or /// spirit. Custom(Box<dyn FnMut(TokioBody) -> Result<(), Error> + Send>), #[doc(hidden)] __NonExhaustive__, // TODO: Support loading this from configuration? But it won't be possible to modify at // runtime, will it? } impl Default for Runtime { fn default() -> Self { Runtime::ThreadPool(Box::new(|_| {})) } } impl Runtime { fn execute<O, C>(self, spirit: &Arc<Spirit<O, C>>, inner: InnerBody) -> Result<(), Error> where C: DeserializeOwned + Send + Sync + 'static, O: StructOpt + Send + Sync + 'static, { let spirit = Arc::clone(spirit); let fut = future::lazy(move || { inner.run().map_err(move |e| { spirit.terminate(); e }) }); match self { Runtime::ThreadPool(mut mod_builder) => { let mut builder = runtime::Builder::new(); mod_builder(&mut builder); let mut runtime = builder.build()?; runtime.block_on(fut)?; runtime.block_on_all(future::lazy(|| Ok(()))) } Runtime::CurrentThread(mut mod_builder) => { let mut builder = runtime::current_thread::Builder::new(); mod_builder(&mut builder); let mut runtime = builder.build()?; runtime.block_on(fut)?; runtime.run().map_err(Error::from) } Runtime::Custom(mut callback) => callback(Box::new(fut)), Runtime::__NonExhaustive__ => unreachable!(), } } } impl<E> Extension<E> for Runtime where E: Extensible<Ok = E>, E::Config: DeserializeOwned + Send + Sync + 'static, E::Opts: StructOpt + Send + Sync + 'static, { fn apply(self, ext: E) -> Result<E, 
Error> { trace!("Wrapping in tokio runtime"); ext.run_around(|spirit, inner| self.execute(spirit, inner)) } } /// A configuration extension for the Tokio Threadpool runtime. /// /// Using the [`extension`][ThreadPoolConfig::extension] or the /// [`postprocess_extension`][ThreadPoolConfig::postprocess_extension] provides the [`Runtime`] to /// the spirit application. However, this allows reading the parameters of the threadpool (mostly /// number of threads) from the configuration instead of hardcoding it into the application. /// /// # Panics /// /// If this is inserted after something already registered a [`Runtime`]. /// /// # Examples /// /// ```rust /// use serde::Deserialize; /// use spirit::prelude::*; /// use spirit_tokio::runtime::ThreadPoolConfig; /// /// #[derive(Debug, Default, Deserialize)] /// struct Cfg { /// #[serde(default)] // Allow empty configuration with default runtime /// threadpool: ThreadPoolConfig, /// } /// /// impl Cfg { /// fn threadpool(&self) -> ThreadPoolConfig { /// self.threadpool.clone() /// } /// } /// /// fn main() { /// Spirit::<Empty, Cfg>::new() /// .with(ThreadPoolConfig::extension(Cfg::threadpool)) /// .run(|_| { /// // This runs inside a configured runtime /// Ok(()) /// }); /// } /// ``` #[derive( Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize, StructDoc, Ord, PartialOrd, Hash, )] #[serde(rename_all = "kebab-case")] pub struct ThreadPoolConfig { /// Maximum number of asynchronous worker threads. /// /// These do most of the work. There's little reason to set it to more than number of CPUs, but
/// Maximum number of blocking worker threads. /// /// These do tasks that take longer time. This includes file IO and CPU intensive tasks. /// /// If not set, defaults to 100. /// /// Often, the application doesn't start these threads as they might not always be needed. #[serde(skip_serializing_if = "Option::is_none")] pub blocking_threads: Option<usize>, #[serde( skip_serializing_if = "Option::is_none", serialize_with = "spirit::utils::serialize_opt_duration", deserialize_with = "spirit::utils::deserialize_opt_duration", default )] /// How long to keep an idle thread around. /// /// A thread will be shut down if it sits around idle for this long. The default (unset) is /// never to shut it down. /// /// Accepts human-parsable times, like „3days“ or „5s“. pub keep_alive: Option<Duration>, #[serde(skip)] _sentinel: (), } impl ThreadPoolConfig { /// The extension to be plugged in with [`with`]. /// /// See the [example](#examples). /// /// [`with`]: spirit::extension::Extension::with pub fn extension<O, C, F>(extract: F) -> impl Extension<Builder<O, C>> where F: Fn(&C) -> Self + Clone + Send + Sync + 'static, O: Debug + StructOpt + Send + Sync + 'static, C: DeserializeOwned + Send + Sync + 'static, { Self::postprocess_extension(extract, |_: &mut _| ()) } /// Similar to [`extension`][ThreadPoolConfig::extension], but allows further tweaking. /// /// This allows to tweak the [threadpool builder][runtime::Builder] after it was pre-configured /// by the configuration file. This might be desirable, for example, if the application also /// wants to install an [`after_start`][runtime::Builder::after_start] or set the stack size /// which either can't or don't make sense to configure by the user. 
pub fn postprocess_extension<O, C, F, P>(extract: F, post: P) -> impl Extension<Builder<O, C>> where F: Fn(&C) -> Self + Clone + Send + Sync + 'static, P: FnOnce(&mut runtime::Builder) + Send + 'static, O: Debug + StructOpt + Send + Sync + 'static, C: DeserializeOwned + Send + Sync + 'static, { let mut post = Some(post); |mut builder: Builder<O, C>| { assert!( builder.singleton::<Runtime>(), "Tokio Runtime already inserted" ); trace!("Inserting configurable tokio runtime"); builder .on_config({ let extract = extract.clone(); let mut first = None; move |_: &O, cfg: &Arc<C>| { let cfg = extract(cfg); if first.is_none() { first = Some(cfg); } else if first.as_ref() != Some(&cfg) { warn!("Tokio threadpool configuration can't be changed at runtime"); } } }) .run_around(|spirit, inner| { Runtime::ThreadPool({ let spirit = Arc::clone(spirit); Box::new(move |builder| { let cfg = extract(&spirit.config()); if let Some(threads) = cfg.async_threads { builder.core_threads(threads); } if let Some(threads) = cfg.blocking_threads { builder.blocking_threads(threads); } if let Some(alive) = cfg.keep_alive { builder.keep_alive(Some(alive)); } (post.take().unwrap())(builder) }) }) .execute(spirit, inner) }) } } }
/// it may make sense to set it lower. /// /// If not set, the application will start with number of CPUs available in the system. #[serde(skip_serializing_if = "Option::is_none")] pub async_threads: Option<usize>,
random_line_split
init.rs
use crate::{Error, Result, Logger, LogLevel, netlink, sys}; use crate::cmdline::CmdLine; use crate::sys::{sethostname, setsid, set_controlling_tty, mount_devtmpfs, mount_tmpfs, mkdir, umount, mount_sysfs, mount_procfs, mount_devpts, chown, chmod, create_directories, mount_overlay, move_mount, pivot_root, mount_9p, mount, waitpid, reboot, getpid, mount_tmpdir, mount_cgroup, umask, _chown}; use std::path::Path; use std::{fs, process, io, env}; use crate::service::{Service, ServiceLaunch}; use std::collections::BTreeMap; use std::io::Read; use std::net::Ipv4Addr; use std::str::FromStr; use crate::audio::AudioSupport; use crate::netlink::NetlinkSocket; const BASHRC: &str = r#" export PS1="airwolf > " umask 022 shopt -s checkwinsize alias ls='ls --color=auto' if [ -f /usr/share/bash-completion/bash_completion ]; then . /usr/share/bash-completion/bash_completion elif [ -f /etc/bash_completion ]; then . /etc/bash_completion fi "#; pub struct InitServer { hostname: String, homedir: String, cmdline: CmdLine, rootfs: RootFS, services: BTreeMap<u32, Service>, } impl InitServer { fn new(hostname: &str) -> Result<InitServer> { Self::check_pid1()?; let hostname = hostname.to_string(); let cmdline = CmdLine::load()?; let homedir = cmdline.lookup("phinit.home") .unwrap_or("/home/user".to_string()); let rootfs = RootFS::load(&cmdline)?; let services = BTreeMap::new(); Ok(InitServer { hostname, homedir, cmdline, rootfs, services, }) } pub fn create(hostname: &str) -> Result<InitServer> { let init = Self::new(hostname)?; init.initialize()?; Ok(init) } fn initialize(&self) -> Result<()> { self.set_loglevel(); umask(0); sethostname(&self.hostname)?; setsid()?; set_controlling_tty(0, true)?; Ok(()) } fn check_pid1() -> Result<()> { if getpid() == 1 { Ok(()) } else { Err(Error::Pid1) } } fn homedir(&self) -> &str { &self.homedir } pub fn set_loglevel(&self) { if self.cmdline.has_var("phinit.verbose") { Logger::set_log_level(LogLevel::Verbose); } else if 
self.cmdline.has_var("phinit.debug") { Logger::set_log_level(LogLevel::Debug); } else { Logger::set_log_level(LogLevel::Info); } } pub fn setup_filesystem(&self) -> Result<()> { sys::set_umask(0o022); //mount_devtmpfs()?; mount_tmpfs("/tmp")?; mkdir("/tmp/sysroot")?; if self.rootfs.read_only() { self.setup_readonly_root()?; } else { self.setup_writeable_root()?; } fs::write("/etc/hosts", format!("127.0.0.1 {} localhost\n", self.hostname)) .map_err(Error::WriteEtcHosts)?; umount("/opt/ph/tmp")?; umount("/opt/ph/proc")?; umount("/opt/ph/dev")?; mount_sysfs()?; mount_cgroup()?; mount_procfs()?; mount_devtmpfs()?; mount_devpts()?; mount_tmpfs("/run")?; mount_tmpdir("/tmp")?; mkdir("/dev/shm")?; mount_tmpdir("/dev/shm")?; mkdir("/run/user")?; mkdir("/run/user/1000")?; chown("/run/user/1000", 1000,1000)?; AudioSupport::setup()?; self.mount_home_if_exists()?; Logger::set_file_output("/run/phinit.log") .map_err(Error::OpenLogFailed)?; Ok(()) } fn setup_readonly_root(&self) -> Result<()> { create_directories(&[ "/tmp/ro", "/tmp/rw", "/tmp/rw/upper", "/tmp/rw/work", ])?; mount_tmpfs("/tmp/rw")?; create_directories(&["/tmp/rw/upper", "/tmp/rw/work"])?; self.rootfs.mount("/tmp/ro")?; mount_overlay("/tmp/sysroot", "lowerdir=/tmp/ro,upperdir=/tmp/rw/upper,workdir=/tmp/rw/work")?; create_directories(&[ "/tmp/sysroot/ro", "/tmp/sysroot/rw" ])?; move_mount("/tmp/ro", "/tmp/sysroot/ro")?; move_mount("/tmp/rw", "/tmp/sysroot/rw")?; let toolsdir = Path::new("/tmp/sysroot/opt/ph"); if !toolsdir.exists() { fs::create_dir_all(toolsdir) .map_err(|e| Error::MkDir(String::from("/tmp/sysroot/opt/ph"), e))?; } pivot_root("/tmp/sysroot", "/tmp/sysroot/opt/ph")?; Ok(()) } fn setup_writeable_root(&self) -> Result<()> { self.rootfs.mount("/tmp/sysroot")?; let toolsdir = Path::new("/tmp/sysroot/opt/ph"); if !toolsdir.exists() { fs::create_dir_all(toolsdir) .map_err(|e| Error::MkDir(String::from("/tmp/sysroot/opt/ph"), e))?; } pivot_root("/tmp/sysroot", "/tmp/sysroot/opt/ph")?; Ok(()) } fn 
has_9p_home(&self) -> bool { // XXX // /sys/bus/virtio/drivers/9pnet_virtio/virtio*/mount_tag true } pub fn mount_home_if_exists(&self) -> Result<()> { if self.has_9p_home() { let homedir = Path::new(self.homedir()); if !homedir.exists() { mkdir(homedir)?; } mount_9p("home", self.homedir())?; } Ok(()) } pub fn run_daemons(&mut self) -> Result<()> { if !Path::new("/dev/wl0").exists() { return Ok(()); } chmod("/dev/wl0", 0o666)?; let dbus = ServiceLaunch::new("dbus-daemon", "/usr/bin/dbus-daemon") .base_environment() .uidgid(1000,1000) .env("HOME", self.homedir()) .env("NO_AT_BRIDGE", "1") .env("QT_ACCESSIBILITY", "1") .env("SHELL", "/bin/bash") .env("USER", "user") .env("WAYLAND_DISPLAY", "wayland-0") .arg("--session") .arg("--nosyslog") .arg("--address=unix:path=/run/user/1000/bus") .arg("--print-address") .pipe_output() .launch()?; self.services.insert(dbus.pid(), dbus); let sommelier = ServiceLaunch::new("sommelier", "/opt/ph/usr/bin/sommelier") .base_environment() .uidgid(1000,1000) .arg("--parent") .pipe_output() .launch()?; self.services.insert(sommelier.pid(), sommelier); if self.cmdline.has_var("phinit.no_x11") { return Ok(()); } mkdir("/tmp/.X11-unix")?; chmod("/tmp/.X11-unix", 0o1777)?; self.write_xauth().map_err(Error::XAuthFail)?; let sommelierx = ServiceLaunch::new("sommelier-x", "/opt/ph/usr/bin/sommelier") .base_environment() .uidgid(1000,1000) .arg("-X") .arg("--x-display=0") .arg("--no-exit-with-child") .arg(format!("--x-auth={}/.Xauthority", self.homedir())) .arg("/bin/true") .pipe_output() .launch()?; self.services.insert(sommelierx.pid(), sommelierx); Ok(()) } pub fn setup_network(&self) -> Result<()> { if let Some(val) = self.cmdline.lookup("phinit.ip") { if let Ok(ip) = Ipv4Addr::from_str(&val) { self.configure_network(ip) .map_err(Error::NetworkConfigure)?; } sys::bind_mount("/opt/ph/etc/resolv.conf", "/etc/resolv.conf")?; } Ok(()) } fn configure_network(&self, ip: Ipv4Addr) -> netlink::Result<()>
fn write_xauth(&self) -> io::Result<()> { let xauth_path = format!("{}/.Xauthority", self.homedir()); let mut randbuf = [0; 16]; let mut file = fs::File::open("/dev/urandom")?; file.read_exact(&mut randbuf)?; let mut v: Vec<u8> = Vec::new(); // ??? v.extend_from_slice(&[0x01, 0x00]); // "airwolf".len() v.extend_from_slice(&[0x00, 0x07]); v.extend_from_slice(b"airwolf"); // "0".len() (DISPLAY=:0) v.extend_from_slice(&[0x00, 0x01]); v.extend_from_slice(b"0"); // "MIT-MAGIC-COOKIE-a".len() v.extend_from_slice(&[0x00, 0x12]); v.extend_from_slice(b"MIT-MAGIC-COOKIE-1"); // randbuf.len() v.extend_from_slice(&[0x00, 0x10]); v.extend_from_slice(&randbuf); fs::write(&xauth_path, v)?; _chown(&xauth_path, 1000, 1000)?; Ok(()) } pub fn launch_console_shell(&mut self, splash: &'static str) -> Result<()> { fs::write("/run/bashrc", BASHRC).map_err(Error::WriteBashrc)?; let root = self.cmdline.has_var("phinit.rootshell"); let realm = self.cmdline.lookup("phinit.realm"); let home = if root { "/".to_string() } else { self.homedir().to_string() }; let shell = ServiceLaunch::new_shell(root, &home, realm) .arg("--rcfile").arg("/run/bashrc") .launch_with_preexec(move || { // set_controlling_tty(0, true)?; env::set_current_dir(&home)?; println!("{}", splash); Ok(()) })?; self.services.insert(shell.pid(), shell); Ok(()) } fn wait_for_next_child(&mut self) -> Result<()> { if let Some(child) = self.wait_for_child() { info!("Service exited: {}", child.name()); if child.name() == "shell" { reboot(libc::RB_AUTOBOOT) .map_err(Error::RebootFailed)?; } } Ok(()) } pub fn run(&mut self) -> Result<()> { loop { self.wait_for_next_child()?; } } fn handle_waitpid_err(err: io::Error) -> ! 
{ if let Some(errno) = err.raw_os_error() { if errno == libc::ECHILD { if let Err(err) = reboot(libc::RB_AUTOBOOT) { warn!("reboot() failed: {:?}", err); process::exit(-1); } } } warn!("error on waitpid: {:?}", err); process::exit(-1); } fn wait_for_child(&mut self) -> Option<Service> { match waitpid(-1, 0) { Ok((pid,_status)) => self.services.remove(&(pid as u32)), Err(err) => Self::handle_waitpid_err(err) } } } struct RootFS { root: String, fstype: String, rootflags: Option<String>, readonly: bool, } impl RootFS { fn load(cmdline: &CmdLine) -> Result<Self> { let root = cmdline.lookup("phinit.root") .ok_or(Error::NoRootVar)?; let fstype = cmdline.lookup("phinit.rootfstype") .ok_or(Error::NoRootFsVar)?; let rootflags = cmdline.lookup("phinit.rootflags"); let readonly = !cmdline.has_var("phinit.root_rw"); Ok(RootFS { root, fstype, rootflags, readonly }) } fn read_only(&self) -> bool { self.readonly } fn mount(&self, target: &str) -> Result<()> { let options = self.rootflags.as_ref().map(|s| s.as_str()); let mut flags = libc::MS_NOATIME; if self.readonly { flags |= libc::MS_RDONLY; } mount(&self.root, target, &self.fstype, flags, options) .map_err(|e| Error::RootFsMount(self.root.clone(), e)) } }
{ let mut octets = ip.octets(); octets[3] = 1; let gw = Ipv4Addr::from(octets); let nl = NetlinkSocket::open()?; if !nl.interface_exists("eth0") { } nl.add_ip_address("eth0", ip, 24)?; nl.set_interface_up("eth0")?; nl.add_default_route(gw)?; Ok(()) }
identifier_body
init.rs
use crate::{Error, Result, Logger, LogLevel, netlink, sys}; use crate::cmdline::CmdLine; use crate::sys::{sethostname, setsid, set_controlling_tty, mount_devtmpfs, mount_tmpfs, mkdir, umount, mount_sysfs, mount_procfs, mount_devpts, chown, chmod, create_directories, mount_overlay, move_mount, pivot_root, mount_9p, mount, waitpid, reboot, getpid, mount_tmpdir, mount_cgroup, umask, _chown}; use std::path::Path; use std::{fs, process, io, env}; use crate::service::{Service, ServiceLaunch}; use std::collections::BTreeMap; use std::io::Read; use std::net::Ipv4Addr; use std::str::FromStr; use crate::audio::AudioSupport; use crate::netlink::NetlinkSocket; const BASHRC: &str = r#" export PS1="airwolf > " umask 022 shopt -s checkwinsize alias ls='ls --color=auto' if [ -f /usr/share/bash-completion/bash_completion ]; then . /usr/share/bash-completion/bash_completion elif [ -f /etc/bash_completion ]; then . /etc/bash_completion fi "#; pub struct InitServer { hostname: String, homedir: String, cmdline: CmdLine, rootfs: RootFS, services: BTreeMap<u32, Service>, } impl InitServer { fn new(hostname: &str) -> Result<InitServer> { Self::check_pid1()?; let hostname = hostname.to_string(); let cmdline = CmdLine::load()?; let homedir = cmdline.lookup("phinit.home") .unwrap_or("/home/user".to_string()); let rootfs = RootFS::load(&cmdline)?; let services = BTreeMap::new(); Ok(InitServer { hostname, homedir, cmdline, rootfs, services, }) } pub fn create(hostname: &str) -> Result<InitServer> { let init = Self::new(hostname)?; init.initialize()?; Ok(init) } fn initialize(&self) -> Result<()> { self.set_loglevel(); umask(0); sethostname(&self.hostname)?; setsid()?; set_controlling_tty(0, true)?; Ok(()) } fn check_pid1() -> Result<()> { if getpid() == 1 { Ok(()) } else { Err(Error::Pid1) } } fn homedir(&self) -> &str { &self.homedir } pub fn set_loglevel(&self) { if self.cmdline.has_var("phinit.verbose") { Logger::set_log_level(LogLevel::Verbose); } else if 
self.cmdline.has_var("phinit.debug") { Logger::set_log_level(LogLevel::Debug); } else
} pub fn setup_filesystem(&self) -> Result<()> { sys::set_umask(0o022); //mount_devtmpfs()?; mount_tmpfs("/tmp")?; mkdir("/tmp/sysroot")?; if self.rootfs.read_only() { self.setup_readonly_root()?; } else { self.setup_writeable_root()?; } fs::write("/etc/hosts", format!("127.0.0.1 {} localhost\n", self.hostname)) .map_err(Error::WriteEtcHosts)?; umount("/opt/ph/tmp")?; umount("/opt/ph/proc")?; umount("/opt/ph/dev")?; mount_sysfs()?; mount_cgroup()?; mount_procfs()?; mount_devtmpfs()?; mount_devpts()?; mount_tmpfs("/run")?; mount_tmpdir("/tmp")?; mkdir("/dev/shm")?; mount_tmpdir("/dev/shm")?; mkdir("/run/user")?; mkdir("/run/user/1000")?; chown("/run/user/1000", 1000,1000)?; AudioSupport::setup()?; self.mount_home_if_exists()?; Logger::set_file_output("/run/phinit.log") .map_err(Error::OpenLogFailed)?; Ok(()) } fn setup_readonly_root(&self) -> Result<()> { create_directories(&[ "/tmp/ro", "/tmp/rw", "/tmp/rw/upper", "/tmp/rw/work", ])?; mount_tmpfs("/tmp/rw")?; create_directories(&["/tmp/rw/upper", "/tmp/rw/work"])?; self.rootfs.mount("/tmp/ro")?; mount_overlay("/tmp/sysroot", "lowerdir=/tmp/ro,upperdir=/tmp/rw/upper,workdir=/tmp/rw/work")?; create_directories(&[ "/tmp/sysroot/ro", "/tmp/sysroot/rw" ])?; move_mount("/tmp/ro", "/tmp/sysroot/ro")?; move_mount("/tmp/rw", "/tmp/sysroot/rw")?; let toolsdir = Path::new("/tmp/sysroot/opt/ph"); if !toolsdir.exists() { fs::create_dir_all(toolsdir) .map_err(|e| Error::MkDir(String::from("/tmp/sysroot/opt/ph"), e))?; } pivot_root("/tmp/sysroot", "/tmp/sysroot/opt/ph")?; Ok(()) } fn setup_writeable_root(&self) -> Result<()> { self.rootfs.mount("/tmp/sysroot")?; let toolsdir = Path::new("/tmp/sysroot/opt/ph"); if !toolsdir.exists() { fs::create_dir_all(toolsdir) .map_err(|e| Error::MkDir(String::from("/tmp/sysroot/opt/ph"), e))?; } pivot_root("/tmp/sysroot", "/tmp/sysroot/opt/ph")?; Ok(()) } fn has_9p_home(&self) -> bool { // XXX // /sys/bus/virtio/drivers/9pnet_virtio/virtio*/mount_tag true } pub fn mount_home_if_exists(&self) 
-> Result<()> { if self.has_9p_home() { let homedir = Path::new(self.homedir()); if !homedir.exists() { mkdir(homedir)?; } mount_9p("home", self.homedir())?; } Ok(()) } pub fn run_daemons(&mut self) -> Result<()> { if !Path::new("/dev/wl0").exists() { return Ok(()); } chmod("/dev/wl0", 0o666)?; let dbus = ServiceLaunch::new("dbus-daemon", "/usr/bin/dbus-daemon") .base_environment() .uidgid(1000,1000) .env("HOME", self.homedir()) .env("NO_AT_BRIDGE", "1") .env("QT_ACCESSIBILITY", "1") .env("SHELL", "/bin/bash") .env("USER", "user") .env("WAYLAND_DISPLAY", "wayland-0") .arg("--session") .arg("--nosyslog") .arg("--address=unix:path=/run/user/1000/bus") .arg("--print-address") .pipe_output() .launch()?; self.services.insert(dbus.pid(), dbus); let sommelier = ServiceLaunch::new("sommelier", "/opt/ph/usr/bin/sommelier") .base_environment() .uidgid(1000,1000) .arg("--parent") .pipe_output() .launch()?; self.services.insert(sommelier.pid(), sommelier); if self.cmdline.has_var("phinit.no_x11") { return Ok(()); } mkdir("/tmp/.X11-unix")?; chmod("/tmp/.X11-unix", 0o1777)?; self.write_xauth().map_err(Error::XAuthFail)?; let sommelierx = ServiceLaunch::new("sommelier-x", "/opt/ph/usr/bin/sommelier") .base_environment() .uidgid(1000,1000) .arg("-X") .arg("--x-display=0") .arg("--no-exit-with-child") .arg(format!("--x-auth={}/.Xauthority", self.homedir())) .arg("/bin/true") .pipe_output() .launch()?; self.services.insert(sommelierx.pid(), sommelierx); Ok(()) } pub fn setup_network(&self) -> Result<()> { if let Some(val) = self.cmdline.lookup("phinit.ip") { if let Ok(ip) = Ipv4Addr::from_str(&val) { self.configure_network(ip) .map_err(Error::NetworkConfigure)?; } sys::bind_mount("/opt/ph/etc/resolv.conf", "/etc/resolv.conf")?; } Ok(()) } fn configure_network(&self, ip: Ipv4Addr) -> netlink::Result<()> { let mut octets = ip.octets(); octets[3] = 1; let gw = Ipv4Addr::from(octets); let nl = NetlinkSocket::open()?; if !nl.interface_exists("eth0") { } nl.add_ip_address("eth0", ip, 
24)?; nl.set_interface_up("eth0")?; nl.add_default_route(gw)?; Ok(()) } fn write_xauth(&self) -> io::Result<()> { let xauth_path = format!("{}/.Xauthority", self.homedir()); let mut randbuf = [0; 16]; let mut file = fs::File::open("/dev/urandom")?; file.read_exact(&mut randbuf)?; let mut v: Vec<u8> = Vec::new(); // ??? v.extend_from_slice(&[0x01, 0x00]); // "airwolf".len() v.extend_from_slice(&[0x00, 0x07]); v.extend_from_slice(b"airwolf"); // "0".len() (DISPLAY=:0) v.extend_from_slice(&[0x00, 0x01]); v.extend_from_slice(b"0"); // "MIT-MAGIC-COOKIE-a".len() v.extend_from_slice(&[0x00, 0x12]); v.extend_from_slice(b"MIT-MAGIC-COOKIE-1"); // randbuf.len() v.extend_from_slice(&[0x00, 0x10]); v.extend_from_slice(&randbuf); fs::write(&xauth_path, v)?; _chown(&xauth_path, 1000, 1000)?; Ok(()) } pub fn launch_console_shell(&mut self, splash: &'static str) -> Result<()> { fs::write("/run/bashrc", BASHRC).map_err(Error::WriteBashrc)?; let root = self.cmdline.has_var("phinit.rootshell"); let realm = self.cmdline.lookup("phinit.realm"); let home = if root { "/".to_string() } else { self.homedir().to_string() }; let shell = ServiceLaunch::new_shell(root, &home, realm) .arg("--rcfile").arg("/run/bashrc") .launch_with_preexec(move || { // set_controlling_tty(0, true)?; env::set_current_dir(&home)?; println!("{}", splash); Ok(()) })?; self.services.insert(shell.pid(), shell); Ok(()) } fn wait_for_next_child(&mut self) -> Result<()> { if let Some(child) = self.wait_for_child() { info!("Service exited: {}", child.name()); if child.name() == "shell" { reboot(libc::RB_AUTOBOOT) .map_err(Error::RebootFailed)?; } } Ok(()) } pub fn run(&mut self) -> Result<()> { loop { self.wait_for_next_child()?; } } fn handle_waitpid_err(err: io::Error) -> ! 
{ if let Some(errno) = err.raw_os_error() { if errno == libc::ECHILD { if let Err(err) = reboot(libc::RB_AUTOBOOT) { warn!("reboot() failed: {:?}", err); process::exit(-1); } } } warn!("error on waitpid: {:?}", err); process::exit(-1); } fn wait_for_child(&mut self) -> Option<Service> { match waitpid(-1, 0) { Ok((pid,_status)) => self.services.remove(&(pid as u32)), Err(err) => Self::handle_waitpid_err(err) } } } struct RootFS { root: String, fstype: String, rootflags: Option<String>, readonly: bool, } impl RootFS { fn load(cmdline: &CmdLine) -> Result<Self> { let root = cmdline.lookup("phinit.root") .ok_or(Error::NoRootVar)?; let fstype = cmdline.lookup("phinit.rootfstype") .ok_or(Error::NoRootFsVar)?; let rootflags = cmdline.lookup("phinit.rootflags"); let readonly = !cmdline.has_var("phinit.root_rw"); Ok(RootFS { root, fstype, rootflags, readonly }) } fn read_only(&self) -> bool { self.readonly } fn mount(&self, target: &str) -> Result<()> { let options = self.rootflags.as_ref().map(|s| s.as_str()); let mut flags = libc::MS_NOATIME; if self.readonly { flags |= libc::MS_RDONLY; } mount(&self.root, target, &self.fstype, flags, options) .map_err(|e| Error::RootFsMount(self.root.clone(), e)) } }
{ Logger::set_log_level(LogLevel::Info); }
conditional_block
init.rs
use crate::{Error, Result, Logger, LogLevel, netlink, sys}; use crate::cmdline::CmdLine; use crate::sys::{sethostname, setsid, set_controlling_tty, mount_devtmpfs, mount_tmpfs, mkdir, umount, mount_sysfs, mount_procfs, mount_devpts, chown, chmod, create_directories, mount_overlay, move_mount, pivot_root, mount_9p, mount, waitpid, reboot, getpid, mount_tmpdir, mount_cgroup, umask, _chown}; use std::path::Path; use std::{fs, process, io, env}; use crate::service::{Service, ServiceLaunch}; use std::collections::BTreeMap; use std::io::Read; use std::net::Ipv4Addr; use std::str::FromStr; use crate::audio::AudioSupport; use crate::netlink::NetlinkSocket; const BASHRC: &str = r#" export PS1="airwolf > " umask 022 shopt -s checkwinsize alias ls='ls --color=auto' if [ -f /usr/share/bash-completion/bash_completion ]; then . /usr/share/bash-completion/bash_completion elif [ -f /etc/bash_completion ]; then . /etc/bash_completion fi "#; pub struct InitServer { hostname: String, homedir: String, cmdline: CmdLine, rootfs: RootFS, services: BTreeMap<u32, Service>, } impl InitServer { fn new(hostname: &str) -> Result<InitServer> { Self::check_pid1()?; let hostname = hostname.to_string(); let cmdline = CmdLine::load()?; let homedir = cmdline.lookup("phinit.home") .unwrap_or("/home/user".to_string()); let rootfs = RootFS::load(&cmdline)?; let services = BTreeMap::new(); Ok(InitServer { hostname, homedir, cmdline, rootfs, services, }) } pub fn create(hostname: &str) -> Result<InitServer> { let init = Self::new(hostname)?; init.initialize()?; Ok(init) } fn initialize(&self) -> Result<()> { self.set_loglevel(); umask(0); sethostname(&self.hostname)?; setsid()?; set_controlling_tty(0, true)?; Ok(()) } fn check_pid1() -> Result<()> { if getpid() == 1 { Ok(()) } else { Err(Error::Pid1) } } fn homedir(&self) -> &str { &self.homedir } pub fn set_loglevel(&self) { if self.cmdline.has_var("phinit.verbose") { Logger::set_log_level(LogLevel::Verbose); } else if 
self.cmdline.has_var("phinit.debug") { Logger::set_log_level(LogLevel::Debug); } else { Logger::set_log_level(LogLevel::Info); } } pub fn setup_filesystem(&self) -> Result<()> { sys::set_umask(0o022); //mount_devtmpfs()?; mount_tmpfs("/tmp")?; mkdir("/tmp/sysroot")?; if self.rootfs.read_only() { self.setup_readonly_root()?; } else { self.setup_writeable_root()?; } fs::write("/etc/hosts", format!("127.0.0.1 {} localhost\n", self.hostname)) .map_err(Error::WriteEtcHosts)?; umount("/opt/ph/tmp")?; umount("/opt/ph/proc")?; umount("/opt/ph/dev")?; mount_sysfs()?; mount_cgroup()?; mount_procfs()?; mount_devtmpfs()?; mount_devpts()?; mount_tmpfs("/run")?; mount_tmpdir("/tmp")?; mkdir("/dev/shm")?; mount_tmpdir("/dev/shm")?; mkdir("/run/user")?; mkdir("/run/user/1000")?; chown("/run/user/1000", 1000,1000)?; AudioSupport::setup()?; self.mount_home_if_exists()?; Logger::set_file_output("/run/phinit.log") .map_err(Error::OpenLogFailed)?; Ok(()) } fn setup_readonly_root(&self) -> Result<()> { create_directories(&[ "/tmp/ro", "/tmp/rw", "/tmp/rw/upper", "/tmp/rw/work", ])?; mount_tmpfs("/tmp/rw")?; create_directories(&["/tmp/rw/upper", "/tmp/rw/work"])?; self.rootfs.mount("/tmp/ro")?; mount_overlay("/tmp/sysroot", "lowerdir=/tmp/ro,upperdir=/tmp/rw/upper,workdir=/tmp/rw/work")?; create_directories(&[ "/tmp/sysroot/ro", "/tmp/sysroot/rw" ])?; move_mount("/tmp/ro", "/tmp/sysroot/ro")?; move_mount("/tmp/rw", "/tmp/sysroot/rw")?; let toolsdir = Path::new("/tmp/sysroot/opt/ph"); if !toolsdir.exists() { fs::create_dir_all(toolsdir) .map_err(|e| Error::MkDir(String::from("/tmp/sysroot/opt/ph"), e))?; } pivot_root("/tmp/sysroot", "/tmp/sysroot/opt/ph")?; Ok(()) } fn setup_writeable_root(&self) -> Result<()> { self.rootfs.mount("/tmp/sysroot")?; let toolsdir = Path::new("/tmp/sysroot/opt/ph"); if !toolsdir.exists() { fs::create_dir_all(toolsdir) .map_err(|e| Error::MkDir(String::from("/tmp/sysroot/opt/ph"), e))?; } pivot_root("/tmp/sysroot", "/tmp/sysroot/opt/ph")?; Ok(()) } fn 
has_9p_home(&self) -> bool { // XXX // /sys/bus/virtio/drivers/9pnet_virtio/virtio*/mount_tag true } pub fn mount_home_if_exists(&self) -> Result<()> { if self.has_9p_home() { let homedir = Path::new(self.homedir()); if !homedir.exists() { mkdir(homedir)?; } mount_9p("home", self.homedir())?; } Ok(()) } pub fn run_daemons(&mut self) -> Result<()> { if !Path::new("/dev/wl0").exists() { return Ok(());
chmod("/dev/wl0", 0o666)?; let dbus = ServiceLaunch::new("dbus-daemon", "/usr/bin/dbus-daemon") .base_environment() .uidgid(1000,1000) .env("HOME", self.homedir()) .env("NO_AT_BRIDGE", "1") .env("QT_ACCESSIBILITY", "1") .env("SHELL", "/bin/bash") .env("USER", "user") .env("WAYLAND_DISPLAY", "wayland-0") .arg("--session") .arg("--nosyslog") .arg("--address=unix:path=/run/user/1000/bus") .arg("--print-address") .pipe_output() .launch()?; self.services.insert(dbus.pid(), dbus); let sommelier = ServiceLaunch::new("sommelier", "/opt/ph/usr/bin/sommelier") .base_environment() .uidgid(1000,1000) .arg("--parent") .pipe_output() .launch()?; self.services.insert(sommelier.pid(), sommelier); if self.cmdline.has_var("phinit.no_x11") { return Ok(()); } mkdir("/tmp/.X11-unix")?; chmod("/tmp/.X11-unix", 0o1777)?; self.write_xauth().map_err(Error::XAuthFail)?; let sommelierx = ServiceLaunch::new("sommelier-x", "/opt/ph/usr/bin/sommelier") .base_environment() .uidgid(1000,1000) .arg("-X") .arg("--x-display=0") .arg("--no-exit-with-child") .arg(format!("--x-auth={}/.Xauthority", self.homedir())) .arg("/bin/true") .pipe_output() .launch()?; self.services.insert(sommelierx.pid(), sommelierx); Ok(()) } pub fn setup_network(&self) -> Result<()> { if let Some(val) = self.cmdline.lookup("phinit.ip") { if let Ok(ip) = Ipv4Addr::from_str(&val) { self.configure_network(ip) .map_err(Error::NetworkConfigure)?; } sys::bind_mount("/opt/ph/etc/resolv.conf", "/etc/resolv.conf")?; } Ok(()) } fn configure_network(&self, ip: Ipv4Addr) -> netlink::Result<()> { let mut octets = ip.octets(); octets[3] = 1; let gw = Ipv4Addr::from(octets); let nl = NetlinkSocket::open()?; if !nl.interface_exists("eth0") { } nl.add_ip_address("eth0", ip, 24)?; nl.set_interface_up("eth0")?; nl.add_default_route(gw)?; Ok(()) } fn write_xauth(&self) -> io::Result<()> { let xauth_path = format!("{}/.Xauthority", self.homedir()); let mut randbuf = [0; 16]; let mut file = fs::File::open("/dev/urandom")?; file.read_exact(&mut 
randbuf)?; let mut v: Vec<u8> = Vec::new(); // ??? v.extend_from_slice(&[0x01, 0x00]); // "airwolf".len() v.extend_from_slice(&[0x00, 0x07]); v.extend_from_slice(b"airwolf"); // "0".len() (DISPLAY=:0) v.extend_from_slice(&[0x00, 0x01]); v.extend_from_slice(b"0"); // "MIT-MAGIC-COOKIE-a".len() v.extend_from_slice(&[0x00, 0x12]); v.extend_from_slice(b"MIT-MAGIC-COOKIE-1"); // randbuf.len() v.extend_from_slice(&[0x00, 0x10]); v.extend_from_slice(&randbuf); fs::write(&xauth_path, v)?; _chown(&xauth_path, 1000, 1000)?; Ok(()) } pub fn launch_console_shell(&mut self, splash: &'static str) -> Result<()> { fs::write("/run/bashrc", BASHRC).map_err(Error::WriteBashrc)?; let root = self.cmdline.has_var("phinit.rootshell"); let realm = self.cmdline.lookup("phinit.realm"); let home = if root { "/".to_string() } else { self.homedir().to_string() }; let shell = ServiceLaunch::new_shell(root, &home, realm) .arg("--rcfile").arg("/run/bashrc") .launch_with_preexec(move || { // set_controlling_tty(0, true)?; env::set_current_dir(&home)?; println!("{}", splash); Ok(()) })?; self.services.insert(shell.pid(), shell); Ok(()) } fn wait_for_next_child(&mut self) -> Result<()> { if let Some(child) = self.wait_for_child() { info!("Service exited: {}", child.name()); if child.name() == "shell" { reboot(libc::RB_AUTOBOOT) .map_err(Error::RebootFailed)?; } } Ok(()) } pub fn run(&mut self) -> Result<()> { loop { self.wait_for_next_child()?; } } fn handle_waitpid_err(err: io::Error) -> ! 
{ if let Some(errno) = err.raw_os_error() { if errno == libc::ECHILD { if let Err(err) = reboot(libc::RB_AUTOBOOT) { warn!("reboot() failed: {:?}", err); process::exit(-1); } } } warn!("error on waitpid: {:?}", err); process::exit(-1); } fn wait_for_child(&mut self) -> Option<Service> { match waitpid(-1, 0) { Ok((pid,_status)) => self.services.remove(&(pid as u32)), Err(err) => Self::handle_waitpid_err(err) } } } struct RootFS { root: String, fstype: String, rootflags: Option<String>, readonly: bool, } impl RootFS { fn load(cmdline: &CmdLine) -> Result<Self> { let root = cmdline.lookup("phinit.root") .ok_or(Error::NoRootVar)?; let fstype = cmdline.lookup("phinit.rootfstype") .ok_or(Error::NoRootFsVar)?; let rootflags = cmdline.lookup("phinit.rootflags"); let readonly = !cmdline.has_var("phinit.root_rw"); Ok(RootFS { root, fstype, rootflags, readonly }) } fn read_only(&self) -> bool { self.readonly } fn mount(&self, target: &str) -> Result<()> { let options = self.rootflags.as_ref().map(|s| s.as_str()); let mut flags = libc::MS_NOATIME; if self.readonly { flags |= libc::MS_RDONLY; } mount(&self.root, target, &self.fstype, flags, options) .map_err(|e| Error::RootFsMount(self.root.clone(), e)) } }
}
random_line_split
init.rs
use crate::{Error, Result, Logger, LogLevel, netlink, sys}; use crate::cmdline::CmdLine; use crate::sys::{sethostname, setsid, set_controlling_tty, mount_devtmpfs, mount_tmpfs, mkdir, umount, mount_sysfs, mount_procfs, mount_devpts, chown, chmod, create_directories, mount_overlay, move_mount, pivot_root, mount_9p, mount, waitpid, reboot, getpid, mount_tmpdir, mount_cgroup, umask, _chown}; use std::path::Path; use std::{fs, process, io, env}; use crate::service::{Service, ServiceLaunch}; use std::collections::BTreeMap; use std::io::Read; use std::net::Ipv4Addr; use std::str::FromStr; use crate::audio::AudioSupport; use crate::netlink::NetlinkSocket; const BASHRC: &str = r#" export PS1="airwolf > " umask 022 shopt -s checkwinsize alias ls='ls --color=auto' if [ -f /usr/share/bash-completion/bash_completion ]; then . /usr/share/bash-completion/bash_completion elif [ -f /etc/bash_completion ]; then . /etc/bash_completion fi "#; pub struct InitServer { hostname: String, homedir: String, cmdline: CmdLine, rootfs: RootFS, services: BTreeMap<u32, Service>, } impl InitServer { fn new(hostname: &str) -> Result<InitServer> { Self::check_pid1()?; let hostname = hostname.to_string(); let cmdline = CmdLine::load()?; let homedir = cmdline.lookup("phinit.home") .unwrap_or("/home/user".to_string()); let rootfs = RootFS::load(&cmdline)?; let services = BTreeMap::new(); Ok(InitServer { hostname, homedir, cmdline, rootfs, services, }) } pub fn create(hostname: &str) -> Result<InitServer> { let init = Self::new(hostname)?; init.initialize()?; Ok(init) } fn initialize(&self) -> Result<()> { self.set_loglevel(); umask(0); sethostname(&self.hostname)?; setsid()?; set_controlling_tty(0, true)?; Ok(()) } fn check_pid1() -> Result<()> { if getpid() == 1 { Ok(()) } else { Err(Error::Pid1) } } fn homedir(&self) -> &str { &self.homedir } pub fn set_loglevel(&self) { if self.cmdline.has_var("phinit.verbose") { Logger::set_log_level(LogLevel::Verbose); } else if 
self.cmdline.has_var("phinit.debug") { Logger::set_log_level(LogLevel::Debug); } else { Logger::set_log_level(LogLevel::Info); } } pub fn
(&self) -> Result<()> { sys::set_umask(0o022); //mount_devtmpfs()?; mount_tmpfs("/tmp")?; mkdir("/tmp/sysroot")?; if self.rootfs.read_only() { self.setup_readonly_root()?; } else { self.setup_writeable_root()?; } fs::write("/etc/hosts", format!("127.0.0.1 {} localhost\n", self.hostname)) .map_err(Error::WriteEtcHosts)?; umount("/opt/ph/tmp")?; umount("/opt/ph/proc")?; umount("/opt/ph/dev")?; mount_sysfs()?; mount_cgroup()?; mount_procfs()?; mount_devtmpfs()?; mount_devpts()?; mount_tmpfs("/run")?; mount_tmpdir("/tmp")?; mkdir("/dev/shm")?; mount_tmpdir("/dev/shm")?; mkdir("/run/user")?; mkdir("/run/user/1000")?; chown("/run/user/1000", 1000,1000)?; AudioSupport::setup()?; self.mount_home_if_exists()?; Logger::set_file_output("/run/phinit.log") .map_err(Error::OpenLogFailed)?; Ok(()) } fn setup_readonly_root(&self) -> Result<()> { create_directories(&[ "/tmp/ro", "/tmp/rw", "/tmp/rw/upper", "/tmp/rw/work", ])?; mount_tmpfs("/tmp/rw")?; create_directories(&["/tmp/rw/upper", "/tmp/rw/work"])?; self.rootfs.mount("/tmp/ro")?; mount_overlay("/tmp/sysroot", "lowerdir=/tmp/ro,upperdir=/tmp/rw/upper,workdir=/tmp/rw/work")?; create_directories(&[ "/tmp/sysroot/ro", "/tmp/sysroot/rw" ])?; move_mount("/tmp/ro", "/tmp/sysroot/ro")?; move_mount("/tmp/rw", "/tmp/sysroot/rw")?; let toolsdir = Path::new("/tmp/sysroot/opt/ph"); if !toolsdir.exists() { fs::create_dir_all(toolsdir) .map_err(|e| Error::MkDir(String::from("/tmp/sysroot/opt/ph"), e))?; } pivot_root("/tmp/sysroot", "/tmp/sysroot/opt/ph")?; Ok(()) } fn setup_writeable_root(&self) -> Result<()> { self.rootfs.mount("/tmp/sysroot")?; let toolsdir = Path::new("/tmp/sysroot/opt/ph"); if !toolsdir.exists() { fs::create_dir_all(toolsdir) .map_err(|e| Error::MkDir(String::from("/tmp/sysroot/opt/ph"), e))?; } pivot_root("/tmp/sysroot", "/tmp/sysroot/opt/ph")?; Ok(()) } fn has_9p_home(&self) -> bool { // XXX // /sys/bus/virtio/drivers/9pnet_virtio/virtio*/mount_tag true } pub fn mount_home_if_exists(&self) -> Result<()> { if 
self.has_9p_home() { let homedir = Path::new(self.homedir()); if !homedir.exists() { mkdir(homedir)?; } mount_9p("home", self.homedir())?; } Ok(()) } pub fn run_daemons(&mut self) -> Result<()> { if !Path::new("/dev/wl0").exists() { return Ok(()); } chmod("/dev/wl0", 0o666)?; let dbus = ServiceLaunch::new("dbus-daemon", "/usr/bin/dbus-daemon") .base_environment() .uidgid(1000,1000) .env("HOME", self.homedir()) .env("NO_AT_BRIDGE", "1") .env("QT_ACCESSIBILITY", "1") .env("SHELL", "/bin/bash") .env("USER", "user") .env("WAYLAND_DISPLAY", "wayland-0") .arg("--session") .arg("--nosyslog") .arg("--address=unix:path=/run/user/1000/bus") .arg("--print-address") .pipe_output() .launch()?; self.services.insert(dbus.pid(), dbus); let sommelier = ServiceLaunch::new("sommelier", "/opt/ph/usr/bin/sommelier") .base_environment() .uidgid(1000,1000) .arg("--parent") .pipe_output() .launch()?; self.services.insert(sommelier.pid(), sommelier); if self.cmdline.has_var("phinit.no_x11") { return Ok(()); } mkdir("/tmp/.X11-unix")?; chmod("/tmp/.X11-unix", 0o1777)?; self.write_xauth().map_err(Error::XAuthFail)?; let sommelierx = ServiceLaunch::new("sommelier-x", "/opt/ph/usr/bin/sommelier") .base_environment() .uidgid(1000,1000) .arg("-X") .arg("--x-display=0") .arg("--no-exit-with-child") .arg(format!("--x-auth={}/.Xauthority", self.homedir())) .arg("/bin/true") .pipe_output() .launch()?; self.services.insert(sommelierx.pid(), sommelierx); Ok(()) } pub fn setup_network(&self) -> Result<()> { if let Some(val) = self.cmdline.lookup("phinit.ip") { if let Ok(ip) = Ipv4Addr::from_str(&val) { self.configure_network(ip) .map_err(Error::NetworkConfigure)?; } sys::bind_mount("/opt/ph/etc/resolv.conf", "/etc/resolv.conf")?; } Ok(()) } fn configure_network(&self, ip: Ipv4Addr) -> netlink::Result<()> { let mut octets = ip.octets(); octets[3] = 1; let gw = Ipv4Addr::from(octets); let nl = NetlinkSocket::open()?; if !nl.interface_exists("eth0") { } nl.add_ip_address("eth0", ip, 24)?; 
nl.set_interface_up("eth0")?; nl.add_default_route(gw)?; Ok(()) } fn write_xauth(&self) -> io::Result<()> { let xauth_path = format!("{}/.Xauthority", self.homedir()); let mut randbuf = [0; 16]; let mut file = fs::File::open("/dev/urandom")?; file.read_exact(&mut randbuf)?; let mut v: Vec<u8> = Vec::new(); // ??? v.extend_from_slice(&[0x01, 0x00]); // "airwolf".len() v.extend_from_slice(&[0x00, 0x07]); v.extend_from_slice(b"airwolf"); // "0".len() (DISPLAY=:0) v.extend_from_slice(&[0x00, 0x01]); v.extend_from_slice(b"0"); // "MIT-MAGIC-COOKIE-a".len() v.extend_from_slice(&[0x00, 0x12]); v.extend_from_slice(b"MIT-MAGIC-COOKIE-1"); // randbuf.len() v.extend_from_slice(&[0x00, 0x10]); v.extend_from_slice(&randbuf); fs::write(&xauth_path, v)?; _chown(&xauth_path, 1000, 1000)?; Ok(()) } pub fn launch_console_shell(&mut self, splash: &'static str) -> Result<()> { fs::write("/run/bashrc", BASHRC).map_err(Error::WriteBashrc)?; let root = self.cmdline.has_var("phinit.rootshell"); let realm = self.cmdline.lookup("phinit.realm"); let home = if root { "/".to_string() } else { self.homedir().to_string() }; let shell = ServiceLaunch::new_shell(root, &home, realm) .arg("--rcfile").arg("/run/bashrc") .launch_with_preexec(move || { // set_controlling_tty(0, true)?; env::set_current_dir(&home)?; println!("{}", splash); Ok(()) })?; self.services.insert(shell.pid(), shell); Ok(()) } fn wait_for_next_child(&mut self) -> Result<()> { if let Some(child) = self.wait_for_child() { info!("Service exited: {}", child.name()); if child.name() == "shell" { reboot(libc::RB_AUTOBOOT) .map_err(Error::RebootFailed)?; } } Ok(()) } pub fn run(&mut self) -> Result<()> { loop { self.wait_for_next_child()?; } } fn handle_waitpid_err(err: io::Error) -> ! 
{ if let Some(errno) = err.raw_os_error() { if errno == libc::ECHILD { if let Err(err) = reboot(libc::RB_AUTOBOOT) { warn!("reboot() failed: {:?}", err); process::exit(-1); } } } warn!("error on waitpid: {:?}", err); process::exit(-1); } fn wait_for_child(&mut self) -> Option<Service> { match waitpid(-1, 0) { Ok((pid,_status)) => self.services.remove(&(pid as u32)), Err(err) => Self::handle_waitpid_err(err) } } } struct RootFS { root: String, fstype: String, rootflags: Option<String>, readonly: bool, } impl RootFS { fn load(cmdline: &CmdLine) -> Result<Self> { let root = cmdline.lookup("phinit.root") .ok_or(Error::NoRootVar)?; let fstype = cmdline.lookup("phinit.rootfstype") .ok_or(Error::NoRootFsVar)?; let rootflags = cmdline.lookup("phinit.rootflags"); let readonly = !cmdline.has_var("phinit.root_rw"); Ok(RootFS { root, fstype, rootflags, readonly }) } fn read_only(&self) -> bool { self.readonly } fn mount(&self, target: &str) -> Result<()> { let options = self.rootflags.as_ref().map(|s| s.as_str()); let mut flags = libc::MS_NOATIME; if self.readonly { flags |= libc::MS_RDONLY; } mount(&self.root, target, &self.fstype, flags, options) .map_err(|e| Error::RootFsMount(self.root.clone(), e)) } }
setup_filesystem
identifier_name
main.go
//go:generate go-bindata -prefix ../../migrations/ -pkg migrations -o ../../internal/migrations/migrations_gen.go ../../migrations/ package main import ( "crypto/tls" "crypto/x509" "fmt" "io/ioutil" "net" "os" "os/signal" "strings" "syscall" "time" log "github.com/sirupsen/logrus" "github.com/codegangsta/cli" "github.com/pkg/errors" migrate "github.com/rubenv/sql-migrate" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "github.com/brocaar/loraserver/api/as" "github.com/brocaar/loraserver/api/gw" "github.com/brocaar/loraserver/api/nc" "github.com/brocaar/loraserver/api/ns" "github.com/brocaar/loraserver/internal/api" "github.com/brocaar/loraserver/internal/api/auth" "github.com/brocaar/loraserver/internal/backend/controller" gwBackend "github.com/brocaar/loraserver/internal/backend/gateway" "github.com/brocaar/loraserver/internal/common" "github.com/brocaar/loraserver/internal/migrations" // TODO: merge backend/gateway into internal/gateway? 
"github.com/brocaar/loraserver/internal/gateway" "github.com/brocaar/loraserver/internal/uplink" "github.com/brocaar/lorawan" "github.com/brocaar/lorawan/band" ) func init() { grpclog.SetLogger(log.StandardLogger()) } var version string // set by the compiler var bands = []string{ string(band.AS_923), string(band.AU_915_928), string(band.CN_470_510), string(band.CN_779_787), string(band.EU_433), string(band.EU_863_870), string(band.IN_865_867), string(band.KR_920_923), string(band.US_902_928), } func run(c *cli.Context) error { var server *uplink.Server var gwStats *gateway.StatsHandler tasks := []func(*cli.Context) error{ setNetID, setBandConfig, setDeduplicationDelay, setGetDownlinkDataDelay, setCreateGatewayOnStats, setNodeSessionTTL, setLogNodeFrames, setGatewayServerJWTSecret, setStatsAggregationIntervals, setTimezone, printStartMessage, enableUplinkChannels, setRedisPool, setPostgreSQLConnection, setGatewayBackend, setApplicationServer, setNetworkController, runDatabaseMigrations, startAPIServer, startGatewayAPIServer, startLoRaServer(server), startStatsServer(gwStats), } for _, t := range tasks { if err := t(c); err != nil { log.Fatal(err) } } sigChan := make(chan os.Signal) exitChan := make(chan struct{}) signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM) log.WithField("signal", <-sigChan).Info("signal received") go func() { log.Warning("stopping loraserver") if err := server.Stop(); err != nil { log.Fatal(err) } if err := gwStats.Stop(); err != nil { log.Fatal(err) } exitChan <- struct{}{} }() select { case <-exitChan: case s := <-sigChan: log.WithField("signal", s).Info("signal received, stopping immediately") } return nil } func setNetID(c *cli.Context) error { var netID lorawan.NetID if err := netID.UnmarshalText([]byte(c.String("net-id"))); err != nil { return errors.Wrap(err, "NetID parse error") } common.NetID = netID return nil } func setBandConfig(c *cli.Context) error { if c.String("band") == "" { return fmt.Errorf("--band is undefined, valid 
options are: %s", strings.Join(bands, ", ")) } dwellTime := lorawan.DwellTimeNoLimit if c.Bool("band-dwell-time-400ms") { dwellTime = lorawan.DwellTime400ms } bandConfig, err := band.GetConfig(band.Name(c.String("band")), c.Bool("band-repeater-compatible"), dwellTime) if err != nil { return errors.Wrap(err, "get band config error") } for _, f := range c.IntSlice("extra-frequencies") { if err := bandConfig.AddChannel(f); err != nil { return errors.Wrap(err, "add channel error") } } common.Band = bandConfig common.BandName = band.Name(c.String("band")) return nil } func setDeduplicationDelay(c *cli.Context) error { common.DeduplicationDelay = c.Duration("deduplication-delay") return nil } func setGetDownlinkDataDelay(c *cli.Context) error { common.GetDownlinkDataDelay = c.Duration("get-downlink-data-delay") return nil } func setCreateGatewayOnStats(c *cli.Context) error { common.CreateGatewayOnStats = c.Bool("gw-create-on-stats") return nil } func setNodeSessionTTL(c *cli.Context) error { common.NodeSessionTTL = c.Duration("node-session-ttl") return nil } func setLogNodeFrames(c *cli.Context) error { common.LogNodeFrames = c.Bool("log-node-frames") return nil } func setGatewayServerJWTSecret(c *cli.Context) error { common.GatewayServerJWTSecret = c.String("gw-server-jwt-secret") return nil } func setStatsAggregationIntervals(c *cli.Context) error { // get the gw stats aggregation intervals gateway.MustSetStatsAggregationIntervals(strings.Split(c.String("gw-stats-aggregation-intervals"), ",")) return nil } func setTimezone(c *cli.Context) error { // get the timezone if c.String("timezone") != "" { l, err := time.LoadLocation(c.String("timezone")) if err != nil { return errors.Wrap(err, "load timezone location error") } common.TimeLocation = l } return nil } func printStartMessage(c *cli.Context) error { log.WithFields(log.Fields{ "version": version, "net_id": common.NetID.String(), "band": c.String("band"), "docs": "https://docs.loraserver.io/", }).Info("starting LoRa 
Server") return nil } func enableUplinkChannels(c *cli.Context) error { if c.String("enable-uplink-channels") == "" { return nil } log.Info("disabling all channels") for _, c := range common.Band.GetEnabledUplinkChannels() { if err := common.Band.DisableUplinkChannel(c); err != nil { return errors.Wrap(err, "disable uplink channel error") } } blocks := strings.Split(c.String("enable-uplink-channels"), ",") for _, block := range blocks { block = strings.Trim(block, " ") var start, end int if _, err := fmt.Sscanf(block, "%d-%d", &start, &end); err != nil { if _, err := fmt.Sscanf(block, "%d", &start); err != nil { return errors.Wrap(err, "parse channel range error") } end = start } log.WithFields(log.Fields{ "first_channel": start, "last_channel": end, }).Info("enabling channel block") for ; start <= end; start++ { if err := common.Band.EnableUplinkChannel(start); err != nil { errors.Wrap(err, "enable uplink channel error") } } } return nil } func setRedisPool(c *cli.Context) error { log.WithField("url", c.String("redis-url")).Info("setup redis connection pool") common.RedisPool = common.NewRedisPool(c.String("redis-url")) return nil } func setPostgreSQLConnection(c *cli.Context) error { log.Info("connecting to postgresql") db, err := common.OpenDatabase(c.String("postgres-dsn")) if err != nil { return errors.Wrap(err, "database connection error") } common.DB = db return nil } func setGatewayBackend(c *cli.Context) error { gw, err := gwBackend.NewBackend(c.String("gw-mqtt-server"), c.String("gw-mqtt-username"), c.String("gw-mqtt-password"), c.String("gw-mqtt-ca-cert")) if err != nil { return errors.Wrap(err, "gateway-backend setup failed") } common.Gateway = gw return nil } func setApplicationServer(c *cli.Context) error { log.WithFields(log.Fields{ "server": c.String("as-server"), "ca-cert": c.String("as-ca-cert"), "tls-cert": c.String("as-tls-cert"), "tls-key": c.String("as-tls-key"), }).Info("connecting to application-server") var asDialOptions []grpc.DialOption 
if c.String("as-tls-cert") != "" && c.String("as-tls-key") != "" { asDialOptions = append(asDialOptions, grpc.WithTransportCredentials( mustGetTransportCredentials(c.String("as-tls-cert"), c.String("as-tls-key"), c.String("as-ca-cert"), false), )) } else { asDialOptions = append(asDialOptions, grpc.WithInsecure()) } asConn, err := grpc.Dial(c.String("as-server"), asDialOptions...) if err != nil { return errors.Wrap(err, "application-server dial error") } common.Application = as.NewApplicationServerClient(asConn) return nil } func setNetworkController(c *cli.Context) error { var ncClient nc.NetworkControllerClient if c.String("nc-server") != "" { // setup network-controller client log.WithFields(log.Fields{ "server": c.String("nc-server"), "ca-cert": c.String("nc-ca-cert"), "tls-cert": c.String("nc-tls-cert"), "tls-key": c.String("nc-tls-key"), }).Info("connecting to network-controller") var ncDialOptions []grpc.DialOption if c.String("nc-tls-cert") != "" && c.String("nc-tls-key") != "" { ncDialOptions = append(ncDialOptions, grpc.WithTransportCredentials( mustGetTransportCredentials(c.String("nc-tls-cert"), c.String("nc-tls-key"), c.String("nc-ca-cert"), false), )) } else { ncDialOptions = append(ncDialOptions, grpc.WithInsecure()) } ncConn, err := grpc.Dial(c.String("nc-server"), ncDialOptions...) 
if err != nil { return errors.Wrap(err, "network-controller dial error") } ncClient = nc.NewNetworkControllerClient(ncConn) } else { log.Info("no network-controller configured") ncClient = &controller.NopNetworkControllerClient{} } common.Controller = ncClient return nil } func runDatabaseMigrations(c *cli.Context) error { if c.Bool("db-automigrate") { log.Info("applying database migrations") m := &migrate.AssetMigrationSource{ Asset: migrations.Asset, AssetDir: migrations.AssetDir, Dir: "", } n, err := migrate.Exec(common.DB.DB, "postgres", m, migrate.Up) if err != nil { return errors.Wrap(err, "applying migrations failed") } log.WithField("count", n).Info("migrations applied") } return nil } func startAPIServer(c *cli.Context) error { log.WithFields(log.Fields{ "bind": c.String("bind"), "ca-cert": c.String("ca-cert"), "tls-cert": c.String("tls-cert"), "tls-key": c.String("tls-key"), }).Info("starting api server") var opts []grpc.ServerOption if c.String("tls-cert") != "" && c.String("tls-key") != ""
gs := grpc.NewServer(opts...) nsAPI := api.NewNetworkServerAPI() ns.RegisterNetworkServerServer(gs, nsAPI) ln, err := net.Listen("tcp", c.String("bind")) if err != nil { return errors.Wrap(err, "start api listener error") } go gs.Serve(ln) return nil } func startGatewayAPIServer(c *cli.Context) error { log.WithFields(log.Fields{ "bind": c.String("gw-server-bind"), "ca-cert": c.String("gw-server-ca-cert"), "tls-cert": c.String("gw-server-tls-cert"), "tls-key": c.String("gw-server-tls-key"), }).Info("starting gateway api server") var validator auth.Validator if c.String("gw-server-jwt-secret") != "" { validator = auth.NewJWTValidator("HS256", c.String("gw-server-jwt-secret")) } else { return errors.New("--gw-server-jwt-secret must be set") } var opts []grpc.ServerOption if c.String("gw-server-tls-cert") != "" && c.String("gw-server-tls-key") != "" { creds := mustGetTransportCredentials(c.String("gw-server-tls-cert"), c.String("gw-server-tls-key"), c.String("gw-server-ca-cert"), false) opts = append(opts, grpc.Creds(creds)) } gs := grpc.NewServer(opts...) 
gwAPI := api.NewGatewayAPI(validator) gw.RegisterGatewayServer(gs, gwAPI) gwServerLn, err := net.Listen("tcp", c.String("gw-server-bind")) if err != nil { return errors.Wrap(err, "start gateway api server listener error") } go gs.Serve(gwServerLn) return nil } func startLoRaServer(server *uplink.Server) func(*cli.Context) error { return func(c *cli.Context) error { server = uplink.NewServer() if err := server.Start(); err != nil { return err } return nil } } func startStatsServer(gwStats *gateway.StatsHandler) func(*cli.Context) error { return func(c *cli.Context) error { gwStats = gateway.NewStatsHandler() if err := gwStats.Start(); err != nil { log.Fatal(err) } return nil } } func mustGetTransportCredentials(tlsCert, tlsKey, caCert string, verifyClientCert bool) credentials.TransportCredentials { var caCertPool *x509.CertPool cert, err := tls.LoadX509KeyPair(tlsCert, tlsKey) if err != nil { log.WithFields(log.Fields{ "cert": tlsCert, "key": tlsKey, }).Fatalf("load key-pair error: %s", err) } if caCert != "" { rawCaCert, err := ioutil.ReadFile(caCert) if err != nil { log.WithField("ca", caCert).Fatalf("load ca cert error: %s", err) } caCertPool = x509.NewCertPool() caCertPool.AppendCertsFromPEM(rawCaCert) } if verifyClientCert { return credentials.NewTLS(&tls.Config{ Certificates: []tls.Certificate{cert}, RootCAs: caCertPool, ClientAuth: tls.RequireAndVerifyClientCert, }) } return credentials.NewTLS(&tls.Config{ Certificates: []tls.Certificate{cert}, RootCAs: caCertPool, }) } func main() { app := cli.NewApp() app.Name = "loraserver" app.Usage = "network-server for LoRaWAN networks" app.Version = version app.Copyright = "See http://github.com/brocaar/loraserver for copyright information" app.Action = run app.Flags = []cli.Flag{ cli.StringFlag{ Name: "net-id", Usage: "network identifier (NetID, 3 bytes) encoded as HEX (e.g. 
010203)", EnvVar: "NET_ID", }, cli.StringFlag{ Name: "band", Usage: fmt.Sprintf("ism band configuration to use (options: %s)", strings.Join(bands, ", ")), EnvVar: "BAND", }, cli.BoolFlag{ Name: "band-dwell-time-400ms", Usage: "band configuration takes 400ms dwell-time into account", EnvVar: "BAND_DWELL_TIME_400ms", }, cli.BoolFlag{ Name: "band-repeater-compatible", Usage: "band configuration takes repeater encapsulation layer into account", EnvVar: "BAND_REPEATER_COMPATIBLE", }, // TODO refactor to NS_SERVER_CA_CERT? cli.StringFlag{ Name: "ca-cert", Usage: "ca certificate used by the api server (optional)", EnvVar: "CA_CERT", }, // TODO refactor to NS_SERVER_TLS_CERT? cli.StringFlag{ Name: "tls-cert", Usage: "tls certificate used by the api server (optional)", EnvVar: "TLS_CERT", }, // TODO refactor to NS_SERVER_TLS_KEY? cli.StringFlag{ Name: "tls-key", Usage: "tls key used by the api server (optional)", EnvVar: "TLS_KEY", }, // TODO refactor to NS_SERVER_BIND? cli.StringFlag{ Name: "bind", Usage: "ip:port to bind the api server", Value: "0.0.0.0:8000", EnvVar: "BIND", }, cli.StringFlag{ Name: "gw-server-ca-cert", Usage: "ca certificate used by the gateway api server (optional)", EnvVar: "GW_SERVER_CA_CERT", }, cli.StringFlag{ Name: "gw-server-tls-cert", Usage: "tls certificate used by the gateway api server (optional)", EnvVar: "GW_SERVER_TLS_CERT", }, cli.StringFlag{ Name: "gw-server-tls-key", Usage: "tls key used by the gateway api server (optional)", EnvVar: "GW_SERVER_TLS_KEY", }, cli.StringFlag{ Name: "gw-server-jwt-secret", Usage: "JWT secret used by the gateway api server for gateway authentication / authorization", EnvVar: "GW_SERVER_JWT_SECRET", }, cli.StringFlag{ Name: "gw-server-bind", Usage: "ip:port to bind the gateway api server", Value: "0.0.0.0:8002", EnvVar: "GW_SERVER_BIND", }, cli.StringFlag{ Name: "redis-url", Usage: "redis url (e.g. 
redis://user:password@hostname:port/0)", Value: "redis://localhost:6379", EnvVar: "REDIS_URL", }, cli.StringFlag{ Name: "postgres-dsn", Usage: "postgresql dsn (e.g.: postgres://user:password@hostname/database?sslmode=disable)", Value: "postgres://localhost/loraserver_ns?sslmode=disable", EnvVar: "POSTGRES_DSN", }, cli.BoolFlag{ Name: "db-automigrate", Usage: "automatically apply database migrations", EnvVar: "DB_AUTOMIGRATE", }, cli.StringFlag{ Name: "gw-mqtt-server", Usage: "mqtt broker server used by the gateway backend (e.g. scheme://host:port where scheme is tcp, ssl or ws)", Value: "tcp://localhost:1883", EnvVar: "GW_MQTT_SERVER", }, cli.StringFlag{ Name: "gw-mqtt-username", Usage: "mqtt username used by the gateway backend (optional)", EnvVar: "GW_MQTT_USERNAME", }, cli.StringFlag{ Name: "gw-mqtt-password", Usage: "mqtt password used by the gateway backend (optional)", EnvVar: "GW_MQTT_PASSWORD", }, cli.StringFlag{ Name: "gw-mqtt-ca-cert", Usage: "mqtt CA certificate file used by the gateway backend (optional)", EnvVar: "GW_MQTT_CA_CERT", }, cli.StringFlag{ Name: "as-server", Usage: "hostname:port of the application-server api server (optional)", Value: "127.0.0.1:8001", EnvVar: "AS_SERVER", }, cli.StringFlag{ Name: "as-ca-cert", Usage: "ca certificate used by the application-server client (optional)", EnvVar: "AS_CA_CERT", }, cli.StringFlag{ Name: "as-tls-cert", Usage: "tls certificate used by the application-server client (optional)", EnvVar: "AS_TLS_CERT", }, cli.StringFlag{ Name: "as-tls-key", Usage: "tls key used by the application-server client (optional)", EnvVar: "AS_TLS_KEY", }, cli.StringFlag{ Name: "nc-server", Usage: "hostname:port of the network-controller api server (optional)", EnvVar: "NC_SERVER", }, cli.StringFlag{ Name: "nc-ca-cert", Usage: "ca certificate used by the network-controller client (optional)", EnvVar: "NC_CA_CERT", }, cli.StringFlag{ Name: "nc-tls-cert", Usage: "tls certificate used by the network-controller client (optional)", 
EnvVar: "NC_TLS_CERT", }, cli.StringFlag{ Name: "nc-tls-key", Usage: "tls key used by the network-controller client (optional)", EnvVar: "NC_TLS_KEY", }, cli.DurationFlag{ Name: "deduplication-delay", Usage: "time to wait for uplink de-duplication", EnvVar: "DEDUPLICATION_DELAY", Value: 200 * time.Millisecond, }, cli.DurationFlag{ Name: "get-downlink-data-delay", Usage: "delay between uplink delivery to the app server and getting the downlink data from the app server (if any)", EnvVar: "GET_DOWNLINK_DATA_DELAY", Value: 100 * time.Millisecond, }, cli.StringFlag{ Name: "gw-stats-aggregation-intervals", Usage: "aggregation intervals to use for aggregating the gateway stats (valid options: second, minute, hour, day, week, month, quarter, year)", EnvVar: "GW_STATS_AGGREGATION_INTERVALS", Value: "minute,hour,day", }, cli.StringFlag{ Name: "timezone", Usage: "timezone to use when aggregating data (e.g. 'Europe/Amsterdam') (optional, by default the db timezone is used)", EnvVar: "TIMEZONE", }, cli.BoolFlag{ Name: "gw-create-on-stats", Usage: "create non-existing gateways on receiving of stats", EnvVar: "GW_CREATE_ON_STATS", }, cli.IntSliceFlag{ Name: "extra-frequencies", Usage: "extra frequencies to use for ISM bands that implement the CFList", EnvVar: "EXTRA_FREQUENCIES", }, cli.StringFlag{ Name: "enable-uplink-channels", Usage: "enable only a given sub-set of channels (e.g. '0-7,8-15')", EnvVar: "ENABLE_UPLINK_CHANNELS", }, cli.DurationFlag{ Name: "node-session-ttl", Usage: "the ttl after which a node-session expires after no activity", EnvVar: "NODE_SESSION_TTL", Value: time.Hour * 24 * 31, }, cli.BoolFlag{ Name: "log-node-frames", Usage: "log uplink and downlink frames to the database", EnvVar: "LOG_NODE_FRAMES", }, } app.Run(os.Args) }
{ creds := mustGetTransportCredentials(c.String("tls-cert"), c.String("tls-key"), c.String("ca-cert"), false) opts = append(opts, grpc.Creds(creds)) }
conditional_block
main.go
//go:generate go-bindata -prefix ../../migrations/ -pkg migrations -o ../../internal/migrations/migrations_gen.go ../../migrations/ package main import ( "crypto/tls" "crypto/x509" "fmt" "io/ioutil" "net" "os" "os/signal" "strings" "syscall" "time" log "github.com/sirupsen/logrus" "github.com/codegangsta/cli" "github.com/pkg/errors" migrate "github.com/rubenv/sql-migrate" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "github.com/brocaar/loraserver/api/as" "github.com/brocaar/loraserver/api/gw" "github.com/brocaar/loraserver/api/nc" "github.com/brocaar/loraserver/api/ns" "github.com/brocaar/loraserver/internal/api" "github.com/brocaar/loraserver/internal/api/auth" "github.com/brocaar/loraserver/internal/backend/controller" gwBackend "github.com/brocaar/loraserver/internal/backend/gateway"
"github.com/brocaar/lorawan" "github.com/brocaar/lorawan/band" ) func init() { grpclog.SetLogger(log.StandardLogger()) } var version string // set by the compiler var bands = []string{ string(band.AS_923), string(band.AU_915_928), string(band.CN_470_510), string(band.CN_779_787), string(band.EU_433), string(band.EU_863_870), string(band.IN_865_867), string(band.KR_920_923), string(band.US_902_928), } func run(c *cli.Context) error { var server *uplink.Server var gwStats *gateway.StatsHandler tasks := []func(*cli.Context) error{ setNetID, setBandConfig, setDeduplicationDelay, setGetDownlinkDataDelay, setCreateGatewayOnStats, setNodeSessionTTL, setLogNodeFrames, setGatewayServerJWTSecret, setStatsAggregationIntervals, setTimezone, printStartMessage, enableUplinkChannels, setRedisPool, setPostgreSQLConnection, setGatewayBackend, setApplicationServer, setNetworkController, runDatabaseMigrations, startAPIServer, startGatewayAPIServer, startLoRaServer(server), startStatsServer(gwStats), } for _, t := range tasks { if err := t(c); err != nil { log.Fatal(err) } } sigChan := make(chan os.Signal) exitChan := make(chan struct{}) signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM) log.WithField("signal", <-sigChan).Info("signal received") go func() { log.Warning("stopping loraserver") if err := server.Stop(); err != nil { log.Fatal(err) } if err := gwStats.Stop(); err != nil { log.Fatal(err) } exitChan <- struct{}{} }() select { case <-exitChan: case s := <-sigChan: log.WithField("signal", s).Info("signal received, stopping immediately") } return nil } func setNetID(c *cli.Context) error { var netID lorawan.NetID if err := netID.UnmarshalText([]byte(c.String("net-id"))); err != nil { return errors.Wrap(err, "NetID parse error") } common.NetID = netID return nil } func setBandConfig(c *cli.Context) error { if c.String("band") == "" { return fmt.Errorf("--band is undefined, valid options are: %s", strings.Join(bands, ", ")) } dwellTime := lorawan.DwellTimeNoLimit if 
c.Bool("band-dwell-time-400ms") { dwellTime = lorawan.DwellTime400ms } bandConfig, err := band.GetConfig(band.Name(c.String("band")), c.Bool("band-repeater-compatible"), dwellTime) if err != nil { return errors.Wrap(err, "get band config error") } for _, f := range c.IntSlice("extra-frequencies") { if err := bandConfig.AddChannel(f); err != nil { return errors.Wrap(err, "add channel error") } } common.Band = bandConfig common.BandName = band.Name(c.String("band")) return nil } func setDeduplicationDelay(c *cli.Context) error { common.DeduplicationDelay = c.Duration("deduplication-delay") return nil } func setGetDownlinkDataDelay(c *cli.Context) error { common.GetDownlinkDataDelay = c.Duration("get-downlink-data-delay") return nil } func setCreateGatewayOnStats(c *cli.Context) error { common.CreateGatewayOnStats = c.Bool("gw-create-on-stats") return nil } func setNodeSessionTTL(c *cli.Context) error { common.NodeSessionTTL = c.Duration("node-session-ttl") return nil } func setLogNodeFrames(c *cli.Context) error { common.LogNodeFrames = c.Bool("log-node-frames") return nil } func setGatewayServerJWTSecret(c *cli.Context) error { common.GatewayServerJWTSecret = c.String("gw-server-jwt-secret") return nil } func setStatsAggregationIntervals(c *cli.Context) error { // get the gw stats aggregation intervals gateway.MustSetStatsAggregationIntervals(strings.Split(c.String("gw-stats-aggregation-intervals"), ",")) return nil } func setTimezone(c *cli.Context) error { // get the timezone if c.String("timezone") != "" { l, err := time.LoadLocation(c.String("timezone")) if err != nil { return errors.Wrap(err, "load timezone location error") } common.TimeLocation = l } return nil } func printStartMessage(c *cli.Context) error { log.WithFields(log.Fields{ "version": version, "net_id": common.NetID.String(), "band": c.String("band"), "docs": "https://docs.loraserver.io/", }).Info("starting LoRa Server") return nil } func enableUplinkChannels(c *cli.Context) error { if 
c.String("enable-uplink-channels") == "" { return nil } log.Info("disabling all channels") for _, c := range common.Band.GetEnabledUplinkChannels() { if err := common.Band.DisableUplinkChannel(c); err != nil { return errors.Wrap(err, "disable uplink channel error") } } blocks := strings.Split(c.String("enable-uplink-channels"), ",") for _, block := range blocks { block = strings.Trim(block, " ") var start, end int if _, err := fmt.Sscanf(block, "%d-%d", &start, &end); err != nil { if _, err := fmt.Sscanf(block, "%d", &start); err != nil { return errors.Wrap(err, "parse channel range error") } end = start } log.WithFields(log.Fields{ "first_channel": start, "last_channel": end, }).Info("enabling channel block") for ; start <= end; start++ { if err := common.Band.EnableUplinkChannel(start); err != nil { errors.Wrap(err, "enable uplink channel error") } } } return nil } func setRedisPool(c *cli.Context) error { log.WithField("url", c.String("redis-url")).Info("setup redis connection pool") common.RedisPool = common.NewRedisPool(c.String("redis-url")) return nil } func setPostgreSQLConnection(c *cli.Context) error { log.Info("connecting to postgresql") db, err := common.OpenDatabase(c.String("postgres-dsn")) if err != nil { return errors.Wrap(err, "database connection error") } common.DB = db return nil } func setGatewayBackend(c *cli.Context) error { gw, err := gwBackend.NewBackend(c.String("gw-mqtt-server"), c.String("gw-mqtt-username"), c.String("gw-mqtt-password"), c.String("gw-mqtt-ca-cert")) if err != nil { return errors.Wrap(err, "gateway-backend setup failed") } common.Gateway = gw return nil } func setApplicationServer(c *cli.Context) error { log.WithFields(log.Fields{ "server": c.String("as-server"), "ca-cert": c.String("as-ca-cert"), "tls-cert": c.String("as-tls-cert"), "tls-key": c.String("as-tls-key"), }).Info("connecting to application-server") var asDialOptions []grpc.DialOption if c.String("as-tls-cert") != "" && c.String("as-tls-key") != "" { 
asDialOptions = append(asDialOptions, grpc.WithTransportCredentials( mustGetTransportCredentials(c.String("as-tls-cert"), c.String("as-tls-key"), c.String("as-ca-cert"), false), )) } else { asDialOptions = append(asDialOptions, grpc.WithInsecure()) } asConn, err := grpc.Dial(c.String("as-server"), asDialOptions...) if err != nil { return errors.Wrap(err, "application-server dial error") } common.Application = as.NewApplicationServerClient(asConn) return nil } func setNetworkController(c *cli.Context) error { var ncClient nc.NetworkControllerClient if c.String("nc-server") != "" { // setup network-controller client log.WithFields(log.Fields{ "server": c.String("nc-server"), "ca-cert": c.String("nc-ca-cert"), "tls-cert": c.String("nc-tls-cert"), "tls-key": c.String("nc-tls-key"), }).Info("connecting to network-controller") var ncDialOptions []grpc.DialOption if c.String("nc-tls-cert") != "" && c.String("nc-tls-key") != "" { ncDialOptions = append(ncDialOptions, grpc.WithTransportCredentials( mustGetTransportCredentials(c.String("nc-tls-cert"), c.String("nc-tls-key"), c.String("nc-ca-cert"), false), )) } else { ncDialOptions = append(ncDialOptions, grpc.WithInsecure()) } ncConn, err := grpc.Dial(c.String("nc-server"), ncDialOptions...) 
if err != nil { return errors.Wrap(err, "network-controller dial error") } ncClient = nc.NewNetworkControllerClient(ncConn) } else { log.Info("no network-controller configured") ncClient = &controller.NopNetworkControllerClient{} } common.Controller = ncClient return nil } func runDatabaseMigrations(c *cli.Context) error { if c.Bool("db-automigrate") { log.Info("applying database migrations") m := &migrate.AssetMigrationSource{ Asset: migrations.Asset, AssetDir: migrations.AssetDir, Dir: "", } n, err := migrate.Exec(common.DB.DB, "postgres", m, migrate.Up) if err != nil { return errors.Wrap(err, "applying migrations failed") } log.WithField("count", n).Info("migrations applied") } return nil } func startAPIServer(c *cli.Context) error { log.WithFields(log.Fields{ "bind": c.String("bind"), "ca-cert": c.String("ca-cert"), "tls-cert": c.String("tls-cert"), "tls-key": c.String("tls-key"), }).Info("starting api server") var opts []grpc.ServerOption if c.String("tls-cert") != "" && c.String("tls-key") != "" { creds := mustGetTransportCredentials(c.String("tls-cert"), c.String("tls-key"), c.String("ca-cert"), false) opts = append(opts, grpc.Creds(creds)) } gs := grpc.NewServer(opts...) 
nsAPI := api.NewNetworkServerAPI() ns.RegisterNetworkServerServer(gs, nsAPI) ln, err := net.Listen("tcp", c.String("bind")) if err != nil { return errors.Wrap(err, "start api listener error") } go gs.Serve(ln) return nil } func startGatewayAPIServer(c *cli.Context) error { log.WithFields(log.Fields{ "bind": c.String("gw-server-bind"), "ca-cert": c.String("gw-server-ca-cert"), "tls-cert": c.String("gw-server-tls-cert"), "tls-key": c.String("gw-server-tls-key"), }).Info("starting gateway api server") var validator auth.Validator if c.String("gw-server-jwt-secret") != "" { validator = auth.NewJWTValidator("HS256", c.String("gw-server-jwt-secret")) } else { return errors.New("--gw-server-jwt-secret must be set") } var opts []grpc.ServerOption if c.String("gw-server-tls-cert") != "" && c.String("gw-server-tls-key") != "" { creds := mustGetTransportCredentials(c.String("gw-server-tls-cert"), c.String("gw-server-tls-key"), c.String("gw-server-ca-cert"), false) opts = append(opts, grpc.Creds(creds)) } gs := grpc.NewServer(opts...) 
gwAPI := api.NewGatewayAPI(validator) gw.RegisterGatewayServer(gs, gwAPI) gwServerLn, err := net.Listen("tcp", c.String("gw-server-bind")) if err != nil { return errors.Wrap(err, "start gateway api server listener error") } go gs.Serve(gwServerLn) return nil } func startLoRaServer(server *uplink.Server) func(*cli.Context) error { return func(c *cli.Context) error { server = uplink.NewServer() if err := server.Start(); err != nil { return err } return nil } } func startStatsServer(gwStats *gateway.StatsHandler) func(*cli.Context) error { return func(c *cli.Context) error { gwStats = gateway.NewStatsHandler() if err := gwStats.Start(); err != nil { log.Fatal(err) } return nil } } func mustGetTransportCredentials(tlsCert, tlsKey, caCert string, verifyClientCert bool) credentials.TransportCredentials { var caCertPool *x509.CertPool cert, err := tls.LoadX509KeyPair(tlsCert, tlsKey) if err != nil { log.WithFields(log.Fields{ "cert": tlsCert, "key": tlsKey, }).Fatalf("load key-pair error: %s", err) } if caCert != "" { rawCaCert, err := ioutil.ReadFile(caCert) if err != nil { log.WithField("ca", caCert).Fatalf("load ca cert error: %s", err) } caCertPool = x509.NewCertPool() caCertPool.AppendCertsFromPEM(rawCaCert) } if verifyClientCert { return credentials.NewTLS(&tls.Config{ Certificates: []tls.Certificate{cert}, RootCAs: caCertPool, ClientAuth: tls.RequireAndVerifyClientCert, }) } return credentials.NewTLS(&tls.Config{ Certificates: []tls.Certificate{cert}, RootCAs: caCertPool, }) } func main() { app := cli.NewApp() app.Name = "loraserver" app.Usage = "network-server for LoRaWAN networks" app.Version = version app.Copyright = "See http://github.com/brocaar/loraserver for copyright information" app.Action = run app.Flags = []cli.Flag{ cli.StringFlag{ Name: "net-id", Usage: "network identifier (NetID, 3 bytes) encoded as HEX (e.g. 
010203)", EnvVar: "NET_ID", }, cli.StringFlag{ Name: "band", Usage: fmt.Sprintf("ism band configuration to use (options: %s)", strings.Join(bands, ", ")), EnvVar: "BAND", }, cli.BoolFlag{ Name: "band-dwell-time-400ms", Usage: "band configuration takes 400ms dwell-time into account", EnvVar: "BAND_DWELL_TIME_400ms", }, cli.BoolFlag{ Name: "band-repeater-compatible", Usage: "band configuration takes repeater encapsulation layer into account", EnvVar: "BAND_REPEATER_COMPATIBLE", }, // TODO refactor to NS_SERVER_CA_CERT? cli.StringFlag{ Name: "ca-cert", Usage: "ca certificate used by the api server (optional)", EnvVar: "CA_CERT", }, // TODO refactor to NS_SERVER_TLS_CERT? cli.StringFlag{ Name: "tls-cert", Usage: "tls certificate used by the api server (optional)", EnvVar: "TLS_CERT", }, // TODO refactor to NS_SERVER_TLS_KEY? cli.StringFlag{ Name: "tls-key", Usage: "tls key used by the api server (optional)", EnvVar: "TLS_KEY", }, // TODO refactor to NS_SERVER_BIND? cli.StringFlag{ Name: "bind", Usage: "ip:port to bind the api server", Value: "0.0.0.0:8000", EnvVar: "BIND", }, cli.StringFlag{ Name: "gw-server-ca-cert", Usage: "ca certificate used by the gateway api server (optional)", EnvVar: "GW_SERVER_CA_CERT", }, cli.StringFlag{ Name: "gw-server-tls-cert", Usage: "tls certificate used by the gateway api server (optional)", EnvVar: "GW_SERVER_TLS_CERT", }, cli.StringFlag{ Name: "gw-server-tls-key", Usage: "tls key used by the gateway api server (optional)", EnvVar: "GW_SERVER_TLS_KEY", }, cli.StringFlag{ Name: "gw-server-jwt-secret", Usage: "JWT secret used by the gateway api server for gateway authentication / authorization", EnvVar: "GW_SERVER_JWT_SECRET", }, cli.StringFlag{ Name: "gw-server-bind", Usage: "ip:port to bind the gateway api server", Value: "0.0.0.0:8002", EnvVar: "GW_SERVER_BIND", }, cli.StringFlag{ Name: "redis-url", Usage: "redis url (e.g. 
redis://user:password@hostname:port/0)", Value: "redis://localhost:6379", EnvVar: "REDIS_URL", }, cli.StringFlag{ Name: "postgres-dsn", Usage: "postgresql dsn (e.g.: postgres://user:password@hostname/database?sslmode=disable)", Value: "postgres://localhost/loraserver_ns?sslmode=disable", EnvVar: "POSTGRES_DSN", }, cli.BoolFlag{ Name: "db-automigrate", Usage: "automatically apply database migrations", EnvVar: "DB_AUTOMIGRATE", }, cli.StringFlag{ Name: "gw-mqtt-server", Usage: "mqtt broker server used by the gateway backend (e.g. scheme://host:port where scheme is tcp, ssl or ws)", Value: "tcp://localhost:1883", EnvVar: "GW_MQTT_SERVER", }, cli.StringFlag{ Name: "gw-mqtt-username", Usage: "mqtt username used by the gateway backend (optional)", EnvVar: "GW_MQTT_USERNAME", }, cli.StringFlag{ Name: "gw-mqtt-password", Usage: "mqtt password used by the gateway backend (optional)", EnvVar: "GW_MQTT_PASSWORD", }, cli.StringFlag{ Name: "gw-mqtt-ca-cert", Usage: "mqtt CA certificate file used by the gateway backend (optional)", EnvVar: "GW_MQTT_CA_CERT", }, cli.StringFlag{ Name: "as-server", Usage: "hostname:port of the application-server api server (optional)", Value: "127.0.0.1:8001", EnvVar: "AS_SERVER", }, cli.StringFlag{ Name: "as-ca-cert", Usage: "ca certificate used by the application-server client (optional)", EnvVar: "AS_CA_CERT", }, cli.StringFlag{ Name: "as-tls-cert", Usage: "tls certificate used by the application-server client (optional)", EnvVar: "AS_TLS_CERT", }, cli.StringFlag{ Name: "as-tls-key", Usage: "tls key used by the application-server client (optional)", EnvVar: "AS_TLS_KEY", }, cli.StringFlag{ Name: "nc-server", Usage: "hostname:port of the network-controller api server (optional)", EnvVar: "NC_SERVER", }, cli.StringFlag{ Name: "nc-ca-cert", Usage: "ca certificate used by the network-controller client (optional)", EnvVar: "NC_CA_CERT", }, cli.StringFlag{ Name: "nc-tls-cert", Usage: "tls certificate used by the network-controller client (optional)", 
EnvVar: "NC_TLS_CERT", }, cli.StringFlag{ Name: "nc-tls-key", Usage: "tls key used by the network-controller client (optional)", EnvVar: "NC_TLS_KEY", }, cli.DurationFlag{ Name: "deduplication-delay", Usage: "time to wait for uplink de-duplication", EnvVar: "DEDUPLICATION_DELAY", Value: 200 * time.Millisecond, }, cli.DurationFlag{ Name: "get-downlink-data-delay", Usage: "delay between uplink delivery to the app server and getting the downlink data from the app server (if any)", EnvVar: "GET_DOWNLINK_DATA_DELAY", Value: 100 * time.Millisecond, }, cli.StringFlag{ Name: "gw-stats-aggregation-intervals", Usage: "aggregation intervals to use for aggregating the gateway stats (valid options: second, minute, hour, day, week, month, quarter, year)", EnvVar: "GW_STATS_AGGREGATION_INTERVALS", Value: "minute,hour,day", }, cli.StringFlag{ Name: "timezone", Usage: "timezone to use when aggregating data (e.g. 'Europe/Amsterdam') (optional, by default the db timezone is used)", EnvVar: "TIMEZONE", }, cli.BoolFlag{ Name: "gw-create-on-stats", Usage: "create non-existing gateways on receiving of stats", EnvVar: "GW_CREATE_ON_STATS", }, cli.IntSliceFlag{ Name: "extra-frequencies", Usage: "extra frequencies to use for ISM bands that implement the CFList", EnvVar: "EXTRA_FREQUENCIES", }, cli.StringFlag{ Name: "enable-uplink-channels", Usage: "enable only a given sub-set of channels (e.g. '0-7,8-15')", EnvVar: "ENABLE_UPLINK_CHANNELS", }, cli.DurationFlag{ Name: "node-session-ttl", Usage: "the ttl after which a node-session expires after no activity", EnvVar: "NODE_SESSION_TTL", Value: time.Hour * 24 * 31, }, cli.BoolFlag{ Name: "log-node-frames", Usage: "log uplink and downlink frames to the database", EnvVar: "LOG_NODE_FRAMES", }, } app.Run(os.Args) }
"github.com/brocaar/loraserver/internal/common" "github.com/brocaar/loraserver/internal/migrations" // TODO: merge backend/gateway into internal/gateway? "github.com/brocaar/loraserver/internal/gateway" "github.com/brocaar/loraserver/internal/uplink"
random_line_split
main.go
//go:generate go-bindata -prefix ../../migrations/ -pkg migrations -o ../../internal/migrations/migrations_gen.go ../../migrations/ package main import ( "crypto/tls" "crypto/x509" "fmt" "io/ioutil" "net" "os" "os/signal" "strings" "syscall" "time" log "github.com/sirupsen/logrus" "github.com/codegangsta/cli" "github.com/pkg/errors" migrate "github.com/rubenv/sql-migrate" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "github.com/brocaar/loraserver/api/as" "github.com/brocaar/loraserver/api/gw" "github.com/brocaar/loraserver/api/nc" "github.com/brocaar/loraserver/api/ns" "github.com/brocaar/loraserver/internal/api" "github.com/brocaar/loraserver/internal/api/auth" "github.com/brocaar/loraserver/internal/backend/controller" gwBackend "github.com/brocaar/loraserver/internal/backend/gateway" "github.com/brocaar/loraserver/internal/common" "github.com/brocaar/loraserver/internal/migrations" // TODO: merge backend/gateway into internal/gateway? 
"github.com/brocaar/loraserver/internal/gateway" "github.com/brocaar/loraserver/internal/uplink" "github.com/brocaar/lorawan" "github.com/brocaar/lorawan/band" ) func init() { grpclog.SetLogger(log.StandardLogger()) } var version string // set by the compiler var bands = []string{ string(band.AS_923), string(band.AU_915_928), string(band.CN_470_510), string(band.CN_779_787), string(band.EU_433), string(band.EU_863_870), string(band.IN_865_867), string(band.KR_920_923), string(band.US_902_928), } func run(c *cli.Context) error { var server *uplink.Server var gwStats *gateway.StatsHandler tasks := []func(*cli.Context) error{ setNetID, setBandConfig, setDeduplicationDelay, setGetDownlinkDataDelay, setCreateGatewayOnStats, setNodeSessionTTL, setLogNodeFrames, setGatewayServerJWTSecret, setStatsAggregationIntervals, setTimezone, printStartMessage, enableUplinkChannels, setRedisPool, setPostgreSQLConnection, setGatewayBackend, setApplicationServer, setNetworkController, runDatabaseMigrations, startAPIServer, startGatewayAPIServer, startLoRaServer(server), startStatsServer(gwStats), } for _, t := range tasks { if err := t(c); err != nil { log.Fatal(err) } } sigChan := make(chan os.Signal) exitChan := make(chan struct{}) signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM) log.WithField("signal", <-sigChan).Info("signal received") go func() { log.Warning("stopping loraserver") if err := server.Stop(); err != nil { log.Fatal(err) } if err := gwStats.Stop(); err != nil { log.Fatal(err) } exitChan <- struct{}{} }() select { case <-exitChan: case s := <-sigChan: log.WithField("signal", s).Info("signal received, stopping immediately") } return nil } func setNetID(c *cli.Context) error
func setBandConfig(c *cli.Context) error { if c.String("band") == "" { return fmt.Errorf("--band is undefined, valid options are: %s", strings.Join(bands, ", ")) } dwellTime := lorawan.DwellTimeNoLimit if c.Bool("band-dwell-time-400ms") { dwellTime = lorawan.DwellTime400ms } bandConfig, err := band.GetConfig(band.Name(c.String("band")), c.Bool("band-repeater-compatible"), dwellTime) if err != nil { return errors.Wrap(err, "get band config error") } for _, f := range c.IntSlice("extra-frequencies") { if err := bandConfig.AddChannel(f); err != nil { return errors.Wrap(err, "add channel error") } } common.Band = bandConfig common.BandName = band.Name(c.String("band")) return nil } func setDeduplicationDelay(c *cli.Context) error { common.DeduplicationDelay = c.Duration("deduplication-delay") return nil } func setGetDownlinkDataDelay(c *cli.Context) error { common.GetDownlinkDataDelay = c.Duration("get-downlink-data-delay") return nil } func setCreateGatewayOnStats(c *cli.Context) error { common.CreateGatewayOnStats = c.Bool("gw-create-on-stats") return nil } func setNodeSessionTTL(c *cli.Context) error { common.NodeSessionTTL = c.Duration("node-session-ttl") return nil } func setLogNodeFrames(c *cli.Context) error { common.LogNodeFrames = c.Bool("log-node-frames") return nil } func setGatewayServerJWTSecret(c *cli.Context) error { common.GatewayServerJWTSecret = c.String("gw-server-jwt-secret") return nil } func setStatsAggregationIntervals(c *cli.Context) error { // get the gw stats aggregation intervals gateway.MustSetStatsAggregationIntervals(strings.Split(c.String("gw-stats-aggregation-intervals"), ",")) return nil } func setTimezone(c *cli.Context) error { // get the timezone if c.String("timezone") != "" { l, err := time.LoadLocation(c.String("timezone")) if err != nil { return errors.Wrap(err, "load timezone location error") } common.TimeLocation = l } return nil } func printStartMessage(c *cli.Context) error { log.WithFields(log.Fields{ "version": version, 
"net_id": common.NetID.String(), "band": c.String("band"), "docs": "https://docs.loraserver.io/", }).Info("starting LoRa Server") return nil } func enableUplinkChannels(c *cli.Context) error { if c.String("enable-uplink-channels") == "" { return nil } log.Info("disabling all channels") for _, c := range common.Band.GetEnabledUplinkChannels() { if err := common.Band.DisableUplinkChannel(c); err != nil { return errors.Wrap(err, "disable uplink channel error") } } blocks := strings.Split(c.String("enable-uplink-channels"), ",") for _, block := range blocks { block = strings.Trim(block, " ") var start, end int if _, err := fmt.Sscanf(block, "%d-%d", &start, &end); err != nil { if _, err := fmt.Sscanf(block, "%d", &start); err != nil { return errors.Wrap(err, "parse channel range error") } end = start } log.WithFields(log.Fields{ "first_channel": start, "last_channel": end, }).Info("enabling channel block") for ; start <= end; start++ { if err := common.Band.EnableUplinkChannel(start); err != nil { errors.Wrap(err, "enable uplink channel error") } } } return nil } func setRedisPool(c *cli.Context) error { log.WithField("url", c.String("redis-url")).Info("setup redis connection pool") common.RedisPool = common.NewRedisPool(c.String("redis-url")) return nil } func setPostgreSQLConnection(c *cli.Context) error { log.Info("connecting to postgresql") db, err := common.OpenDatabase(c.String("postgres-dsn")) if err != nil { return errors.Wrap(err, "database connection error") } common.DB = db return nil } func setGatewayBackend(c *cli.Context) error { gw, err := gwBackend.NewBackend(c.String("gw-mqtt-server"), c.String("gw-mqtt-username"), c.String("gw-mqtt-password"), c.String("gw-mqtt-ca-cert")) if err != nil { return errors.Wrap(err, "gateway-backend setup failed") } common.Gateway = gw return nil } func setApplicationServer(c *cli.Context) error { log.WithFields(log.Fields{ "server": c.String("as-server"), "ca-cert": c.String("as-ca-cert"), "tls-cert": 
c.String("as-tls-cert"), "tls-key": c.String("as-tls-key"), }).Info("connecting to application-server") var asDialOptions []grpc.DialOption if c.String("as-tls-cert") != "" && c.String("as-tls-key") != "" { asDialOptions = append(asDialOptions, grpc.WithTransportCredentials( mustGetTransportCredentials(c.String("as-tls-cert"), c.String("as-tls-key"), c.String("as-ca-cert"), false), )) } else { asDialOptions = append(asDialOptions, grpc.WithInsecure()) } asConn, err := grpc.Dial(c.String("as-server"), asDialOptions...) if err != nil { return errors.Wrap(err, "application-server dial error") } common.Application = as.NewApplicationServerClient(asConn) return nil } func setNetworkController(c *cli.Context) error { var ncClient nc.NetworkControllerClient if c.String("nc-server") != "" { // setup network-controller client log.WithFields(log.Fields{ "server": c.String("nc-server"), "ca-cert": c.String("nc-ca-cert"), "tls-cert": c.String("nc-tls-cert"), "tls-key": c.String("nc-tls-key"), }).Info("connecting to network-controller") var ncDialOptions []grpc.DialOption if c.String("nc-tls-cert") != "" && c.String("nc-tls-key") != "" { ncDialOptions = append(ncDialOptions, grpc.WithTransportCredentials( mustGetTransportCredentials(c.String("nc-tls-cert"), c.String("nc-tls-key"), c.String("nc-ca-cert"), false), )) } else { ncDialOptions = append(ncDialOptions, grpc.WithInsecure()) } ncConn, err := grpc.Dial(c.String("nc-server"), ncDialOptions...) 
if err != nil { return errors.Wrap(err, "network-controller dial error") } ncClient = nc.NewNetworkControllerClient(ncConn) } else { log.Info("no network-controller configured") ncClient = &controller.NopNetworkControllerClient{} } common.Controller = ncClient return nil } func runDatabaseMigrations(c *cli.Context) error { if c.Bool("db-automigrate") { log.Info("applying database migrations") m := &migrate.AssetMigrationSource{ Asset: migrations.Asset, AssetDir: migrations.AssetDir, Dir: "", } n, err := migrate.Exec(common.DB.DB, "postgres", m, migrate.Up) if err != nil { return errors.Wrap(err, "applying migrations failed") } log.WithField("count", n).Info("migrations applied") } return nil } func startAPIServer(c *cli.Context) error { log.WithFields(log.Fields{ "bind": c.String("bind"), "ca-cert": c.String("ca-cert"), "tls-cert": c.String("tls-cert"), "tls-key": c.String("tls-key"), }).Info("starting api server") var opts []grpc.ServerOption if c.String("tls-cert") != "" && c.String("tls-key") != "" { creds := mustGetTransportCredentials(c.String("tls-cert"), c.String("tls-key"), c.String("ca-cert"), false) opts = append(opts, grpc.Creds(creds)) } gs := grpc.NewServer(opts...) 
nsAPI := api.NewNetworkServerAPI() ns.RegisterNetworkServerServer(gs, nsAPI) ln, err := net.Listen("tcp", c.String("bind")) if err != nil { return errors.Wrap(err, "start api listener error") } go gs.Serve(ln) return nil } func startGatewayAPIServer(c *cli.Context) error { log.WithFields(log.Fields{ "bind": c.String("gw-server-bind"), "ca-cert": c.String("gw-server-ca-cert"), "tls-cert": c.String("gw-server-tls-cert"), "tls-key": c.String("gw-server-tls-key"), }).Info("starting gateway api server") var validator auth.Validator if c.String("gw-server-jwt-secret") != "" { validator = auth.NewJWTValidator("HS256", c.String("gw-server-jwt-secret")) } else { return errors.New("--gw-server-jwt-secret must be set") } var opts []grpc.ServerOption if c.String("gw-server-tls-cert") != "" && c.String("gw-server-tls-key") != "" { creds := mustGetTransportCredentials(c.String("gw-server-tls-cert"), c.String("gw-server-tls-key"), c.String("gw-server-ca-cert"), false) opts = append(opts, grpc.Creds(creds)) } gs := grpc.NewServer(opts...) 
gwAPI := api.NewGatewayAPI(validator) gw.RegisterGatewayServer(gs, gwAPI) gwServerLn, err := net.Listen("tcp", c.String("gw-server-bind")) if err != nil { return errors.Wrap(err, "start gateway api server listener error") } go gs.Serve(gwServerLn) return nil } func startLoRaServer(server *uplink.Server) func(*cli.Context) error { return func(c *cli.Context) error { server = uplink.NewServer() if err := server.Start(); err != nil { return err } return nil } } func startStatsServer(gwStats *gateway.StatsHandler) func(*cli.Context) error { return func(c *cli.Context) error { gwStats = gateway.NewStatsHandler() if err := gwStats.Start(); err != nil { log.Fatal(err) } return nil } } func mustGetTransportCredentials(tlsCert, tlsKey, caCert string, verifyClientCert bool) credentials.TransportCredentials { var caCertPool *x509.CertPool cert, err := tls.LoadX509KeyPair(tlsCert, tlsKey) if err != nil { log.WithFields(log.Fields{ "cert": tlsCert, "key": tlsKey, }).Fatalf("load key-pair error: %s", err) } if caCert != "" { rawCaCert, err := ioutil.ReadFile(caCert) if err != nil { log.WithField("ca", caCert).Fatalf("load ca cert error: %s", err) } caCertPool = x509.NewCertPool() caCertPool.AppendCertsFromPEM(rawCaCert) } if verifyClientCert { return credentials.NewTLS(&tls.Config{ Certificates: []tls.Certificate{cert}, RootCAs: caCertPool, ClientAuth: tls.RequireAndVerifyClientCert, }) } return credentials.NewTLS(&tls.Config{ Certificates: []tls.Certificate{cert}, RootCAs: caCertPool, }) } func main() { app := cli.NewApp() app.Name = "loraserver" app.Usage = "network-server for LoRaWAN networks" app.Version = version app.Copyright = "See http://github.com/brocaar/loraserver for copyright information" app.Action = run app.Flags = []cli.Flag{ cli.StringFlag{ Name: "net-id", Usage: "network identifier (NetID, 3 bytes) encoded as HEX (e.g. 
010203)", EnvVar: "NET_ID", }, cli.StringFlag{ Name: "band", Usage: fmt.Sprintf("ism band configuration to use (options: %s)", strings.Join(bands, ", ")), EnvVar: "BAND", }, cli.BoolFlag{ Name: "band-dwell-time-400ms", Usage: "band configuration takes 400ms dwell-time into account", EnvVar: "BAND_DWELL_TIME_400ms", }, cli.BoolFlag{ Name: "band-repeater-compatible", Usage: "band configuration takes repeater encapsulation layer into account", EnvVar: "BAND_REPEATER_COMPATIBLE", }, // TODO refactor to NS_SERVER_CA_CERT? cli.StringFlag{ Name: "ca-cert", Usage: "ca certificate used by the api server (optional)", EnvVar: "CA_CERT", }, // TODO refactor to NS_SERVER_TLS_CERT? cli.StringFlag{ Name: "tls-cert", Usage: "tls certificate used by the api server (optional)", EnvVar: "TLS_CERT", }, // TODO refactor to NS_SERVER_TLS_KEY? cli.StringFlag{ Name: "tls-key", Usage: "tls key used by the api server (optional)", EnvVar: "TLS_KEY", }, // TODO refactor to NS_SERVER_BIND? cli.StringFlag{ Name: "bind", Usage: "ip:port to bind the api server", Value: "0.0.0.0:8000", EnvVar: "BIND", }, cli.StringFlag{ Name: "gw-server-ca-cert", Usage: "ca certificate used by the gateway api server (optional)", EnvVar: "GW_SERVER_CA_CERT", }, cli.StringFlag{ Name: "gw-server-tls-cert", Usage: "tls certificate used by the gateway api server (optional)", EnvVar: "GW_SERVER_TLS_CERT", }, cli.StringFlag{ Name: "gw-server-tls-key", Usage: "tls key used by the gateway api server (optional)", EnvVar: "GW_SERVER_TLS_KEY", }, cli.StringFlag{ Name: "gw-server-jwt-secret", Usage: "JWT secret used by the gateway api server for gateway authentication / authorization", EnvVar: "GW_SERVER_JWT_SECRET", }, cli.StringFlag{ Name: "gw-server-bind", Usage: "ip:port to bind the gateway api server", Value: "0.0.0.0:8002", EnvVar: "GW_SERVER_BIND", }, cli.StringFlag{ Name: "redis-url", Usage: "redis url (e.g. 
redis://user:password@hostname:port/0)", Value: "redis://localhost:6379", EnvVar: "REDIS_URL", }, cli.StringFlag{ Name: "postgres-dsn", Usage: "postgresql dsn (e.g.: postgres://user:password@hostname/database?sslmode=disable)", Value: "postgres://localhost/loraserver_ns?sslmode=disable", EnvVar: "POSTGRES_DSN", }, cli.BoolFlag{ Name: "db-automigrate", Usage: "automatically apply database migrations", EnvVar: "DB_AUTOMIGRATE", }, cli.StringFlag{ Name: "gw-mqtt-server", Usage: "mqtt broker server used by the gateway backend (e.g. scheme://host:port where scheme is tcp, ssl or ws)", Value: "tcp://localhost:1883", EnvVar: "GW_MQTT_SERVER", }, cli.StringFlag{ Name: "gw-mqtt-username", Usage: "mqtt username used by the gateway backend (optional)", EnvVar: "GW_MQTT_USERNAME", }, cli.StringFlag{ Name: "gw-mqtt-password", Usage: "mqtt password used by the gateway backend (optional)", EnvVar: "GW_MQTT_PASSWORD", }, cli.StringFlag{ Name: "gw-mqtt-ca-cert", Usage: "mqtt CA certificate file used by the gateway backend (optional)", EnvVar: "GW_MQTT_CA_CERT", }, cli.StringFlag{ Name: "as-server", Usage: "hostname:port of the application-server api server (optional)", Value: "127.0.0.1:8001", EnvVar: "AS_SERVER", }, cli.StringFlag{ Name: "as-ca-cert", Usage: "ca certificate used by the application-server client (optional)", EnvVar: "AS_CA_CERT", }, cli.StringFlag{ Name: "as-tls-cert", Usage: "tls certificate used by the application-server client (optional)", EnvVar: "AS_TLS_CERT", }, cli.StringFlag{ Name: "as-tls-key", Usage: "tls key used by the application-server client (optional)", EnvVar: "AS_TLS_KEY", }, cli.StringFlag{ Name: "nc-server", Usage: "hostname:port of the network-controller api server (optional)", EnvVar: "NC_SERVER", }, cli.StringFlag{ Name: "nc-ca-cert", Usage: "ca certificate used by the network-controller client (optional)", EnvVar: "NC_CA_CERT", }, cli.StringFlag{ Name: "nc-tls-cert", Usage: "tls certificate used by the network-controller client (optional)", 
EnvVar: "NC_TLS_CERT", }, cli.StringFlag{ Name: "nc-tls-key", Usage: "tls key used by the network-controller client (optional)", EnvVar: "NC_TLS_KEY", }, cli.DurationFlag{ Name: "deduplication-delay", Usage: "time to wait for uplink de-duplication", EnvVar: "DEDUPLICATION_DELAY", Value: 200 * time.Millisecond, }, cli.DurationFlag{ Name: "get-downlink-data-delay", Usage: "delay between uplink delivery to the app server and getting the downlink data from the app server (if any)", EnvVar: "GET_DOWNLINK_DATA_DELAY", Value: 100 * time.Millisecond, }, cli.StringFlag{ Name: "gw-stats-aggregation-intervals", Usage: "aggregation intervals to use for aggregating the gateway stats (valid options: second, minute, hour, day, week, month, quarter, year)", EnvVar: "GW_STATS_AGGREGATION_INTERVALS", Value: "minute,hour,day", }, cli.StringFlag{ Name: "timezone", Usage: "timezone to use when aggregating data (e.g. 'Europe/Amsterdam') (optional, by default the db timezone is used)", EnvVar: "TIMEZONE", }, cli.BoolFlag{ Name: "gw-create-on-stats", Usage: "create non-existing gateways on receiving of stats", EnvVar: "GW_CREATE_ON_STATS", }, cli.IntSliceFlag{ Name: "extra-frequencies", Usage: "extra frequencies to use for ISM bands that implement the CFList", EnvVar: "EXTRA_FREQUENCIES", }, cli.StringFlag{ Name: "enable-uplink-channels", Usage: "enable only a given sub-set of channels (e.g. '0-7,8-15')", EnvVar: "ENABLE_UPLINK_CHANNELS", }, cli.DurationFlag{ Name: "node-session-ttl", Usage: "the ttl after which a node-session expires after no activity", EnvVar: "NODE_SESSION_TTL", Value: time.Hour * 24 * 31, }, cli.BoolFlag{ Name: "log-node-frames", Usage: "log uplink and downlink frames to the database", EnvVar: "LOG_NODE_FRAMES", }, } app.Run(os.Args) }
{ var netID lorawan.NetID if err := netID.UnmarshalText([]byte(c.String("net-id"))); err != nil { return errors.Wrap(err, "NetID parse error") } common.NetID = netID return nil }
identifier_body
main.go
//go:generate go-bindata -prefix ../../migrations/ -pkg migrations -o ../../internal/migrations/migrations_gen.go ../../migrations/ package main import ( "crypto/tls" "crypto/x509" "fmt" "io/ioutil" "net" "os" "os/signal" "strings" "syscall" "time" log "github.com/sirupsen/logrus" "github.com/codegangsta/cli" "github.com/pkg/errors" migrate "github.com/rubenv/sql-migrate" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "github.com/brocaar/loraserver/api/as" "github.com/brocaar/loraserver/api/gw" "github.com/brocaar/loraserver/api/nc" "github.com/brocaar/loraserver/api/ns" "github.com/brocaar/loraserver/internal/api" "github.com/brocaar/loraserver/internal/api/auth" "github.com/brocaar/loraserver/internal/backend/controller" gwBackend "github.com/brocaar/loraserver/internal/backend/gateway" "github.com/brocaar/loraserver/internal/common" "github.com/brocaar/loraserver/internal/migrations" // TODO: merge backend/gateway into internal/gateway? 
"github.com/brocaar/loraserver/internal/gateway" "github.com/brocaar/loraserver/internal/uplink" "github.com/brocaar/lorawan" "github.com/brocaar/lorawan/band" ) func init() { grpclog.SetLogger(log.StandardLogger()) } var version string // set by the compiler var bands = []string{ string(band.AS_923), string(band.AU_915_928), string(band.CN_470_510), string(band.CN_779_787), string(band.EU_433), string(band.EU_863_870), string(band.IN_865_867), string(band.KR_920_923), string(band.US_902_928), } func run(c *cli.Context) error { var server *uplink.Server var gwStats *gateway.StatsHandler tasks := []func(*cli.Context) error{ setNetID, setBandConfig, setDeduplicationDelay, setGetDownlinkDataDelay, setCreateGatewayOnStats, setNodeSessionTTL, setLogNodeFrames, setGatewayServerJWTSecret, setStatsAggregationIntervals, setTimezone, printStartMessage, enableUplinkChannels, setRedisPool, setPostgreSQLConnection, setGatewayBackend, setApplicationServer, setNetworkController, runDatabaseMigrations, startAPIServer, startGatewayAPIServer, startLoRaServer(server), startStatsServer(gwStats), } for _, t := range tasks { if err := t(c); err != nil { log.Fatal(err) } } sigChan := make(chan os.Signal) exitChan := make(chan struct{}) signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM) log.WithField("signal", <-sigChan).Info("signal received") go func() { log.Warning("stopping loraserver") if err := server.Stop(); err != nil { log.Fatal(err) } if err := gwStats.Stop(); err != nil { log.Fatal(err) } exitChan <- struct{}{} }() select { case <-exitChan: case s := <-sigChan: log.WithField("signal", s).Info("signal received, stopping immediately") } return nil } func setNetID(c *cli.Context) error { var netID lorawan.NetID if err := netID.UnmarshalText([]byte(c.String("net-id"))); err != nil { return errors.Wrap(err, "NetID parse error") } common.NetID = netID return nil } func setBandConfig(c *cli.Context) error { if c.String("band") == "" { return fmt.Errorf("--band is undefined, valid 
options are: %s", strings.Join(bands, ", ")) } dwellTime := lorawan.DwellTimeNoLimit if c.Bool("band-dwell-time-400ms") { dwellTime = lorawan.DwellTime400ms } bandConfig, err := band.GetConfig(band.Name(c.String("band")), c.Bool("band-repeater-compatible"), dwellTime) if err != nil { return errors.Wrap(err, "get band config error") } for _, f := range c.IntSlice("extra-frequencies") { if err := bandConfig.AddChannel(f); err != nil { return errors.Wrap(err, "add channel error") } } common.Band = bandConfig common.BandName = band.Name(c.String("band")) return nil } func setDeduplicationDelay(c *cli.Context) error { common.DeduplicationDelay = c.Duration("deduplication-delay") return nil } func setGetDownlinkDataDelay(c *cli.Context) error { common.GetDownlinkDataDelay = c.Duration("get-downlink-data-delay") return nil } func setCreateGatewayOnStats(c *cli.Context) error { common.CreateGatewayOnStats = c.Bool("gw-create-on-stats") return nil } func setNodeSessionTTL(c *cli.Context) error { common.NodeSessionTTL = c.Duration("node-session-ttl") return nil } func setLogNodeFrames(c *cli.Context) error { common.LogNodeFrames = c.Bool("log-node-frames") return nil } func setGatewayServerJWTSecret(c *cli.Context) error { common.GatewayServerJWTSecret = c.String("gw-server-jwt-secret") return nil } func setStatsAggregationIntervals(c *cli.Context) error { // get the gw stats aggregation intervals gateway.MustSetStatsAggregationIntervals(strings.Split(c.String("gw-stats-aggregation-intervals"), ",")) return nil } func setTimezone(c *cli.Context) error { // get the timezone if c.String("timezone") != "" { l, err := time.LoadLocation(c.String("timezone")) if err != nil { return errors.Wrap(err, "load timezone location error") } common.TimeLocation = l } return nil } func printStartMessage(c *cli.Context) error { log.WithFields(log.Fields{ "version": version, "net_id": common.NetID.String(), "band": c.String("band"), "docs": "https://docs.loraserver.io/", }).Info("starting LoRa 
Server") return nil } func enableUplinkChannels(c *cli.Context) error { if c.String("enable-uplink-channels") == "" { return nil } log.Info("disabling all channels") for _, c := range common.Band.GetEnabledUplinkChannels() { if err := common.Band.DisableUplinkChannel(c); err != nil { return errors.Wrap(err, "disable uplink channel error") } } blocks := strings.Split(c.String("enable-uplink-channels"), ",") for _, block := range blocks { block = strings.Trim(block, " ") var start, end int if _, err := fmt.Sscanf(block, "%d-%d", &start, &end); err != nil { if _, err := fmt.Sscanf(block, "%d", &start); err != nil { return errors.Wrap(err, "parse channel range error") } end = start } log.WithFields(log.Fields{ "first_channel": start, "last_channel": end, }).Info("enabling channel block") for ; start <= end; start++ { if err := common.Band.EnableUplinkChannel(start); err != nil { errors.Wrap(err, "enable uplink channel error") } } } return nil } func setRedisPool(c *cli.Context) error { log.WithField("url", c.String("redis-url")).Info("setup redis connection pool") common.RedisPool = common.NewRedisPool(c.String("redis-url")) return nil } func
(c *cli.Context) error { log.Info("connecting to postgresql") db, err := common.OpenDatabase(c.String("postgres-dsn")) if err != nil { return errors.Wrap(err, "database connection error") } common.DB = db return nil } func setGatewayBackend(c *cli.Context) error { gw, err := gwBackend.NewBackend(c.String("gw-mqtt-server"), c.String("gw-mqtt-username"), c.String("gw-mqtt-password"), c.String("gw-mqtt-ca-cert")) if err != nil { return errors.Wrap(err, "gateway-backend setup failed") } common.Gateway = gw return nil } func setApplicationServer(c *cli.Context) error { log.WithFields(log.Fields{ "server": c.String("as-server"), "ca-cert": c.String("as-ca-cert"), "tls-cert": c.String("as-tls-cert"), "tls-key": c.String("as-tls-key"), }).Info("connecting to application-server") var asDialOptions []grpc.DialOption if c.String("as-tls-cert") != "" && c.String("as-tls-key") != "" { asDialOptions = append(asDialOptions, grpc.WithTransportCredentials( mustGetTransportCredentials(c.String("as-tls-cert"), c.String("as-tls-key"), c.String("as-ca-cert"), false), )) } else { asDialOptions = append(asDialOptions, grpc.WithInsecure()) } asConn, err := grpc.Dial(c.String("as-server"), asDialOptions...) 
if err != nil { return errors.Wrap(err, "application-server dial error") } common.Application = as.NewApplicationServerClient(asConn) return nil } func setNetworkController(c *cli.Context) error { var ncClient nc.NetworkControllerClient if c.String("nc-server") != "" { // setup network-controller client log.WithFields(log.Fields{ "server": c.String("nc-server"), "ca-cert": c.String("nc-ca-cert"), "tls-cert": c.String("nc-tls-cert"), "tls-key": c.String("nc-tls-key"), }).Info("connecting to network-controller") var ncDialOptions []grpc.DialOption if c.String("nc-tls-cert") != "" && c.String("nc-tls-key") != "" { ncDialOptions = append(ncDialOptions, grpc.WithTransportCredentials( mustGetTransportCredentials(c.String("nc-tls-cert"), c.String("nc-tls-key"), c.String("nc-ca-cert"), false), )) } else { ncDialOptions = append(ncDialOptions, grpc.WithInsecure()) } ncConn, err := grpc.Dial(c.String("nc-server"), ncDialOptions...) if err != nil { return errors.Wrap(err, "network-controller dial error") } ncClient = nc.NewNetworkControllerClient(ncConn) } else { log.Info("no network-controller configured") ncClient = &controller.NopNetworkControllerClient{} } common.Controller = ncClient return nil } func runDatabaseMigrations(c *cli.Context) error { if c.Bool("db-automigrate") { log.Info("applying database migrations") m := &migrate.AssetMigrationSource{ Asset: migrations.Asset, AssetDir: migrations.AssetDir, Dir: "", } n, err := migrate.Exec(common.DB.DB, "postgres", m, migrate.Up) if err != nil { return errors.Wrap(err, "applying migrations failed") } log.WithField("count", n).Info("migrations applied") } return nil } func startAPIServer(c *cli.Context) error { log.WithFields(log.Fields{ "bind": c.String("bind"), "ca-cert": c.String("ca-cert"), "tls-cert": c.String("tls-cert"), "tls-key": c.String("tls-key"), }).Info("starting api server") var opts []grpc.ServerOption if c.String("tls-cert") != "" && c.String("tls-key") != "" { creds := 
mustGetTransportCredentials(c.String("tls-cert"), c.String("tls-key"), c.String("ca-cert"), false) opts = append(opts, grpc.Creds(creds)) } gs := grpc.NewServer(opts...) nsAPI := api.NewNetworkServerAPI() ns.RegisterNetworkServerServer(gs, nsAPI) ln, err := net.Listen("tcp", c.String("bind")) if err != nil { return errors.Wrap(err, "start api listener error") } go gs.Serve(ln) return nil } func startGatewayAPIServer(c *cli.Context) error { log.WithFields(log.Fields{ "bind": c.String("gw-server-bind"), "ca-cert": c.String("gw-server-ca-cert"), "tls-cert": c.String("gw-server-tls-cert"), "tls-key": c.String("gw-server-tls-key"), }).Info("starting gateway api server") var validator auth.Validator if c.String("gw-server-jwt-secret") != "" { validator = auth.NewJWTValidator("HS256", c.String("gw-server-jwt-secret")) } else { return errors.New("--gw-server-jwt-secret must be set") } var opts []grpc.ServerOption if c.String("gw-server-tls-cert") != "" && c.String("gw-server-tls-key") != "" { creds := mustGetTransportCredentials(c.String("gw-server-tls-cert"), c.String("gw-server-tls-key"), c.String("gw-server-ca-cert"), false) opts = append(opts, grpc.Creds(creds)) } gs := grpc.NewServer(opts...) 
gwAPI := api.NewGatewayAPI(validator) gw.RegisterGatewayServer(gs, gwAPI) gwServerLn, err := net.Listen("tcp", c.String("gw-server-bind")) if err != nil { return errors.Wrap(err, "start gateway api server listener error") } go gs.Serve(gwServerLn) return nil } func startLoRaServer(server *uplink.Server) func(*cli.Context) error { return func(c *cli.Context) error { server = uplink.NewServer() if err := server.Start(); err != nil { return err } return nil } } func startStatsServer(gwStats *gateway.StatsHandler) func(*cli.Context) error { return func(c *cli.Context) error { gwStats = gateway.NewStatsHandler() if err := gwStats.Start(); err != nil { log.Fatal(err) } return nil } } func mustGetTransportCredentials(tlsCert, tlsKey, caCert string, verifyClientCert bool) credentials.TransportCredentials { var caCertPool *x509.CertPool cert, err := tls.LoadX509KeyPair(tlsCert, tlsKey) if err != nil { log.WithFields(log.Fields{ "cert": tlsCert, "key": tlsKey, }).Fatalf("load key-pair error: %s", err) } if caCert != "" { rawCaCert, err := ioutil.ReadFile(caCert) if err != nil { log.WithField("ca", caCert).Fatalf("load ca cert error: %s", err) } caCertPool = x509.NewCertPool() caCertPool.AppendCertsFromPEM(rawCaCert) } if verifyClientCert { return credentials.NewTLS(&tls.Config{ Certificates: []tls.Certificate{cert}, RootCAs: caCertPool, ClientAuth: tls.RequireAndVerifyClientCert, }) } return credentials.NewTLS(&tls.Config{ Certificates: []tls.Certificate{cert}, RootCAs: caCertPool, }) } func main() { app := cli.NewApp() app.Name = "loraserver" app.Usage = "network-server for LoRaWAN networks" app.Version = version app.Copyright = "See http://github.com/brocaar/loraserver for copyright information" app.Action = run app.Flags = []cli.Flag{ cli.StringFlag{ Name: "net-id", Usage: "network identifier (NetID, 3 bytes) encoded as HEX (e.g. 
010203)", EnvVar: "NET_ID", }, cli.StringFlag{ Name: "band", Usage: fmt.Sprintf("ism band configuration to use (options: %s)", strings.Join(bands, ", ")), EnvVar: "BAND", }, cli.BoolFlag{ Name: "band-dwell-time-400ms", Usage: "band configuration takes 400ms dwell-time into account", EnvVar: "BAND_DWELL_TIME_400ms", }, cli.BoolFlag{ Name: "band-repeater-compatible", Usage: "band configuration takes repeater encapsulation layer into account", EnvVar: "BAND_REPEATER_COMPATIBLE", }, // TODO refactor to NS_SERVER_CA_CERT? cli.StringFlag{ Name: "ca-cert", Usage: "ca certificate used by the api server (optional)", EnvVar: "CA_CERT", }, // TODO refactor to NS_SERVER_TLS_CERT? cli.StringFlag{ Name: "tls-cert", Usage: "tls certificate used by the api server (optional)", EnvVar: "TLS_CERT", }, // TODO refactor to NS_SERVER_TLS_KEY? cli.StringFlag{ Name: "tls-key", Usage: "tls key used by the api server (optional)", EnvVar: "TLS_KEY", }, // TODO refactor to NS_SERVER_BIND? cli.StringFlag{ Name: "bind", Usage: "ip:port to bind the api server", Value: "0.0.0.0:8000", EnvVar: "BIND", }, cli.StringFlag{ Name: "gw-server-ca-cert", Usage: "ca certificate used by the gateway api server (optional)", EnvVar: "GW_SERVER_CA_CERT", }, cli.StringFlag{ Name: "gw-server-tls-cert", Usage: "tls certificate used by the gateway api server (optional)", EnvVar: "GW_SERVER_TLS_CERT", }, cli.StringFlag{ Name: "gw-server-tls-key", Usage: "tls key used by the gateway api server (optional)", EnvVar: "GW_SERVER_TLS_KEY", }, cli.StringFlag{ Name: "gw-server-jwt-secret", Usage: "JWT secret used by the gateway api server for gateway authentication / authorization", EnvVar: "GW_SERVER_JWT_SECRET", }, cli.StringFlag{ Name: "gw-server-bind", Usage: "ip:port to bind the gateway api server", Value: "0.0.0.0:8002", EnvVar: "GW_SERVER_BIND", }, cli.StringFlag{ Name: "redis-url", Usage: "redis url (e.g. 
redis://user:password@hostname:port/0)", Value: "redis://localhost:6379", EnvVar: "REDIS_URL", }, cli.StringFlag{ Name: "postgres-dsn", Usage: "postgresql dsn (e.g.: postgres://user:password@hostname/database?sslmode=disable)", Value: "postgres://localhost/loraserver_ns?sslmode=disable", EnvVar: "POSTGRES_DSN", }, cli.BoolFlag{ Name: "db-automigrate", Usage: "automatically apply database migrations", EnvVar: "DB_AUTOMIGRATE", }, cli.StringFlag{ Name: "gw-mqtt-server", Usage: "mqtt broker server used by the gateway backend (e.g. scheme://host:port where scheme is tcp, ssl or ws)", Value: "tcp://localhost:1883", EnvVar: "GW_MQTT_SERVER", }, cli.StringFlag{ Name: "gw-mqtt-username", Usage: "mqtt username used by the gateway backend (optional)", EnvVar: "GW_MQTT_USERNAME", }, cli.StringFlag{ Name: "gw-mqtt-password", Usage: "mqtt password used by the gateway backend (optional)", EnvVar: "GW_MQTT_PASSWORD", }, cli.StringFlag{ Name: "gw-mqtt-ca-cert", Usage: "mqtt CA certificate file used by the gateway backend (optional)", EnvVar: "GW_MQTT_CA_CERT", }, cli.StringFlag{ Name: "as-server", Usage: "hostname:port of the application-server api server (optional)", Value: "127.0.0.1:8001", EnvVar: "AS_SERVER", }, cli.StringFlag{ Name: "as-ca-cert", Usage: "ca certificate used by the application-server client (optional)", EnvVar: "AS_CA_CERT", }, cli.StringFlag{ Name: "as-tls-cert", Usage: "tls certificate used by the application-server client (optional)", EnvVar: "AS_TLS_CERT", }, cli.StringFlag{ Name: "as-tls-key", Usage: "tls key used by the application-server client (optional)", EnvVar: "AS_TLS_KEY", }, cli.StringFlag{ Name: "nc-server", Usage: "hostname:port of the network-controller api server (optional)", EnvVar: "NC_SERVER", }, cli.StringFlag{ Name: "nc-ca-cert", Usage: "ca certificate used by the network-controller client (optional)", EnvVar: "NC_CA_CERT", }, cli.StringFlag{ Name: "nc-tls-cert", Usage: "tls certificate used by the network-controller client (optional)", 
EnvVar: "NC_TLS_CERT", }, cli.StringFlag{ Name: "nc-tls-key", Usage: "tls key used by the network-controller client (optional)", EnvVar: "NC_TLS_KEY", }, cli.DurationFlag{ Name: "deduplication-delay", Usage: "time to wait for uplink de-duplication", EnvVar: "DEDUPLICATION_DELAY", Value: 200 * time.Millisecond, }, cli.DurationFlag{ Name: "get-downlink-data-delay", Usage: "delay between uplink delivery to the app server and getting the downlink data from the app server (if any)", EnvVar: "GET_DOWNLINK_DATA_DELAY", Value: 100 * time.Millisecond, }, cli.StringFlag{ Name: "gw-stats-aggregation-intervals", Usage: "aggregation intervals to use for aggregating the gateway stats (valid options: second, minute, hour, day, week, month, quarter, year)", EnvVar: "GW_STATS_AGGREGATION_INTERVALS", Value: "minute,hour,day", }, cli.StringFlag{ Name: "timezone", Usage: "timezone to use when aggregating data (e.g. 'Europe/Amsterdam') (optional, by default the db timezone is used)", EnvVar: "TIMEZONE", }, cli.BoolFlag{ Name: "gw-create-on-stats", Usage: "create non-existing gateways on receiving of stats", EnvVar: "GW_CREATE_ON_STATS", }, cli.IntSliceFlag{ Name: "extra-frequencies", Usage: "extra frequencies to use for ISM bands that implement the CFList", EnvVar: "EXTRA_FREQUENCIES", }, cli.StringFlag{ Name: "enable-uplink-channels", Usage: "enable only a given sub-set of channels (e.g. '0-7,8-15')", EnvVar: "ENABLE_UPLINK_CHANNELS", }, cli.DurationFlag{ Name: "node-session-ttl", Usage: "the ttl after which a node-session expires after no activity", EnvVar: "NODE_SESSION_TTL", Value: time.Hour * 24 * 31, }, cli.BoolFlag{ Name: "log-node-frames", Usage: "log uplink and downlink frames to the database", EnvVar: "LOG_NODE_FRAMES", }, } app.Run(os.Args) }
setPostgreSQLConnection
identifier_name
SVD_getMatrixCompletion.py
#http://radimrehurek.com/2014/03/data-streaming-in-python-generators-iterators-iterables/ import json import gensim import fio import numpy, scipy.sparse from scipy.sparse.linalg import svds as sparsesvd import re import porter import pickle import softImputeWrapper import ILP_baseline as ILP import os phraseext = ".key" #a list studentext = ".keys.source" #json countext = ".dict" #a dictionary lpext = ".lp" lpsolext = ".sol" ngramext = ".ngram.json" corpusdictexe = ".corpus.dict" cscexe = ".mat.txt" mcexe = ".mc.txt" ngramTag = "___" def ProcessLine(line,ngrams=[1]): #tokens = list(gensim.utils.tokenize(line, lower=True, errors='ignore')) tokens = line.lower().split() new_tokens = [] for n in ngrams: ngram = ILP.getNgramTokenized(tokens, n, NoStopWords=True, Stemmed=True, ngramTag=ngramTag) new_tokens = new_tokens + ngram return " ".join(new_tokens) def iter_folder(folder, extension, ngrams=[1]): for subdir, dirs, files in os.walk(folder): for file in sorted(files): if not file.endswith(extension): continue print file document = open(file).readlines() for line in document: line = ProcessLine(line, ngrams) #print line # break document into utf8 tokens yield gensim.utils.tokenize(line, lower=True, errors='ignore') def iter_documents(outdir, types, sheets = range(0,25), np='syntax', ngrams=[1]): """ Generator: iterate over all relevant documents, yielding one document (=list of utf8 tokens) at a time. """ print "types:", types # find all .txt documents, no matter how deep under top_directory for i, sheet in enumerate(sheets): week = i + 1 dir = outdir + str(week) + '/' for question in types: prefix = dir + question + "." 
+ np filename = prefix + phraseext if not fio.IsExist(filename): continue document = open(prefix + phraseext).readlines() for line in document: line = ProcessLine(line,ngrams) #print line # break document into utf8 tokens yield gensim.utils.tokenize(line, lower=True, errors='ignore') def readbook(path, ngrams=[1]): document = open(path).readlines() for line in document: line = re.sub( '\s+', ' ', line).strip() if len(line) == 0: continue line = ProcessLine(line, ngrams) # break document into utf8 tokens yield gensim.utils.tokenize(line, lower=True, errors='ignore') class TxtSubdirsCorpus(object): """ Iterable: on each iteration, return bag-of-words vectors, one vector for each document. Process one document at a time using generators, never load the entire corpus into RAM. """ def __init__(self, top_dir, types=['POI', 'MP', 'LP'], sheets = range(0,25), np='syntax', ngrams=[1]): self.types = types self.top_dir = top_dir self.np = np self.ngrams = ngrams self.sheets = sheets # create dictionary = mapping for documents => sparse vectors self.dictionary = gensim.corpora.Dictionary(iter_documents(top_dir, types, sheets, np, ngrams)) def __iter__(self): """ Again, __iter__ is a generator => TxtSubdirsCorpus is a streamed iterable. """ for tokens in iter_documents(self.top_dir, self.types, self.sheets, self.np, self.ngrams): # transform tokens (strings) into a sparse vector, one at a time yield self.dictionary.doc2bow(tokens) class TacCorpus(object): def __init__(self, top_dir, ngrams=[1]): self.top_dir = top_dir self.dictionary = gensim.corpora.Dictionary(iter_documents(top_dir, ngrams)) def __iter__(self): """ Again, __iter__ is a generator => TxtSubdirsCorpus is a streamed iterable. """ for tokens in iter_folder(self.top_dir, self.ngrams): # transform tokens (strings) into a sparse vector, one at a time yield self.dictionary.doc2bow(tokens) class BookCorpus(object): """ Iterable: on each iteration, return bag-of-words vectors, one vector for each document. 
Process one document at a time using generators, never load the entire corpus into RAM. """ def __init__(self, path, ngrams=[1]): self.path = path self.ngrams = ngrams # create dictionary = mapping for documents => sparse vectors self.dictionary = gensim.corpora.Dictionary(readbook(path, ngrams)) def __iter__(self): """ Again, __iter__ is a generator => TxtSubdirsCorpus is a streamed iterable. """ for tokens in readbook(self.path, self.ngrams): # transform tokens (strings) into a sparse vector, one at a time yield self.dictionary.doc2bow(tokens) def SaveCSC2(csc, filename): s = csc.shape m = s[0] n = s[1] body = [] for i in range(m): row = [] for j in range(n): row.append(csc[i, j]) body.append(row) fio.WriteMatrix(filename, body, header=None) def SaveCSC(csc, filename): A = csc.toarray() s = csc.shape m = s[0] n = s[1] data = [] for i in range(m): row = [] for j in range(n): x = A[i][j] if x != 0: row.append([j, A[i][j]]) data.append(row) with open(filename, 'w') as fin: json.dump(data, fin, indent = 2) def SaveSparseMatrix(A, filename): m = len(A) n = len(A[0]) data = [] for i in range(m): row = [] for j in range(n): x = A[i][j] if x != 0: row.append([j, A[i][j]]) data.append(row) with open(filename, 'w') as fin: json.dump(data, fin, indent = 2) def SaveNewA(A, dict, path, ngrams, prefixname="", sheets = range(0,25), np='sentence', types=['POI', 'MP', 'LP']): TotoalLine = 0 for i in sheets: week = i + 1 dir = path + str(week) + '/' for type in types: prefix = dir + type + "." 
+ np print prefix if not fio.IsExist(prefix + phraseext): print prefix + phraseext continue document = open(prefix + phraseext).readlines() LineRange = range(TotoalLine, TotoalLine + len(document)) TotoalLine = TotoalLine + len(document) Bigrams = [] for line in document: line = ProcessLine(line, ngrams) tokens = list(gensim.utils.tokenize(line, lower=True, errors='ignore')) Bigrams = Bigrams + tokens PartA = {} for bigram in set(Bigrams): if bigram not in dict: print "error", bigram id = dict[bigram] row = A[id] PartA[bigram] = [row[x] for x in LineRange] svdAname = dir + type + '.' +prefixname + '.softA' print svdAname with open(svdAname, 'w') as fout: json.dump(PartA, fout, indent=2) def ToBinary(csc): A = csc.toarray() s = csc.shape m = s[0] n = s[1] m = len(A) n = len(A[0]) for i in range(m): row = [] for j in range(n): if A[i][j] >= 1: A[i][j] = 1 return A def CheckBinary(A): m = len(A) n = len(A[0]) for i in range(m):
return True def getSVD(prefix, np, corpusname, ngrams, rank_max, softImpute_lambda, binary_matrix, output, types = ['POI', 'MP', 'LP']): #types = ['POI', 'MP', 'LP'] path = prefix sheets = range(0,26) dictname = output + "_".join(types) + '_' + corpusname + corpusdictexe # # that's it! the streamed corpus of sparse vectors is ready # if corpusname=='book': # corpus = BookCorpus(np, ngrams) # elif corpusname == 'tac': # corpus = TacCorpus(prefix, ngrams) # dictname = path + '_' + corpusname + corpusdictexe # else: # corpus = TxtSubdirsCorpus(prefix, types, sheets, np, ngrams) # # fio.SaveDict2Json(corpus.dictionary.token2id, dictname) # # # or run truncated Singular Value Decomposition (SVD) on the streamed corpus # #from gensim.models.lsimodel import stochastic_svd as svd # #u, s = svd(corpus, rank=300, num_terms=len(corpus.dictionary), chunksize=5000) # # #https://pypi.python.org/pypi/sparsesvd/ # scipy_csc_matrix = gensim.matutils.corpus2csc(corpus) # print scipy_csc_matrix.shape # # print "binary_matrix: ", binary_matrix # # A = ToBinary(scipy_csc_matrix) # # rank = rank_max # print rank # # name = 'X' # newA = softImputeWrapper.SoftImpute(A.T, rank=rank, Lambda=softImpute_lambda, name=name, folder=output) prefix = str("500_2.0") newA = softImputeWrapper.LoadMC(Lambda=prefix, name='newX', folder=output) if newA != None: print newA.shape prefix = '2.0' token2id = fio.LoadDictJson(dictname) SaveNewA(newA, token2id, path, ngrams, prefix, np=np, types=types) def TestProcessLine(): line = "how to determine the answers to part iii , in the activity ." 
print ProcessLine(line, [1, 2]).split() tokens = line.lower().split() ngrams = [] for n in [1,2]: grams = ILP.getNgramTokenized(tokens, n, NoStopWords=True, Stemmed=True) ngrams = ngrams + grams print ngrams def getMC_IE256(): ILP_dir = "../../data/IE256/MC/" outdir = "../../data/matrix/exp8/" #TestProcessLine() from config import ConfigFile config = ConfigFile(config_file_name='config_IE256.txt') for np in ['sentence']: getSVD(ILP_dir, np, corpusname='corpus', ngrams=config.get_ngrams(), rank_max = config.get_rank_max(), softImpute_lambda = config.get_softImpute_lambda(), binary_matrix = config.get_binary_matrix(), output=outdir, types=['q1','q2']) print "done" if __name__ == '__main__': getMC_IE256() exit(-1) excelfile = "../../data/2011Spring_norm.xls" sennadatadir = "../../data/senna/" ILP_dir = "../../data/IE256/MC/" outdir = ILP_dir #TestProcessLine() from config import ConfigFile config = ConfigFile(config_file_name='tac_config.txt') for np in ['sentence']: getSVD(ILP_dir, np, corpusname='corpus', ngrams=config.get_ngrams(), rank_max = config.get_rank_max(), softImpute_lambda = config.get_softImpute_lambda(), binary_matrix = config.get_binary_matrix(), output=outdir) print "done"
row = [] for j in range(n): if A[i][j] != 0 and A[i][j] != 1: return False
conditional_block
SVD_getMatrixCompletion.py
#http://radimrehurek.com/2014/03/data-streaming-in-python-generators-iterators-iterables/ import json import gensim import fio import numpy, scipy.sparse from scipy.sparse.linalg import svds as sparsesvd import re import porter import pickle import softImputeWrapper import ILP_baseline as ILP import os phraseext = ".key" #a list studentext = ".keys.source" #json countext = ".dict" #a dictionary lpext = ".lp" lpsolext = ".sol" ngramext = ".ngram.json" corpusdictexe = ".corpus.dict" cscexe = ".mat.txt" mcexe = ".mc.txt" ngramTag = "___" def ProcessLine(line,ngrams=[1]): #tokens = list(gensim.utils.tokenize(line, lower=True, errors='ignore')) tokens = line.lower().split() new_tokens = [] for n in ngrams: ngram = ILP.getNgramTokenized(tokens, n, NoStopWords=True, Stemmed=True, ngramTag=ngramTag) new_tokens = new_tokens + ngram return " ".join(new_tokens) def iter_folder(folder, extension, ngrams=[1]): for subdir, dirs, files in os.walk(folder): for file in sorted(files): if not file.endswith(extension): continue print file document = open(file).readlines() for line in document: line = ProcessLine(line, ngrams) #print line # break document into utf8 tokens yield gensim.utils.tokenize(line, lower=True, errors='ignore') def iter_documents(outdir, types, sheets = range(0,25), np='syntax', ngrams=[1]): """ Generator: iterate over all relevant documents, yielding one document (=list of utf8 tokens) at a time. """ print "types:", types # find all .txt documents, no matter how deep under top_directory for i, sheet in enumerate(sheets): week = i + 1 dir = outdir + str(week) + '/' for question in types: prefix = dir + question + "." 
+ np filename = prefix + phraseext if not fio.IsExist(filename): continue document = open(prefix + phraseext).readlines() for line in document: line = ProcessLine(line,ngrams) #print line # break document into utf8 tokens yield gensim.utils.tokenize(line, lower=True, errors='ignore') def readbook(path, ngrams=[1]): document = open(path).readlines() for line in document: line = re.sub( '\s+', ' ', line).strip() if len(line) == 0: continue line = ProcessLine(line, ngrams) # break document into utf8 tokens yield gensim.utils.tokenize(line, lower=True, errors='ignore') class TxtSubdirsCorpus(object): """ Iterable: on each iteration, return bag-of-words vectors, one vector for each document.
Process one document at a time using generators, never load the entire corpus into RAM. """ def __init__(self, top_dir, types=['POI', 'MP', 'LP'], sheets = range(0,25), np='syntax', ngrams=[1]): self.types = types self.top_dir = top_dir self.np = np self.ngrams = ngrams self.sheets = sheets # create dictionary = mapping for documents => sparse vectors self.dictionary = gensim.corpora.Dictionary(iter_documents(top_dir, types, sheets, np, ngrams)) def __iter__(self): """ Again, __iter__ is a generator => TxtSubdirsCorpus is a streamed iterable. """ for tokens in iter_documents(self.top_dir, self.types, self.sheets, self.np, self.ngrams): # transform tokens (strings) into a sparse vector, one at a time yield self.dictionary.doc2bow(tokens) class TacCorpus(object): def __init__(self, top_dir, ngrams=[1]): self.top_dir = top_dir self.dictionary = gensim.corpora.Dictionary(iter_documents(top_dir, ngrams)) def __iter__(self): """ Again, __iter__ is a generator => TxtSubdirsCorpus is a streamed iterable. """ for tokens in iter_folder(self.top_dir, self.ngrams): # transform tokens (strings) into a sparse vector, one at a time yield self.dictionary.doc2bow(tokens) class BookCorpus(object): """ Iterable: on each iteration, return bag-of-words vectors, one vector for each document. Process one document at a time using generators, never load the entire corpus into RAM. """ def __init__(self, path, ngrams=[1]): self.path = path self.ngrams = ngrams # create dictionary = mapping for documents => sparse vectors self.dictionary = gensim.corpora.Dictionary(readbook(path, ngrams)) def __iter__(self): """ Again, __iter__ is a generator => TxtSubdirsCorpus is a streamed iterable. 
""" for tokens in readbook(self.path, self.ngrams): # transform tokens (strings) into a sparse vector, one at a time yield self.dictionary.doc2bow(tokens) def SaveCSC2(csc, filename): s = csc.shape m = s[0] n = s[1] body = [] for i in range(m): row = [] for j in range(n): row.append(csc[i, j]) body.append(row) fio.WriteMatrix(filename, body, header=None) def SaveCSC(csc, filename): A = csc.toarray() s = csc.shape m = s[0] n = s[1] data = [] for i in range(m): row = [] for j in range(n): x = A[i][j] if x != 0: row.append([j, A[i][j]]) data.append(row) with open(filename, 'w') as fin: json.dump(data, fin, indent = 2) def SaveSparseMatrix(A, filename): m = len(A) n = len(A[0]) data = [] for i in range(m): row = [] for j in range(n): x = A[i][j] if x != 0: row.append([j, A[i][j]]) data.append(row) with open(filename, 'w') as fin: json.dump(data, fin, indent = 2) def SaveNewA(A, dict, path, ngrams, prefixname="", sheets = range(0,25), np='sentence', types=['POI', 'MP', 'LP']): TotoalLine = 0 for i in sheets: week = i + 1 dir = path + str(week) + '/' for type in types: prefix = dir + type + "." + np print prefix if not fio.IsExist(prefix + phraseext): print prefix + phraseext continue document = open(prefix + phraseext).readlines() LineRange = range(TotoalLine, TotoalLine + len(document)) TotoalLine = TotoalLine + len(document) Bigrams = [] for line in document: line = ProcessLine(line, ngrams) tokens = list(gensim.utils.tokenize(line, lower=True, errors='ignore')) Bigrams = Bigrams + tokens PartA = {} for bigram in set(Bigrams): if bigram not in dict: print "error", bigram id = dict[bigram] row = A[id] PartA[bigram] = [row[x] for x in LineRange] svdAname = dir + type + '.' 
+prefixname + '.softA' print svdAname with open(svdAname, 'w') as fout: json.dump(PartA, fout, indent=2) def ToBinary(csc): A = csc.toarray() s = csc.shape m = s[0] n = s[1] m = len(A) n = len(A[0]) for i in range(m): row = [] for j in range(n): if A[i][j] >= 1: A[i][j] = 1 return A def CheckBinary(A): m = len(A) n = len(A[0]) for i in range(m): row = [] for j in range(n): if A[i][j] != 0 and A[i][j] != 1: return False return True def getSVD(prefix, np, corpusname, ngrams, rank_max, softImpute_lambda, binary_matrix, output, types = ['POI', 'MP', 'LP']): #types = ['POI', 'MP', 'LP'] path = prefix sheets = range(0,26) dictname = output + "_".join(types) + '_' + corpusname + corpusdictexe # # that's it! the streamed corpus of sparse vectors is ready # if corpusname=='book': # corpus = BookCorpus(np, ngrams) # elif corpusname == 'tac': # corpus = TacCorpus(prefix, ngrams) # dictname = path + '_' + corpusname + corpusdictexe # else: # corpus = TxtSubdirsCorpus(prefix, types, sheets, np, ngrams) # # fio.SaveDict2Json(corpus.dictionary.token2id, dictname) # # # or run truncated Singular Value Decomposition (SVD) on the streamed corpus # #from gensim.models.lsimodel import stochastic_svd as svd # #u, s = svd(corpus, rank=300, num_terms=len(corpus.dictionary), chunksize=5000) # # #https://pypi.python.org/pypi/sparsesvd/ # scipy_csc_matrix = gensim.matutils.corpus2csc(corpus) # print scipy_csc_matrix.shape # # print "binary_matrix: ", binary_matrix # # A = ToBinary(scipy_csc_matrix) # # rank = rank_max # print rank # # name = 'X' # newA = softImputeWrapper.SoftImpute(A.T, rank=rank, Lambda=softImpute_lambda, name=name, folder=output) prefix = str("500_2.0") newA = softImputeWrapper.LoadMC(Lambda=prefix, name='newX', folder=output) if newA != None: print newA.shape prefix = '2.0' token2id = fio.LoadDictJson(dictname) SaveNewA(newA, token2id, path, ngrams, prefix, np=np, types=types) def TestProcessLine(): line = "how to determine the answers to part iii , in the activity ." 
print ProcessLine(line, [1, 2]).split() tokens = line.lower().split() ngrams = [] for n in [1,2]: grams = ILP.getNgramTokenized(tokens, n, NoStopWords=True, Stemmed=True) ngrams = ngrams + grams print ngrams def getMC_IE256(): ILP_dir = "../../data/IE256/MC/" outdir = "../../data/matrix/exp8/" #TestProcessLine() from config import ConfigFile config = ConfigFile(config_file_name='config_IE256.txt') for np in ['sentence']: getSVD(ILP_dir, np, corpusname='corpus', ngrams=config.get_ngrams(), rank_max = config.get_rank_max(), softImpute_lambda = config.get_softImpute_lambda(), binary_matrix = config.get_binary_matrix(), output=outdir, types=['q1','q2']) print "done" if __name__ == '__main__': getMC_IE256() exit(-1) excelfile = "../../data/2011Spring_norm.xls" sennadatadir = "../../data/senna/" ILP_dir = "../../data/IE256/MC/" outdir = ILP_dir #TestProcessLine() from config import ConfigFile config = ConfigFile(config_file_name='tac_config.txt') for np in ['sentence']: getSVD(ILP_dir, np, corpusname='corpus', ngrams=config.get_ngrams(), rank_max = config.get_rank_max(), softImpute_lambda = config.get_softImpute_lambda(), binary_matrix = config.get_binary_matrix(), output=outdir) print "done"
random_line_split
SVD_getMatrixCompletion.py
#http://radimrehurek.com/2014/03/data-streaming-in-python-generators-iterators-iterables/ import json import gensim import fio import numpy, scipy.sparse from scipy.sparse.linalg import svds as sparsesvd import re import porter import pickle import softImputeWrapper import ILP_baseline as ILP import os phraseext = ".key" #a list studentext = ".keys.source" #json countext = ".dict" #a dictionary lpext = ".lp" lpsolext = ".sol" ngramext = ".ngram.json" corpusdictexe = ".corpus.dict" cscexe = ".mat.txt" mcexe = ".mc.txt" ngramTag = "___" def ProcessLine(line,ngrams=[1]): #tokens = list(gensim.utils.tokenize(line, lower=True, errors='ignore')) tokens = line.lower().split() new_tokens = [] for n in ngrams: ngram = ILP.getNgramTokenized(tokens, n, NoStopWords=True, Stemmed=True, ngramTag=ngramTag) new_tokens = new_tokens + ngram return " ".join(new_tokens) def iter_folder(folder, extension, ngrams=[1]): for subdir, dirs, files in os.walk(folder): for file in sorted(files): if not file.endswith(extension): continue print file document = open(file).readlines() for line in document: line = ProcessLine(line, ngrams) #print line # break document into utf8 tokens yield gensim.utils.tokenize(line, lower=True, errors='ignore') def iter_documents(outdir, types, sheets = range(0,25), np='syntax', ngrams=[1]): """ Generator: iterate over all relevant documents, yielding one document (=list of utf8 tokens) at a time. """ print "types:", types # find all .txt documents, no matter how deep under top_directory for i, sheet in enumerate(sheets): week = i + 1 dir = outdir + str(week) + '/' for question in types: prefix = dir + question + "." + np filename = prefix + phraseext if not fio.IsExist(filename): continue document = open(prefix + phraseext).readlines() for line in document: line = ProcessLine(line,ngrams) #print line # break document into utf8 tokens yield gensim.utils.tokenize(line, lower=True, errors='ignore') def readbook(path, ngrams=[1]):
class TxtSubdirsCorpus(object): """ Iterable: on each iteration, return bag-of-words vectors, one vector for each document. Process one document at a time using generators, never load the entire corpus into RAM. """ def __init__(self, top_dir, types=['POI', 'MP', 'LP'], sheets = range(0,25), np='syntax', ngrams=[1]): self.types = types self.top_dir = top_dir self.np = np self.ngrams = ngrams self.sheets = sheets # create dictionary = mapping for documents => sparse vectors self.dictionary = gensim.corpora.Dictionary(iter_documents(top_dir, types, sheets, np, ngrams)) def __iter__(self): """ Again, __iter__ is a generator => TxtSubdirsCorpus is a streamed iterable. """ for tokens in iter_documents(self.top_dir, self.types, self.sheets, self.np, self.ngrams): # transform tokens (strings) into a sparse vector, one at a time yield self.dictionary.doc2bow(tokens) class TacCorpus(object): def __init__(self, top_dir, ngrams=[1]): self.top_dir = top_dir self.dictionary = gensim.corpora.Dictionary(iter_documents(top_dir, ngrams)) def __iter__(self): """ Again, __iter__ is a generator => TxtSubdirsCorpus is a streamed iterable. """ for tokens in iter_folder(self.top_dir, self.ngrams): # transform tokens (strings) into a sparse vector, one at a time yield self.dictionary.doc2bow(tokens) class BookCorpus(object): """ Iterable: on each iteration, return bag-of-words vectors, one vector for each document. Process one document at a time using generators, never load the entire corpus into RAM. """ def __init__(self, path, ngrams=[1]): self.path = path self.ngrams = ngrams # create dictionary = mapping for documents => sparse vectors self.dictionary = gensim.corpora.Dictionary(readbook(path, ngrams)) def __iter__(self): """ Again, __iter__ is a generator => TxtSubdirsCorpus is a streamed iterable. 
""" for tokens in readbook(self.path, self.ngrams): # transform tokens (strings) into a sparse vector, one at a time yield self.dictionary.doc2bow(tokens) def SaveCSC2(csc, filename): s = csc.shape m = s[0] n = s[1] body = [] for i in range(m): row = [] for j in range(n): row.append(csc[i, j]) body.append(row) fio.WriteMatrix(filename, body, header=None) def SaveCSC(csc, filename): A = csc.toarray() s = csc.shape m = s[0] n = s[1] data = [] for i in range(m): row = [] for j in range(n): x = A[i][j] if x != 0: row.append([j, A[i][j]]) data.append(row) with open(filename, 'w') as fin: json.dump(data, fin, indent = 2) def SaveSparseMatrix(A, filename): m = len(A) n = len(A[0]) data = [] for i in range(m): row = [] for j in range(n): x = A[i][j] if x != 0: row.append([j, A[i][j]]) data.append(row) with open(filename, 'w') as fin: json.dump(data, fin, indent = 2) def SaveNewA(A, dict, path, ngrams, prefixname="", sheets = range(0,25), np='sentence', types=['POI', 'MP', 'LP']): TotoalLine = 0 for i in sheets: week = i + 1 dir = path + str(week) + '/' for type in types: prefix = dir + type + "." + np print prefix if not fio.IsExist(prefix + phraseext): print prefix + phraseext continue document = open(prefix + phraseext).readlines() LineRange = range(TotoalLine, TotoalLine + len(document)) TotoalLine = TotoalLine + len(document) Bigrams = [] for line in document: line = ProcessLine(line, ngrams) tokens = list(gensim.utils.tokenize(line, lower=True, errors='ignore')) Bigrams = Bigrams + tokens PartA = {} for bigram in set(Bigrams): if bigram not in dict: print "error", bigram id = dict[bigram] row = A[id] PartA[bigram] = [row[x] for x in LineRange] svdAname = dir + type + '.' 
+prefixname + '.softA' print svdAname with open(svdAname, 'w') as fout: json.dump(PartA, fout, indent=2) def ToBinary(csc): A = csc.toarray() s = csc.shape m = s[0] n = s[1] m = len(A) n = len(A[0]) for i in range(m): row = [] for j in range(n): if A[i][j] >= 1: A[i][j] = 1 return A def CheckBinary(A): m = len(A) n = len(A[0]) for i in range(m): row = [] for j in range(n): if A[i][j] != 0 and A[i][j] != 1: return False return True def getSVD(prefix, np, corpusname, ngrams, rank_max, softImpute_lambda, binary_matrix, output, types = ['POI', 'MP', 'LP']): #types = ['POI', 'MP', 'LP'] path = prefix sheets = range(0,26) dictname = output + "_".join(types) + '_' + corpusname + corpusdictexe # # that's it! the streamed corpus of sparse vectors is ready # if corpusname=='book': # corpus = BookCorpus(np, ngrams) # elif corpusname == 'tac': # corpus = TacCorpus(prefix, ngrams) # dictname = path + '_' + corpusname + corpusdictexe # else: # corpus = TxtSubdirsCorpus(prefix, types, sheets, np, ngrams) # # fio.SaveDict2Json(corpus.dictionary.token2id, dictname) # # # or run truncated Singular Value Decomposition (SVD) on the streamed corpus # #from gensim.models.lsimodel import stochastic_svd as svd # #u, s = svd(corpus, rank=300, num_terms=len(corpus.dictionary), chunksize=5000) # # #https://pypi.python.org/pypi/sparsesvd/ # scipy_csc_matrix = gensim.matutils.corpus2csc(corpus) # print scipy_csc_matrix.shape # # print "binary_matrix: ", binary_matrix # # A = ToBinary(scipy_csc_matrix) # # rank = rank_max # print rank # # name = 'X' # newA = softImputeWrapper.SoftImpute(A.T, rank=rank, Lambda=softImpute_lambda, name=name, folder=output) prefix = str("500_2.0") newA = softImputeWrapper.LoadMC(Lambda=prefix, name='newX', folder=output) if newA != None: print newA.shape prefix = '2.0' token2id = fio.LoadDictJson(dictname) SaveNewA(newA, token2id, path, ngrams, prefix, np=np, types=types) def TestProcessLine(): line = "how to determine the answers to part iii , in the activity ." 
print ProcessLine(line, [1, 2]).split() tokens = line.lower().split() ngrams = [] for n in [1,2]: grams = ILP.getNgramTokenized(tokens, n, NoStopWords=True, Stemmed=True) ngrams = ngrams + grams print ngrams def getMC_IE256(): ILP_dir = "../../data/IE256/MC/" outdir = "../../data/matrix/exp8/" #TestProcessLine() from config import ConfigFile config = ConfigFile(config_file_name='config_IE256.txt') for np in ['sentence']: getSVD(ILP_dir, np, corpusname='corpus', ngrams=config.get_ngrams(), rank_max = config.get_rank_max(), softImpute_lambda = config.get_softImpute_lambda(), binary_matrix = config.get_binary_matrix(), output=outdir, types=['q1','q2']) print "done" if __name__ == '__main__': getMC_IE256() exit(-1) excelfile = "../../data/2011Spring_norm.xls" sennadatadir = "../../data/senna/" ILP_dir = "../../data/IE256/MC/" outdir = ILP_dir #TestProcessLine() from config import ConfigFile config = ConfigFile(config_file_name='tac_config.txt') for np in ['sentence']: getSVD(ILP_dir, np, corpusname='corpus', ngrams=config.get_ngrams(), rank_max = config.get_rank_max(), softImpute_lambda = config.get_softImpute_lambda(), binary_matrix = config.get_binary_matrix(), output=outdir) print "done"
document = open(path).readlines() for line in document: line = re.sub( '\s+', ' ', line).strip() if len(line) == 0: continue line = ProcessLine(line, ngrams) # break document into utf8 tokens yield gensim.utils.tokenize(line, lower=True, errors='ignore')
identifier_body
SVD_getMatrixCompletion.py
#http://radimrehurek.com/2014/03/data-streaming-in-python-generators-iterators-iterables/ import json import gensim import fio import numpy, scipy.sparse from scipy.sparse.linalg import svds as sparsesvd import re import porter import pickle import softImputeWrapper import ILP_baseline as ILP import os phraseext = ".key" #a list studentext = ".keys.source" #json countext = ".dict" #a dictionary lpext = ".lp" lpsolext = ".sol" ngramext = ".ngram.json" corpusdictexe = ".corpus.dict" cscexe = ".mat.txt" mcexe = ".mc.txt" ngramTag = "___" def ProcessLine(line,ngrams=[1]): #tokens = list(gensim.utils.tokenize(line, lower=True, errors='ignore')) tokens = line.lower().split() new_tokens = [] for n in ngrams: ngram = ILP.getNgramTokenized(tokens, n, NoStopWords=True, Stemmed=True, ngramTag=ngramTag) new_tokens = new_tokens + ngram return " ".join(new_tokens) def iter_folder(folder, extension, ngrams=[1]): for subdir, dirs, files in os.walk(folder): for file in sorted(files): if not file.endswith(extension): continue print file document = open(file).readlines() for line in document: line = ProcessLine(line, ngrams) #print line # break document into utf8 tokens yield gensim.utils.tokenize(line, lower=True, errors='ignore') def iter_documents(outdir, types, sheets = range(0,25), np='syntax', ngrams=[1]): """ Generator: iterate over all relevant documents, yielding one document (=list of utf8 tokens) at a time. """ print "types:", types # find all .txt documents, no matter how deep under top_directory for i, sheet in enumerate(sheets): week = i + 1 dir = outdir + str(week) + '/' for question in types: prefix = dir + question + "." 
+ np filename = prefix + phraseext if not fio.IsExist(filename): continue document = open(prefix + phraseext).readlines() for line in document: line = ProcessLine(line,ngrams) #print line # break document into utf8 tokens yield gensim.utils.tokenize(line, lower=True, errors='ignore') def readbook(path, ngrams=[1]): document = open(path).readlines() for line in document: line = re.sub( '\s+', ' ', line).strip() if len(line) == 0: continue line = ProcessLine(line, ngrams) # break document into utf8 tokens yield gensim.utils.tokenize(line, lower=True, errors='ignore') class TxtSubdirsCorpus(object): """ Iterable: on each iteration, return bag-of-words vectors, one vector for each document. Process one document at a time using generators, never load the entire corpus into RAM. """ def __init__(self, top_dir, types=['POI', 'MP', 'LP'], sheets = range(0,25), np='syntax', ngrams=[1]): self.types = types self.top_dir = top_dir self.np = np self.ngrams = ngrams self.sheets = sheets # create dictionary = mapping for documents => sparse vectors self.dictionary = gensim.corpora.Dictionary(iter_documents(top_dir, types, sheets, np, ngrams)) def __iter__(self): """ Again, __iter__ is a generator => TxtSubdirsCorpus is a streamed iterable. """ for tokens in iter_documents(self.top_dir, self.types, self.sheets, self.np, self.ngrams): # transform tokens (strings) into a sparse vector, one at a time yield self.dictionary.doc2bow(tokens) class TacCorpus(object): def __init__(self, top_dir, ngrams=[1]): self.top_dir = top_dir self.dictionary = gensim.corpora.Dictionary(iter_documents(top_dir, ngrams)) def __iter__(self): """ Again, __iter__ is a generator => TxtSubdirsCorpus is a streamed iterable. """ for tokens in iter_folder(self.top_dir, self.ngrams): # transform tokens (strings) into a sparse vector, one at a time yield self.dictionary.doc2bow(tokens) class BookCorpus(object): """ Iterable: on each iteration, return bag-of-words vectors, one vector for each document. 
Process one document at a time using generators, never load the entire corpus into RAM. """ def __init__(self, path, ngrams=[1]): self.path = path self.ngrams = ngrams # create dictionary = mapping for documents => sparse vectors self.dictionary = gensim.corpora.Dictionary(readbook(path, ngrams)) def __iter__(self): """ Again, __iter__ is a generator => TxtSubdirsCorpus is a streamed iterable. """ for tokens in readbook(self.path, self.ngrams): # transform tokens (strings) into a sparse vector, one at a time yield self.dictionary.doc2bow(tokens) def SaveCSC2(csc, filename): s = csc.shape m = s[0] n = s[1] body = [] for i in range(m): row = [] for j in range(n): row.append(csc[i, j]) body.append(row) fio.WriteMatrix(filename, body, header=None) def SaveCSC(csc, filename): A = csc.toarray() s = csc.shape m = s[0] n = s[1] data = [] for i in range(m): row = [] for j in range(n): x = A[i][j] if x != 0: row.append([j, A[i][j]]) data.append(row) with open(filename, 'w') as fin: json.dump(data, fin, indent = 2) def SaveSparseMatrix(A, filename): m = len(A) n = len(A[0]) data = [] for i in range(m): row = [] for j in range(n): x = A[i][j] if x != 0: row.append([j, A[i][j]]) data.append(row) with open(filename, 'w') as fin: json.dump(data, fin, indent = 2) def SaveNewA(A, dict, path, ngrams, prefixname="", sheets = range(0,25), np='sentence', types=['POI', 'MP', 'LP']): TotoalLine = 0 for i in sheets: week = i + 1 dir = path + str(week) + '/' for type in types: prefix = dir + type + "." 
+ np print prefix if not fio.IsExist(prefix + phraseext): print prefix + phraseext continue document = open(prefix + phraseext).readlines() LineRange = range(TotoalLine, TotoalLine + len(document)) TotoalLine = TotoalLine + len(document) Bigrams = [] for line in document: line = ProcessLine(line, ngrams) tokens = list(gensim.utils.tokenize(line, lower=True, errors='ignore')) Bigrams = Bigrams + tokens PartA = {} for bigram in set(Bigrams): if bigram not in dict: print "error", bigram id = dict[bigram] row = A[id] PartA[bigram] = [row[x] for x in LineRange] svdAname = dir + type + '.' +prefixname + '.softA' print svdAname with open(svdAname, 'w') as fout: json.dump(PartA, fout, indent=2) def ToBinary(csc): A = csc.toarray() s = csc.shape m = s[0] n = s[1] m = len(A) n = len(A[0]) for i in range(m): row = [] for j in range(n): if A[i][j] >= 1: A[i][j] = 1 return A def CheckBinary(A): m = len(A) n = len(A[0]) for i in range(m): row = [] for j in range(n): if A[i][j] != 0 and A[i][j] != 1: return False return True def
(prefix, np, corpusname, ngrams, rank_max, softImpute_lambda, binary_matrix, output, types = ['POI', 'MP', 'LP']): #types = ['POI', 'MP', 'LP'] path = prefix sheets = range(0,26) dictname = output + "_".join(types) + '_' + corpusname + corpusdictexe # # that's it! the streamed corpus of sparse vectors is ready # if corpusname=='book': # corpus = BookCorpus(np, ngrams) # elif corpusname == 'tac': # corpus = TacCorpus(prefix, ngrams) # dictname = path + '_' + corpusname + corpusdictexe # else: # corpus = TxtSubdirsCorpus(prefix, types, sheets, np, ngrams) # # fio.SaveDict2Json(corpus.dictionary.token2id, dictname) # # # or run truncated Singular Value Decomposition (SVD) on the streamed corpus # #from gensim.models.lsimodel import stochastic_svd as svd # #u, s = svd(corpus, rank=300, num_terms=len(corpus.dictionary), chunksize=5000) # # #https://pypi.python.org/pypi/sparsesvd/ # scipy_csc_matrix = gensim.matutils.corpus2csc(corpus) # print scipy_csc_matrix.shape # # print "binary_matrix: ", binary_matrix # # A = ToBinary(scipy_csc_matrix) # # rank = rank_max # print rank # # name = 'X' # newA = softImputeWrapper.SoftImpute(A.T, rank=rank, Lambda=softImpute_lambda, name=name, folder=output) prefix = str("500_2.0") newA = softImputeWrapper.LoadMC(Lambda=prefix, name='newX', folder=output) if newA != None: print newA.shape prefix = '2.0' token2id = fio.LoadDictJson(dictname) SaveNewA(newA, token2id, path, ngrams, prefix, np=np, types=types) def TestProcessLine(): line = "how to determine the answers to part iii , in the activity ." 
print ProcessLine(line, [1, 2]).split() tokens = line.lower().split() ngrams = [] for n in [1,2]: grams = ILP.getNgramTokenized(tokens, n, NoStopWords=True, Stemmed=True) ngrams = ngrams + grams print ngrams def getMC_IE256(): ILP_dir = "../../data/IE256/MC/" outdir = "../../data/matrix/exp8/" #TestProcessLine() from config import ConfigFile config = ConfigFile(config_file_name='config_IE256.txt') for np in ['sentence']: getSVD(ILP_dir, np, corpusname='corpus', ngrams=config.get_ngrams(), rank_max = config.get_rank_max(), softImpute_lambda = config.get_softImpute_lambda(), binary_matrix = config.get_binary_matrix(), output=outdir, types=['q1','q2']) print "done" if __name__ == '__main__': getMC_IE256() exit(-1) excelfile = "../../data/2011Spring_norm.xls" sennadatadir = "../../data/senna/" ILP_dir = "../../data/IE256/MC/" outdir = ILP_dir #TestProcessLine() from config import ConfigFile config = ConfigFile(config_file_name='tac_config.txt') for np in ['sentence']: getSVD(ILP_dir, np, corpusname='corpus', ngrams=config.get_ngrams(), rank_max = config.get_rank_max(), softImpute_lambda = config.get_softImpute_lambda(), binary_matrix = config.get_binary_matrix(), output=outdir) print "done"
getSVD
identifier_name
get.go
package main import ( "bytes" "errors" "io" "log" "sort" "strconv" "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/configservice" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3iface" "github.com/caarlos0/env" ) const ( maxRetries = 20 snapshotFrequency = 3 // frequency of ConfigSnapshots in hours ) // sortItemSlices ... Sorts attributes of ConfigrationItems that are slices func sortItemSlices(i []*configservice.ConfigurationItem) { for _, v := range i { e := v.RelatedEvents r := v.Relationships sort.SliceStable(e, func(i, j int) bool { return sliceSorter(e[i], e[j]) }) sort.SliceStable(r, func(i, j int) bool { return sliceSorter(r[i], r[j]) }) v.RelatedEvents = e v.Relationships = r } } // GetLastExecution ... gets the time of the most recent execution of all config services func (c *CfgSvc) GetLastExecution() (time.Time, error) { t := time.Time{} stats, err := c.GetStatus() if err != nil { return t, err } if len(stats) == 0 { return t, errors.New("empty config rule evaluation status array") } for _, s := range stats { if t.Before(aws.TimeValue(s.LastSuccessfulEvaluationTime)) { t = aws.TimeValue(s.LastSuccessfulEvaluationTime) } } return t, nil } // GetItems ... 
gets AWS Config Service Configuration Items from resource history pages func (c *CfgSvc) GetItems(lastExecution time.Time) (items []*configservice.ConfigurationItem, err error) { res, err := c.GetDiscoveredResources() if err != nil { log.Fatalf("Error getting discovered resources: %v\n", err) return nil, err } for _, r := range res { var results []*configservice.ConfigurationItem input := &configservice.GetResourceConfigHistoryInput{ ResourceType: r.ResourceType, ResourceId: r.ResourceId, EarlierTime: aws.Time(lastExecution.Add(time.Minute * time.Duration(-window))), LaterTime: aws.Time(lastExecution.Add(time.Minute * time.Duration(window))), } err := c.Client.GetResourceConfigHistoryPages(input, func(page *configservice.GetResourceConfigHistoryOutput, lastPage bool) bool { results = append(results, page.ConfigurationItems...) return !lastPage }) if err != nil { log.Fatalf("error getting resource config history (Input: %v):\n%v\n", input, err) return nil, err } items = append(items, results...) } sortItemSlices(items) return items, nil } // GetStatus ... performs DescribeConfigRuleEvaluationStatus for all config rules func (c *CfgSvc) GetStatus() ([]*configservice.ConfigRuleEvaluationStatus, error) { params := configservice.DescribeConfigRuleEvaluationStatusInput{} result, err := c.Client.DescribeConfigRuleEvaluationStatus(&params) if err != nil { return nil, err } status := result.ConfigRulesEvaluationStatus for aws.StringValue(result.NextToken) != ""
return status, nil } // GetDiscoveredResources ... loops through all specified resourceTypes // Lists all resources by Type (Will need to loop over all cfg.ResourceType* types) // https://docs.aws.amazon.com/config/latest/developerguide/resource-config-reference.html#supported-resources func (c *CfgSvc) GetDiscoveredResources() ([]*configservice.ResourceIdentifier, error) { // List of resource types pulled from // github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/api-2.json var resourceTypes = [...]string{ "AWS::AppStream::DirectoryConfig", "AWS::AppStream::Application", "AWS::AppFlow::Flow", "AWS::ApiGateway::Stage", "AWS::ApiGateway::RestApi", "AWS::ApiGatewayV2::Stage", "AWS::ApiGatewayV2::Api", "AWS::Athena::WorkGroup", "AWS::Athena::DataCatalog", "AWS::CloudFront::Distribution", "AWS::CloudFront::StreamingDistribution", "AWS::CloudWatch::Alarm", "AWS::CloudWatch::MetricStream", "AWS::RUM::AppMonitor", "AWS::Evidently::Project", "AWS::CodeGuruReviewer::RepositoryAssociation", "AWS::Connect::PhoneNumber", "AWS::CustomerProfiles::Domain", "AWS::Detective::Graph", "AWS::DynamoDB::Table", "AWS::EC2::Host", "AWS::EC2::EIP", "AWS::EC2::Instance", "AWS::EC2::NetworkInterface", "AWS::EC2::SecurityGroup", "AWS::EC2::NatGateway", "AWS::EC2::EgressOnlyInternetGateway", "AWS::EC2::EC2Fleet", "AWS::EC2::SpotFleet", "AWS::EC2::PrefixList", "AWS::EC2::FlowLog", "AWS::EC2::TransitGateway", "AWS::EC2::TransitGatewayAttachment", "AWS::EC2::TransitGatewayRouteTable", "AWS::EC2::VPCEndpoint", "AWS::EC2::VPCEndpointService", "AWS::EC2::VPCPeeringConnection", "AWS::EC2::RegisteredHAInstance", "AWS::EC2::SubnetRouteTableAssociation", "AWS::EC2::LaunchTemplate", "AWS::EC2::NetworkInsightsAccessScopeAnalysis", "AWS::EC2::TrafficMirrorTarget", "AWS::EC2::TrafficMirrorSession", "AWS::EC2::DHCPOptions", "AWS::EC2::IPAM", "AWS::EC2::NetworkInsightsPath", "AWS::EC2::TrafficMirrorFilter", "AWS::EC2::Volume", "AWS::ImageBuilder::ImagePipeline", 
"AWS::ImageBuilder::DistributionConfiguration", "AWS::ImageBuilder::InfrastructureConfiguration", "AWS::ECR::Repository", "AWS::ECR::RegistryPolicy", "AWS::ECR::PullThroughCacheRule", "AWS::ECR::PublicRepository", "AWS::ECS::Cluster", "AWS::ECS::TaskDefinition", "AWS::ECS::Service", "AWS::ECS::TaskSet", "AWS::EFS::FileSystem", "AWS::EFS::AccessPoint", "AWS::EKS::Cluster", "AWS::EKS::FargateProfile", "AWS::EKS::IdentityProviderConfig", "AWS::EKS::Addon", "AWS::EMR::SecurityConfiguration", "AWS::Events::EventBus", "AWS::Events::ApiDestination", "AWS::Events::Archive", "AWS::Events::Endpoint", "AWS::Events::Connection", "AWS::Events::Rule", "AWS::EC2::TrafficMirrorSession", "AWS::EventSchemas::RegistryPolicy", "AWS::EventSchemas::Discoverer", "AWS::EventSchemas::Schema", "AWS::Forecast::Dataset", "AWS::FraudDetector::Label", "AWS::FraudDetector::EntityType", "AWS::FraudDetector::Variable", "AWS::FraudDetector::Outcome", "AWS::GuardDuty::Detector", "AWS::GuardDuty::ThreatIntelSet", "AWS::GuardDuty::IPSet", "AWS::GuardDuty::Filter", "AWS::HealthLake::FHIRDatastore", "AWS::Cassandra::Keyspace", "AWS::IVS::Channel", "AWS::IVS::RecordingConfiguration", "AWS::IVS::PlaybackKeyPair", "AWS::Elasticsearch::Domain", "AWS::OpenSearch::Domain", "AWS::Elasticsearch::Domain", "AWS::Pinpoint::ApplicationSettings", "AWS::Pinpoint::Segment", "AWS::Pinpoint::App", "AWS::Pinpoint::Campaign", "AWS::Pinpoint::InAppTemplate", "AWS::QLDB::Ledger", "AWS::Kinesis::Stream", "AWS::Kinesis::StreamConsumer", "AWS::KinesisAnalyticsV2::Application", "AWS::KinesisFirehose::DeliveryStream", "AWS::KinesisVideo::SignalingChannel", "AWS::Lex::BotAlias", "AWS::Lex::Bot", "AWS::Lightsail::Disk", "AWS::Lightsail::Certificate", "AWS::Lightsail::Bucket", "AWS::Lightsail::StaticIp", "AWS::LookoutMetrics::Alert", "AWS::LookoutVision::Project", "AWS::AmazonMQ::Broker", "AWS::MSK::Cluster", "AWS::Redshift::Cluster", "AWS::Redshift::ClusterParameterGroup", "AWS::Redshift::ClusterSecurityGroup", 
"AWS::Redshift::ScheduledAction", "AWS::Redshift::ClusterSnapshot", "AWS::Redshift::ClusterSubnetGroup", "AWS::Redshift::EventSubscription", "AWS::RDS::DBInstance", "AWS::RDS::DBSecurityGroup", "AWS::RDS::DBSnapshot", "AWS::RDS::DBSubnetGroup", "AWS::RDS::EventSubscription", "AWS::RDS::DBCluster", "AWS::RDS::DBClusterSnapshot", "AWS::RDS::GlobalCluster", "AWS::Route53::HostedZone", "AWS::Route53::HealthCheck", "AWS::Route53Resolver::ResolverEndpoint", "AWS::Route53Resolver::ResolverRule", "AWS::Route53Resolver::ResolverRuleAssociation", "AWS::Route53Resolver::FirewallDomainList", "AWS::AWS::Route53Resolver::FirewallRuleGroupAssociation", "AWS::Route53RecoveryReadiness::Cell", "AWS::Route53RecoveryReadiness::ReadinessCheck", "AWS::Route53RecoveryReadiness::RecoveryGroup", "AWS::Route53RecoveryControl::Cluster", "AWS::Route53RecoveryControl::ControlPanel", "AWS::Route53RecoveryControl::RoutingControl", "AWS::Route53RecoveryControl::SafetyRule", "AWS::Route53RecoveryReadiness::ResourceSet", "AWS::SageMaker::CodeRepository", "AWS::SageMaker::Domain", "AWS::SageMaker::AppImageConfig", "AWS::SageMaker::Image", "AWS::SageMaker::Model", "AWS::SageMaker::NotebookInstance", "AWS::SageMaker::NotebookInstanceLifecycleConfig", "AWS::SageMaker::EndpointConfig", "AWS::SageMaker::Workteam", "AWS::SES::ConfigurationSet", "AWS::SES::ContactList", "AWS::SES::Template", "AWS::SES::ReceiptFilter", "AWS::SES::ReceiptRuleSet", "AWS::SNS::Topic", "AWS::SQS::Queue", "AWS::S3::Bucket", "AWS::S3::AccountPublicAccessBlock", "AWS::S3::MultiRegionAccessPoint", "AWS::S3::StorageLens", "AWS::EC2::CustomerGateway", "AWS::EC2::InternetGateway", "AWS::EC2::NetworkAcl", "AWS::EC2::RouteTable", "AWS::EC2::Subnet", "AWS::EC2::VPC", "AWS::EC2::VPNConnection", "AWS::EC2::VPNGateway", "AWS::NetworkManager::TransitGatewayRegistration", "AWS::NetworkManager::Site", "AWS::NetworkManager::Device", "AWS::NetworkManager::Link", "AWS::NetworkManager::GlobalNetwork", "AWS::WorkSpaces::ConnectionAlias", 
"AWS::WorkSpaces::Workspace", "AWS::Amplify::App", "AWS::AppConfig::Application", "AWS::AppConfig::Environment", "AWS::AppConfig::ConfigurationProfile", "AWS::AppConfig::DeploymentStrategy", "AWS::AppRunner::VpcConnector", "AWS::AppMesh::VirtualNode", "AWS::AppMesh::VirtualService", "AWS::AppSync::GraphQLApi", "AWS::AuditManager::Assessment", "AWS::AutoScaling::AutoScalingGroup", "AWS::AutoScaling::LaunchConfiguration", "AWS::AutoScaling::ScalingPolicy", "AWS::AutoScaling::ScheduledAction", "AWS::AutoScaling::WarmPool", "AWS::Backup::BackupPlan", "AWS::Backup::BackupSelection", "AWS::Backup::BackupVault", "AWS::Backup::RecoveryPoint", "AWS::Backup::ReportPlan", "AWS::Backup::BackupPlan", "AWS::Backup::BackupSelection", "AWS::Backup::BackupVault", "AWS::Backup::RecoveryPoint", "AWS::Batch::JobQueue", "AWS::Batch::ComputeEnvironment", "AWS::Budgets::BudgetsAction", "AWS::ACM::Certificate", "AWS::CloudFormation::Stack", "AWS::CloudTrail::Trail", "AWS::Cloud9::EnvironmentEC2", "AWS::ServiceDiscovery::Service", "AWS::ServiceDiscovery::PublicDnsNamespace", "AWS::ServiceDiscovery::HttpNamespace", "AWS::CodeArtifact::Repository", "AWS::CodeBuild::Project", "AWS::CodeDeploy::Application", "AWS::CodeDeploy::DeploymentConfig", "AWS::CodeDeploy::DeploymentGroup", "AWS::CodePipeline::Pipeline", "AWS::Config::ResourceCompliance", "AWS::Config::ConformancePackCompliance", "AWS::Config::ConfigurationRecorder", "AWS::Config::ResourceCompliance", "AWS::Config::ConfigurationRecorder", "AWS::Config::ConformancePackCompliance", "AWS::Config::ConfigurationRecorder", "AWS::DMS::EventSubscription", "AWS::DMS::ReplicationSubnetGroup", "AWS::DMS::ReplicationInstance", "AWS::DMS::ReplicationTask", "AWS::DMS::Certificate", "AWS::DataSync::LocationSMB", "AWS::DataSync::LocationFSxLustre", "AWS::DataSync::LocationFSxWindows", "AWS::DataSync::LocationS3", "AWS::DataSync::LocationEFS", "AWS::DataSync::LocationNFS", "AWS::DataSync::LocationHDFS", "AWS::DataSync::LocationObjectStorage", 
"AWS::DataSync::Task", "AWS::DeviceFarm::TestGridProject", "AWS::DeviceFarm::InstanceProfile", "AWS::DeviceFarm::Project", "AWS::ElasticBeanstalk::Application", "AWS::ElasticBeanstalk::ApplicationVersion", "AWS::ElasticBeanstalk::Environment", "AWS::FIS::ExperimentTemplate", "AWS::GlobalAccelerator::Listener", "AWS::GlobalAccelerator::EndpointGroup", "AWS::GlobalAccelerator::Accelerator", "AWS::Glue::Job", "AWS::Glue::Classifier", "AWS::Glue::MLTransform", "AWS::GroundStation::Config", "AWS::IAM::User", "AWS::IAM::SAMLProvider", "AWS::IAM::ServerCertificate", "AWS::IAM::Group", "AWS::IAM::Role", "AWS::IAM::Policy", "AWS::AccessAnalyzer::Analyzer", "AWS::IoT::Authorizer", "AWS::IoT::SecurityProfile", "AWS::IoT::RoleAlias", "AWS::IoT::Dimension", "AWS::IoT::Policy", "AWS::IoT::MitigationAction", "AWS::IoT::ScheduledAudit", "AWS::IoT::AccountAuditConfiguration", "AWS::IoTSiteWise::Gateway", "AWS::IoT::CustomMetric", "AWS::IoTWireless::ServiceProfile", "AWS::IoT::FleetMetric", "AWS::IoTAnalytics::Datastore", "AWS::IoTAnalytics::Dataset", "AWS::IoTAnalytics::Pipeline", "AWS::IoTAnalytics::Channel", "AWS::IoTEvents::Input", "AWS::IoTEvents::DetectorModel", "AWS::IoTEvents::AlarmModel", "AWS::IoTTwinMaker::Workspace", "AWS::IoTTwinMaker::Entity", "AWS::IoTTwinMaker::Scene", "AWS::IoTSiteWise::Dashboard", "AWS::IoTSiteWise::Project", "AWS::IoTSiteWise::Portal", "AWS::IoTSiteWise::AssetModel", "AWS::KMS::Key", "AWS::KMS::Alias", "AWS::Lambda::Function", "AWS::Lambda::Alias", "AWS::NetworkFirewall::Firewall", "AWS::NetworkFirewall::FirewallPolicy", "AWS::NetworkFirewall::RuleGroup", "AWS::NetworkFirewall::TLSInspectionConfiguration", "AWS:Panorama::Package", "AWS::ResilienceHub::ResiliencyPolicy", "AWS::RoboMaker::RobotApplicationVersion", "AWS::RoboMaker::RobotApplication", "AWS::RoboMaker::SimulationApplication", "AWS::Signer::SigningProfile", "AWS::SecretsManager::Secret", "AWS::ServiceCatalog::CloudFormationProduct", 
"AWS::ServiceCatalog::CloudFormationProvisionedProduct", "AWS::ServiceCatalog::Portfolio", "AWS::Shield::Protection", "AWS::ShieldRegional::Protection", "AWS::StepFunctions::Activity", "AWS::StepFunctions::StateMachine", "AWS::SSM::ManagedInstanceInventory", "AWS::SSM::PatchCompliance", "AWS::SSM::AssociationCompliance", "AWS::SSM::FileData", "AWS::Transfer::Agreement", "AWS::Transfer::Connector", "AWS::Transfer::Workflow", "AWS::WAF::RateBasedRule", "AWS::WAF::Rule", "AWS::WAF::WebACL", "AWS::WAF::RuleGroup", "AWS::WAFRegional::RateBasedRule", "AWS::WAFRegional::Rule", "AWS::WAFRegional::WebACL", "AWS::WAFRegional::RuleGroup", "AWS::WAFv2::WebACL", "AWS::WAFv2::RuleGroup", "AWS::WAFv2::ManagedRuleSet", "AWS::WAFv2::IPSet", "AWS::WAFv2::RegexPatternSet", "AWS::XRay::EncryptionConfig", "AWS::ElasticLoadBalancingV2::LoadBalancer", "AWS::ElasticLoadBalancingV2::Listener", "AWS::ElasticLoadBalancing::LoadBalancer", "AWS::ElasticLoadBalancingV2::LoadBalancer", "AWS::MediaPackage::PackagingGroup", "AWS::MediaPackage::PackagingConfiguration", } // nolint: prealloc var res []*configservice.ResourceIdentifier for _, t := range &resourceTypes { t := t input := &configservice.ListDiscoveredResourcesInput{ ResourceType: aws.String(t), } result, err := c.Client.ListDiscoveredResources(input) if err != nil { log.Fatalf("Error ListDiscoveredResources (ResourceType: %s): %v\n", t, err) return nil, err } res = append(res, result.ResourceIdentifiers...) for aws.StringValue(result.NextToken) != "" { input.NextToken = result.NextToken result, err = c.Client.ListDiscoveredResources(input) if err != nil { log.Fatalf("Error ListDiscoveredResources (Input: %v): %v\n", input, err) return nil, err } res = append(res, result.ResourceIdentifiers...) } } return res, nil } // getSnapshotOfItem ... 
finds ConfigurationItem in Snaphot with matching ResourceId and ResourceType func getSnapshotOfItem(item map[string]interface{}, snapshots []map[string]interface{}) map[string]interface{} { id := item["ResourceId"] resType := item["ResourceType"] for _, s := range snapshots { m := s if id == m["ResourceId"].(string) && resType == m["ResourceType"].(string) { return m } } return nil } // getPreviousSnapshot ... gets the name of the config snapshot bucket object // created prior to the lastExecution time // Assumes snapshots are taken every three hours - gets snapshot older than // lastExecution time but less than three hours before lastExecution time func getPreviousSnapshot( items []*configservice.ConfigurationItem, t time.Time, bucket, region string, svc s3iface.S3API) (*s3.Object, string, error) { // Get time from three hours before change...since snapshots are taken every // three hours, this will ensure we are looking in the correct folder by date prevTime := t.Add(time.Hour * time.Duration(-snapshotFrequency)) year, month, day := prevTime.Date() account := aws.StringValue(items[0].AccountId) prefix := strings.Join([]string{ "awsconfig", "AWSLogs", account, "Config", region, strconv.Itoa(year), strconv.Itoa(int(month)), strconv.Itoa(day), "ConfigSnapshot", }, "/") input := &s3.ListObjectsInput{ Bucket: aws.String(bucket), Prefix: aws.String(prefix), } results, err := svc.ListObjects(input) if err != nil { return nil, "", err } for _, o := range results.Contents { m := aws.TimeValue(o.LastModified) if m.After(prevTime) && m.Before(t) { return getSnapshot(svc, bucket, o) } } return nil, "", errors.New("snapshot not found") } func getSnapshot(svc s3iface.S3API, bucket string, o *s3.Object) (*s3.Object, string, error) { input := &s3.GetObjectInput{ Bucket: aws.String(bucket), Key: o.Key, } result, err := svc.GetObject(input) if err != nil { return o, "", err } defer result.Body.Close() b := bytes.Buffer{} if _, err := io.Copy(&b, result.Body); err != nil { return 
o, "", err } return o, b.String(), nil } func getSess() (config, *session.Session, error) { cfg := config{} err := env.Parse(&cfg) if err != nil { log.Fatalf("error parsing env config: %v", err) return cfg, nil, err } sess, err := session.NewSession( &aws.Config{ Region: aws.String(cfg.DefaultRegion), MaxRetries: aws.Int(maxRetries), }) if err != nil { log.Fatalf("error creating new session: %v\n", err) return cfg, nil, err } return cfg, sess, nil }
{ params.NextToken = result.NextToken result, err = c.Client.DescribeConfigRuleEvaluationStatus(&params) if err != nil { return nil, err } status = append(status, result.ConfigRulesEvaluationStatus...) }
conditional_block
get.go
package main import ( "bytes" "errors" "io" "log" "sort" "strconv" "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/configservice" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3iface" "github.com/caarlos0/env" ) const ( maxRetries = 20 snapshotFrequency = 3 // frequency of ConfigSnapshots in hours ) // sortItemSlices ... Sorts attributes of ConfigrationItems that are slices func sortItemSlices(i []*configservice.ConfigurationItem) { for _, v := range i { e := v.RelatedEvents r := v.Relationships sort.SliceStable(e, func(i, j int) bool { return sliceSorter(e[i], e[j]) }) sort.SliceStable(r, func(i, j int) bool { return sliceSorter(r[i], r[j]) }) v.RelatedEvents = e v.Relationships = r } } // GetLastExecution ... gets the time of the most recent execution of all config services func (c *CfgSvc) GetLastExecution() (time.Time, error) { t := time.Time{} stats, err := c.GetStatus() if err != nil { return t, err } if len(stats) == 0 { return t, errors.New("empty config rule evaluation status array") } for _, s := range stats { if t.Before(aws.TimeValue(s.LastSuccessfulEvaluationTime)) { t = aws.TimeValue(s.LastSuccessfulEvaluationTime) } } return t, nil } // GetItems ... 
gets AWS Config Service Configuration Items from resource history pages func (c *CfgSvc) GetItems(lastExecution time.Time) (items []*configservice.ConfigurationItem, err error) { res, err := c.GetDiscoveredResources() if err != nil { log.Fatalf("Error getting discovered resources: %v\n", err) return nil, err } for _, r := range res { var results []*configservice.ConfigurationItem input := &configservice.GetResourceConfigHistoryInput{ ResourceType: r.ResourceType, ResourceId: r.ResourceId, EarlierTime: aws.Time(lastExecution.Add(time.Minute * time.Duration(-window))), LaterTime: aws.Time(lastExecution.Add(time.Minute * time.Duration(window))), } err := c.Client.GetResourceConfigHistoryPages(input, func(page *configservice.GetResourceConfigHistoryOutput, lastPage bool) bool { results = append(results, page.ConfigurationItems...) return !lastPage }) if err != nil { log.Fatalf("error getting resource config history (Input: %v):\n%v\n", input, err) return nil, err } items = append(items, results...) } sortItemSlices(items) return items, nil } // GetStatus ... performs DescribeConfigRuleEvaluationStatus for all config rules func (c *CfgSvc) GetStatus() ([]*configservice.ConfigRuleEvaluationStatus, error) { params := configservice.DescribeConfigRuleEvaluationStatusInput{} result, err := c.Client.DescribeConfigRuleEvaluationStatus(&params) if err != nil { return nil, err } status := result.ConfigRulesEvaluationStatus for aws.StringValue(result.NextToken) != "" { params.NextToken = result.NextToken result, err = c.Client.DescribeConfigRuleEvaluationStatus(&params) if err != nil { return nil, err } status = append(status, result.ConfigRulesEvaluationStatus...) } return status, nil } // GetDiscoveredResources ... 
loops through all specified resourceTypes // Lists all resources by Type (Will need to loop over all cfg.ResourceType* types) // https://docs.aws.amazon.com/config/latest/developerguide/resource-config-reference.html#supported-resources func (c *CfgSvc) GetDiscoveredResources() ([]*configservice.ResourceIdentifier, error) { // List of resource types pulled from // github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/api-2.json var resourceTypes = [...]string{ "AWS::AppStream::DirectoryConfig", "AWS::AppStream::Application", "AWS::AppFlow::Flow", "AWS::ApiGateway::Stage", "AWS::ApiGateway::RestApi", "AWS::ApiGatewayV2::Stage", "AWS::ApiGatewayV2::Api", "AWS::Athena::WorkGroup", "AWS::Athena::DataCatalog", "AWS::CloudFront::Distribution", "AWS::CloudFront::StreamingDistribution", "AWS::CloudWatch::Alarm", "AWS::CloudWatch::MetricStream", "AWS::RUM::AppMonitor", "AWS::Evidently::Project", "AWS::CodeGuruReviewer::RepositoryAssociation", "AWS::Connect::PhoneNumber", "AWS::CustomerProfiles::Domain", "AWS::Detective::Graph", "AWS::DynamoDB::Table", "AWS::EC2::Host", "AWS::EC2::EIP", "AWS::EC2::Instance", "AWS::EC2::NetworkInterface", "AWS::EC2::SecurityGroup", "AWS::EC2::NatGateway", "AWS::EC2::EgressOnlyInternetGateway", "AWS::EC2::EC2Fleet", "AWS::EC2::SpotFleet", "AWS::EC2::PrefixList", "AWS::EC2::FlowLog", "AWS::EC2::TransitGateway", "AWS::EC2::TransitGatewayAttachment", "AWS::EC2::TransitGatewayRouteTable", "AWS::EC2::VPCEndpoint", "AWS::EC2::VPCEndpointService", "AWS::EC2::VPCPeeringConnection", "AWS::EC2::RegisteredHAInstance", "AWS::EC2::SubnetRouteTableAssociation", "AWS::EC2::LaunchTemplate", "AWS::EC2::NetworkInsightsAccessScopeAnalysis", "AWS::EC2::TrafficMirrorTarget", "AWS::EC2::TrafficMirrorSession", "AWS::EC2::DHCPOptions", "AWS::EC2::IPAM", "AWS::EC2::NetworkInsightsPath", "AWS::EC2::TrafficMirrorFilter", "AWS::EC2::Volume", "AWS::ImageBuilder::ImagePipeline", "AWS::ImageBuilder::DistributionConfiguration", 
"AWS::ImageBuilder::InfrastructureConfiguration", "AWS::ECR::Repository", "AWS::ECR::RegistryPolicy", "AWS::ECR::PullThroughCacheRule", "AWS::ECR::PublicRepository", "AWS::ECS::Cluster", "AWS::ECS::TaskDefinition", "AWS::ECS::Service", "AWS::ECS::TaskSet", "AWS::EFS::FileSystem", "AWS::EFS::AccessPoint", "AWS::EKS::Cluster", "AWS::EKS::FargateProfile", "AWS::EKS::IdentityProviderConfig", "AWS::EKS::Addon", "AWS::EMR::SecurityConfiguration", "AWS::Events::EventBus", "AWS::Events::ApiDestination", "AWS::Events::Archive", "AWS::Events::Endpoint", "AWS::Events::Connection", "AWS::Events::Rule", "AWS::EC2::TrafficMirrorSession", "AWS::EventSchemas::RegistryPolicy", "AWS::EventSchemas::Discoverer", "AWS::EventSchemas::Schema", "AWS::Forecast::Dataset", "AWS::FraudDetector::Label", "AWS::FraudDetector::EntityType", "AWS::FraudDetector::Variable", "AWS::FraudDetector::Outcome", "AWS::GuardDuty::Detector", "AWS::GuardDuty::ThreatIntelSet", "AWS::GuardDuty::IPSet", "AWS::GuardDuty::Filter", "AWS::HealthLake::FHIRDatastore", "AWS::Cassandra::Keyspace", "AWS::IVS::Channel", "AWS::IVS::RecordingConfiguration", "AWS::IVS::PlaybackKeyPair", "AWS::Elasticsearch::Domain", "AWS::OpenSearch::Domain", "AWS::Elasticsearch::Domain", "AWS::Pinpoint::ApplicationSettings", "AWS::Pinpoint::Segment", "AWS::Pinpoint::App", "AWS::Pinpoint::Campaign", "AWS::Pinpoint::InAppTemplate", "AWS::QLDB::Ledger", "AWS::Kinesis::Stream", "AWS::Kinesis::StreamConsumer", "AWS::KinesisAnalyticsV2::Application", "AWS::KinesisFirehose::DeliveryStream", "AWS::KinesisVideo::SignalingChannel", "AWS::Lex::BotAlias", "AWS::Lex::Bot", "AWS::Lightsail::Disk", "AWS::Lightsail::Certificate", "AWS::Lightsail::Bucket", "AWS::Lightsail::StaticIp", "AWS::LookoutMetrics::Alert", "AWS::LookoutVision::Project", "AWS::AmazonMQ::Broker", "AWS::MSK::Cluster", "AWS::Redshift::Cluster", "AWS::Redshift::ClusterParameterGroup", "AWS::Redshift::ClusterSecurityGroup", "AWS::Redshift::ScheduledAction", "AWS::Redshift::ClusterSnapshot", 
"AWS::Redshift::ClusterSubnetGroup", "AWS::Redshift::EventSubscription", "AWS::RDS::DBInstance", "AWS::RDS::DBSecurityGroup", "AWS::RDS::DBSnapshot", "AWS::RDS::DBSubnetGroup", "AWS::RDS::EventSubscription", "AWS::RDS::DBCluster", "AWS::RDS::DBClusterSnapshot", "AWS::RDS::GlobalCluster", "AWS::Route53::HostedZone", "AWS::Route53::HealthCheck", "AWS::Route53Resolver::ResolverEndpoint", "AWS::Route53Resolver::ResolverRule", "AWS::Route53Resolver::ResolverRuleAssociation", "AWS::Route53Resolver::FirewallDomainList", "AWS::AWS::Route53Resolver::FirewallRuleGroupAssociation", "AWS::Route53RecoveryReadiness::Cell", "AWS::Route53RecoveryReadiness::ReadinessCheck", "AWS::Route53RecoveryReadiness::RecoveryGroup", "AWS::Route53RecoveryControl::Cluster", "AWS::Route53RecoveryControl::ControlPanel", "AWS::Route53RecoveryControl::RoutingControl", "AWS::Route53RecoveryControl::SafetyRule", "AWS::Route53RecoveryReadiness::ResourceSet", "AWS::SageMaker::CodeRepository", "AWS::SageMaker::Domain", "AWS::SageMaker::AppImageConfig", "AWS::SageMaker::Image", "AWS::SageMaker::Model", "AWS::SageMaker::NotebookInstance", "AWS::SageMaker::NotebookInstanceLifecycleConfig", "AWS::SageMaker::EndpointConfig", "AWS::SageMaker::Workteam", "AWS::SES::ConfigurationSet", "AWS::SES::ContactList", "AWS::SES::Template", "AWS::SES::ReceiptFilter", "AWS::SES::ReceiptRuleSet", "AWS::SNS::Topic", "AWS::SQS::Queue", "AWS::S3::Bucket", "AWS::S3::AccountPublicAccessBlock", "AWS::S3::MultiRegionAccessPoint", "AWS::S3::StorageLens", "AWS::EC2::CustomerGateway", "AWS::EC2::InternetGateway", "AWS::EC2::NetworkAcl", "AWS::EC2::RouteTable", "AWS::EC2::Subnet", "AWS::EC2::VPC", "AWS::EC2::VPNConnection", "AWS::EC2::VPNGateway", "AWS::NetworkManager::TransitGatewayRegistration", "AWS::NetworkManager::Site", "AWS::NetworkManager::Device", "AWS::NetworkManager::Link", "AWS::NetworkManager::GlobalNetwork", "AWS::WorkSpaces::ConnectionAlias", "AWS::WorkSpaces::Workspace", "AWS::Amplify::App", 
"AWS::AppConfig::Application", "AWS::AppConfig::Environment", "AWS::AppConfig::ConfigurationProfile", "AWS::AppConfig::DeploymentStrategy", "AWS::AppRunner::VpcConnector", "AWS::AppMesh::VirtualNode", "AWS::AppMesh::VirtualService", "AWS::AppSync::GraphQLApi", "AWS::AuditManager::Assessment", "AWS::AutoScaling::AutoScalingGroup", "AWS::AutoScaling::LaunchConfiguration", "AWS::AutoScaling::ScalingPolicy", "AWS::AutoScaling::ScheduledAction", "AWS::AutoScaling::WarmPool", "AWS::Backup::BackupPlan", "AWS::Backup::BackupSelection", "AWS::Backup::BackupVault", "AWS::Backup::RecoveryPoint", "AWS::Backup::ReportPlan", "AWS::Backup::BackupPlan", "AWS::Backup::BackupSelection", "AWS::Backup::BackupVault", "AWS::Backup::RecoveryPoint", "AWS::Batch::JobQueue", "AWS::Batch::ComputeEnvironment", "AWS::Budgets::BudgetsAction", "AWS::ACM::Certificate", "AWS::CloudFormation::Stack", "AWS::CloudTrail::Trail", "AWS::Cloud9::EnvironmentEC2", "AWS::ServiceDiscovery::Service", "AWS::ServiceDiscovery::PublicDnsNamespace", "AWS::ServiceDiscovery::HttpNamespace", "AWS::CodeArtifact::Repository", "AWS::CodeBuild::Project", "AWS::CodeDeploy::Application", "AWS::CodeDeploy::DeploymentConfig", "AWS::CodeDeploy::DeploymentGroup", "AWS::CodePipeline::Pipeline", "AWS::Config::ResourceCompliance", "AWS::Config::ConformancePackCompliance", "AWS::Config::ConfigurationRecorder", "AWS::Config::ResourceCompliance", "AWS::Config::ConfigurationRecorder", "AWS::Config::ConformancePackCompliance", "AWS::Config::ConfigurationRecorder", "AWS::DMS::EventSubscription", "AWS::DMS::ReplicationSubnetGroup", "AWS::DMS::ReplicationInstance", "AWS::DMS::ReplicationTask", "AWS::DMS::Certificate", "AWS::DataSync::LocationSMB", "AWS::DataSync::LocationFSxLustre", "AWS::DataSync::LocationFSxWindows", "AWS::DataSync::LocationS3", "AWS::DataSync::LocationEFS", "AWS::DataSync::LocationNFS", "AWS::DataSync::LocationHDFS",
"AWS::DeviceFarm::TestGridProject", "AWS::DeviceFarm::InstanceProfile", "AWS::DeviceFarm::Project", "AWS::ElasticBeanstalk::Application", "AWS::ElasticBeanstalk::ApplicationVersion", "AWS::ElasticBeanstalk::Environment", "AWS::FIS::ExperimentTemplate", "AWS::GlobalAccelerator::Listener", "AWS::GlobalAccelerator::EndpointGroup", "AWS::GlobalAccelerator::Accelerator", "AWS::Glue::Job", "AWS::Glue::Classifier", "AWS::Glue::MLTransform", "AWS::GroundStation::Config", "AWS::IAM::User", "AWS::IAM::SAMLProvider", "AWS::IAM::ServerCertificate", "AWS::IAM::Group", "AWS::IAM::Role", "AWS::IAM::Policy", "AWS::AccessAnalyzer::Analyzer", "AWS::IoT::Authorizer", "AWS::IoT::SecurityProfile", "AWS::IoT::RoleAlias", "AWS::IoT::Dimension", "AWS::IoT::Policy", "AWS::IoT::MitigationAction", "AWS::IoT::ScheduledAudit", "AWS::IoT::AccountAuditConfiguration", "AWS::IoTSiteWise::Gateway", "AWS::IoT::CustomMetric", "AWS::IoTWireless::ServiceProfile", "AWS::IoT::FleetMetric", "AWS::IoTAnalytics::Datastore", "AWS::IoTAnalytics::Dataset", "AWS::IoTAnalytics::Pipeline", "AWS::IoTAnalytics::Channel", "AWS::IoTEvents::Input", "AWS::IoTEvents::DetectorModel", "AWS::IoTEvents::AlarmModel", "AWS::IoTTwinMaker::Workspace", "AWS::IoTTwinMaker::Entity", "AWS::IoTTwinMaker::Scene", "AWS::IoTSiteWise::Dashboard", "AWS::IoTSiteWise::Project", "AWS::IoTSiteWise::Portal", "AWS::IoTSiteWise::AssetModel", "AWS::KMS::Key", "AWS::KMS::Alias", "AWS::Lambda::Function", "AWS::Lambda::Alias", "AWS::NetworkFirewall::Firewall", "AWS::NetworkFirewall::FirewallPolicy", "AWS::NetworkFirewall::RuleGroup", "AWS::NetworkFirewall::TLSInspectionConfiguration", "AWS:Panorama::Package", "AWS::ResilienceHub::ResiliencyPolicy", "AWS::RoboMaker::RobotApplicationVersion", "AWS::RoboMaker::RobotApplication", "AWS::RoboMaker::SimulationApplication", "AWS::Signer::SigningProfile", "AWS::SecretsManager::Secret", "AWS::ServiceCatalog::CloudFormationProduct", "AWS::ServiceCatalog::CloudFormationProvisionedProduct", 
"AWS::ServiceCatalog::Portfolio", "AWS::Shield::Protection", "AWS::ShieldRegional::Protection", "AWS::StepFunctions::Activity", "AWS::StepFunctions::StateMachine", "AWS::SSM::ManagedInstanceInventory", "AWS::SSM::PatchCompliance", "AWS::SSM::AssociationCompliance", "AWS::SSM::FileData", "AWS::Transfer::Agreement", "AWS::Transfer::Connector", "AWS::Transfer::Workflow", "AWS::WAF::RateBasedRule", "AWS::WAF::Rule", "AWS::WAF::WebACL", "AWS::WAF::RuleGroup", "AWS::WAFRegional::RateBasedRule", "AWS::WAFRegional::Rule", "AWS::WAFRegional::WebACL", "AWS::WAFRegional::RuleGroup", "AWS::WAFv2::WebACL", "AWS::WAFv2::RuleGroup", "AWS::WAFv2::ManagedRuleSet", "AWS::WAFv2::IPSet", "AWS::WAFv2::RegexPatternSet", "AWS::XRay::EncryptionConfig", "AWS::ElasticLoadBalancingV2::LoadBalancer", "AWS::ElasticLoadBalancingV2::Listener", "AWS::ElasticLoadBalancing::LoadBalancer", "AWS::ElasticLoadBalancingV2::LoadBalancer", "AWS::MediaPackage::PackagingGroup", "AWS::MediaPackage::PackagingConfiguration", } // nolint: prealloc var res []*configservice.ResourceIdentifier for _, t := range &resourceTypes { t := t input := &configservice.ListDiscoveredResourcesInput{ ResourceType: aws.String(t), } result, err := c.Client.ListDiscoveredResources(input) if err != nil { log.Fatalf("Error ListDiscoveredResources (ResourceType: %s): %v\n", t, err) return nil, err } res = append(res, result.ResourceIdentifiers...) for aws.StringValue(result.NextToken) != "" { input.NextToken = result.NextToken result, err = c.Client.ListDiscoveredResources(input) if err != nil { log.Fatalf("Error ListDiscoveredResources (Input: %v): %v\n", input, err) return nil, err } res = append(res, result.ResourceIdentifiers...) } } return res, nil } // getSnapshotOfItem ... 
finds ConfigurationItem in Snaphot with matching ResourceId and ResourceType func getSnapshotOfItem(item map[string]interface{}, snapshots []map[string]interface{}) map[string]interface{} { id := item["ResourceId"] resType := item["ResourceType"] for _, s := range snapshots { m := s if id == m["ResourceId"].(string) && resType == m["ResourceType"].(string) { return m } } return nil } // getPreviousSnapshot ... gets the name of the config snapshot bucket object // created prior to the lastExecution time // Assumes snapshots are taken every three hours - gets snapshot older than // lastExecution time but less than three hours before lastExecution time func getPreviousSnapshot( items []*configservice.ConfigurationItem, t time.Time, bucket, region string, svc s3iface.S3API) (*s3.Object, string, error) { // Get time from three hours before change...since snapshots are taken every // three hours, this will ensure we are looking in the correct folder by date prevTime := t.Add(time.Hour * time.Duration(-snapshotFrequency)) year, month, day := prevTime.Date() account := aws.StringValue(items[0].AccountId) prefix := strings.Join([]string{ "awsconfig", "AWSLogs", account, "Config", region, strconv.Itoa(year), strconv.Itoa(int(month)), strconv.Itoa(day), "ConfigSnapshot", }, "/") input := &s3.ListObjectsInput{ Bucket: aws.String(bucket), Prefix: aws.String(prefix), } results, err := svc.ListObjects(input) if err != nil { return nil, "", err } for _, o := range results.Contents { m := aws.TimeValue(o.LastModified) if m.After(prevTime) && m.Before(t) { return getSnapshot(svc, bucket, o) } } return nil, "", errors.New("snapshot not found") } func getSnapshot(svc s3iface.S3API, bucket string, o *s3.Object) (*s3.Object, string, error) { input := &s3.GetObjectInput{ Bucket: aws.String(bucket), Key: o.Key, } result, err := svc.GetObject(input) if err != nil { return o, "", err } defer result.Body.Close() b := bytes.Buffer{} if _, err := io.Copy(&b, result.Body); err != nil { return 
o, "", err } return o, b.String(), nil } func getSess() (config, *session.Session, error) { cfg := config{} err := env.Parse(&cfg) if err != nil { log.Fatalf("error parsing env config: %v", err) return cfg, nil, err } sess, err := session.NewSession( &aws.Config{ Region: aws.String(cfg.DefaultRegion), MaxRetries: aws.Int(maxRetries), }) if err != nil { log.Fatalf("error creating new session: %v\n", err) return cfg, nil, err } return cfg, sess, nil }
"AWS::DataSync::LocationObjectStorage", "AWS::DataSync::Task",
random_line_split
get.go
package main import ( "bytes" "errors" "io" "log" "sort" "strconv" "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/configservice" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3iface" "github.com/caarlos0/env" ) const ( maxRetries = 20 snapshotFrequency = 3 // frequency of ConfigSnapshots in hours ) // sortItemSlices ... Sorts attributes of ConfigrationItems that are slices func sortItemSlices(i []*configservice.ConfigurationItem)
// GetLastExecution ... gets the time of the most recent execution of all config services func (c *CfgSvc) GetLastExecution() (time.Time, error) { t := time.Time{} stats, err := c.GetStatus() if err != nil { return t, err } if len(stats) == 0 { return t, errors.New("empty config rule evaluation status array") } for _, s := range stats { if t.Before(aws.TimeValue(s.LastSuccessfulEvaluationTime)) { t = aws.TimeValue(s.LastSuccessfulEvaluationTime) } } return t, nil } // GetItems ... gets AWS Config Service Configuration Items from resource history pages func (c *CfgSvc) GetItems(lastExecution time.Time) (items []*configservice.ConfigurationItem, err error) { res, err := c.GetDiscoveredResources() if err != nil { log.Fatalf("Error getting discovered resources: %v\n", err) return nil, err } for _, r := range res { var results []*configservice.ConfigurationItem input := &configservice.GetResourceConfigHistoryInput{ ResourceType: r.ResourceType, ResourceId: r.ResourceId, EarlierTime: aws.Time(lastExecution.Add(time.Minute * time.Duration(-window))), LaterTime: aws.Time(lastExecution.Add(time.Minute * time.Duration(window))), } err := c.Client.GetResourceConfigHistoryPages(input, func(page *configservice.GetResourceConfigHistoryOutput, lastPage bool) bool { results = append(results, page.ConfigurationItems...) return !lastPage }) if err != nil { log.Fatalf("error getting resource config history (Input: %v):\n%v\n", input, err) return nil, err } items = append(items, results...) } sortItemSlices(items) return items, nil } // GetStatus ... 
performs DescribeConfigRuleEvaluationStatus for all config rules func (c *CfgSvc) GetStatus() ([]*configservice.ConfigRuleEvaluationStatus, error) { params := configservice.DescribeConfigRuleEvaluationStatusInput{} result, err := c.Client.DescribeConfigRuleEvaluationStatus(&params) if err != nil { return nil, err } status := result.ConfigRulesEvaluationStatus for aws.StringValue(result.NextToken) != "" { params.NextToken = result.NextToken result, err = c.Client.DescribeConfigRuleEvaluationStatus(&params) if err != nil { return nil, err } status = append(status, result.ConfigRulesEvaluationStatus...) } return status, nil } // GetDiscoveredResources ... loops through all specified resourceTypes // Lists all resources by Type (Will need to loop over all cfg.ResourceType* types) // https://docs.aws.amazon.com/config/latest/developerguide/resource-config-reference.html#supported-resources func (c *CfgSvc) GetDiscoveredResources() ([]*configservice.ResourceIdentifier, error) { // List of resource types pulled from // github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/api-2.json var resourceTypes = [...]string{ "AWS::AppStream::DirectoryConfig", "AWS::AppStream::Application", "AWS::AppFlow::Flow", "AWS::ApiGateway::Stage", "AWS::ApiGateway::RestApi", "AWS::ApiGatewayV2::Stage", "AWS::ApiGatewayV2::Api", "AWS::Athena::WorkGroup", "AWS::Athena::DataCatalog", "AWS::CloudFront::Distribution", "AWS::CloudFront::StreamingDistribution", "AWS::CloudWatch::Alarm", "AWS::CloudWatch::MetricStream", "AWS::RUM::AppMonitor", "AWS::Evidently::Project", "AWS::CodeGuruReviewer::RepositoryAssociation", "AWS::Connect::PhoneNumber", "AWS::CustomerProfiles::Domain", "AWS::Detective::Graph", "AWS::DynamoDB::Table", "AWS::EC2::Host", "AWS::EC2::EIP", "AWS::EC2::Instance", "AWS::EC2::NetworkInterface", "AWS::EC2::SecurityGroup", "AWS::EC2::NatGateway", "AWS::EC2::EgressOnlyInternetGateway", "AWS::EC2::EC2Fleet", "AWS::EC2::SpotFleet", "AWS::EC2::PrefixList", "AWS::EC2::FlowLog", 
"AWS::EC2::TransitGateway", "AWS::EC2::TransitGatewayAttachment", "AWS::EC2::TransitGatewayRouteTable", "AWS::EC2::VPCEndpoint", "AWS::EC2::VPCEndpointService", "AWS::EC2::VPCPeeringConnection", "AWS::EC2::RegisteredHAInstance", "AWS::EC2::SubnetRouteTableAssociation", "AWS::EC2::LaunchTemplate", "AWS::EC2::NetworkInsightsAccessScopeAnalysis", "AWS::EC2::TrafficMirrorTarget", "AWS::EC2::TrafficMirrorSession", "AWS::EC2::DHCPOptions", "AWS::EC2::IPAM", "AWS::EC2::NetworkInsightsPath", "AWS::EC2::TrafficMirrorFilter", "AWS::EC2::Volume", "AWS::ImageBuilder::ImagePipeline", "AWS::ImageBuilder::DistributionConfiguration", "AWS::ImageBuilder::InfrastructureConfiguration", "AWS::ECR::Repository", "AWS::ECR::RegistryPolicy", "AWS::ECR::PullThroughCacheRule", "AWS::ECR::PublicRepository", "AWS::ECS::Cluster", "AWS::ECS::TaskDefinition", "AWS::ECS::Service", "AWS::ECS::TaskSet", "AWS::EFS::FileSystem", "AWS::EFS::AccessPoint", "AWS::EKS::Cluster", "AWS::EKS::FargateProfile", "AWS::EKS::IdentityProviderConfig", "AWS::EKS::Addon", "AWS::EMR::SecurityConfiguration", "AWS::Events::EventBus", "AWS::Events::ApiDestination", "AWS::Events::Archive", "AWS::Events::Endpoint", "AWS::Events::Connection", "AWS::Events::Rule", "AWS::EC2::TrafficMirrorSession", "AWS::EventSchemas::RegistryPolicy", "AWS::EventSchemas::Discoverer", "AWS::EventSchemas::Schema", "AWS::Forecast::Dataset", "AWS::FraudDetector::Label", "AWS::FraudDetector::EntityType", "AWS::FraudDetector::Variable", "AWS::FraudDetector::Outcome", "AWS::GuardDuty::Detector", "AWS::GuardDuty::ThreatIntelSet", "AWS::GuardDuty::IPSet", "AWS::GuardDuty::Filter", "AWS::HealthLake::FHIRDatastore", "AWS::Cassandra::Keyspace", "AWS::IVS::Channel", "AWS::IVS::RecordingConfiguration", "AWS::IVS::PlaybackKeyPair", "AWS::Elasticsearch::Domain", "AWS::OpenSearch::Domain", "AWS::Elasticsearch::Domain", "AWS::Pinpoint::ApplicationSettings", "AWS::Pinpoint::Segment", "AWS::Pinpoint::App", "AWS::Pinpoint::Campaign", 
"AWS::Pinpoint::InAppTemplate", "AWS::QLDB::Ledger", "AWS::Kinesis::Stream", "AWS::Kinesis::StreamConsumer", "AWS::KinesisAnalyticsV2::Application", "AWS::KinesisFirehose::DeliveryStream", "AWS::KinesisVideo::SignalingChannel", "AWS::Lex::BotAlias", "AWS::Lex::Bot", "AWS::Lightsail::Disk", "AWS::Lightsail::Certificate", "AWS::Lightsail::Bucket", "AWS::Lightsail::StaticIp", "AWS::LookoutMetrics::Alert", "AWS::LookoutVision::Project", "AWS::AmazonMQ::Broker", "AWS::MSK::Cluster", "AWS::Redshift::Cluster", "AWS::Redshift::ClusterParameterGroup", "AWS::Redshift::ClusterSecurityGroup", "AWS::Redshift::ScheduledAction", "AWS::Redshift::ClusterSnapshot", "AWS::Redshift::ClusterSubnetGroup", "AWS::Redshift::EventSubscription", "AWS::RDS::DBInstance", "AWS::RDS::DBSecurityGroup", "AWS::RDS::DBSnapshot", "AWS::RDS::DBSubnetGroup", "AWS::RDS::EventSubscription", "AWS::RDS::DBCluster", "AWS::RDS::DBClusterSnapshot", "AWS::RDS::GlobalCluster", "AWS::Route53::HostedZone", "AWS::Route53::HealthCheck", "AWS::Route53Resolver::ResolverEndpoint", "AWS::Route53Resolver::ResolverRule", "AWS::Route53Resolver::ResolverRuleAssociation", "AWS::Route53Resolver::FirewallDomainList", "AWS::AWS::Route53Resolver::FirewallRuleGroupAssociation", "AWS::Route53RecoveryReadiness::Cell", "AWS::Route53RecoveryReadiness::ReadinessCheck", "AWS::Route53RecoveryReadiness::RecoveryGroup", "AWS::Route53RecoveryControl::Cluster", "AWS::Route53RecoveryControl::ControlPanel", "AWS::Route53RecoveryControl::RoutingControl", "AWS::Route53RecoveryControl::SafetyRule", "AWS::Route53RecoveryReadiness::ResourceSet", "AWS::SageMaker::CodeRepository", "AWS::SageMaker::Domain", "AWS::SageMaker::AppImageConfig", "AWS::SageMaker::Image", "AWS::SageMaker::Model", "AWS::SageMaker::NotebookInstance", "AWS::SageMaker::NotebookInstanceLifecycleConfig", "AWS::SageMaker::EndpointConfig", "AWS::SageMaker::Workteam", "AWS::SES::ConfigurationSet", "AWS::SES::ContactList", "AWS::SES::Template", "AWS::SES::ReceiptFilter", 
"AWS::SES::ReceiptRuleSet", "AWS::SNS::Topic", "AWS::SQS::Queue", "AWS::S3::Bucket", "AWS::S3::AccountPublicAccessBlock", "AWS::S3::MultiRegionAccessPoint", "AWS::S3::StorageLens", "AWS::EC2::CustomerGateway", "AWS::EC2::InternetGateway", "AWS::EC2::NetworkAcl", "AWS::EC2::RouteTable", "AWS::EC2::Subnet", "AWS::EC2::VPC", "AWS::EC2::VPNConnection", "AWS::EC2::VPNGateway", "AWS::NetworkManager::TransitGatewayRegistration", "AWS::NetworkManager::Site", "AWS::NetworkManager::Device", "AWS::NetworkManager::Link", "AWS::NetworkManager::GlobalNetwork", "AWS::WorkSpaces::ConnectionAlias", "AWS::WorkSpaces::Workspace", "AWS::Amplify::App", "AWS::AppConfig::Application", "AWS::AppConfig::Environment", "AWS::AppConfig::ConfigurationProfile", "AWS::AppConfig::DeploymentStrategy", "AWS::AppRunner::VpcConnector", "AWS::AppMesh::VirtualNode", "AWS::AppMesh::VirtualService", "AWS::AppSync::GraphQLApi", "AWS::AuditManager::Assessment", "AWS::AutoScaling::AutoScalingGroup", "AWS::AutoScaling::LaunchConfiguration", "AWS::AutoScaling::ScalingPolicy", "AWS::AutoScaling::ScheduledAction", "AWS::AutoScaling::WarmPool", "AWS::Backup::BackupPlan", "AWS::Backup::BackupSelection", "AWS::Backup::BackupVault", "AWS::Backup::RecoveryPoint", "AWS::Backup::ReportPlan", "AWS::Backup::BackupPlan", "AWS::Backup::BackupSelection", "AWS::Backup::BackupVault", "AWS::Backup::RecoveryPoint", "AWS::Batch::JobQueue", "AWS::Batch::ComputeEnvironment", "AWS::Budgets::BudgetsAction", "AWS::ACM::Certificate", "AWS::CloudFormation::Stack", "AWS::CloudTrail::Trail", "AWS::Cloud9::EnvironmentEC2", "AWS::ServiceDiscovery::Service", "AWS::ServiceDiscovery::PublicDnsNamespace", "AWS::ServiceDiscovery::HttpNamespace", "AWS::CodeArtifact::Repository", "AWS::CodeBuild::Project", "AWS::CodeDeploy::Application", "AWS::CodeDeploy::DeploymentConfig", "AWS::CodeDeploy::DeploymentGroup", "AWS::CodePipeline::Pipeline", "AWS::Config::ResourceCompliance", "AWS::Config::ConformancePackCompliance", 
"AWS::Config::ConfigurationRecorder", "AWS::Config::ResourceCompliance", "AWS::Config::ConfigurationRecorder", "AWS::Config::ConformancePackCompliance", "AWS::Config::ConfigurationRecorder", "AWS::DMS::EventSubscription", "AWS::DMS::ReplicationSubnetGroup", "AWS::DMS::ReplicationInstance", "AWS::DMS::ReplicationTask", "AWS::DMS::Certificate", "AWS::DataSync::LocationSMB", "AWS::DataSync::LocationFSxLustre", "AWS::DataSync::LocationFSxWindows", "AWS::DataSync::LocationS3", "AWS::DataSync::LocationEFS", "AWS::DataSync::LocationNFS", "AWS::DataSync::LocationHDFS", "AWS::DataSync::LocationObjectStorage", "AWS::DataSync::Task", "AWS::DeviceFarm::TestGridProject", "AWS::DeviceFarm::InstanceProfile", "AWS::DeviceFarm::Project", "AWS::ElasticBeanstalk::Application", "AWS::ElasticBeanstalk::ApplicationVersion", "AWS::ElasticBeanstalk::Environment", "AWS::FIS::ExperimentTemplate", "AWS::GlobalAccelerator::Listener", "AWS::GlobalAccelerator::EndpointGroup", "AWS::GlobalAccelerator::Accelerator", "AWS::Glue::Job", "AWS::Glue::Classifier", "AWS::Glue::MLTransform", "AWS::GroundStation::Config", "AWS::IAM::User", "AWS::IAM::SAMLProvider", "AWS::IAM::ServerCertificate", "AWS::IAM::Group", "AWS::IAM::Role", "AWS::IAM::Policy", "AWS::AccessAnalyzer::Analyzer", "AWS::IoT::Authorizer", "AWS::IoT::SecurityProfile", "AWS::IoT::RoleAlias", "AWS::IoT::Dimension", "AWS::IoT::Policy", "AWS::IoT::MitigationAction", "AWS::IoT::ScheduledAudit", "AWS::IoT::AccountAuditConfiguration", "AWS::IoTSiteWise::Gateway", "AWS::IoT::CustomMetric", "AWS::IoTWireless::ServiceProfile", "AWS::IoT::FleetMetric", "AWS::IoTAnalytics::Datastore", "AWS::IoTAnalytics::Dataset", "AWS::IoTAnalytics::Pipeline", "AWS::IoTAnalytics::Channel", "AWS::IoTEvents::Input", "AWS::IoTEvents::DetectorModel", "AWS::IoTEvents::AlarmModel", "AWS::IoTTwinMaker::Workspace", "AWS::IoTTwinMaker::Entity", "AWS::IoTTwinMaker::Scene", "AWS::IoTSiteWise::Dashboard", "AWS::IoTSiteWise::Project", "AWS::IoTSiteWise::Portal", 
"AWS::IoTSiteWise::AssetModel", "AWS::KMS::Key", "AWS::KMS::Alias", "AWS::Lambda::Function", "AWS::Lambda::Alias", "AWS::NetworkFirewall::Firewall", "AWS::NetworkFirewall::FirewallPolicy", "AWS::NetworkFirewall::RuleGroup", "AWS::NetworkFirewall::TLSInspectionConfiguration", "AWS:Panorama::Package", "AWS::ResilienceHub::ResiliencyPolicy", "AWS::RoboMaker::RobotApplicationVersion", "AWS::RoboMaker::RobotApplication", "AWS::RoboMaker::SimulationApplication", "AWS::Signer::SigningProfile", "AWS::SecretsManager::Secret", "AWS::ServiceCatalog::CloudFormationProduct", "AWS::ServiceCatalog::CloudFormationProvisionedProduct", "AWS::ServiceCatalog::Portfolio", "AWS::Shield::Protection", "AWS::ShieldRegional::Protection", "AWS::StepFunctions::Activity", "AWS::StepFunctions::StateMachine", "AWS::SSM::ManagedInstanceInventory", "AWS::SSM::PatchCompliance", "AWS::SSM::AssociationCompliance", "AWS::SSM::FileData", "AWS::Transfer::Agreement", "AWS::Transfer::Connector", "AWS::Transfer::Workflow", "AWS::WAF::RateBasedRule", "AWS::WAF::Rule", "AWS::WAF::WebACL", "AWS::WAF::RuleGroup", "AWS::WAFRegional::RateBasedRule", "AWS::WAFRegional::Rule", "AWS::WAFRegional::WebACL", "AWS::WAFRegional::RuleGroup", "AWS::WAFv2::WebACL", "AWS::WAFv2::RuleGroup", "AWS::WAFv2::ManagedRuleSet", "AWS::WAFv2::IPSet", "AWS::WAFv2::RegexPatternSet", "AWS::XRay::EncryptionConfig", "AWS::ElasticLoadBalancingV2::LoadBalancer", "AWS::ElasticLoadBalancingV2::Listener", "AWS::ElasticLoadBalancing::LoadBalancer", "AWS::ElasticLoadBalancingV2::LoadBalancer", "AWS::MediaPackage::PackagingGroup", "AWS::MediaPackage::PackagingConfiguration", } // nolint: prealloc var res []*configservice.ResourceIdentifier for _, t := range &resourceTypes { t := t input := &configservice.ListDiscoveredResourcesInput{ ResourceType: aws.String(t), } result, err := c.Client.ListDiscoveredResources(input) if err != nil { log.Fatalf("Error ListDiscoveredResources (ResourceType: %s): %v\n", t, err) return nil, err } res = append(res, 
result.ResourceIdentifiers...) for aws.StringValue(result.NextToken) != "" { input.NextToken = result.NextToken result, err = c.Client.ListDiscoveredResources(input) if err != nil { log.Fatalf("Error ListDiscoveredResources (Input: %v): %v\n", input, err) return nil, err } res = append(res, result.ResourceIdentifiers...) } } return res, nil } // getSnapshotOfItem ... finds ConfigurationItem in Snaphot with matching ResourceId and ResourceType func getSnapshotOfItem(item map[string]interface{}, snapshots []map[string]interface{}) map[string]interface{} { id := item["ResourceId"] resType := item["ResourceType"] for _, s := range snapshots { m := s if id == m["ResourceId"].(string) && resType == m["ResourceType"].(string) { return m } } return nil } // getPreviousSnapshot ... gets the name of the config snapshot bucket object // created prior to the lastExecution time // Assumes snapshots are taken every three hours - gets snapshot older than // lastExecution time but less than three hours before lastExecution time func getPreviousSnapshot( items []*configservice.ConfigurationItem, t time.Time, bucket, region string, svc s3iface.S3API) (*s3.Object, string, error) { // Get time from three hours before change...since snapshots are taken every // three hours, this will ensure we are looking in the correct folder by date prevTime := t.Add(time.Hour * time.Duration(-snapshotFrequency)) year, month, day := prevTime.Date() account := aws.StringValue(items[0].AccountId) prefix := strings.Join([]string{ "awsconfig", "AWSLogs", account, "Config", region, strconv.Itoa(year), strconv.Itoa(int(month)), strconv.Itoa(day), "ConfigSnapshot", }, "/") input := &s3.ListObjectsInput{ Bucket: aws.String(bucket), Prefix: aws.String(prefix), } results, err := svc.ListObjects(input) if err != nil { return nil, "", err } for _, o := range results.Contents { m := aws.TimeValue(o.LastModified) if m.After(prevTime) && m.Before(t) { return getSnapshot(svc, bucket, o) } } return nil, "", 
errors.New("snapshot not found") } func getSnapshot(svc s3iface.S3API, bucket string, o *s3.Object) (*s3.Object, string, error) { input := &s3.GetObjectInput{ Bucket: aws.String(bucket), Key: o.Key, } result, err := svc.GetObject(input) if err != nil { return o, "", err } defer result.Body.Close() b := bytes.Buffer{} if _, err := io.Copy(&b, result.Body); err != nil { return o, "", err } return o, b.String(), nil } func getSess() (config, *session.Session, error) { cfg := config{} err := env.Parse(&cfg) if err != nil { log.Fatalf("error parsing env config: %v", err) return cfg, nil, err } sess, err := session.NewSession( &aws.Config{ Region: aws.String(cfg.DefaultRegion), MaxRetries: aws.Int(maxRetries), }) if err != nil { log.Fatalf("error creating new session: %v\n", err) return cfg, nil, err } return cfg, sess, nil }
{ for _, v := range i { e := v.RelatedEvents r := v.Relationships sort.SliceStable(e, func(i, j int) bool { return sliceSorter(e[i], e[j]) }) sort.SliceStable(r, func(i, j int) bool { return sliceSorter(r[i], r[j]) }) v.RelatedEvents = e v.Relationships = r } }
identifier_body
get.go
package main import ( "bytes" "errors" "io" "log" "sort" "strconv" "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/configservice" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3iface" "github.com/caarlos0/env" ) const ( maxRetries = 20 snapshotFrequency = 3 // frequency of ConfigSnapshots in hours ) // sortItemSlices ... Sorts attributes of ConfigrationItems that are slices func sortItemSlices(i []*configservice.ConfigurationItem) { for _, v := range i { e := v.RelatedEvents r := v.Relationships sort.SliceStable(e, func(i, j int) bool { return sliceSorter(e[i], e[j]) }) sort.SliceStable(r, func(i, j int) bool { return sliceSorter(r[i], r[j]) }) v.RelatedEvents = e v.Relationships = r } } // GetLastExecution ... gets the time of the most recent execution of all config services func (c *CfgSvc) GetLastExecution() (time.Time, error) { t := time.Time{} stats, err := c.GetStatus() if err != nil { return t, err } if len(stats) == 0 { return t, errors.New("empty config rule evaluation status array") } for _, s := range stats { if t.Before(aws.TimeValue(s.LastSuccessfulEvaluationTime)) { t = aws.TimeValue(s.LastSuccessfulEvaluationTime) } } return t, nil } // GetItems ... 
gets AWS Config Service Configuration Items from resource history pages func (c *CfgSvc) GetItems(lastExecution time.Time) (items []*configservice.ConfigurationItem, err error) { res, err := c.GetDiscoveredResources() if err != nil { log.Fatalf("Error getting discovered resources: %v\n", err) return nil, err } for _, r := range res { var results []*configservice.ConfigurationItem input := &configservice.GetResourceConfigHistoryInput{ ResourceType: r.ResourceType, ResourceId: r.ResourceId, EarlierTime: aws.Time(lastExecution.Add(time.Minute * time.Duration(-window))), LaterTime: aws.Time(lastExecution.Add(time.Minute * time.Duration(window))), } err := c.Client.GetResourceConfigHistoryPages(input, func(page *configservice.GetResourceConfigHistoryOutput, lastPage bool) bool { results = append(results, page.ConfigurationItems...) return !lastPage }) if err != nil { log.Fatalf("error getting resource config history (Input: %v):\n%v\n", input, err) return nil, err } items = append(items, results...) } sortItemSlices(items) return items, nil } // GetStatus ... performs DescribeConfigRuleEvaluationStatus for all config rules func (c *CfgSvc) GetStatus() ([]*configservice.ConfigRuleEvaluationStatus, error) { params := configservice.DescribeConfigRuleEvaluationStatusInput{} result, err := c.Client.DescribeConfigRuleEvaluationStatus(&params) if err != nil { return nil, err } status := result.ConfigRulesEvaluationStatus for aws.StringValue(result.NextToken) != "" { params.NextToken = result.NextToken result, err = c.Client.DescribeConfigRuleEvaluationStatus(&params) if err != nil { return nil, err } status = append(status, result.ConfigRulesEvaluationStatus...) } return status, nil } // GetDiscoveredResources ... 
loops through all specified resourceTypes // Lists all resources by Type (Will need to loop over all cfg.ResourceType* types) // https://docs.aws.amazon.com/config/latest/developerguide/resource-config-reference.html#supported-resources func (c *CfgSvc) GetDiscoveredResources() ([]*configservice.ResourceIdentifier, error) { // List of resource types pulled from // github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/api-2.json var resourceTypes = [...]string{ "AWS::AppStream::DirectoryConfig", "AWS::AppStream::Application", "AWS::AppFlow::Flow", "AWS::ApiGateway::Stage", "AWS::ApiGateway::RestApi", "AWS::ApiGatewayV2::Stage", "AWS::ApiGatewayV2::Api", "AWS::Athena::WorkGroup", "AWS::Athena::DataCatalog", "AWS::CloudFront::Distribution", "AWS::CloudFront::StreamingDistribution", "AWS::CloudWatch::Alarm", "AWS::CloudWatch::MetricStream", "AWS::RUM::AppMonitor", "AWS::Evidently::Project", "AWS::CodeGuruReviewer::RepositoryAssociation", "AWS::Connect::PhoneNumber", "AWS::CustomerProfiles::Domain", "AWS::Detective::Graph", "AWS::DynamoDB::Table", "AWS::EC2::Host", "AWS::EC2::EIP", "AWS::EC2::Instance", "AWS::EC2::NetworkInterface", "AWS::EC2::SecurityGroup", "AWS::EC2::NatGateway", "AWS::EC2::EgressOnlyInternetGateway", "AWS::EC2::EC2Fleet", "AWS::EC2::SpotFleet", "AWS::EC2::PrefixList", "AWS::EC2::FlowLog", "AWS::EC2::TransitGateway", "AWS::EC2::TransitGatewayAttachment", "AWS::EC2::TransitGatewayRouteTable", "AWS::EC2::VPCEndpoint", "AWS::EC2::VPCEndpointService", "AWS::EC2::VPCPeeringConnection", "AWS::EC2::RegisteredHAInstance", "AWS::EC2::SubnetRouteTableAssociation", "AWS::EC2::LaunchTemplate", "AWS::EC2::NetworkInsightsAccessScopeAnalysis", "AWS::EC2::TrafficMirrorTarget", "AWS::EC2::TrafficMirrorSession", "AWS::EC2::DHCPOptions", "AWS::EC2::IPAM", "AWS::EC2::NetworkInsightsPath", "AWS::EC2::TrafficMirrorFilter", "AWS::EC2::Volume", "AWS::ImageBuilder::ImagePipeline", "AWS::ImageBuilder::DistributionConfiguration", 
"AWS::ImageBuilder::InfrastructureConfiguration", "AWS::ECR::Repository", "AWS::ECR::RegistryPolicy", "AWS::ECR::PullThroughCacheRule", "AWS::ECR::PublicRepository", "AWS::ECS::Cluster", "AWS::ECS::TaskDefinition", "AWS::ECS::Service", "AWS::ECS::TaskSet", "AWS::EFS::FileSystem", "AWS::EFS::AccessPoint", "AWS::EKS::Cluster", "AWS::EKS::FargateProfile", "AWS::EKS::IdentityProviderConfig", "AWS::EKS::Addon", "AWS::EMR::SecurityConfiguration", "AWS::Events::EventBus", "AWS::Events::ApiDestination", "AWS::Events::Archive", "AWS::Events::Endpoint", "AWS::Events::Connection", "AWS::Events::Rule", "AWS::EC2::TrafficMirrorSession", "AWS::EventSchemas::RegistryPolicy", "AWS::EventSchemas::Discoverer", "AWS::EventSchemas::Schema", "AWS::Forecast::Dataset", "AWS::FraudDetector::Label", "AWS::FraudDetector::EntityType", "AWS::FraudDetector::Variable", "AWS::FraudDetector::Outcome", "AWS::GuardDuty::Detector", "AWS::GuardDuty::ThreatIntelSet", "AWS::GuardDuty::IPSet", "AWS::GuardDuty::Filter", "AWS::HealthLake::FHIRDatastore", "AWS::Cassandra::Keyspace", "AWS::IVS::Channel", "AWS::IVS::RecordingConfiguration", "AWS::IVS::PlaybackKeyPair", "AWS::Elasticsearch::Domain", "AWS::OpenSearch::Domain", "AWS::Elasticsearch::Domain", "AWS::Pinpoint::ApplicationSettings", "AWS::Pinpoint::Segment", "AWS::Pinpoint::App", "AWS::Pinpoint::Campaign", "AWS::Pinpoint::InAppTemplate", "AWS::QLDB::Ledger", "AWS::Kinesis::Stream", "AWS::Kinesis::StreamConsumer", "AWS::KinesisAnalyticsV2::Application", "AWS::KinesisFirehose::DeliveryStream", "AWS::KinesisVideo::SignalingChannel", "AWS::Lex::BotAlias", "AWS::Lex::Bot", "AWS::Lightsail::Disk", "AWS::Lightsail::Certificate", "AWS::Lightsail::Bucket", "AWS::Lightsail::StaticIp", "AWS::LookoutMetrics::Alert", "AWS::LookoutVision::Project", "AWS::AmazonMQ::Broker", "AWS::MSK::Cluster", "AWS::Redshift::Cluster", "AWS::Redshift::ClusterParameterGroup", "AWS::Redshift::ClusterSecurityGroup", "AWS::Redshift::ScheduledAction", "AWS::Redshift::ClusterSnapshot", 
"AWS::Redshift::ClusterSubnetGroup", "AWS::Redshift::EventSubscription", "AWS::RDS::DBInstance", "AWS::RDS::DBSecurityGroup", "AWS::RDS::DBSnapshot", "AWS::RDS::DBSubnetGroup", "AWS::RDS::EventSubscription", "AWS::RDS::DBCluster", "AWS::RDS::DBClusterSnapshot", "AWS::RDS::GlobalCluster", "AWS::Route53::HostedZone", "AWS::Route53::HealthCheck", "AWS::Route53Resolver::ResolverEndpoint", "AWS::Route53Resolver::ResolverRule", "AWS::Route53Resolver::ResolverRuleAssociation", "AWS::Route53Resolver::FirewallDomainList", "AWS::AWS::Route53Resolver::FirewallRuleGroupAssociation", "AWS::Route53RecoveryReadiness::Cell", "AWS::Route53RecoveryReadiness::ReadinessCheck", "AWS::Route53RecoveryReadiness::RecoveryGroup", "AWS::Route53RecoveryControl::Cluster", "AWS::Route53RecoveryControl::ControlPanel", "AWS::Route53RecoveryControl::RoutingControl", "AWS::Route53RecoveryControl::SafetyRule", "AWS::Route53RecoveryReadiness::ResourceSet", "AWS::SageMaker::CodeRepository", "AWS::SageMaker::Domain", "AWS::SageMaker::AppImageConfig", "AWS::SageMaker::Image", "AWS::SageMaker::Model", "AWS::SageMaker::NotebookInstance", "AWS::SageMaker::NotebookInstanceLifecycleConfig", "AWS::SageMaker::EndpointConfig", "AWS::SageMaker::Workteam", "AWS::SES::ConfigurationSet", "AWS::SES::ContactList", "AWS::SES::Template", "AWS::SES::ReceiptFilter", "AWS::SES::ReceiptRuleSet", "AWS::SNS::Topic", "AWS::SQS::Queue", "AWS::S3::Bucket", "AWS::S3::AccountPublicAccessBlock", "AWS::S3::MultiRegionAccessPoint", "AWS::S3::StorageLens", "AWS::EC2::CustomerGateway", "AWS::EC2::InternetGateway", "AWS::EC2::NetworkAcl", "AWS::EC2::RouteTable", "AWS::EC2::Subnet", "AWS::EC2::VPC", "AWS::EC2::VPNConnection", "AWS::EC2::VPNGateway", "AWS::NetworkManager::TransitGatewayRegistration", "AWS::NetworkManager::Site", "AWS::NetworkManager::Device", "AWS::NetworkManager::Link", "AWS::NetworkManager::GlobalNetwork", "AWS::WorkSpaces::ConnectionAlias", "AWS::WorkSpaces::Workspace", "AWS::Amplify::App", 
"AWS::AppConfig::Application", "AWS::AppConfig::Environment", "AWS::AppConfig::ConfigurationProfile", "AWS::AppConfig::DeploymentStrategy", "AWS::AppRunner::VpcConnector", "AWS::AppMesh::VirtualNode", "AWS::AppMesh::VirtualService", "AWS::AppSync::GraphQLApi", "AWS::AuditManager::Assessment", "AWS::AutoScaling::AutoScalingGroup", "AWS::AutoScaling::LaunchConfiguration", "AWS::AutoScaling::ScalingPolicy", "AWS::AutoScaling::ScheduledAction", "AWS::AutoScaling::WarmPool", "AWS::Backup::BackupPlan", "AWS::Backup::BackupSelection", "AWS::Backup::BackupVault", "AWS::Backup::RecoveryPoint", "AWS::Backup::ReportPlan", "AWS::Backup::BackupPlan", "AWS::Backup::BackupSelection", "AWS::Backup::BackupVault", "AWS::Backup::RecoveryPoint", "AWS::Batch::JobQueue", "AWS::Batch::ComputeEnvironment", "AWS::Budgets::BudgetsAction", "AWS::ACM::Certificate", "AWS::CloudFormation::Stack", "AWS::CloudTrail::Trail", "AWS::Cloud9::EnvironmentEC2", "AWS::ServiceDiscovery::Service", "AWS::ServiceDiscovery::PublicDnsNamespace", "AWS::ServiceDiscovery::HttpNamespace", "AWS::CodeArtifact::Repository", "AWS::CodeBuild::Project", "AWS::CodeDeploy::Application", "AWS::CodeDeploy::DeploymentConfig", "AWS::CodeDeploy::DeploymentGroup", "AWS::CodePipeline::Pipeline", "AWS::Config::ResourceCompliance", "AWS::Config::ConformancePackCompliance", "AWS::Config::ConfigurationRecorder", "AWS::Config::ResourceCompliance", "AWS::Config::ConfigurationRecorder", "AWS::Config::ConformancePackCompliance", "AWS::Config::ConfigurationRecorder", "AWS::DMS::EventSubscription", "AWS::DMS::ReplicationSubnetGroup", "AWS::DMS::ReplicationInstance", "AWS::DMS::ReplicationTask", "AWS::DMS::Certificate", "AWS::DataSync::LocationSMB", "AWS::DataSync::LocationFSxLustre", "AWS::DataSync::LocationFSxWindows", "AWS::DataSync::LocationS3", "AWS::DataSync::LocationEFS", "AWS::DataSync::LocationNFS", "AWS::DataSync::LocationHDFS", "AWS::DataSync::LocationObjectStorage", "AWS::DataSync::Task", "AWS::DeviceFarm::TestGridProject", 
"AWS::DeviceFarm::InstanceProfile", "AWS::DeviceFarm::Project", "AWS::ElasticBeanstalk::Application", "AWS::ElasticBeanstalk::ApplicationVersion", "AWS::ElasticBeanstalk::Environment", "AWS::FIS::ExperimentTemplate", "AWS::GlobalAccelerator::Listener", "AWS::GlobalAccelerator::EndpointGroup", "AWS::GlobalAccelerator::Accelerator", "AWS::Glue::Job", "AWS::Glue::Classifier", "AWS::Glue::MLTransform", "AWS::GroundStation::Config", "AWS::IAM::User", "AWS::IAM::SAMLProvider", "AWS::IAM::ServerCertificate", "AWS::IAM::Group", "AWS::IAM::Role", "AWS::IAM::Policy", "AWS::AccessAnalyzer::Analyzer", "AWS::IoT::Authorizer", "AWS::IoT::SecurityProfile", "AWS::IoT::RoleAlias", "AWS::IoT::Dimension", "AWS::IoT::Policy", "AWS::IoT::MitigationAction", "AWS::IoT::ScheduledAudit", "AWS::IoT::AccountAuditConfiguration", "AWS::IoTSiteWise::Gateway", "AWS::IoT::CustomMetric", "AWS::IoTWireless::ServiceProfile", "AWS::IoT::FleetMetric", "AWS::IoTAnalytics::Datastore", "AWS::IoTAnalytics::Dataset", "AWS::IoTAnalytics::Pipeline", "AWS::IoTAnalytics::Channel", "AWS::IoTEvents::Input", "AWS::IoTEvents::DetectorModel", "AWS::IoTEvents::AlarmModel", "AWS::IoTTwinMaker::Workspace", "AWS::IoTTwinMaker::Entity", "AWS::IoTTwinMaker::Scene", "AWS::IoTSiteWise::Dashboard", "AWS::IoTSiteWise::Project", "AWS::IoTSiteWise::Portal", "AWS::IoTSiteWise::AssetModel", "AWS::KMS::Key", "AWS::KMS::Alias", "AWS::Lambda::Function", "AWS::Lambda::Alias", "AWS::NetworkFirewall::Firewall", "AWS::NetworkFirewall::FirewallPolicy", "AWS::NetworkFirewall::RuleGroup", "AWS::NetworkFirewall::TLSInspectionConfiguration", "AWS:Panorama::Package", "AWS::ResilienceHub::ResiliencyPolicy", "AWS::RoboMaker::RobotApplicationVersion", "AWS::RoboMaker::RobotApplication", "AWS::RoboMaker::SimulationApplication", "AWS::Signer::SigningProfile", "AWS::SecretsManager::Secret", "AWS::ServiceCatalog::CloudFormationProduct", "AWS::ServiceCatalog::CloudFormationProvisionedProduct", "AWS::ServiceCatalog::Portfolio", 
"AWS::Shield::Protection", "AWS::ShieldRegional::Protection", "AWS::StepFunctions::Activity", "AWS::StepFunctions::StateMachine", "AWS::SSM::ManagedInstanceInventory", "AWS::SSM::PatchCompliance", "AWS::SSM::AssociationCompliance", "AWS::SSM::FileData", "AWS::Transfer::Agreement", "AWS::Transfer::Connector", "AWS::Transfer::Workflow", "AWS::WAF::RateBasedRule", "AWS::WAF::Rule", "AWS::WAF::WebACL", "AWS::WAF::RuleGroup", "AWS::WAFRegional::RateBasedRule", "AWS::WAFRegional::Rule", "AWS::WAFRegional::WebACL", "AWS::WAFRegional::RuleGroup", "AWS::WAFv2::WebACL", "AWS::WAFv2::RuleGroup", "AWS::WAFv2::ManagedRuleSet", "AWS::WAFv2::IPSet", "AWS::WAFv2::RegexPatternSet", "AWS::XRay::EncryptionConfig", "AWS::ElasticLoadBalancingV2::LoadBalancer", "AWS::ElasticLoadBalancingV2::Listener", "AWS::ElasticLoadBalancing::LoadBalancer", "AWS::ElasticLoadBalancingV2::LoadBalancer", "AWS::MediaPackage::PackagingGroup", "AWS::MediaPackage::PackagingConfiguration", } // nolint: prealloc var res []*configservice.ResourceIdentifier for _, t := range &resourceTypes { t := t input := &configservice.ListDiscoveredResourcesInput{ ResourceType: aws.String(t), } result, err := c.Client.ListDiscoveredResources(input) if err != nil { log.Fatalf("Error ListDiscoveredResources (ResourceType: %s): %v\n", t, err) return nil, err } res = append(res, result.ResourceIdentifiers...) for aws.StringValue(result.NextToken) != "" { input.NextToken = result.NextToken result, err = c.Client.ListDiscoveredResources(input) if err != nil { log.Fatalf("Error ListDiscoveredResources (Input: %v): %v\n", input, err) return nil, err } res = append(res, result.ResourceIdentifiers...) } } return res, nil } // getSnapshotOfItem ... 
finds ConfigurationItem in Snaphot with matching ResourceId and ResourceType func getSnapshotOfItem(item map[string]interface{}, snapshots []map[string]interface{}) map[string]interface{} { id := item["ResourceId"] resType := item["ResourceType"] for _, s := range snapshots { m := s if id == m["ResourceId"].(string) && resType == m["ResourceType"].(string) { return m } } return nil } // getPreviousSnapshot ... gets the name of the config snapshot bucket object // created prior to the lastExecution time // Assumes snapshots are taken every three hours - gets snapshot older than // lastExecution time but less than three hours before lastExecution time func
( items []*configservice.ConfigurationItem, t time.Time, bucket, region string, svc s3iface.S3API) (*s3.Object, string, error) { // Get time from three hours before change...since snapshots are taken every // three hours, this will ensure we are looking in the correct folder by date prevTime := t.Add(time.Hour * time.Duration(-snapshotFrequency)) year, month, day := prevTime.Date() account := aws.StringValue(items[0].AccountId) prefix := strings.Join([]string{ "awsconfig", "AWSLogs", account, "Config", region, strconv.Itoa(year), strconv.Itoa(int(month)), strconv.Itoa(day), "ConfigSnapshot", }, "/") input := &s3.ListObjectsInput{ Bucket: aws.String(bucket), Prefix: aws.String(prefix), } results, err := svc.ListObjects(input) if err != nil { return nil, "", err } for _, o := range results.Contents { m := aws.TimeValue(o.LastModified) if m.After(prevTime) && m.Before(t) { return getSnapshot(svc, bucket, o) } } return nil, "", errors.New("snapshot not found") } func getSnapshot(svc s3iface.S3API, bucket string, o *s3.Object) (*s3.Object, string, error) { input := &s3.GetObjectInput{ Bucket: aws.String(bucket), Key: o.Key, } result, err := svc.GetObject(input) if err != nil { return o, "", err } defer result.Body.Close() b := bytes.Buffer{} if _, err := io.Copy(&b, result.Body); err != nil { return o, "", err } return o, b.String(), nil } func getSess() (config, *session.Session, error) { cfg := config{} err := env.Parse(&cfg) if err != nil { log.Fatalf("error parsing env config: %v", err) return cfg, nil, err } sess, err := session.NewSession( &aws.Config{ Region: aws.String(cfg.DefaultRegion), MaxRetries: aws.Int(maxRetries), }) if err != nil { log.Fatalf("error creating new session: %v\n", err) return cfg, nil, err } return cfg, sess, nil }
getPreviousSnapshot
identifier_name
Server.py
#!/usr/bin/python import socket import sys import threading import time import Constants # Thread to serve clients class ServerThread(threading.Thread): # constructor def __init__(self, server, address, connection): threading.Thread.__init__(self) self.server = server self.address = address self.connection = connection # whether the client has been logined self.login = False # number of failed login attempts self.failed_login_attempts = 0 # the username of the client self.user = '' # time of the last operation self.last_op_time = time.time() # processes commands received from the client def run(self): try: while True: # receive message from the client msg = self.connection.recv(Constants.MAX_MSG_LENGTH) # If no message, break the loop if not msg: break # update the last operation time self.last_op_time = time.time() # if not logined, if not self.login: # process the login of the client self.process_login(msg) else: # process command if self.process_command(msg): # if the client exited, break the loop break except socket.error: pass # close the connection self.connection.close() # processes login command def process_login(self, msg): # if it is a login message ('login|username|password') if msg.startswith(Constants.MSG_LOGIN): # extract username and password cmd, user, password = msg.split('|') print 'login:', user, password # verify the username and password if self.server.verify_user(user, password): if not self.server.is_online(user): # if the user is not online # record his login time self.server.logins[user] = time.time() self.user = user self.login = True # send back a success message self.connection.send(Constants.MSG_SUCCESS) # send offline messages to him self.server.send_offline_messages(user, self.connection) # tell other clients self.server.broadcast('server', user + ' login', user) else: # if the user is already online, send back the message self.connection.send(Constants.MSG_USER_ALREADY_LOGINED) else: # increment the failed times 
self.failed_login_attempts += 1 # if it exceeds the maximum retry times, if self.failed_login_attempts >= Constants.MAX_LOGIN_ATTEMPTS: # tell the client self.connection.send(Constants.MSG_LOGIN_EXCEED_MAX_TIMES) # block the ip self.server.block_client(self.address) # disconnect the client self.server.disconnect(self.address) return True else: # send back a failed message self.connection.send(Constants.MSG_FAILED) else: # send back a failed message self.connection.send(Constants.MSG_FAILED) return False # processes the command def process_command(self, msg): exited = False if msg == Constants.MSG_EXIT: # client exits exited = True elif msg == Constants.MSG_WHO_ELSE: # send back who else self.connection.send('[who else] ' + ', '.join(self.server.who_else(self.address))) elif msg == Constants.MSG_WHO_LAST_HOUR: # send back who logined in the last hour self.connection.send('[who last hour] ' + ', '.join(self.server.who_last_hour())) elif msg.startswith(Constants.MSG_BROADCAST): # extract the message cmd, msg = msg.split('|', 1) # broadcast the message self.server.broadcast(self.user, msg) elif msg.startswith(Constants.MSG_MESSAGE): # extract the target user and message cmd, user, msg = msg.split('|', 2) # send message to the target user if not self.server.message(self.user, user, msg): if user in self.server.passwords: self.connection.send(user + ' is offline now, and will see the message when login.') else: self.connection.send(user + ' doesn\'t exist.') elif msg == Constants.MSG_LOGOUT: # if the user want to logout, tell the other clients self.server.broadcast('server', self.user + ' logout') # disconnect self.server.disconnect(self.address) exited = True return exited # Server class class Server: # constructor def __init__(self, port):
self.port = port # {client address -> client threads} self.clients = {} # {username -> password} self.passwords = {} # {username -> last login time} self.logins = {} # {ip -> blocked time} self.blocked_ips = {} # {username -> [messages]} self.offline_messages = {} # starts the server def start(self): # load the password file, exit if failed. if not self.load_passwords(): return # start a thread to check the timeout for inactive clients. t = threading.Thread(target=self.check_inactive_user) t.setDaemon(True) t.start() # create a server socket s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # force to reuse the address s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # bind the address s.bind(('127.0.0.1', self.port)) # listen to at most 10 clients s.listen(10) try: # server loop while True: # wait for the connection of client connection, address = s.accept() # if a client connects, check its blocked time block_t = self.remaining_block_time(address) if block_t == 0: # if no blocked time left, send an accept message to it connection.send(Constants.MEG_ACCEPTED) # start a thread for it self.clients[address] = ServerThread(self, address, connection) self.clients[address].start() else: # if the client is blocked, send back its remaining blocked seconds. connection.send('|'.join([Constants.MSG_BLOCKED, str(block_t)])) # wait for acknowledge connection.recv(Constants.MAX_MSG_LENGTH) # close the connection connection.close() except KeyboardInterrupt: # press ctrl-c to stop the server. self.stop_server() # stop the server def stop_server(self): print 'Stop server...' 
# disconnect all the clients for address in self.clients.keys(): self.disconnect(address) # disconnect a client def disconnect(self, address): # if the address is present, if address in self.clients: # get the client thread t = self.clients[address] if t.user != '': print 'logout:', t.user try: # send an exit message t.connection.send(Constants.MSG_EXIT) # close the connection t.connection.close() except socket.error: pass # remove its thread del self.clients[address] # returns the remaining blocked time of the client address def remaining_block_time(self, address): # get the ip from the address ip = address[0] # if it is not in the blocked dict, return 0 if ip not in self.blocked_ips: return 0 current_time = time.time() block_time = self.blocked_ips[ip] if current_time - block_time > Constants.BLOCK_TIME: # if the difference exceeds the block time, return 0 return 0 else: # otherwise return the remaining blocked time return Constants.BLOCK_TIME - (current_time - block_time) # blocks the ip of the client def block_client(self, address): # add the ip and blocked time to the blocked dict self.blocked_ips[address[0]] = time.time() # loads usernames and passwords from the password file # return True if success or False otherwise. def load_passwords(self): print 'load users' try: # open the file f = open(Constants.PASSWORD_FILE) # for each line in the file for line in f: # remove leading and trailing spaces line = line.strip() # if the line contains exactly one space if line.count(' ') == 1: # extract the username and password user, pwd = line.split(' ') # add them to the password dict self.passwords[user] = pwd # close the file f.close() return True except IOError: print '[Error] user_pass.txt is missing.' return False # returns True iff the username and password are correct. 
def verify_user(self, user, password): return user in self.passwords and self.passwords[user] == password # returns a list of online users excluding the current user def who_else(self, current_address): # create an empty list users = [] # for each address of online clients for address in self.clients: # if it is not the address of the current client if address != current_address: # add its username to the list users.append(self.clients[address].user) return users # returns a list of users who logined in the last hour def who_last_hour(self): # get the current time current_time = time.time() # for each user logined, if its last login time is in the last hour, # add it to the list. return [user for user in self.logins if current_time - self.logins[user] <= Constants.SEC_PER_MIN * Constants.LAST_HOUR] # sends a message the a specified user. # returns True iff the user is online. def message(self, from_user, to_user, msg): found = False # add a message header msg = '[' + from_user + ']: ' + msg # for each online client for address in self.clients: t = self.clients[address] # if the target user is found, send the message to him. if t.user == to_user: t.connection.send(msg) found = True if not found: # if the user is not present, add the message to the offline messages if to_user not in self.offline_messages: self.offline_messages[to_user] = [msg] else: self.offline_messages[to_user].append(msg) return found # broadcasts the message to all the users def broadcast(self, from_user, msg, excluding_user=''): # add a message header msg = '[' + from_user + ' broadcast]: ' + msg # for each online client for address in self.clients: # send the message to it if it's not the excluding user. 
t = self.clients[address] if t.user != excluding_user: t.connection.send(msg) # returns True if the specified user is online def is_online(self, user): found = False # for each online client for address in self.clients: # if the username matches, return True if self.clients[address].user == user: found = True break return found # checks and removes inactive clients def check_inactive_user(self): # loop in background till the server ends while True: print 'check timeout for inactive users' # get the current time current_time = time.time() # for each online client for address in self.clients.keys(): # if its last operation time is earlier than the timeout t = self.clients[address] if current_time - t.last_op_time > Constants.TIME_OUT * 60: # tell other clients self.broadcast('server', t.user + ' logout') print t.user, 'is kicked out' # automatically log him out self.disconnect(t.address) # sleep for a minute and check again time.sleep(Constants.SEC_PER_MIN) # send offline messages def send_offline_messages(self, user, connection): # if the user has offline messages if user in self.offline_messages: # send all the offline messages to him for msg in self.offline_messages[user]: connection.send('[offline message] ' + msg) # delete the messages del self.offline_messages[user] if __name__ == '__main__': if len(sys.argv) == 2: try: # create a server port = int(sys.argv[1]) s = Server(port) # start the server s.start() except ValueError: print '[Error] Invalid port' else: # invalid arguments print '[Error] Usage: python Server.py <port>'
# server port
random_line_split
Server.py
#!/usr/bin/python import socket import sys import threading import time import Constants # Thread to serve clients class ServerThread(threading.Thread): # constructor def __init__(self, server, address, connection): threading.Thread.__init__(self) self.server = server self.address = address self.connection = connection # whether the client has been logined self.login = False # number of failed login attempts self.failed_login_attempts = 0 # the username of the client self.user = '' # time of the last operation self.last_op_time = time.time() # processes commands received from the client def run(self): try: while True: # receive message from the client msg = self.connection.recv(Constants.MAX_MSG_LENGTH) # If no message, break the loop if not msg: break # update the last operation time self.last_op_time = time.time() # if not logined, if not self.login: # process the login of the client self.process_login(msg) else: # process command if self.process_command(msg): # if the client exited, break the loop break except socket.error: pass # close the connection self.connection.close() # processes login command def process_login(self, msg): # if it is a login message ('login|username|password') if msg.startswith(Constants.MSG_LOGIN): # extract username and password cmd, user, password = msg.split('|') print 'login:', user, password # verify the username and password if self.server.verify_user(user, password): if not self.server.is_online(user): # if the user is not online # record his login time self.server.logins[user] = time.time() self.user = user self.login = True # send back a success message self.connection.send(Constants.MSG_SUCCESS) # send offline messages to him self.server.send_offline_messages(user, self.connection) # tell other clients self.server.broadcast('server', user + ' login', user) else: # if the user is already online, send back the message self.connection.send(Constants.MSG_USER_ALREADY_LOGINED) else: # increment the failed times 
self.failed_login_attempts += 1 # if it exceeds the maximum retry times, if self.failed_login_attempts >= Constants.MAX_LOGIN_ATTEMPTS: # tell the client self.connection.send(Constants.MSG_LOGIN_EXCEED_MAX_TIMES) # block the ip self.server.block_client(self.address) # disconnect the client self.server.disconnect(self.address) return True else: # send back a failed message self.connection.send(Constants.MSG_FAILED) else: # send back a failed message self.connection.send(Constants.MSG_FAILED) return False # processes the command def process_command(self, msg): exited = False if msg == Constants.MSG_EXIT: # client exits exited = True elif msg == Constants.MSG_WHO_ELSE: # send back who else self.connection.send('[who else] ' + ', '.join(self.server.who_else(self.address))) elif msg == Constants.MSG_WHO_LAST_HOUR: # send back who logined in the last hour self.connection.send('[who last hour] ' + ', '.join(self.server.who_last_hour())) elif msg.startswith(Constants.MSG_BROADCAST): # extract the message cmd, msg = msg.split('|', 1) # broadcast the message self.server.broadcast(self.user, msg) elif msg.startswith(Constants.MSG_MESSAGE): # extract the target user and message cmd, user, msg = msg.split('|', 2) # send message to the target user if not self.server.message(self.user, user, msg): if user in self.server.passwords: self.connection.send(user + ' is offline now, and will see the message when login.') else: self.connection.send(user + ' doesn\'t exist.') elif msg == Constants.MSG_LOGOUT: # if the user want to logout, tell the other clients self.server.broadcast('server', self.user + ' logout') # disconnect self.server.disconnect(self.address) exited = True return exited # Server class class Server: # constructor def __init__(self, port): # server port self.port = port # {client address -> client threads} self.clients = {} # {username -> password} self.passwords = {} # {username -> last login time} self.logins = {} # {ip -> blocked time} self.blocked_ips = {} # 
{username -> [messages]} self.offline_messages = {} # starts the server def start(self): # load the password file, exit if failed. if not self.load_passwords(): return # start a thread to check the timeout for inactive clients. t = threading.Thread(target=self.check_inactive_user) t.setDaemon(True) t.start() # create a server socket s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # force to reuse the address s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # bind the address s.bind(('127.0.0.1', self.port)) # listen to at most 10 clients s.listen(10) try: # server loop while True: # wait for the connection of client connection, address = s.accept() # if a client connects, check its blocked time block_t = self.remaining_block_time(address) if block_t == 0: # if no blocked time left, send an accept message to it connection.send(Constants.MEG_ACCEPTED) # start a thread for it self.clients[address] = ServerThread(self, address, connection) self.clients[address].start() else: # if the client is blocked, send back its remaining blocked seconds. connection.send('|'.join([Constants.MSG_BLOCKED, str(block_t)])) # wait for acknowledge connection.recv(Constants.MAX_MSG_LENGTH) # close the connection connection.close() except KeyboardInterrupt: # press ctrl-c to stop the server. self.stop_server() # stop the server def stop_server(self): print 'Stop server...' 
# disconnect all the clients for address in self.clients.keys(): self.disconnect(address) # disconnect a client def disconnect(self, address): # if the address is present, if address in self.clients: # get the client thread t = self.clients[address] if t.user != '': print 'logout:', t.user try: # send an exit message t.connection.send(Constants.MSG_EXIT) # close the connection t.connection.close() except socket.error: pass # remove its thread del self.clients[address] # returns the remaining blocked time of the client address def remaining_block_time(self, address): # get the ip from the address ip = address[0] # if it is not in the blocked dict, return 0 if ip not in self.blocked_ips: return 0 current_time = time.time() block_time = self.blocked_ips[ip] if current_time - block_time > Constants.BLOCK_TIME: # if the difference exceeds the block time, return 0 return 0 else: # otherwise return the remaining blocked time return Constants.BLOCK_TIME - (current_time - block_time) # blocks the ip of the client def block_client(self, address): # add the ip and blocked time to the blocked dict
# loads usernames and passwords from the password file # return True if success or False otherwise. def load_passwords(self): print 'load users' try: # open the file f = open(Constants.PASSWORD_FILE) # for each line in the file for line in f: # remove leading and trailing spaces line = line.strip() # if the line contains exactly one space if line.count(' ') == 1: # extract the username and password user, pwd = line.split(' ') # add them to the password dict self.passwords[user] = pwd # close the file f.close() return True except IOError: print '[Error] user_pass.txt is missing.' return False # returns True iff the username and password are correct. def verify_user(self, user, password): return user in self.passwords and self.passwords[user] == password # returns a list of online users excluding the current user def who_else(self, current_address): # create an empty list users = [] # for each address of online clients for address in self.clients: # if it is not the address of the current client if address != current_address: # add its username to the list users.append(self.clients[address].user) return users # returns a list of users who logined in the last hour def who_last_hour(self): # get the current time current_time = time.time() # for each user logined, if its last login time is in the last hour, # add it to the list. return [user for user in self.logins if current_time - self.logins[user] <= Constants.SEC_PER_MIN * Constants.LAST_HOUR] # sends a message the a specified user. # returns True iff the user is online. def message(self, from_user, to_user, msg): found = False # add a message header msg = '[' + from_user + ']: ' + msg # for each online client for address in self.clients: t = self.clients[address] # if the target user is found, send the message to him. 
if t.user == to_user: t.connection.send(msg) found = True if not found: # if the user is not present, add the message to the offline messages if to_user not in self.offline_messages: self.offline_messages[to_user] = [msg] else: self.offline_messages[to_user].append(msg) return found # broadcasts the message to all the users def broadcast(self, from_user, msg, excluding_user=''): # add a message header msg = '[' + from_user + ' broadcast]: ' + msg # for each online client for address in self.clients: # send the message to it if it's not the excluding user. t = self.clients[address] if t.user != excluding_user: t.connection.send(msg) # returns True if the specified user is online def is_online(self, user): found = False # for each online client for address in self.clients: # if the username matches, return True if self.clients[address].user == user: found = True break return found # checks and removes inactive clients def check_inactive_user(self): # loop in background till the server ends while True: print 'check timeout for inactive users' # get the current time current_time = time.time() # for each online client for address in self.clients.keys(): # if its last operation time is earlier than the timeout t = self.clients[address] if current_time - t.last_op_time > Constants.TIME_OUT * 60: # tell other clients self.broadcast('server', t.user + ' logout') print t.user, 'is kicked out' # automatically log him out self.disconnect(t.address) # sleep for a minute and check again time.sleep(Constants.SEC_PER_MIN) # send offline messages def send_offline_messages(self, user, connection): # if the user has offline messages if user in self.offline_messages: # send all the offline messages to him for msg in self.offline_messages[user]: connection.send('[offline message] ' + msg) # delete the messages del self.offline_messages[user] if __name__ == '__main__': if len(sys.argv) == 2: try: # create a server port = int(sys.argv[1]) s = Server(port) # start the server s.start() 
except ValueError: print '[Error] Invalid port' else: # invalid arguments print '[Error] Usage: python Server.py <port>'
self.blocked_ips[address[0]] = time.time()
identifier_body
Server.py
#!/usr/bin/python import socket import sys import threading import time import Constants # Thread to serve clients class ServerThread(threading.Thread): # constructor def __init__(self, server, address, connection): threading.Thread.__init__(self) self.server = server self.address = address self.connection = connection # whether the client has been logined self.login = False # number of failed login attempts self.failed_login_attempts = 0 # the username of the client self.user = '' # time of the last operation self.last_op_time = time.time() # processes commands received from the client def run(self): try: while True: # receive message from the client msg = self.connection.recv(Constants.MAX_MSG_LENGTH) # If no message, break the loop if not msg: break # update the last operation time self.last_op_time = time.time() # if not logined, if not self.login: # process the login of the client self.process_login(msg) else: # process command if self.process_command(msg): # if the client exited, break the loop break except socket.error: pass # close the connection self.connection.close() # processes login command def process_login(self, msg): # if it is a login message ('login|username|password') if msg.startswith(Constants.MSG_LOGIN): # extract username and password cmd, user, password = msg.split('|') print 'login:', user, password # verify the username and password if self.server.verify_user(user, password): if not self.server.is_online(user): # if the user is not online # record his login time self.server.logins[user] = time.time() self.user = user self.login = True # send back a success message self.connection.send(Constants.MSG_SUCCESS) # send offline messages to him self.server.send_offline_messages(user, self.connection) # tell other clients self.server.broadcast('server', user + ' login', user) else: # if the user is already online, send back the message
else: # increment the failed times self.failed_login_attempts += 1 # if it exceeds the maximum retry times, if self.failed_login_attempts >= Constants.MAX_LOGIN_ATTEMPTS: # tell the client self.connection.send(Constants.MSG_LOGIN_EXCEED_MAX_TIMES) # block the ip self.server.block_client(self.address) # disconnect the client self.server.disconnect(self.address) return True else: # send back a failed message self.connection.send(Constants.MSG_FAILED) else: # send back a failed message self.connection.send(Constants.MSG_FAILED) return False # processes the command def process_command(self, msg): exited = False if msg == Constants.MSG_EXIT: # client exits exited = True elif msg == Constants.MSG_WHO_ELSE: # send back who else self.connection.send('[who else] ' + ', '.join(self.server.who_else(self.address))) elif msg == Constants.MSG_WHO_LAST_HOUR: # send back who logined in the last hour self.connection.send('[who last hour] ' + ', '.join(self.server.who_last_hour())) elif msg.startswith(Constants.MSG_BROADCAST): # extract the message cmd, msg = msg.split('|', 1) # broadcast the message self.server.broadcast(self.user, msg) elif msg.startswith(Constants.MSG_MESSAGE): # extract the target user and message cmd, user, msg = msg.split('|', 2) # send message to the target user if not self.server.message(self.user, user, msg): if user in self.server.passwords: self.connection.send(user + ' is offline now, and will see the message when login.') else: self.connection.send(user + ' doesn\'t exist.') elif msg == Constants.MSG_LOGOUT: # if the user want to logout, tell the other clients self.server.broadcast('server', self.user + ' logout') # disconnect self.server.disconnect(self.address) exited = True return exited # Server class class Server: # constructor def __init__(self, port): # server port self.port = port # {client address -> client threads} self.clients = {} # {username -> password} self.passwords = {} # {username -> last login time} self.logins = {} # {ip -> blocked 
time} self.blocked_ips = {} # {username -> [messages]} self.offline_messages = {} # starts the server def start(self): # load the password file, exit if failed. if not self.load_passwords(): return # start a thread to check the timeout for inactive clients. t = threading.Thread(target=self.check_inactive_user) t.setDaemon(True) t.start() # create a server socket s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # force to reuse the address s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # bind the address s.bind(('127.0.0.1', self.port)) # listen to at most 10 clients s.listen(10) try: # server loop while True: # wait for the connection of client connection, address = s.accept() # if a client connects, check its blocked time block_t = self.remaining_block_time(address) if block_t == 0: # if no blocked time left, send an accept message to it connection.send(Constants.MEG_ACCEPTED) # start a thread for it self.clients[address] = ServerThread(self, address, connection) self.clients[address].start() else: # if the client is blocked, send back its remaining blocked seconds. connection.send('|'.join([Constants.MSG_BLOCKED, str(block_t)])) # wait for acknowledge connection.recv(Constants.MAX_MSG_LENGTH) # close the connection connection.close() except KeyboardInterrupt: # press ctrl-c to stop the server. self.stop_server() # stop the server def stop_server(self): print 'Stop server...' 
# disconnect all the clients for address in self.clients.keys(): self.disconnect(address) # disconnect a client def disconnect(self, address): # if the address is present, if address in self.clients: # get the client thread t = self.clients[address] if t.user != '': print 'logout:', t.user try: # send an exit message t.connection.send(Constants.MSG_EXIT) # close the connection t.connection.close() except socket.error: pass # remove its thread del self.clients[address] # returns the remaining blocked time of the client address def remaining_block_time(self, address): # get the ip from the address ip = address[0] # if it is not in the blocked dict, return 0 if ip not in self.blocked_ips: return 0 current_time = time.time() block_time = self.blocked_ips[ip] if current_time - block_time > Constants.BLOCK_TIME: # if the difference exceeds the block time, return 0 return 0 else: # otherwise return the remaining blocked time return Constants.BLOCK_TIME - (current_time - block_time) # blocks the ip of the client def block_client(self, address): # add the ip and blocked time to the blocked dict self.blocked_ips[address[0]] = time.time() # loads usernames and passwords from the password file # return True if success or False otherwise. def load_passwords(self): print 'load users' try: # open the file f = open(Constants.PASSWORD_FILE) # for each line in the file for line in f: # remove leading and trailing spaces line = line.strip() # if the line contains exactly one space if line.count(' ') == 1: # extract the username and password user, pwd = line.split(' ') # add them to the password dict self.passwords[user] = pwd # close the file f.close() return True except IOError: print '[Error] user_pass.txt is missing.' return False # returns True iff the username and password are correct. 
def verify_user(self, user, password): return user in self.passwords and self.passwords[user] == password # returns a list of online users excluding the current user def who_else(self, current_address): # create an empty list users = [] # for each address of online clients for address in self.clients: # if it is not the address of the current client if address != current_address: # add its username to the list users.append(self.clients[address].user) return users # returns a list of users who logined in the last hour def who_last_hour(self): # get the current time current_time = time.time() # for each user logined, if its last login time is in the last hour, # add it to the list. return [user for user in self.logins if current_time - self.logins[user] <= Constants.SEC_PER_MIN * Constants.LAST_HOUR] # sends a message the a specified user. # returns True iff the user is online. def message(self, from_user, to_user, msg): found = False # add a message header msg = '[' + from_user + ']: ' + msg # for each online client for address in self.clients: t = self.clients[address] # if the target user is found, send the message to him. if t.user == to_user: t.connection.send(msg) found = True if not found: # if the user is not present, add the message to the offline messages if to_user not in self.offline_messages: self.offline_messages[to_user] = [msg] else: self.offline_messages[to_user].append(msg) return found # broadcasts the message to all the users def broadcast(self, from_user, msg, excluding_user=''): # add a message header msg = '[' + from_user + ' broadcast]: ' + msg # for each online client for address in self.clients: # send the message to it if it's not the excluding user. 
t = self.clients[address] if t.user != excluding_user: t.connection.send(msg) # returns True if the specified user is online def is_online(self, user): found = False # for each online client for address in self.clients: # if the username matches, return True if self.clients[address].user == user: found = True break return found # checks and removes inactive clients def check_inactive_user(self): # loop in background till the server ends while True: print 'check timeout for inactive users' # get the current time current_time = time.time() # for each online client for address in self.clients.keys(): # if its last operation time is earlier than the timeout t = self.clients[address] if current_time - t.last_op_time > Constants.TIME_OUT * 60: # tell other clients self.broadcast('server', t.user + ' logout') print t.user, 'is kicked out' # automatically log him out self.disconnect(t.address) # sleep for a minute and check again time.sleep(Constants.SEC_PER_MIN) # send offline messages def send_offline_messages(self, user, connection): # if the user has offline messages if user in self.offline_messages: # send all the offline messages to him for msg in self.offline_messages[user]: connection.send('[offline message] ' + msg) # delete the messages del self.offline_messages[user] if __name__ == '__main__': if len(sys.argv) == 2: try: # create a server port = int(sys.argv[1]) s = Server(port) # start the server s.start() except ValueError: print '[Error] Invalid port' else: # invalid arguments print '[Error] Usage: python Server.py <port>'
self.connection.send(Constants.MSG_USER_ALREADY_LOGINED)
conditional_block
Server.py
#!/usr/bin/python import socket import sys import threading import time import Constants # Thread to serve clients class ServerThread(threading.Thread): # constructor def __init__(self, server, address, connection): threading.Thread.__init__(self) self.server = server self.address = address self.connection = connection # whether the client has been logined self.login = False # number of failed login attempts self.failed_login_attempts = 0 # the username of the client self.user = '' # time of the last operation self.last_op_time = time.time() # processes commands received from the client def run(self): try: while True: # receive message from the client msg = self.connection.recv(Constants.MAX_MSG_LENGTH) # If no message, break the loop if not msg: break # update the last operation time self.last_op_time = time.time() # if not logined, if not self.login: # process the login of the client self.process_login(msg) else: # process command if self.process_command(msg): # if the client exited, break the loop break except socket.error: pass # close the connection self.connection.close() # processes login command def process_login(self, msg): # if it is a login message ('login|username|password') if msg.startswith(Constants.MSG_LOGIN): # extract username and password cmd, user, password = msg.split('|') print 'login:', user, password # verify the username and password if self.server.verify_user(user, password): if not self.server.is_online(user): # if the user is not online # record his login time self.server.logins[user] = time.time() self.user = user self.login = True # send back a success message self.connection.send(Constants.MSG_SUCCESS) # send offline messages to him self.server.send_offline_messages(user, self.connection) # tell other clients self.server.broadcast('server', user + ' login', user) else: # if the user is already online, send back the message self.connection.send(Constants.MSG_USER_ALREADY_LOGINED) else: # increment the failed times 
self.failed_login_attempts += 1 # if it exceeds the maximum retry times, if self.failed_login_attempts >= Constants.MAX_LOGIN_ATTEMPTS: # tell the client self.connection.send(Constants.MSG_LOGIN_EXCEED_MAX_TIMES) # block the ip self.server.block_client(self.address) # disconnect the client self.server.disconnect(self.address) return True else: # send back a failed message self.connection.send(Constants.MSG_FAILED) else: # send back a failed message self.connection.send(Constants.MSG_FAILED) return False # processes the command def process_command(self, msg): exited = False if msg == Constants.MSG_EXIT: # client exits exited = True elif msg == Constants.MSG_WHO_ELSE: # send back who else self.connection.send('[who else] ' + ', '.join(self.server.who_else(self.address))) elif msg == Constants.MSG_WHO_LAST_HOUR: # send back who logined in the last hour self.connection.send('[who last hour] ' + ', '.join(self.server.who_last_hour())) elif msg.startswith(Constants.MSG_BROADCAST): # extract the message cmd, msg = msg.split('|', 1) # broadcast the message self.server.broadcast(self.user, msg) elif msg.startswith(Constants.MSG_MESSAGE): # extract the target user and message cmd, user, msg = msg.split('|', 2) # send message to the target user if not self.server.message(self.user, user, msg): if user in self.server.passwords: self.connection.send(user + ' is offline now, and will see the message when login.') else: self.connection.send(user + ' doesn\'t exist.') elif msg == Constants.MSG_LOGOUT: # if the user want to logout, tell the other clients self.server.broadcast('server', self.user + ' logout') # disconnect self.server.disconnect(self.address) exited = True return exited # Server class class Server: # constructor def __init__(self, port): # server port self.port = port # {client address -> client threads} self.clients = {} # {username -> password} self.passwords = {} # {username -> last login time} self.logins = {} # {ip -> blocked time} self.blocked_ips = {} # 
{username -> [messages]} self.offline_messages = {} # starts the server def start(self): # load the password file, exit if failed. if not self.load_passwords(): return # start a thread to check the timeout for inactive clients. t = threading.Thread(target=self.check_inactive_user) t.setDaemon(True) t.start() # create a server socket s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # force to reuse the address s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # bind the address s.bind(('127.0.0.1', self.port)) # listen to at most 10 clients s.listen(10) try: # server loop while True: # wait for the connection of client connection, address = s.accept() # if a client connects, check its blocked time block_t = self.remaining_block_time(address) if block_t == 0: # if no blocked time left, send an accept message to it connection.send(Constants.MEG_ACCEPTED) # start a thread for it self.clients[address] = ServerThread(self, address, connection) self.clients[address].start() else: # if the client is blocked, send back its remaining blocked seconds. connection.send('|'.join([Constants.MSG_BLOCKED, str(block_t)])) # wait for acknowledge connection.recv(Constants.MAX_MSG_LENGTH) # close the connection connection.close() except KeyboardInterrupt: # press ctrl-c to stop the server. self.stop_server() # stop the server def
(self): print 'Stop server...' # disconnect all the clients for address in self.clients.keys(): self.disconnect(address) # disconnect a client def disconnect(self, address): # if the address is present, if address in self.clients: # get the client thread t = self.clients[address] if t.user != '': print 'logout:', t.user try: # send an exit message t.connection.send(Constants.MSG_EXIT) # close the connection t.connection.close() except socket.error: pass # remove its thread del self.clients[address] # returns the remaining blocked time of the client address def remaining_block_time(self, address): # get the ip from the address ip = address[0] # if it is not in the blocked dict, return 0 if ip not in self.blocked_ips: return 0 current_time = time.time() block_time = self.blocked_ips[ip] if current_time - block_time > Constants.BLOCK_TIME: # if the difference exceeds the block time, return 0 return 0 else: # otherwise return the remaining blocked time return Constants.BLOCK_TIME - (current_time - block_time) # blocks the ip of the client def block_client(self, address): # add the ip and blocked time to the blocked dict self.blocked_ips[address[0]] = time.time() # loads usernames and passwords from the password file # return True if success or False otherwise. def load_passwords(self): print 'load users' try: # open the file f = open(Constants.PASSWORD_FILE) # for each line in the file for line in f: # remove leading and trailing spaces line = line.strip() # if the line contains exactly one space if line.count(' ') == 1: # extract the username and password user, pwd = line.split(' ') # add them to the password dict self.passwords[user] = pwd # close the file f.close() return True except IOError: print '[Error] user_pass.txt is missing.' return False # returns True iff the username and password are correct. 
def verify_user(self, user, password): return user in self.passwords and self.passwords[user] == password # returns a list of online users excluding the current user def who_else(self, current_address): # create an empty list users = [] # for each address of online clients for address in self.clients: # if it is not the address of the current client if address != current_address: # add its username to the list users.append(self.clients[address].user) return users # returns a list of users who logined in the last hour def who_last_hour(self): # get the current time current_time = time.time() # for each user logined, if its last login time is in the last hour, # add it to the list. return [user for user in self.logins if current_time - self.logins[user] <= Constants.SEC_PER_MIN * Constants.LAST_HOUR] # sends a message the a specified user. # returns True iff the user is online. def message(self, from_user, to_user, msg): found = False # add a message header msg = '[' + from_user + ']: ' + msg # for each online client for address in self.clients: t = self.clients[address] # if the target user is found, send the message to him. if t.user == to_user: t.connection.send(msg) found = True if not found: # if the user is not present, add the message to the offline messages if to_user not in self.offline_messages: self.offline_messages[to_user] = [msg] else: self.offline_messages[to_user].append(msg) return found # broadcasts the message to all the users def broadcast(self, from_user, msg, excluding_user=''): # add a message header msg = '[' + from_user + ' broadcast]: ' + msg # for each online client for address in self.clients: # send the message to it if it's not the excluding user. 
t = self.clients[address] if t.user != excluding_user: t.connection.send(msg) # returns True if the specified user is online def is_online(self, user): found = False # for each online client for address in self.clients: # if the username matches, return True if self.clients[address].user == user: found = True break return found # checks and removes inactive clients def check_inactive_user(self): # loop in background till the server ends while True: print 'check timeout for inactive users' # get the current time current_time = time.time() # for each online client for address in self.clients.keys(): # if its last operation time is earlier than the timeout t = self.clients[address] if current_time - t.last_op_time > Constants.TIME_OUT * 60: # tell other clients self.broadcast('server', t.user + ' logout') print t.user, 'is kicked out' # automatically log him out self.disconnect(t.address) # sleep for a minute and check again time.sleep(Constants.SEC_PER_MIN) # send offline messages def send_offline_messages(self, user, connection): # if the user has offline messages if user in self.offline_messages: # send all the offline messages to him for msg in self.offline_messages[user]: connection.send('[offline message] ' + msg) # delete the messages del self.offline_messages[user] if __name__ == '__main__': if len(sys.argv) == 2: try: # create a server port = int(sys.argv[1]) s = Server(port) # start the server s.start() except ValueError: print '[Error] Invalid port' else: # invalid arguments print '[Error] Usage: python Server.py <port>'
stop_server
identifier_name
train_stage1.py
#!/usr/bin/python # -*- encoding: utf-8 -*- import os, sys, random, warnings import torch import argparse from renderer import Estimator3D from datasets import LP_Dataset, FirstStageDataset from logger import TrainStage1Logger from torch.utils.data import DataLoader from torch.autograd import Variable import torch.autograd as autograd from torchvision.utils import make_grid import torch.nn.functional as F import numpy as np from torch.nn.parallel import DistributedDataParallel as DDP import torch.backends.cudnn as cudnn import torch.distributed as dist import torch.nn as nn import torch.optim import torch.multiprocessing as mp import torch.utils.data import torch.utils.data.distributed import torch.nn.parallel from faceParsing.model import BiSeNet from face_backbone import IR_SE_50 logger = TrainStage1Logger('./logs_stage1') def train(models, criterions, optimizer, scheduler, train_loader, val_loader, epoch, args): landmark_weight = torch.cat([torch.ones((1,28)),20*torch.ones((1,3)),torch.ones((1,6)),torch.ones((1,12))*5, torch.ones((1,11)), 20*torch.ones((1,8))], dim = 1).cuda() mean = torch.FloatTensor([0.485, 0.456, 0.406]).cuda().unsqueeze(0).unsqueeze(-1).unsqueeze(-1) std = torch.FloatTensor([0.229, 0.224, 0.225]).cuda().unsqueeze(0).unsqueeze(-1).unsqueeze(-1) mean_f = torch.FloatTensor([0.5, 0.5, 0.5]).cuda().unsqueeze(0).unsqueeze(-1).unsqueeze(-1) std_f = torch.FloatTensor([0.5, 0.5, 0.5]).cuda().unsqueeze(0).unsqueeze(-1).unsqueeze(-1) for i, (occluded, img, lmk, flag) in enumerate(train_loader): optimizer.zero_grad() # Configure model input occluded = Variable(occluded.type(torch.cuda.FloatTensor), requires_grad=False).cuda() img = Variable(img.type(torch.cuda.FloatTensor), requires_grad=False).cuda() lmk = Variable(lmk.type(torch.cuda.FloatTensor), requires_grad=False).cuda() flag = Variable(flag.type(torch.cuda.FloatTensor), requires_grad=False).cuda() coef = models['3D'].regress_3dmm(occluded[:,[2,1,0],...]) rendered, landmark, reg_loss, gamma_loss = 
models['3D'].reconstruct(coef) rendered = rendered.permute(0,3,1,2).contiguous()[:,[2,1,0],:,:] # pose_loss = criterions['L1'](angles, ang) align_loss = torch.sum(torch.sum(torch.square(landmark-lmk), dim=2)*flag*landmark_weight, dim=1) / 68.0 align_loss = torch.sum(align_loss) / lmk.shape[0] # coefficients regularization 1.7e-3 coef_loss = torch.norm(coef[...,:80]) + 0.1*torch.norm(coef[...,80:144]) + 1.7e-3*torch.norm(coef[...,144:224]) # For skin parsing_input = F.interpolate((img-mean)/std, (512,512)) parsed = models['Seg'](parsing_input) parsed = F.interpolate(parsed, (224,224)) parsed = torch.argmax(parsed, dim=1, keepdim=True) mask = torch.zeros_like(parsed, dtype=torch.float32).cuda() # skin 1, nose 2, eye_glass 3, r_eye 4, l_eye 5, r_brow 6, l_brow 7, r_ear 8, l_ear 9, # inner_mouth 10, u_lip 11, l_lip 12, hair 13 indices = ((parsed>=1).type(torch.BoolTensor) & (parsed<=7).type(torch.BoolTensor) & (parsed!=3).type(torch.BoolTensor)) \ | ((parsed>=11).type(torch.BoolTensor) & (parsed<=12).type(torch.BoolTensor)) mask[indices] = 1.0 # Get vector mask rendered_noise = torch.mean(rendered, dim=1, keepdim=True) > 0.0 vector = torch.zeros_like(rendered_noise, dtype=img.dtype).cuda() vector[rendered_noise] = 1.0 # Synthesize background rendered = img*(1.-vector) + rendered*vector # Perceptual loss affined_r = F.interpolate(rendered[:,:,15:-40,15:-15], (112,112), mode='bilinear', align_corners=True) affined_i = F.interpolate(img[:,:,15:-40,15:-15], (112,112), mode='bilinear', align_corners=True) emb_r = models['face']((affined_r-mean_f)/std_f) emb_i = models['face']((affined_i-mean_f)/std_f) id_loss = torch.mean(1. 
- criterions['Cos'](emb_r, emb_i)) # Reconstruction loss rec_loss = torch.sum(torch.abs(img - rendered), dim=1)*mask rec_loss = torch.sum(rec_loss) / torch.sum(mask) total_loss = coef_loss*1e-4 + rec_loss*0.01 + reg_loss*0.25 + align_loss*0.007 + gamma_loss*10.0 + id_loss*0.15 total_loss.backward() optimizer.step() # logging if torch.distributed.get_rank() == 0: scheduler.step() total_iteration = len(train_loader) * epoch + i logger.log_training(coef_loss.item(), rec_loss.item(), reg_loss.item(), align_loss.item(), id_loss.item(), total_iteration) if total_iteration % 250 == 0: rendered_grid = make_grid(rendered, nrow=args.batch_size//2, normalize=True) lmk = lmk.type(torch.LongTensor) landmark = landmark.type(torch.LongTensor) color1 = torch.FloatTensor([1.0,0.0,0.0]).unsqueeze(-1).unsqueeze(-1) color2 = torch.FloatTensor([0.0,0.0,1.0]).unsqueeze(-1).unsqueeze(-1) for b in range(img.size(0)): for l in range(68): occluded[b, :, lmk[b,l,1]-2:lmk[b,l,1]+2, lmk[b,l,0]-2:lmk[b,l,0]+2] = color1 occluded[b, :, landmark[b,l,1]-2:landmark[b,l,1]+2, landmark[b,l,0]-2:landmark[b,l,0]+2] = color2 input_grid = make_grid(occluded, nrow=args.batch_size//2, normalize=False) logger.log_train_image(input_grid, rendered_grid, total_iteration) sys.stdout.write('\r[Epoch %d/%d][Iter %d/%d][Total_iter %d]' % (epoch, args.epochs, i, len(train_loader), total_iteration)) if i!=0 and total_iteration % args.val_iters == 0: error = validate(models, val_loader, epoch, args) logger.log_validation(error, epoch) torch.save(models['3D'].regressor.module.state_dict(), args.save_path+"/reg_it%d_%.4f_stage1.pth" % (total_iteration, error)) def validate(models, val_loader, epoch, args): with torch.no_grad(): align_error = 0.0 for i, (occluded, lmk) in enumerate(val_loader): print('\rval %d...' 
% (i+1), end='') occluded = Variable(occluded.type(torch.cuda.FloatTensor)).cuda() lmk = Variable(lmk.type(torch.cuda.FloatTensor)).cuda() coef = models['3D'].regress_3dmm(occluded[:,[2,1,0],...]) _, landmark = models['3D'].reconstruct(coef, test=True) align_error += torch.mean(torch.abs(landmark - lmk)) align_error /= len(val_loader) return align_error def
(args): if args.seed is not None: random.seed(args.seed) torch.manual_seed(args.seed) cudnn.deterministic = True warnings.warn('You have chosen to seed training. ' 'This will turn on the CUDNN deterministic setting, ' 'which can slow down your training considerably! ' 'You may see unexpected behavior when restarting ' 'from checkpoints.') if args.gpu is not None: warnings.warn('You have chosen a specific GPU. This will completely ' 'disable data parallelism.') if args.dist_url == "env://" and args.world_size == -1: args.world_size = int(os.environ["WORLD_SIZE"]) args.distributed = args.world_size > 1 or args.multiprocessing_distributed ngpus_per_node = torch.cuda.device_count() if args.multiprocessing_distributed: # Since we have ngpus_per_node processes per node, the total world_size # needs to be adjusted accordingly args.world_size = ngpus_per_node * args.world_size # Use torch.multiprocessing.spawn to launch distributed processes: the # main_worker process function mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args)) else: # Simply call main_worker function main_worker(args.gpu, ngpus_per_node, args) def main_worker(gpu, ngpus_per_node, args): args.gpu = gpu if args.gpu is not None: print("Use GPU: {} for training".format(args.gpu)) if args.distributed: if args.dist_url == "env://" and args.rank == -1: args.rank = int(os.environ["RANK"]) if args.multiprocessing_distributed: # For multiprocessing distributed training, rank needs to be the # global rank among all the processes args.rank = args.rank * ngpus_per_node + gpu print('rank', args.rank) dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank) torch.cuda.set_device(args.gpu) # Load models estimator3d = Estimator3D(is_cuda=True, batch_size=args.batch_size, model_path=args.checkpoint, test=False, back_white=False, device_id=args.gpu) estimator3d.regressor.cuda(args.gpu) parsing_net = BiSeNet(n_classes=19) 
parsing_net.cuda(args.gpu) parsing_net.load_state_dict(torch.load('faceParsing/model_final_diss.pth', map_location='cuda:'+str(args.gpu))) parsing_net.eval() face_encoder = IR_SE_50([112,112]) face_encoder.load_state_dict(torch.load('saved_models/face_res_50.pth', map_location='cuda:'+str(args.gpu))) face_encoder.cuda(args.gpu) face_encoder.eval() args.batch_size = int(args.batch_size / ngpus_per_node) args.workers = int(args.workers / ngpus_per_node) estimator3d.regressor = DDP(estimator3d.regressor, device_ids=[args.gpu], broadcast_buffers=False, find_unused_parameters=True) parsing_net = DDP(parsing_net, device_ids=[args.gpu], broadcast_buffers=False, find_unused_parameters=True) face_encoder = DDP(face_encoder, device_ids=[args.gpu], broadcast_buffers=False, find_unused_parameters=True) models = {} models['3D'] = estimator3d models['Seg'] = parsing_net models['face'] = face_encoder # Losses criterions = {} criterions['L2'] = torch.nn.MSELoss().cuda(args.gpu) criterions['L1'] = torch.nn.L1Loss().cuda(args.gpu) criterions['Cos'] = torch.nn.CosineSimilarity().cuda(args.gpu) cudnn.benchmark = True dataset = FirstStageDataset(occ_path=args.train_data_path + '/occluded', \ img_path=args.train_data_path + '/ori_img', \ lmk_path=args.train_data_path + '/landmarks') if args.distributed: train_sampler = torch.utils.data.distributed.DistributedSampler(dataset) else: train_sampler = None train_loader = DataLoader( dataset, batch_size = args.batch_size, shuffle = (train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True ) val_dataset = LP_Dataset(args.val_data_path+'/occluded', args.val_data_path+'/landmarks') val_loader = DataLoader( val_dataset, batch_size = args.batch_size, shuffle = False, drop_last=True, num_workers=args.workers, pin_memory=True ) optimizer = torch.optim.AdamW(estimator3d.regressor.parameters(), lr=args.lr, betas=(0.5,0.999)) scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, 
T_max=args.epochs*(len(train_loader))) print(len(train_loader)) for epoch in range(args.start_epoch, args.epochs): if args.distributed: train_sampler.set_epoch(epoch) train(models, criterions, optimizer, scheduler, train_loader, val_loader, epoch, args) if torch.distributed.get_rank() == 0: error = validate(models, val_loader, epoch, args) logger.log_validation(error, epoch) torch.save(estimator3d.regressor.module.state_dict(), args.save_path+"/reg_ep%d_%.4f_stage1.pth" % (epoch+1, error)) if __name__=='__main__': parser = argparse.ArgumentParser(description='Occlusion robust 3D face reconstruction') parser.add_argument('--train_data_path', required=True, help='path containing training data folders') parser.add_argument('--val_data_path', required=True, type=str, help='path containing validation data folders') parser.add_argument('--flag', default=None, type=str, help='flag prepended to filenames') parser.add_argument('--save_path', default='saved_models', help='path to save checkpoints') parser.add_argument('--checkpoint', default=None, type=str, help='path to resume checkpoint') parser.add_argument('--start_epoch', default=0, type=int, metavar='N') parser.add_argument('-j', '--workers', default=8, type=int, metavar='N', help='number of data loading workers (default: 4)') parser.add_argument('--epochs', default=50, type=int, metavar='N', help='number of total epochs to run') parser.add_argument('--val_iters', default=5000, type=int, metavar='N') parser.add_argument('-b', '--batch-size', default=64, type=int, metavar='N') parser.add_argument('--lr', '--learning-rate', default=1e-5, type=float, metavar='LR', help='initial learning rate', dest='lr') parser.add_argument('--world-size', default=1, type=int, help='number of nodes for distributed training') parser.add_argument('--rank', default=0, type=int, help='node rank for distributed training') parser.add_argument('--dist-url', default='tcp://127.0.0.1:29500', type=str, help='url used to set up distributed 
training') parser.add_argument('--dist-backend', default='nccl', type=str, help='distributed backend') parser.add_argument('--seed', default=None, type=int, help='seed for initializing training. ') parser.add_argument('--gpu', default=None, type=int, help='GPU id to use.') parser.add_argument('--multiprocessing-distributed', action='store_true', default=True, help='Use multi-processing distributed training to launch ' 'N processes per node, which has N GPUs. This is the ' 'fastest way to use PyTorch for either single node or ' 'multi node data parallel training') args = parser.parse_args() main(args)
main
identifier_name
train_stage1.py
#!/usr/bin/python # -*- encoding: utf-8 -*- import os, sys, random, warnings import torch import argparse from renderer import Estimator3D from datasets import LP_Dataset, FirstStageDataset from logger import TrainStage1Logger from torch.utils.data import DataLoader from torch.autograd import Variable import torch.autograd as autograd from torchvision.utils import make_grid import torch.nn.functional as F import numpy as np from torch.nn.parallel import DistributedDataParallel as DDP import torch.backends.cudnn as cudnn import torch.distributed as dist import torch.nn as nn import torch.optim import torch.multiprocessing as mp import torch.utils.data import torch.utils.data.distributed import torch.nn.parallel from faceParsing.model import BiSeNet from face_backbone import IR_SE_50 logger = TrainStage1Logger('./logs_stage1') def train(models, criterions, optimizer, scheduler, train_loader, val_loader, epoch, args): landmark_weight = torch.cat([torch.ones((1,28)),20*torch.ones((1,3)),torch.ones((1,6)),torch.ones((1,12))*5, torch.ones((1,11)), 20*torch.ones((1,8))], dim = 1).cuda() mean = torch.FloatTensor([0.485, 0.456, 0.406]).cuda().unsqueeze(0).unsqueeze(-1).unsqueeze(-1) std = torch.FloatTensor([0.229, 0.224, 0.225]).cuda().unsqueeze(0).unsqueeze(-1).unsqueeze(-1) mean_f = torch.FloatTensor([0.5, 0.5, 0.5]).cuda().unsqueeze(0).unsqueeze(-1).unsqueeze(-1) std_f = torch.FloatTensor([0.5, 0.5, 0.5]).cuda().unsqueeze(0).unsqueeze(-1).unsqueeze(-1) for i, (occluded, img, lmk, flag) in enumerate(train_loader): optimizer.zero_grad() # Configure model input occluded = Variable(occluded.type(torch.cuda.FloatTensor), requires_grad=False).cuda() img = Variable(img.type(torch.cuda.FloatTensor), requires_grad=False).cuda() lmk = Variable(lmk.type(torch.cuda.FloatTensor), requires_grad=False).cuda() flag = Variable(flag.type(torch.cuda.FloatTensor), requires_grad=False).cuda() coef = models['3D'].regress_3dmm(occluded[:,[2,1,0],...]) rendered, landmark, reg_loss, gamma_loss = 
models['3D'].reconstruct(coef) rendered = rendered.permute(0,3,1,2).contiguous()[:,[2,1,0],:,:] # pose_loss = criterions['L1'](angles, ang) align_loss = torch.sum(torch.sum(torch.square(landmark-lmk), dim=2)*flag*landmark_weight, dim=1) / 68.0 align_loss = torch.sum(align_loss) / lmk.shape[0] # coefficients regularization 1.7e-3 coef_loss = torch.norm(coef[...,:80]) + 0.1*torch.norm(coef[...,80:144]) + 1.7e-3*torch.norm(coef[...,144:224]) # For skin parsing_input = F.interpolate((img-mean)/std, (512,512)) parsed = models['Seg'](parsing_input) parsed = F.interpolate(parsed, (224,224)) parsed = torch.argmax(parsed, dim=1, keepdim=True) mask = torch.zeros_like(parsed, dtype=torch.float32).cuda() # skin 1, nose 2, eye_glass 3, r_eye 4, l_eye 5, r_brow 6, l_brow 7, r_ear 8, l_ear 9, # inner_mouth 10, u_lip 11, l_lip 12, hair 13 indices = ((parsed>=1).type(torch.BoolTensor) & (parsed<=7).type(torch.BoolTensor) & (parsed!=3).type(torch.BoolTensor)) \ | ((parsed>=11).type(torch.BoolTensor) & (parsed<=12).type(torch.BoolTensor)) mask[indices] = 1.0 # Get vector mask rendered_noise = torch.mean(rendered, dim=1, keepdim=True) > 0.0 vector = torch.zeros_like(rendered_noise, dtype=img.dtype).cuda() vector[rendered_noise] = 1.0 # Synthesize background rendered = img*(1.-vector) + rendered*vector # Perceptual loss affined_r = F.interpolate(rendered[:,:,15:-40,15:-15], (112,112), mode='bilinear', align_corners=True) affined_i = F.interpolate(img[:,:,15:-40,15:-15], (112,112), mode='bilinear', align_corners=True) emb_r = models['face']((affined_r-mean_f)/std_f) emb_i = models['face']((affined_i-mean_f)/std_f) id_loss = torch.mean(1. 
- criterions['Cos'](emb_r, emb_i)) # Reconstruction loss rec_loss = torch.sum(torch.abs(img - rendered), dim=1)*mask rec_loss = torch.sum(rec_loss) / torch.sum(mask) total_loss = coef_loss*1e-4 + rec_loss*0.01 + reg_loss*0.25 + align_loss*0.007 + gamma_loss*10.0 + id_loss*0.15 total_loss.backward() optimizer.step() # logging if torch.distributed.get_rank() == 0: scheduler.step() total_iteration = len(train_loader) * epoch + i logger.log_training(coef_loss.item(), rec_loss.item(), reg_loss.item(), align_loss.item(), id_loss.item(), total_iteration) if total_iteration % 250 == 0: rendered_grid = make_grid(rendered, nrow=args.batch_size//2, normalize=True) lmk = lmk.type(torch.LongTensor) landmark = landmark.type(torch.LongTensor) color1 = torch.FloatTensor([1.0,0.0,0.0]).unsqueeze(-1).unsqueeze(-1) color2 = torch.FloatTensor([0.0,0.0,1.0]).unsqueeze(-1).unsqueeze(-1) for b in range(img.size(0)): for l in range(68): occluded[b, :, lmk[b,l,1]-2:lmk[b,l,1]+2, lmk[b,l,0]-2:lmk[b,l,0]+2] = color1 occluded[b, :, landmark[b,l,1]-2:landmark[b,l,1]+2, landmark[b,l,0]-2:landmark[b,l,0]+2] = color2 input_grid = make_grid(occluded, nrow=args.batch_size//2, normalize=False) logger.log_train_image(input_grid, rendered_grid, total_iteration) sys.stdout.write('\r[Epoch %d/%d][Iter %d/%d][Total_iter %d]' % (epoch, args.epochs, i, len(train_loader), total_iteration)) if i!=0 and total_iteration % args.val_iters == 0: error = validate(models, val_loader, epoch, args) logger.log_validation(error, epoch) torch.save(models['3D'].regressor.module.state_dict(), args.save_path+"/reg_it%d_%.4f_stage1.pth" % (total_iteration, error)) def validate(models, val_loader, epoch, args): with torch.no_grad(): align_error = 0.0 for i, (occluded, lmk) in enumerate(val_loader): print('\rval %d...' 
% (i+1), end='') occluded = Variable(occluded.type(torch.cuda.FloatTensor)).cuda() lmk = Variable(lmk.type(torch.cuda.FloatTensor)).cuda() coef = models['3D'].regress_3dmm(occluded[:,[2,1,0],...]) _, landmark = models['3D'].reconstruct(coef, test=True) align_error += torch.mean(torch.abs(landmark - lmk)) align_error /= len(val_loader) return align_error def main(args): if args.seed is not None: random.seed(args.seed) torch.manual_seed(args.seed) cudnn.deterministic = True warnings.warn('You have chosen to seed training. ' 'This will turn on the CUDNN deterministic setting, ' 'which can slow down your training considerably! ' 'You may see unexpected behavior when restarting ' 'from checkpoints.') if args.gpu is not None: warnings.warn('You have chosen a specific GPU. This will completely ' 'disable data parallelism.') if args.dist_url == "env://" and args.world_size == -1: args.world_size = int(os.environ["WORLD_SIZE"]) args.distributed = args.world_size > 1 or args.multiprocessing_distributed ngpus_per_node = torch.cuda.device_count() if args.multiprocessing_distributed: # Since we have ngpus_per_node processes per node, the total world_size # needs to be adjusted accordingly args.world_size = ngpus_per_node * args.world_size # Use torch.multiprocessing.spawn to launch distributed processes: the # main_worker process function mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args)) else: # Simply call main_worker function main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args): args.gpu = gpu if args.gpu is not None: print("Use GPU: {} for training".format(args.gpu)) if args.distributed: if args.dist_url == "env://" and args.rank == -1: args.rank = int(os.environ["RANK"]) if args.multiprocessing_distributed: # For multiprocessing distributed training, rank needs to be the # global rank among all the processes args.rank = args.rank * ngpus_per_node + gpu print('rank', args.rank) dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank) torch.cuda.set_device(args.gpu) # Load models estimator3d = Estimator3D(is_cuda=True, batch_size=args.batch_size, model_path=args.checkpoint, test=False, back_white=False, device_id=args.gpu) estimator3d.regressor.cuda(args.gpu) parsing_net = BiSeNet(n_classes=19) parsing_net.cuda(args.gpu) parsing_net.load_state_dict(torch.load('faceParsing/model_final_diss.pth', map_location='cuda:'+str(args.gpu))) parsing_net.eval() face_encoder = IR_SE_50([112,112]) face_encoder.load_state_dict(torch.load('saved_models/face_res_50.pth', map_location='cuda:'+str(args.gpu))) face_encoder.cuda(args.gpu) face_encoder.eval() args.batch_size = int(args.batch_size / ngpus_per_node) args.workers = int(args.workers / ngpus_per_node) estimator3d.regressor = DDP(estimator3d.regressor, device_ids=[args.gpu], broadcast_buffers=False, find_unused_parameters=True) parsing_net = DDP(parsing_net, device_ids=[args.gpu], broadcast_buffers=False, find_unused_parameters=True) face_encoder = DDP(face_encoder, device_ids=[args.gpu], broadcast_buffers=False, find_unused_parameters=True) models = {} models['3D'] = estimator3d models['Seg'] = parsing_net models['face'] = face_encoder # Losses criterions = {} criterions['L2'] = torch.nn.MSELoss().cuda(args.gpu) criterions['L1'] = torch.nn.L1Loss().cuda(args.gpu) criterions['Cos'] = torch.nn.CosineSimilarity().cuda(args.gpu) cudnn.benchmark = True dataset = 
FirstStageDataset(occ_path=args.train_data_path + '/occluded', \ img_path=args.train_data_path + '/ori_img', \ lmk_path=args.train_data_path + '/landmarks') if args.distributed: train_sampler = torch.utils.data.distributed.DistributedSampler(dataset) else: train_sampler = None train_loader = DataLoader( dataset, batch_size = args.batch_size, shuffle = (train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True ) val_dataset = LP_Dataset(args.val_data_path+'/occluded', args.val_data_path+'/landmarks') val_loader = DataLoader( val_dataset, batch_size = args.batch_size, shuffle = False, drop_last=True, num_workers=args.workers, pin_memory=True ) optimizer = torch.optim.AdamW(estimator3d.regressor.parameters(), lr=args.lr, betas=(0.5,0.999)) scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs*(len(train_loader))) print(len(train_loader)) for epoch in range(args.start_epoch, args.epochs): if args.distributed: train_sampler.set_epoch(epoch) train(models, criterions, optimizer, scheduler, train_loader, val_loader, epoch, args) if torch.distributed.get_rank() == 0: error = validate(models, val_loader, epoch, args) logger.log_validation(error, epoch) torch.save(estimator3d.regressor.module.state_dict(), args.save_path+"/reg_ep%d_%.4f_stage1.pth" % (epoch+1, error)) if __name__=='__main__': parser = argparse.ArgumentParser(description='Occlusion robust 3D face reconstruction') parser.add_argument('--train_data_path', required=True, help='path containing training data folders') parser.add_argument('--val_data_path', required=True, type=str, help='path containing validation data folders') parser.add_argument('--flag', default=None, type=str, help='flag prepended to filenames') parser.add_argument('--save_path', default='saved_models', help='path to save checkpoints') parser.add_argument('--checkpoint', default=None, type=str, help='path to resume checkpoint') parser.add_argument('--start_epoch', 
default=0, type=int, metavar='N') parser.add_argument('-j', '--workers', default=8, type=int, metavar='N', help='number of data loading workers (default: 4)') parser.add_argument('--epochs', default=50, type=int, metavar='N', help='number of total epochs to run') parser.add_argument('--val_iters', default=5000, type=int, metavar='N') parser.add_argument('-b', '--batch-size', default=64, type=int, metavar='N') parser.add_argument('--lr', '--learning-rate', default=1e-5, type=float, metavar='LR', help='initial learning rate', dest='lr') parser.add_argument('--world-size', default=1, type=int, help='number of nodes for distributed training') parser.add_argument('--rank', default=0, type=int, help='node rank for distributed training') parser.add_argument('--dist-url', default='tcp://127.0.0.1:29500', type=str, help='url used to set up distributed training') parser.add_argument('--dist-backend', default='nccl', type=str, help='distributed backend') parser.add_argument('--seed', default=None, type=int, help='seed for initializing training. ') parser.add_argument('--gpu', default=None, type=int, help='GPU id to use.') parser.add_argument('--multiprocessing-distributed', action='store_true', default=True, help='Use multi-processing distributed training to launch ' 'N processes per node, which has N GPUs. This is the ' 'fastest way to use PyTorch for either single node or ' 'multi node data parallel training') args = parser.parse_args() main(args)
random_line_split
train_stage1.py
#!/usr/bin/python # -*- encoding: utf-8 -*- import os, sys, random, warnings import torch import argparse from renderer import Estimator3D from datasets import LP_Dataset, FirstStageDataset from logger import TrainStage1Logger from torch.utils.data import DataLoader from torch.autograd import Variable import torch.autograd as autograd from torchvision.utils import make_grid import torch.nn.functional as F import numpy as np from torch.nn.parallel import DistributedDataParallel as DDP import torch.backends.cudnn as cudnn import torch.distributed as dist import torch.nn as nn import torch.optim import torch.multiprocessing as mp import torch.utils.data import torch.utils.data.distributed import torch.nn.parallel from faceParsing.model import BiSeNet from face_backbone import IR_SE_50 logger = TrainStage1Logger('./logs_stage1') def train(models, criterions, optimizer, scheduler, train_loader, val_loader, epoch, args): landmark_weight = torch.cat([torch.ones((1,28)),20*torch.ones((1,3)),torch.ones((1,6)),torch.ones((1,12))*5, torch.ones((1,11)), 20*torch.ones((1,8))], dim = 1).cuda() mean = torch.FloatTensor([0.485, 0.456, 0.406]).cuda().unsqueeze(0).unsqueeze(-1).unsqueeze(-1) std = torch.FloatTensor([0.229, 0.224, 0.225]).cuda().unsqueeze(0).unsqueeze(-1).unsqueeze(-1) mean_f = torch.FloatTensor([0.5, 0.5, 0.5]).cuda().unsqueeze(0).unsqueeze(-1).unsqueeze(-1) std_f = torch.FloatTensor([0.5, 0.5, 0.5]).cuda().unsqueeze(0).unsqueeze(-1).unsqueeze(-1) for i, (occluded, img, lmk, flag) in enumerate(train_loader): optimizer.zero_grad() # Configure model input occluded = Variable(occluded.type(torch.cuda.FloatTensor), requires_grad=False).cuda() img = Variable(img.type(torch.cuda.FloatTensor), requires_grad=False).cuda() lmk = Variable(lmk.type(torch.cuda.FloatTensor), requires_grad=False).cuda() flag = Variable(flag.type(torch.cuda.FloatTensor), requires_grad=False).cuda() coef = models['3D'].regress_3dmm(occluded[:,[2,1,0],...]) rendered, landmark, reg_loss, gamma_loss = 
models['3D'].reconstruct(coef) rendered = rendered.permute(0,3,1,2).contiguous()[:,[2,1,0],:,:] # pose_loss = criterions['L1'](angles, ang) align_loss = torch.sum(torch.sum(torch.square(landmark-lmk), dim=2)*flag*landmark_weight, dim=1) / 68.0 align_loss = torch.sum(align_loss) / lmk.shape[0] # coefficients regularization 1.7e-3 coef_loss = torch.norm(coef[...,:80]) + 0.1*torch.norm(coef[...,80:144]) + 1.7e-3*torch.norm(coef[...,144:224]) # For skin parsing_input = F.interpolate((img-mean)/std, (512,512)) parsed = models['Seg'](parsing_input) parsed = F.interpolate(parsed, (224,224)) parsed = torch.argmax(parsed, dim=1, keepdim=True) mask = torch.zeros_like(parsed, dtype=torch.float32).cuda() # skin 1, nose 2, eye_glass 3, r_eye 4, l_eye 5, r_brow 6, l_brow 7, r_ear 8, l_ear 9, # inner_mouth 10, u_lip 11, l_lip 12, hair 13 indices = ((parsed>=1).type(torch.BoolTensor) & (parsed<=7).type(torch.BoolTensor) & (parsed!=3).type(torch.BoolTensor)) \ | ((parsed>=11).type(torch.BoolTensor) & (parsed<=12).type(torch.BoolTensor)) mask[indices] = 1.0 # Get vector mask rendered_noise = torch.mean(rendered, dim=1, keepdim=True) > 0.0 vector = torch.zeros_like(rendered_noise, dtype=img.dtype).cuda() vector[rendered_noise] = 1.0 # Synthesize background rendered = img*(1.-vector) + rendered*vector # Perceptual loss affined_r = F.interpolate(rendered[:,:,15:-40,15:-15], (112,112), mode='bilinear', align_corners=True) affined_i = F.interpolate(img[:,:,15:-40,15:-15], (112,112), mode='bilinear', align_corners=True) emb_r = models['face']((affined_r-mean_f)/std_f) emb_i = models['face']((affined_i-mean_f)/std_f) id_loss = torch.mean(1. 
- criterions['Cos'](emb_r, emb_i)) # Reconstruction loss rec_loss = torch.sum(torch.abs(img - rendered), dim=1)*mask rec_loss = torch.sum(rec_loss) / torch.sum(mask) total_loss = coef_loss*1e-4 + rec_loss*0.01 + reg_loss*0.25 + align_loss*0.007 + gamma_loss*10.0 + id_loss*0.15 total_loss.backward() optimizer.step() # logging if torch.distributed.get_rank() == 0: scheduler.step() total_iteration = len(train_loader) * epoch + i logger.log_training(coef_loss.item(), rec_loss.item(), reg_loss.item(), align_loss.item(), id_loss.item(), total_iteration) if total_iteration % 250 == 0: rendered_grid = make_grid(rendered, nrow=args.batch_size//2, normalize=True) lmk = lmk.type(torch.LongTensor) landmark = landmark.type(torch.LongTensor) color1 = torch.FloatTensor([1.0,0.0,0.0]).unsqueeze(-1).unsqueeze(-1) color2 = torch.FloatTensor([0.0,0.0,1.0]).unsqueeze(-1).unsqueeze(-1) for b in range(img.size(0)): for l in range(68): occluded[b, :, lmk[b,l,1]-2:lmk[b,l,1]+2, lmk[b,l,0]-2:lmk[b,l,0]+2] = color1 occluded[b, :, landmark[b,l,1]-2:landmark[b,l,1]+2, landmark[b,l,0]-2:landmark[b,l,0]+2] = color2 input_grid = make_grid(occluded, nrow=args.batch_size//2, normalize=False) logger.log_train_image(input_grid, rendered_grid, total_iteration) sys.stdout.write('\r[Epoch %d/%d][Iter %d/%d][Total_iter %d]' % (epoch, args.epochs, i, len(train_loader), total_iteration)) if i!=0 and total_iteration % args.val_iters == 0: error = validate(models, val_loader, epoch, args) logger.log_validation(error, epoch) torch.save(models['3D'].regressor.module.state_dict(), args.save_path+"/reg_it%d_%.4f_stage1.pth" % (total_iteration, error)) def validate(models, val_loader, epoch, args): with torch.no_grad(): align_error = 0.0 for i, (occluded, lmk) in enumerate(val_loader): print('\rval %d...' 
% (i+1), end='') occluded = Variable(occluded.type(torch.cuda.FloatTensor)).cuda() lmk = Variable(lmk.type(torch.cuda.FloatTensor)).cuda() coef = models['3D'].regress_3dmm(occluded[:,[2,1,0],...]) _, landmark = models['3D'].reconstruct(coef, test=True) align_error += torch.mean(torch.abs(landmark - lmk)) align_error /= len(val_loader) return align_error def main(args): if args.seed is not None: random.seed(args.seed) torch.manual_seed(args.seed) cudnn.deterministic = True warnings.warn('You have chosen to seed training. ' 'This will turn on the CUDNN deterministic setting, ' 'which can slow down your training considerably! ' 'You may see unexpected behavior when restarting ' 'from checkpoints.') if args.gpu is not None: warnings.warn('You have chosen a specific GPU. This will completely ' 'disable data parallelism.') if args.dist_url == "env://" and args.world_size == -1: args.world_size = int(os.environ["WORLD_SIZE"]) args.distributed = args.world_size > 1 or args.multiprocessing_distributed ngpus_per_node = torch.cuda.device_count() if args.multiprocessing_distributed: # Since we have ngpus_per_node processes per node, the total world_size # needs to be adjusted accordingly args.world_size = ngpus_per_node * args.world_size # Use torch.multiprocessing.spawn to launch distributed processes: the # main_worker process function mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args)) else: # Simply call main_worker function main_worker(args.gpu, ngpus_per_node, args) def main_worker(gpu, ngpus_per_node, args): args.gpu = gpu if args.gpu is not None: print("Use GPU: {} for training".format(args.gpu)) if args.distributed: if args.dist_url == "env://" and args.rank == -1: args.rank = int(os.environ["RANK"]) if args.multiprocessing_distributed: # For multiprocessing distributed training, rank needs to be the # global rank among all the processes args.rank = args.rank * ngpus_per_node + gpu print('rank', args.rank) 
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank) torch.cuda.set_device(args.gpu) # Load models estimator3d = Estimator3D(is_cuda=True, batch_size=args.batch_size, model_path=args.checkpoint, test=False, back_white=False, device_id=args.gpu) estimator3d.regressor.cuda(args.gpu) parsing_net = BiSeNet(n_classes=19) parsing_net.cuda(args.gpu) parsing_net.load_state_dict(torch.load('faceParsing/model_final_diss.pth', map_location='cuda:'+str(args.gpu))) parsing_net.eval() face_encoder = IR_SE_50([112,112]) face_encoder.load_state_dict(torch.load('saved_models/face_res_50.pth', map_location='cuda:'+str(args.gpu))) face_encoder.cuda(args.gpu) face_encoder.eval() args.batch_size = int(args.batch_size / ngpus_per_node) args.workers = int(args.workers / ngpus_per_node) estimator3d.regressor = DDP(estimator3d.regressor, device_ids=[args.gpu], broadcast_buffers=False, find_unused_parameters=True) parsing_net = DDP(parsing_net, device_ids=[args.gpu], broadcast_buffers=False, find_unused_parameters=True) face_encoder = DDP(face_encoder, device_ids=[args.gpu], broadcast_buffers=False, find_unused_parameters=True) models = {} models['3D'] = estimator3d models['Seg'] = parsing_net models['face'] = face_encoder # Losses criterions = {} criterions['L2'] = torch.nn.MSELoss().cuda(args.gpu) criterions['L1'] = torch.nn.L1Loss().cuda(args.gpu) criterions['Cos'] = torch.nn.CosineSimilarity().cuda(args.gpu) cudnn.benchmark = True dataset = FirstStageDataset(occ_path=args.train_data_path + '/occluded', \ img_path=args.train_data_path + '/ori_img', \ lmk_path=args.train_data_path + '/landmarks') if args.distributed: train_sampler = torch.utils.data.distributed.DistributedSampler(dataset) else: train_sampler = None train_loader = DataLoader( dataset, batch_size = args.batch_size, shuffle = (train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True ) val_dataset = 
LP_Dataset(args.val_data_path+'/occluded', args.val_data_path+'/landmarks') val_loader = DataLoader( val_dataset, batch_size = args.batch_size, shuffle = False, drop_last=True, num_workers=args.workers, pin_memory=True ) optimizer = torch.optim.AdamW(estimator3d.regressor.parameters(), lr=args.lr, betas=(0.5,0.999)) scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs*(len(train_loader))) print(len(train_loader)) for epoch in range(args.start_epoch, args.epochs): if args.distributed: train_sampler.set_epoch(epoch) train(models, criterions, optimizer, scheduler, train_loader, val_loader, epoch, args) if torch.distributed.get_rank() == 0:
if __name__=='__main__': parser = argparse.ArgumentParser(description='Occlusion robust 3D face reconstruction') parser.add_argument('--train_data_path', required=True, help='path containing training data folders') parser.add_argument('--val_data_path', required=True, type=str, help='path containing validation data folders') parser.add_argument('--flag', default=None, type=str, help='flag prepended to filenames') parser.add_argument('--save_path', default='saved_models', help='path to save checkpoints') parser.add_argument('--checkpoint', default=None, type=str, help='path to resume checkpoint') parser.add_argument('--start_epoch', default=0, type=int, metavar='N') parser.add_argument('-j', '--workers', default=8, type=int, metavar='N', help='number of data loading workers (default: 4)') parser.add_argument('--epochs', default=50, type=int, metavar='N', help='number of total epochs to run') parser.add_argument('--val_iters', default=5000, type=int, metavar='N') parser.add_argument('-b', '--batch-size', default=64, type=int, metavar='N') parser.add_argument('--lr', '--learning-rate', default=1e-5, type=float, metavar='LR', help='initial learning rate', dest='lr') parser.add_argument('--world-size', default=1, type=int, help='number of nodes for distributed training') parser.add_argument('--rank', default=0, type=int, help='node rank for distributed training') parser.add_argument('--dist-url', default='tcp://127.0.0.1:29500', type=str, help='url used to set up distributed training') parser.add_argument('--dist-backend', default='nccl', type=str, help='distributed backend') parser.add_argument('--seed', default=None, type=int, help='seed for initializing training. ') parser.add_argument('--gpu', default=None, type=int, help='GPU id to use.') parser.add_argument('--multiprocessing-distributed', action='store_true', default=True, help='Use multi-processing distributed training to launch ' 'N processes per node, which has N GPUs. 
This is the ' 'fastest way to use PyTorch for either single node or ' 'multi node data parallel training') args = parser.parse_args() main(args)
error = validate(models, val_loader, epoch, args) logger.log_validation(error, epoch) torch.save(estimator3d.regressor.module.state_dict(), args.save_path+"/reg_ep%d_%.4f_stage1.pth" % (epoch+1, error))
conditional_block
train_stage1.py
#!/usr/bin/python # -*- encoding: utf-8 -*- import os, sys, random, warnings import torch import argparse from renderer import Estimator3D from datasets import LP_Dataset, FirstStageDataset from logger import TrainStage1Logger from torch.utils.data import DataLoader from torch.autograd import Variable import torch.autograd as autograd from torchvision.utils import make_grid import torch.nn.functional as F import numpy as np from torch.nn.parallel import DistributedDataParallel as DDP import torch.backends.cudnn as cudnn import torch.distributed as dist import torch.nn as nn import torch.optim import torch.multiprocessing as mp import torch.utils.data import torch.utils.data.distributed import torch.nn.parallel from faceParsing.model import BiSeNet from face_backbone import IR_SE_50 logger = TrainStage1Logger('./logs_stage1') def train(models, criterions, optimizer, scheduler, train_loader, val_loader, epoch, args): landmark_weight = torch.cat([torch.ones((1,28)),20*torch.ones((1,3)),torch.ones((1,6)),torch.ones((1,12))*5, torch.ones((1,11)), 20*torch.ones((1,8))], dim = 1).cuda() mean = torch.FloatTensor([0.485, 0.456, 0.406]).cuda().unsqueeze(0).unsqueeze(-1).unsqueeze(-1) std = torch.FloatTensor([0.229, 0.224, 0.225]).cuda().unsqueeze(0).unsqueeze(-1).unsqueeze(-1) mean_f = torch.FloatTensor([0.5, 0.5, 0.5]).cuda().unsqueeze(0).unsqueeze(-1).unsqueeze(-1) std_f = torch.FloatTensor([0.5, 0.5, 0.5]).cuda().unsqueeze(0).unsqueeze(-1).unsqueeze(-1) for i, (occluded, img, lmk, flag) in enumerate(train_loader): optimizer.zero_grad() # Configure model input occluded = Variable(occluded.type(torch.cuda.FloatTensor), requires_grad=False).cuda() img = Variable(img.type(torch.cuda.FloatTensor), requires_grad=False).cuda() lmk = Variable(lmk.type(torch.cuda.FloatTensor), requires_grad=False).cuda() flag = Variable(flag.type(torch.cuda.FloatTensor), requires_grad=False).cuda() coef = models['3D'].regress_3dmm(occluded[:,[2,1,0],...]) rendered, landmark, reg_loss, gamma_loss = 
models['3D'].reconstruct(coef) rendered = rendered.permute(0,3,1,2).contiguous()[:,[2,1,0],:,:] # pose_loss = criterions['L1'](angles, ang) align_loss = torch.sum(torch.sum(torch.square(landmark-lmk), dim=2)*flag*landmark_weight, dim=1) / 68.0 align_loss = torch.sum(align_loss) / lmk.shape[0] # coefficients regularization 1.7e-3 coef_loss = torch.norm(coef[...,:80]) + 0.1*torch.norm(coef[...,80:144]) + 1.7e-3*torch.norm(coef[...,144:224]) # For skin parsing_input = F.interpolate((img-mean)/std, (512,512)) parsed = models['Seg'](parsing_input) parsed = F.interpolate(parsed, (224,224)) parsed = torch.argmax(parsed, dim=1, keepdim=True) mask = torch.zeros_like(parsed, dtype=torch.float32).cuda() # skin 1, nose 2, eye_glass 3, r_eye 4, l_eye 5, r_brow 6, l_brow 7, r_ear 8, l_ear 9, # inner_mouth 10, u_lip 11, l_lip 12, hair 13 indices = ((parsed>=1).type(torch.BoolTensor) & (parsed<=7).type(torch.BoolTensor) & (parsed!=3).type(torch.BoolTensor)) \ | ((parsed>=11).type(torch.BoolTensor) & (parsed<=12).type(torch.BoolTensor)) mask[indices] = 1.0 # Get vector mask rendered_noise = torch.mean(rendered, dim=1, keepdim=True) > 0.0 vector = torch.zeros_like(rendered_noise, dtype=img.dtype).cuda() vector[rendered_noise] = 1.0 # Synthesize background rendered = img*(1.-vector) + rendered*vector # Perceptual loss affined_r = F.interpolate(rendered[:,:,15:-40,15:-15], (112,112), mode='bilinear', align_corners=True) affined_i = F.interpolate(img[:,:,15:-40,15:-15], (112,112), mode='bilinear', align_corners=True) emb_r = models['face']((affined_r-mean_f)/std_f) emb_i = models['face']((affined_i-mean_f)/std_f) id_loss = torch.mean(1. 
- criterions['Cos'](emb_r, emb_i)) # Reconstruction loss rec_loss = torch.sum(torch.abs(img - rendered), dim=1)*mask rec_loss = torch.sum(rec_loss) / torch.sum(mask) total_loss = coef_loss*1e-4 + rec_loss*0.01 + reg_loss*0.25 + align_loss*0.007 + gamma_loss*10.0 + id_loss*0.15 total_loss.backward() optimizer.step() # logging if torch.distributed.get_rank() == 0: scheduler.step() total_iteration = len(train_loader) * epoch + i logger.log_training(coef_loss.item(), rec_loss.item(), reg_loss.item(), align_loss.item(), id_loss.item(), total_iteration) if total_iteration % 250 == 0: rendered_grid = make_grid(rendered, nrow=args.batch_size//2, normalize=True) lmk = lmk.type(torch.LongTensor) landmark = landmark.type(torch.LongTensor) color1 = torch.FloatTensor([1.0,0.0,0.0]).unsqueeze(-1).unsqueeze(-1) color2 = torch.FloatTensor([0.0,0.0,1.0]).unsqueeze(-1).unsqueeze(-1) for b in range(img.size(0)): for l in range(68): occluded[b, :, lmk[b,l,1]-2:lmk[b,l,1]+2, lmk[b,l,0]-2:lmk[b,l,0]+2] = color1 occluded[b, :, landmark[b,l,1]-2:landmark[b,l,1]+2, landmark[b,l,0]-2:landmark[b,l,0]+2] = color2 input_grid = make_grid(occluded, nrow=args.batch_size//2, normalize=False) logger.log_train_image(input_grid, rendered_grid, total_iteration) sys.stdout.write('\r[Epoch %d/%d][Iter %d/%d][Total_iter %d]' % (epoch, args.epochs, i, len(train_loader), total_iteration)) if i!=0 and total_iteration % args.val_iters == 0: error = validate(models, val_loader, epoch, args) logger.log_validation(error, epoch) torch.save(models['3D'].regressor.module.state_dict(), args.save_path+"/reg_it%d_%.4f_stage1.pth" % (total_iteration, error)) def validate(models, val_loader, epoch, args): with torch.no_grad(): align_error = 0.0 for i, (occluded, lmk) in enumerate(val_loader): print('\rval %d...' 
% (i+1), end='') occluded = Variable(occluded.type(torch.cuda.FloatTensor)).cuda() lmk = Variable(lmk.type(torch.cuda.FloatTensor)).cuda() coef = models['3D'].regress_3dmm(occluded[:,[2,1,0],...]) _, landmark = models['3D'].reconstruct(coef, test=True) align_error += torch.mean(torch.abs(landmark - lmk)) align_error /= len(val_loader) return align_error def main(args): if args.seed is not None: random.seed(args.seed) torch.manual_seed(args.seed) cudnn.deterministic = True warnings.warn('You have chosen to seed training. ' 'This will turn on the CUDNN deterministic setting, ' 'which can slow down your training considerably! ' 'You may see unexpected behavior when restarting ' 'from checkpoints.') if args.gpu is not None: warnings.warn('You have chosen a specific GPU. This will completely ' 'disable data parallelism.') if args.dist_url == "env://" and args.world_size == -1: args.world_size = int(os.environ["WORLD_SIZE"]) args.distributed = args.world_size > 1 or args.multiprocessing_distributed ngpus_per_node = torch.cuda.device_count() if args.multiprocessing_distributed: # Since we have ngpus_per_node processes per node, the total world_size # needs to be adjusted accordingly args.world_size = ngpus_per_node * args.world_size # Use torch.multiprocessing.spawn to launch distributed processes: the # main_worker process function mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args)) else: # Simply call main_worker function main_worker(args.gpu, ngpus_per_node, args) def main_worker(gpu, ngpus_per_node, args):
if __name__=='__main__': parser = argparse.ArgumentParser(description='Occlusion robust 3D face reconstruction') parser.add_argument('--train_data_path', required=True, help='path containing training data folders') parser.add_argument('--val_data_path', required=True, type=str, help='path containing validation data folders') parser.add_argument('--flag', default=None, type=str, help='flag prepended to filenames') parser.add_argument('--save_path', default='saved_models', help='path to save checkpoints') parser.add_argument('--checkpoint', default=None, type=str, help='path to resume checkpoint') parser.add_argument('--start_epoch', default=0, type=int, metavar='N') parser.add_argument('-j', '--workers', default=8, type=int, metavar='N', help='number of data loading workers (default: 4)') parser.add_argument('--epochs', default=50, type=int, metavar='N', help='number of total epochs to run') parser.add_argument('--val_iters', default=5000, type=int, metavar='N') parser.add_argument('-b', '--batch-size', default=64, type=int, metavar='N') parser.add_argument('--lr', '--learning-rate', default=1e-5, type=float, metavar='LR', help='initial learning rate', dest='lr') parser.add_argument('--world-size', default=1, type=int, help='number of nodes for distributed training') parser.add_argument('--rank', default=0, type=int, help='node rank for distributed training') parser.add_argument('--dist-url', default='tcp://127.0.0.1:29500', type=str, help='url used to set up distributed training') parser.add_argument('--dist-backend', default='nccl', type=str, help='distributed backend') parser.add_argument('--seed', default=None, type=int, help='seed for initializing training. ') parser.add_argument('--gpu', default=None, type=int, help='GPU id to use.') parser.add_argument('--multiprocessing-distributed', action='store_true', default=True, help='Use multi-processing distributed training to launch ' 'N processes per node, which has N GPUs. 
This is the ' 'fastest way to use PyTorch for either single node or ' 'multi node data parallel training') args = parser.parse_args() main(args)
args.gpu = gpu if args.gpu is not None: print("Use GPU: {} for training".format(args.gpu)) if args.distributed: if args.dist_url == "env://" and args.rank == -1: args.rank = int(os.environ["RANK"]) if args.multiprocessing_distributed: # For multiprocessing distributed training, rank needs to be the # global rank among all the processes args.rank = args.rank * ngpus_per_node + gpu print('rank', args.rank) dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank) torch.cuda.set_device(args.gpu) # Load models estimator3d = Estimator3D(is_cuda=True, batch_size=args.batch_size, model_path=args.checkpoint, test=False, back_white=False, device_id=args.gpu) estimator3d.regressor.cuda(args.gpu) parsing_net = BiSeNet(n_classes=19) parsing_net.cuda(args.gpu) parsing_net.load_state_dict(torch.load('faceParsing/model_final_diss.pth', map_location='cuda:'+str(args.gpu))) parsing_net.eval() face_encoder = IR_SE_50([112,112]) face_encoder.load_state_dict(torch.load('saved_models/face_res_50.pth', map_location='cuda:'+str(args.gpu))) face_encoder.cuda(args.gpu) face_encoder.eval() args.batch_size = int(args.batch_size / ngpus_per_node) args.workers = int(args.workers / ngpus_per_node) estimator3d.regressor = DDP(estimator3d.regressor, device_ids=[args.gpu], broadcast_buffers=False, find_unused_parameters=True) parsing_net = DDP(parsing_net, device_ids=[args.gpu], broadcast_buffers=False, find_unused_parameters=True) face_encoder = DDP(face_encoder, device_ids=[args.gpu], broadcast_buffers=False, find_unused_parameters=True) models = {} models['3D'] = estimator3d models['Seg'] = parsing_net models['face'] = face_encoder # Losses criterions = {} criterions['L2'] = torch.nn.MSELoss().cuda(args.gpu) criterions['L1'] = torch.nn.L1Loss().cuda(args.gpu) criterions['Cos'] = torch.nn.CosineSimilarity().cuda(args.gpu) cudnn.benchmark = True dataset = FirstStageDataset(occ_path=args.train_data_path + '/occluded', \ 
img_path=args.train_data_path + '/ori_img', \ lmk_path=args.train_data_path + '/landmarks') if args.distributed: train_sampler = torch.utils.data.distributed.DistributedSampler(dataset) else: train_sampler = None train_loader = DataLoader( dataset, batch_size = args.batch_size, shuffle = (train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True ) val_dataset = LP_Dataset(args.val_data_path+'/occluded', args.val_data_path+'/landmarks') val_loader = DataLoader( val_dataset, batch_size = args.batch_size, shuffle = False, drop_last=True, num_workers=args.workers, pin_memory=True ) optimizer = torch.optim.AdamW(estimator3d.regressor.parameters(), lr=args.lr, betas=(0.5,0.999)) scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs*(len(train_loader))) print(len(train_loader)) for epoch in range(args.start_epoch, args.epochs): if args.distributed: train_sampler.set_epoch(epoch) train(models, criterions, optimizer, scheduler, train_loader, val_loader, epoch, args) if torch.distributed.get_rank() == 0: error = validate(models, val_loader, epoch, args) logger.log_validation(error, epoch) torch.save(estimator3d.regressor.module.state_dict(), args.save_path+"/reg_ep%d_%.4f_stage1.pth" % (epoch+1, error))
identifier_body
atariclip.py
#!/usr/bin/env python import ray import base64 import time import gym import requests, json, numpy as np import time import torch import torch.nn as nn import torch.nn.functional as F from torch.autograd import Variable from timers import TimerStat import resnet def preprocess(arr): H, W, C = arr.shape arr = arr.reshape(C, H, W) arr = arr.astype(np.float32) arr = (arr - 128) / 128 return arr def convert_torch(xs): xs = np.array(xs) return Variable(torch.from_numpy(xs)) def from_torch(xs): return xs.data.numpy() class ModelBig(nn.Module): def __init__(self): super(ModelBig, self).__init__() self.conv1 = nn.Conv2d(3, 10, kernel_size=5) self.conv2 = nn.Conv2d(10, 20, kernel_size=5) self.conv2_drop = nn.Dropout2d() self.fc1 = nn.Linear(400, 6) for layer in self.parameters(): layer.requires_grad = False def forward(self, x): x = F.relu(F.max_pool2d(self.conv1(x), 2)) x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) x = F.max_pool2d(x, 3, 3) x = F.max_pool2d(x, 3, 3) x = x.view(-1, 400) x = F.relu(self.fc1(x)) return F.log_softmax(x, dim=1) class ModelSimple(nn.Module): def __init__(self): super(ModelSimple, self).__init__() #self.conv1 = nn.Conv2d(3, 10, kernel_size=5) #self.conv2 = nn.Conv2d(10, 20, kernel_size=5) #self.conv2_drop = nn.Dropout2d() self.fc1 = nn.Linear(100800, 6) # self.fc2 = nn.Linear(50, 6) for layer in self.parameters(): layer.requires_grad = False def forward(self, x): #x = F.relu(F.max_pool2d(self.conv1(x), 2)) #x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) #x = F.max_pool2d(x, 3, 3) #x = F.max_pool2d(x, 3, 3) x = x.view(-1, 100800) x = F.relu(self.fc1(x)) #x = F.dropout(x, training=self.training) #x = self.fc2(x) return F.log_softmax(x, dim=1) class ModelDummy(nn.Module): def __init__(self): super(ModelDummy, self).__init__() self.fc1 = nn.Linear(1, 1) # self.fc2 = nn.Linear(50, 6) for layer in self.parameters(): layer.requires_grad = False def forward(self, x): num = x.size(0) return convert_torch(np.array([1 for i in 
range(num)])) def evaluate_model(model, xs): """ Args: xs: (N, shape) """ res = model(convert_torch(xs)) npar = from_torch(res) if len(npar.shape) == 1: return npar # for dummy eval return from_torch(res).argmax(axis=1) def get_model(model_name="simple"): if model_name == "big": return ModelBig() elif model_name == "simple": return ModelSimple() elif model_name == "dummy": return ModelDummy() class Simulator(object): def __init__(self, args): self._env = gym.make(args.env) _state = self._env.reset() self._init_state = np.array([preprocess(_state) for i in range(args.batch)]) if args.batch == 0: self._init_state = np.array([0], dtype=np.float32) def onestep(self, arr, start=False): self._init_state += 0.001 # if start: # return self._init_state # state = self._env.step(arr)[0] return self._init_state
return self._init_state class Clip(object): def __init__(self, shape, model_name): from clipper_admin import ClipperConnection, DockerContainerManager from clipper_admin.deployers import python as python_deployer from clipper_admin.deployers import pytorch as pytorch_deployer self.clipper_conn = ClipperConnection(DockerContainerManager()) try: self.clipper_conn.connect() self.clipper_conn.stop_all() except Exception: pass self.clipper_conn.start_clipper() self.clipper_conn.register_application( name="hello-world", input_type="strings", default_output="-1.0", slo_micros=10**8) ptmodel = get_model(model_name) def policy(model, x): print(len(x)) batch = (len(x)) arr = [] for j in x: print(type(j), len(j)) res = np.frombuffer(base64.decodestring(j), dtype=np.float32) print(res.shape) arr += [res] x = np.array(arr) x = x.reshape((-1,) + shape[1:]) print("new shape", x.shape) return evaluate_model(model, x).reshape((batch, shape[0])) pytorch_deployer.deploy_pytorch_model( self.clipper_conn, name="policy", version=1, input_type="strings", func=policy, pytorch_model=ptmodel) self.clipper_conn.link_model_to_app( app_name="hello-world", model_name="policy") # class PolicyActor(object): # def __init__(self): # self.ptmodel = Model() # # def query(self, state): # state = [state] # return evaluate_model(self.ptmodel, state) class ClipperRunner(Simulator): def __init__(self, args): super(ClipperRunner, self).__init__(args) self.shape = self.initial_state().shape self._headers = {"Content-type": "application/json"} def run(self, steps): state = self.initial_state() serialize_timer = TimerStat() step_timer = TimerStat() for i in range(steps): with step_timer: with serialize_timer: s = base64.b64encode(state) data = json.dumps({"input": s}) res = requests.post( "http://localhost:1337/hello-world/predict", headers=self._headers, data=data).json() out = res['output'] state = self.onestep(out) print("Serialize", serialize_timer.mean) print("Step", step_timer.mean) # class 
RayRunner(Simulator): # def __init__(self, env): # super(RayRunner, self).__init__(env) # self.shape = self.initial_state().shape # self.timers = {"query": TimerStat(), "step": TimerStat()} # def run(self, steps, policy_actor): # state = self.initial_state() # for i in range(steps): # with self.timers["query"]: # out = ray.get(policy_actor.query.remote(state)) # with self.timers["step"]: # state = self.onestep(out) # def stats(self): # return {k: v.mean for k, v in self.timers.items()} def eval_ray_batch(args): model = get_model(args.model) RemoteSimulator = ray.remote(Simulator) simulators = [RemoteSimulator.remote(args) for i in range(args.num_sims)] ac = [None for i in range(args.num_sims)] init_shape = ray.get(simulators[0].initial_state.remote()).shape start = time.time() remaining = {sim.onestep.remote(a, i == 0): sim for a, sim in zip(ac, simulators)} counter = {sim: 0 for sim in simulators} timers = {k: TimerStat() for k in ["fwd", "wait", "get", "step"]} while any(v < args.iters for v in counter.values()): # TODO: consider evaluating as ray.wait with timers["step"]: with timers["wait"]: [data_fut], _ = ray.wait(list(remaining)) with timers["get"]: xs = ray.get(data_fut) sim = remaining.pop(data_fut) counter[sim] += 1 with timers["fwd"]: ac = evaluate_model(model, xs) if counter[sim] < args.iters: remaining[sim.onestep.remote(ac[0], i == 0)] = sim print("Took %f sec..." % (time.time() - start)) print(xs.shape) print("\n".join(["%s: %0.5f" % (k, t.mean) for k, t in timers.items()])) def eval_simple(args): model = get_model(args.model) sim = Simulator(args) fwd = TimerStat() start = time.time() ac = [None] for i in range(args.iters): xs = sim.onestep(ac[0], i == 0) with fwd: ac = evaluate_model(model, xs) print("Took %f sec..." 
% (time.time() - start)) print(fwd.mean, "Avg Fwd pass..") # def eval_ray(args): # RemoteRayRunner = ray.remote(RayRunner) # simulators = [RemoteRayRunner.remote(args) for i in range(args.num_sims)] # RemotePolicy = ray.remote(PolicyActor) # p = RemotePolicy.remote() # start = time.time() # ray.get([sim.run.remote(args.iters, p) for sim in simulators]) # print("Took %0.4f sec..." % (time.time() - start)) # stats = ray.get(simulators[0].stats.remote()) # print(stats) def eval_clipper(args): RemoteClipperRunner = ray.remote(ClipperRunner) simulators = [RemoteClipperRunner.remote(args) for i in range(args.num_sims)] c = Clip(ray.get(simulators[0].initial_state.remote()).shape, args.model) start = time.time() ray.get([sim.run.remote(args.iters) for sim in simulators]) print("Took %f sec..." % (time.time() - start)) import argparse parser = argparse.ArgumentParser() parser.add_argument("--runtime", type=str, choices=["ray", "clipper", "simple"], help="Choose between Ray or Clipper") parser.add_argument("--env", type=str, default="Pong-v0", help="Env Keyword for starting a simulator") parser.add_argument("--batch", type=int, default=1, help="Size of data") parser.add_argument("--num-sims", type=int, default=1, help="Number of simultaneous simulations to evaluate") parser.add_argument("--iters", type=int, default=500, help="Number of steps per sim to evaluate") parser.add_argument("--model", type=str, default="simple", help="Use a bigger CNN model.") if __name__ == "__main__": args = parser.parse_args() if args.runtime == "ray": import ray ray.init() eval_ray_batch(args) elif args.runtime == "clipper": import ray ray.init() eval_clipper(args) elif args.runtime == "simple": eval_simple(args)
def initial_state(self):
random_line_split
atariclip.py
#!/usr/bin/env python import ray import base64 import time import gym import requests, json, numpy as np import time import torch import torch.nn as nn import torch.nn.functional as F from torch.autograd import Variable from timers import TimerStat import resnet def preprocess(arr): H, W, C = arr.shape arr = arr.reshape(C, H, W) arr = arr.astype(np.float32) arr = (arr - 128) / 128 return arr def convert_torch(xs): xs = np.array(xs) return Variable(torch.from_numpy(xs)) def from_torch(xs): return xs.data.numpy() class ModelBig(nn.Module): def __init__(self): super(ModelBig, self).__init__() self.conv1 = nn.Conv2d(3, 10, kernel_size=5) self.conv2 = nn.Conv2d(10, 20, kernel_size=5) self.conv2_drop = nn.Dropout2d() self.fc1 = nn.Linear(400, 6) for layer in self.parameters(): layer.requires_grad = False def forward(self, x): x = F.relu(F.max_pool2d(self.conv1(x), 2)) x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) x = F.max_pool2d(x, 3, 3) x = F.max_pool2d(x, 3, 3) x = x.view(-1, 400) x = F.relu(self.fc1(x)) return F.log_softmax(x, dim=1) class ModelSimple(nn.Module): def __init__(self): super(ModelSimple, self).__init__() #self.conv1 = nn.Conv2d(3, 10, kernel_size=5) #self.conv2 = nn.Conv2d(10, 20, kernel_size=5) #self.conv2_drop = nn.Dropout2d() self.fc1 = nn.Linear(100800, 6) # self.fc2 = nn.Linear(50, 6) for layer in self.parameters(): layer.requires_grad = False def forward(self, x): #x = F.relu(F.max_pool2d(self.conv1(x), 2)) #x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) #x = F.max_pool2d(x, 3, 3) #x = F.max_pool2d(x, 3, 3) x = x.view(-1, 100800) x = F.relu(self.fc1(x)) #x = F.dropout(x, training=self.training) #x = self.fc2(x) return F.log_softmax(x, dim=1) class ModelDummy(nn.Module): def __init__(self): super(ModelDummy, self).__init__() self.fc1 = nn.Linear(1, 1) # self.fc2 = nn.Linear(50, 6) for layer in self.parameters(): layer.requires_grad = False def forward(self, x): num = x.size(0) return convert_torch(np.array([1 for i in 
range(num)])) def evaluate_model(model, xs): """ Args: xs: (N, shape) """ res = model(convert_torch(xs)) npar = from_torch(res) if len(npar.shape) == 1: return npar # for dummy eval return from_torch(res).argmax(axis=1) def get_model(model_name="simple"): if model_name == "big": return ModelBig() elif model_name == "simple": return ModelSimple() elif model_name == "dummy": return ModelDummy() class Simulator(object): def __init__(self, args): self._env = gym.make(args.env) _state = self._env.reset() self._init_state = np.array([preprocess(_state) for i in range(args.batch)]) if args.batch == 0: self._init_state = np.array([0], dtype=np.float32) def onestep(self, arr, start=False): self._init_state += 0.001 # if start: # return self._init_state # state = self._env.step(arr)[0] return self._init_state def initial_state(self): return self._init_state class Clip(object): def __init__(self, shape, model_name): from clipper_admin import ClipperConnection, DockerContainerManager from clipper_admin.deployers import python as python_deployer from clipper_admin.deployers import pytorch as pytorch_deployer self.clipper_conn = ClipperConnection(DockerContainerManager()) try: self.clipper_conn.connect() self.clipper_conn.stop_all() except Exception: pass self.clipper_conn.start_clipper() self.clipper_conn.register_application( name="hello-world", input_type="strings", default_output="-1.0", slo_micros=10**8) ptmodel = get_model(model_name) def policy(model, x): print(len(x)) batch = (len(x)) arr = [] for j in x: print(type(j), len(j)) res = np.frombuffer(base64.decodestring(j), dtype=np.float32) print(res.shape) arr += [res] x = np.array(arr) x = x.reshape((-1,) + shape[1:]) print("new shape", x.shape) return evaluate_model(model, x).reshape((batch, shape[0])) pytorch_deployer.deploy_pytorch_model( self.clipper_conn, name="policy", version=1, input_type="strings", func=policy, pytorch_model=ptmodel) self.clipper_conn.link_model_to_app( app_name="hello-world", 
model_name="policy") # class PolicyActor(object): # def __init__(self): # self.ptmodel = Model() # # def query(self, state): # state = [state] # return evaluate_model(self.ptmodel, state) class ClipperRunner(Simulator): def __init__(self, args): super(ClipperRunner, self).__init__(args) self.shape = self.initial_state().shape self._headers = {"Content-type": "application/json"} def run(self, steps): state = self.initial_state() serialize_timer = TimerStat() step_timer = TimerStat() for i in range(steps): with step_timer: with serialize_timer: s = base64.b64encode(state) data = json.dumps({"input": s}) res = requests.post( "http://localhost:1337/hello-world/predict", headers=self._headers, data=data).json() out = res['output'] state = self.onestep(out) print("Serialize", serialize_timer.mean) print("Step", step_timer.mean) # class RayRunner(Simulator): # def __init__(self, env): # super(RayRunner, self).__init__(env) # self.shape = self.initial_state().shape # self.timers = {"query": TimerStat(), "step": TimerStat()} # def run(self, steps, policy_actor): # state = self.initial_state() # for i in range(steps): # with self.timers["query"]: # out = ray.get(policy_actor.query.remote(state)) # with self.timers["step"]: # state = self.onestep(out) # def stats(self): # return {k: v.mean for k, v in self.timers.items()} def eval_ray_batch(args): model = get_model(args.model) RemoteSimulator = ray.remote(Simulator) simulators = [RemoteSimulator.remote(args) for i in range(args.num_sims)] ac = [None for i in range(args.num_sims)] init_shape = ray.get(simulators[0].initial_state.remote()).shape start = time.time() remaining = {sim.onestep.remote(a, i == 0): sim for a, sim in zip(ac, simulators)} counter = {sim: 0 for sim in simulators} timers = {k: TimerStat() for k in ["fwd", "wait", "get", "step"]} while any(v < args.iters for v in counter.values()): # TODO: consider evaluating as ray.wait with timers["step"]: with timers["wait"]: [data_fut], _ = ray.wait(list(remaining)) 
with timers["get"]: xs = ray.get(data_fut) sim = remaining.pop(data_fut) counter[sim] += 1 with timers["fwd"]: ac = evaluate_model(model, xs) if counter[sim] < args.iters: remaining[sim.onestep.remote(ac[0], i == 0)] = sim print("Took %f sec..." % (time.time() - start)) print(xs.shape) print("\n".join(["%s: %0.5f" % (k, t.mean) for k, t in timers.items()])) def eval_simple(args): model = get_model(args.model) sim = Simulator(args) fwd = TimerStat() start = time.time() ac = [None] for i in range(args.iters): xs = sim.onestep(ac[0], i == 0) with fwd: ac = evaluate_model(model, xs) print("Took %f sec..." % (time.time() - start)) print(fwd.mean, "Avg Fwd pass..") # def eval_ray(args): # RemoteRayRunner = ray.remote(RayRunner) # simulators = [RemoteRayRunner.remote(args) for i in range(args.num_sims)] # RemotePolicy = ray.remote(PolicyActor) # p = RemotePolicy.remote() # start = time.time() # ray.get([sim.run.remote(args.iters, p) for sim in simulators]) # print("Took %0.4f sec..." % (time.time() - start)) # stats = ray.get(simulators[0].stats.remote()) # print(stats) def eval_clipper(args):
import argparse parser = argparse.ArgumentParser() parser.add_argument("--runtime", type=str, choices=["ray", "clipper", "simple"], help="Choose between Ray or Clipper") parser.add_argument("--env", type=str, default="Pong-v0", help="Env Keyword for starting a simulator") parser.add_argument("--batch", type=int, default=1, help="Size of data") parser.add_argument("--num-sims", type=int, default=1, help="Number of simultaneous simulations to evaluate") parser.add_argument("--iters", type=int, default=500, help="Number of steps per sim to evaluate") parser.add_argument("--model", type=str, default="simple", help="Use a bigger CNN model.") if __name__ == "__main__": args = parser.parse_args() if args.runtime == "ray": import ray ray.init() eval_ray_batch(args) elif args.runtime == "clipper": import ray ray.init() eval_clipper(args) elif args.runtime == "simple": eval_simple(args)
RemoteClipperRunner = ray.remote(ClipperRunner) simulators = [RemoteClipperRunner.remote(args) for i in range(args.num_sims)] c = Clip(ray.get(simulators[0].initial_state.remote()).shape, args.model) start = time.time() ray.get([sim.run.remote(args.iters) for sim in simulators]) print("Took %f sec..." % (time.time() - start))
identifier_body
atariclip.py
#!/usr/bin/env python import ray import base64 import time import gym import requests, json, numpy as np import time import torch import torch.nn as nn import torch.nn.functional as F from torch.autograd import Variable from timers import TimerStat import resnet def preprocess(arr): H, W, C = arr.shape arr = arr.reshape(C, H, W) arr = arr.astype(np.float32) arr = (arr - 128) / 128 return arr def convert_torch(xs): xs = np.array(xs) return Variable(torch.from_numpy(xs)) def from_torch(xs): return xs.data.numpy() class ModelBig(nn.Module): def __init__(self): super(ModelBig, self).__init__() self.conv1 = nn.Conv2d(3, 10, kernel_size=5) self.conv2 = nn.Conv2d(10, 20, kernel_size=5) self.conv2_drop = nn.Dropout2d() self.fc1 = nn.Linear(400, 6) for layer in self.parameters(): layer.requires_grad = False def forward(self, x): x = F.relu(F.max_pool2d(self.conv1(x), 2)) x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) x = F.max_pool2d(x, 3, 3) x = F.max_pool2d(x, 3, 3) x = x.view(-1, 400) x = F.relu(self.fc1(x)) return F.log_softmax(x, dim=1) class ModelSimple(nn.Module): def __init__(self): super(ModelSimple, self).__init__() #self.conv1 = nn.Conv2d(3, 10, kernel_size=5) #self.conv2 = nn.Conv2d(10, 20, kernel_size=5) #self.conv2_drop = nn.Dropout2d() self.fc1 = nn.Linear(100800, 6) # self.fc2 = nn.Linear(50, 6) for layer in self.parameters(): layer.requires_grad = False def forward(self, x): #x = F.relu(F.max_pool2d(self.conv1(x), 2)) #x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) #x = F.max_pool2d(x, 3, 3) #x = F.max_pool2d(x, 3, 3) x = x.view(-1, 100800) x = F.relu(self.fc1(x)) #x = F.dropout(x, training=self.training) #x = self.fc2(x) return F.log_softmax(x, dim=1) class ModelDummy(nn.Module): def __init__(self): super(ModelDummy, self).__init__() self.fc1 = nn.Linear(1, 1) # self.fc2 = nn.Linear(50, 6) for layer in self.parameters(): layer.requires_grad = False def forward(self, x): num = x.size(0) return convert_torch(np.array([1 for i in 
range(num)])) def evaluate_model(model, xs): """ Args: xs: (N, shape) """ res = model(convert_torch(xs)) npar = from_torch(res) if len(npar.shape) == 1: return npar # for dummy eval return from_torch(res).argmax(axis=1) def get_model(model_name="simple"): if model_name == "big": return ModelBig() elif model_name == "simple": return ModelSimple() elif model_name == "dummy": return ModelDummy() class Simulator(object): def __init__(self, args): self._env = gym.make(args.env) _state = self._env.reset() self._init_state = np.array([preprocess(_state) for i in range(args.batch)]) if args.batch == 0: self._init_state = np.array([0], dtype=np.float32) def onestep(self, arr, start=False): self._init_state += 0.001 # if start: # return self._init_state # state = self._env.step(arr)[0] return self._init_state def initial_state(self): return self._init_state class Clip(object): def __init__(self, shape, model_name): from clipper_admin import ClipperConnection, DockerContainerManager from clipper_admin.deployers import python as python_deployer from clipper_admin.deployers import pytorch as pytorch_deployer self.clipper_conn = ClipperConnection(DockerContainerManager()) try: self.clipper_conn.connect() self.clipper_conn.stop_all() except Exception: pass self.clipper_conn.start_clipper() self.clipper_conn.register_application( name="hello-world", input_type="strings", default_output="-1.0", slo_micros=10**8) ptmodel = get_model(model_name) def policy(model, x): print(len(x)) batch = (len(x)) arr = [] for j in x: print(type(j), len(j)) res = np.frombuffer(base64.decodestring(j), dtype=np.float32) print(res.shape) arr += [res] x = np.array(arr) x = x.reshape((-1,) + shape[1:]) print("new shape", x.shape) return evaluate_model(model, x).reshape((batch, shape[0])) pytorch_deployer.deploy_pytorch_model( self.clipper_conn, name="policy", version=1, input_type="strings", func=policy, pytorch_model=ptmodel) self.clipper_conn.link_model_to_app( app_name="hello-world", 
model_name="policy") # class PolicyActor(object): # def __init__(self): # self.ptmodel = Model() # # def query(self, state): # state = [state] # return evaluate_model(self.ptmodel, state) class ClipperRunner(Simulator): def __init__(self, args): super(ClipperRunner, self).__init__(args) self.shape = self.initial_state().shape self._headers = {"Content-type": "application/json"} def run(self, steps): state = self.initial_state() serialize_timer = TimerStat() step_timer = TimerStat() for i in range(steps): with step_timer: with serialize_timer: s = base64.b64encode(state) data = json.dumps({"input": s}) res = requests.post( "http://localhost:1337/hello-world/predict", headers=self._headers, data=data).json() out = res['output'] state = self.onestep(out) print("Serialize", serialize_timer.mean) print("Step", step_timer.mean) # class RayRunner(Simulator): # def __init__(self, env): # super(RayRunner, self).__init__(env) # self.shape = self.initial_state().shape # self.timers = {"query": TimerStat(), "step": TimerStat()} # def run(self, steps, policy_actor): # state = self.initial_state() # for i in range(steps): # with self.timers["query"]: # out = ray.get(policy_actor.query.remote(state)) # with self.timers["step"]: # state = self.onestep(out) # def stats(self): # return {k: v.mean for k, v in self.timers.items()} def eval_ray_batch(args): model = get_model(args.model) RemoteSimulator = ray.remote(Simulator) simulators = [RemoteSimulator.remote(args) for i in range(args.num_sims)] ac = [None for i in range(args.num_sims)] init_shape = ray.get(simulators[0].initial_state.remote()).shape start = time.time() remaining = {sim.onestep.remote(a, i == 0): sim for a, sim in zip(ac, simulators)} counter = {sim: 0 for sim in simulators} timers = {k: TimerStat() for k in ["fwd", "wait", "get", "step"]} while any(v < args.iters for v in counter.values()): # TODO: consider evaluating as ray.wait with timers["step"]: with timers["wait"]: [data_fut], _ = ray.wait(list(remaining)) 
with timers["get"]: xs = ray.get(data_fut) sim = remaining.pop(data_fut) counter[sim] += 1 with timers["fwd"]: ac = evaluate_model(model, xs) if counter[sim] < args.iters: remaining[sim.onestep.remote(ac[0], i == 0)] = sim print("Took %f sec..." % (time.time() - start)) print(xs.shape) print("\n".join(["%s: %0.5f" % (k, t.mean) for k, t in timers.items()])) def eval_simple(args): model = get_model(args.model) sim = Simulator(args) fwd = TimerStat() start = time.time() ac = [None] for i in range(args.iters): xs = sim.onestep(ac[0], i == 0) with fwd: ac = evaluate_model(model, xs) print("Took %f sec..." % (time.time() - start)) print(fwd.mean, "Avg Fwd pass..") # def eval_ray(args): # RemoteRayRunner = ray.remote(RayRunner) # simulators = [RemoteRayRunner.remote(args) for i in range(args.num_sims)] # RemotePolicy = ray.remote(PolicyActor) # p = RemotePolicy.remote() # start = time.time() # ray.get([sim.run.remote(args.iters, p) for sim in simulators]) # print("Took %0.4f sec..." % (time.time() - start)) # stats = ray.get(simulators[0].stats.remote()) # print(stats) def eval_clipper(args): RemoteClipperRunner = ray.remote(ClipperRunner) simulators = [RemoteClipperRunner.remote(args) for i in range(args.num_sims)] c = Clip(ray.get(simulators[0].initial_state.remote()).shape, args.model) start = time.time() ray.get([sim.run.remote(args.iters) for sim in simulators]) print("Took %f sec..." 
% (time.time() - start)) import argparse parser = argparse.ArgumentParser() parser.add_argument("--runtime", type=str, choices=["ray", "clipper", "simple"], help="Choose between Ray or Clipper") parser.add_argument("--env", type=str, default="Pong-v0", help="Env Keyword for starting a simulator") parser.add_argument("--batch", type=int, default=1, help="Size of data") parser.add_argument("--num-sims", type=int, default=1, help="Number of simultaneous simulations to evaluate") parser.add_argument("--iters", type=int, default=500, help="Number of steps per sim to evaluate") parser.add_argument("--model", type=str, default="simple", help="Use a bigger CNN model.") if __name__ == "__main__": args = parser.parse_args() if args.runtime == "ray":
elif args.runtime == "clipper": import ray ray.init() eval_clipper(args) elif args.runtime == "simple": eval_simple(args)
import ray ray.init() eval_ray_batch(args)
conditional_block
atariclip.py
#!/usr/bin/env python import ray import base64 import time import gym import requests, json, numpy as np import time import torch import torch.nn as nn import torch.nn.functional as F from torch.autograd import Variable from timers import TimerStat import resnet def preprocess(arr): H, W, C = arr.shape arr = arr.reshape(C, H, W) arr = arr.astype(np.float32) arr = (arr - 128) / 128 return arr def convert_torch(xs): xs = np.array(xs) return Variable(torch.from_numpy(xs)) def from_torch(xs): return xs.data.numpy() class ModelBig(nn.Module): def __init__(self): super(ModelBig, self).__init__() self.conv1 = nn.Conv2d(3, 10, kernel_size=5) self.conv2 = nn.Conv2d(10, 20, kernel_size=5) self.conv2_drop = nn.Dropout2d() self.fc1 = nn.Linear(400, 6) for layer in self.parameters(): layer.requires_grad = False def forward(self, x): x = F.relu(F.max_pool2d(self.conv1(x), 2)) x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) x = F.max_pool2d(x, 3, 3) x = F.max_pool2d(x, 3, 3) x = x.view(-1, 400) x = F.relu(self.fc1(x)) return F.log_softmax(x, dim=1) class ModelSimple(nn.Module): def __init__(self): super(ModelSimple, self).__init__() #self.conv1 = nn.Conv2d(3, 10, kernel_size=5) #self.conv2 = nn.Conv2d(10, 20, kernel_size=5) #self.conv2_drop = nn.Dropout2d() self.fc1 = nn.Linear(100800, 6) # self.fc2 = nn.Linear(50, 6) for layer in self.parameters(): layer.requires_grad = False def forward(self, x): #x = F.relu(F.max_pool2d(self.conv1(x), 2)) #x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) #x = F.max_pool2d(x, 3, 3) #x = F.max_pool2d(x, 3, 3) x = x.view(-1, 100800) x = F.relu(self.fc1(x)) #x = F.dropout(x, training=self.training) #x = self.fc2(x) return F.log_softmax(x, dim=1) class ModelDummy(nn.Module): def __init__(self): super(ModelDummy, self).__init__() self.fc1 = nn.Linear(1, 1) # self.fc2 = nn.Linear(50, 6) for layer in self.parameters(): layer.requires_grad = False def forward(self, x): num = x.size(0) return convert_torch(np.array([1 for i in 
range(num)])) def evaluate_model(model, xs): """ Args: xs: (N, shape) """ res = model(convert_torch(xs)) npar = from_torch(res) if len(npar.shape) == 1: return npar # for dummy eval return from_torch(res).argmax(axis=1) def get_model(model_name="simple"): if model_name == "big": return ModelBig() elif model_name == "simple": return ModelSimple() elif model_name == "dummy": return ModelDummy() class Simulator(object): def __init__(self, args): self._env = gym.make(args.env) _state = self._env.reset() self._init_state = np.array([preprocess(_state) for i in range(args.batch)]) if args.batch == 0: self._init_state = np.array([0], dtype=np.float32) def onestep(self, arr, start=False): self._init_state += 0.001 # if start: # return self._init_state # state = self._env.step(arr)[0] return self._init_state def initial_state(self): return self._init_state class Clip(object): def __init__(self, shape, model_name): from clipper_admin import ClipperConnection, DockerContainerManager from clipper_admin.deployers import python as python_deployer from clipper_admin.deployers import pytorch as pytorch_deployer self.clipper_conn = ClipperConnection(DockerContainerManager()) try: self.clipper_conn.connect() self.clipper_conn.stop_all() except Exception: pass self.clipper_conn.start_clipper() self.clipper_conn.register_application( name="hello-world", input_type="strings", default_output="-1.0", slo_micros=10**8) ptmodel = get_model(model_name) def policy(model, x): print(len(x)) batch = (len(x)) arr = [] for j in x: print(type(j), len(j)) res = np.frombuffer(base64.decodestring(j), dtype=np.float32) print(res.shape) arr += [res] x = np.array(arr) x = x.reshape((-1,) + shape[1:]) print("new shape", x.shape) return evaluate_model(model, x).reshape((batch, shape[0])) pytorch_deployer.deploy_pytorch_model( self.clipper_conn, name="policy", version=1, input_type="strings", func=policy, pytorch_model=ptmodel) self.clipper_conn.link_model_to_app( app_name="hello-world", 
model_name="policy") # class PolicyActor(object): # def __init__(self): # self.ptmodel = Model() # # def query(self, state): # state = [state] # return evaluate_model(self.ptmodel, state) class ClipperRunner(Simulator): def __init__(self, args): super(ClipperRunner, self).__init__(args) self.shape = self.initial_state().shape self._headers = {"Content-type": "application/json"} def run(self, steps): state = self.initial_state() serialize_timer = TimerStat() step_timer = TimerStat() for i in range(steps): with step_timer: with serialize_timer: s = base64.b64encode(state) data = json.dumps({"input": s}) res = requests.post( "http://localhost:1337/hello-world/predict", headers=self._headers, data=data).json() out = res['output'] state = self.onestep(out) print("Serialize", serialize_timer.mean) print("Step", step_timer.mean) # class RayRunner(Simulator): # def __init__(self, env): # super(RayRunner, self).__init__(env) # self.shape = self.initial_state().shape # self.timers = {"query": TimerStat(), "step": TimerStat()} # def run(self, steps, policy_actor): # state = self.initial_state() # for i in range(steps): # with self.timers["query"]: # out = ray.get(policy_actor.query.remote(state)) # with self.timers["step"]: # state = self.onestep(out) # def stats(self): # return {k: v.mean for k, v in self.timers.items()} def eval_ray_batch(args): model = get_model(args.model) RemoteSimulator = ray.remote(Simulator) simulators = [RemoteSimulator.remote(args) for i in range(args.num_sims)] ac = [None for i in range(args.num_sims)] init_shape = ray.get(simulators[0].initial_state.remote()).shape start = time.time() remaining = {sim.onestep.remote(a, i == 0): sim for a, sim in zip(ac, simulators)} counter = {sim: 0 for sim in simulators} timers = {k: TimerStat() for k in ["fwd", "wait", "get", "step"]} while any(v < args.iters for v in counter.values()): # TODO: consider evaluating as ray.wait with timers["step"]: with timers["wait"]: [data_fut], _ = ray.wait(list(remaining)) 
with timers["get"]: xs = ray.get(data_fut) sim = remaining.pop(data_fut) counter[sim] += 1 with timers["fwd"]: ac = evaluate_model(model, xs) if counter[sim] < args.iters: remaining[sim.onestep.remote(ac[0], i == 0)] = sim print("Took %f sec..." % (time.time() - start)) print(xs.shape) print("\n".join(["%s: %0.5f" % (k, t.mean) for k, t in timers.items()])) def eval_simple(args): model = get_model(args.model) sim = Simulator(args) fwd = TimerStat() start = time.time() ac = [None] for i in range(args.iters): xs = sim.onestep(ac[0], i == 0) with fwd: ac = evaluate_model(model, xs) print("Took %f sec..." % (time.time() - start)) print(fwd.mean, "Avg Fwd pass..") # def eval_ray(args): # RemoteRayRunner = ray.remote(RayRunner) # simulators = [RemoteRayRunner.remote(args) for i in range(args.num_sims)] # RemotePolicy = ray.remote(PolicyActor) # p = RemotePolicy.remote() # start = time.time() # ray.get([sim.run.remote(args.iters, p) for sim in simulators]) # print("Took %0.4f sec..." % (time.time() - start)) # stats = ray.get(simulators[0].stats.remote()) # print(stats) def
(args): RemoteClipperRunner = ray.remote(ClipperRunner) simulators = [RemoteClipperRunner.remote(args) for i in range(args.num_sims)] c = Clip(ray.get(simulators[0].initial_state.remote()).shape, args.model) start = time.time() ray.get([sim.run.remote(args.iters) for sim in simulators]) print("Took %f sec..." % (time.time() - start)) import argparse parser = argparse.ArgumentParser() parser.add_argument("--runtime", type=str, choices=["ray", "clipper", "simple"], help="Choose between Ray or Clipper") parser.add_argument("--env", type=str, default="Pong-v0", help="Env Keyword for starting a simulator") parser.add_argument("--batch", type=int, default=1, help="Size of data") parser.add_argument("--num-sims", type=int, default=1, help="Number of simultaneous simulations to evaluate") parser.add_argument("--iters", type=int, default=500, help="Number of steps per sim to evaluate") parser.add_argument("--model", type=str, default="simple", help="Use a bigger CNN model.") if __name__ == "__main__": args = parser.parse_args() if args.runtime == "ray": import ray ray.init() eval_ray_batch(args) elif args.runtime == "clipper": import ray ray.init() eval_clipper(args) elif args.runtime == "simple": eval_simple(args)
eval_clipper
identifier_name
olm_parser.rs
use crate::graph::graph::{Rules, Edges, Graph, Vertices}; use crate::io::{ limit_iter::Limit, sub_matrix::SubMatrix, tri_wave::TriWave, utils::{DiagonalReflection, Reflection, Rotation}, }; use crate::utils::{index_to_coords, is_inside, coords_to_index}; use crate::wfc::collapse; use bimap::BiMap; use hashbrown::HashMap; use image::{imageops, Rgb, RgbImage, Pixel}; use itertools::Itertools; use nalgebra::DMatrix; use std::ops::{IndexMut, Index, AddAssign}; use std::convert::TryFrom; use indexmap::IndexMap; use std::ops::Not; use crate::MSu16xNU; type Chunk = DMatrix<usize>; type PixelKeys = BiMap<usize, Rgb<u8>>; // TODO: handle unwrap of image::open properly pub fn parse(filename: &str, chunk_size: usize) -> (Rules, PixelKeys, MSu16xNU, IndexMap<Chunk, u16>) { let img = image::open(filename).unwrap().to_rgb8(); let pixel_aliases = alias_pixels(&img); let chunk_frequencies = chunk_image(img, chunk_size, &pixel_aliases, true, false, false, false); let overlap_rules = overlaps(&chunk_frequencies, chunk_size); if chunk_frequencies.len() > MSu16xNU::len() { println!("Chunks LEN: {}", chunk_frequencies.len()); panic!("labels multiset not large enough to store all unique chunks") } let all_labels = chunk_frequencies.values().collect(); let raw_graph = create_raw_graph(&all_labels, chunk_size, (3, 3)); let mut pruned_rules: Rules = HashMap::new(); (0..all_labels.count_non_zero()) .for_each(|label| { // pruned graph vertices returned from collapse let pruned_graph = propagate_overlaps(raw_graph.clone(), &overlap_rules, label as usize); real_vertex_indexes(chunk_size) .iter() .enumerate() .for_each(|(direction, index)| { let set = pruned_graph.vertices.index(*index); if !set.is_empty() { pruned_rules.insert((direction as u16, label as usize), *set); } }); }); (pruned_rules, pixel_aliases, all_labels, chunk_frequencies) } // todo: work out if step will be needed, currently useless const fn real_vertex_indexes(chunk_size: usize) -> [usize; 8] { let dim = (3 * chunk_size) - 
(chunk_size - 1); let step = chunk_size - 1; [ 0, // NW step + 1, // N (step + 1) * 2, // NE dim * chunk_size, // W // dim * chunk_size + step + 1 // Center (unused) dim * chunk_size + (step + 1) * 2, // E dim * chunk_size * 2, // SW dim * chunk_size * 2 + step + 1, // S dim * chunk_size * 2 + (step + 1) * 2, // SE ] } fn sub_images(image: RgbImage, chunk_size: usize) -> impl Iterator<Item=RgbImage> { let chunk_size_32: u32 = TryFrom::try_from(chunk_size) .expect("chunk_size too large, cannot convert to u32"); let height_iter = 0..(image.dimensions().1) - (chunk_size_32 - 1); let width_iter = 0..(image.dimensions().0) - (chunk_size_32 - 1); height_iter .cartesian_product(width_iter) .map(move |(y, x)| { imageops::crop_imm(&image, x, y, chunk_size_32, chunk_size_32).to_image() }) } fn alias_sub_image(image: RgbImage, pixel_aliases: &PixelKeys) -> Vec<usize> { image .pixels() .map(|p| *pixel_aliases.get_by_right(&p).unwrap()) .collect() } fn alias_pixels(image: &RgbImage) -> PixelKeys { image .pixels() .unique() .copied() .enumerate() .collect() } // returns the input image in unique chunks and frequencies of those chunks fn
( image: RgbImage, chunk_size: usize, pixel_aliases: &PixelKeys, rotate: bool, reflect_vertical: bool, reflect_horizontal: bool, reflect_diagonal: bool, ) -> IndexMap<Chunk, u16> { sub_images(image, chunk_size) .map(|sub_image| alias_sub_image(sub_image, pixel_aliases)) .fold(IndexMap::new(), |mut acc, aliases| { let chunk = DMatrix::from_row_slice(chunk_size, chunk_size, &aliases); if rotate { let mut rot_chunk = chunk.clone(); for _ in 0..3 { rot_chunk = rot_chunk.rotate_90(); push_chunk_frequency(rot_chunk.clone(), &mut acc); } } if reflect_vertical { push_chunk_frequency(chunk.reflect_vertical(), &mut acc); } if reflect_horizontal { push_chunk_frequency(chunk.reflect_horizontal(), &mut acc); } if reflect_diagonal { push_chunk_frequency(chunk.reflect_top_left(), &mut acc); push_chunk_frequency(chunk.reflect_bottom_left(), &mut acc); } push_chunk_frequency(chunk, &mut acc); acc }) } fn push_chunk_frequency(chunk: Chunk, frequencies: &mut IndexMap<Chunk, u16>) { frequencies.entry(chunk).and_modify(|f| *f += 1).or_insert(1); } type Position = (usize, usize); type Size = (usize, usize); type Direction = u16; fn sub_chunk_positions(chunk_size: usize) -> Vec<(Position, Size, Direction)> { let period = chunk_size * 2 - 1; let positions = Limit::new(chunk_size).zip(TriWave::new(chunk_size)).take(period); let pos_cart_prod = positions.clone().cartesian_product(positions); pos_cart_prod .map(|((y_position, y_size), (x_position, x_size))| ( (x_position, y_position), (x_size + 1, y_size + 1) )) .filter(|(_, (width, height))| width != &chunk_size || height != &chunk_size) .enumerate() .map(|(direction, (position, size))| ( position, size, direction as u16 )) .collect() } fn overlaps(chunks: &IndexMap<Chunk, u16>, chunk_size: usize) -> Rules { chunks .keys() .enumerate() .fold(HashMap::new(), |mut rules, (label, chunk)| { let sub_positions = sub_chunk_positions(chunk_size); sub_positions .iter() .for_each(|(position, size, direction)| { let sub_chunk = 
chunk.sub_matrix(*position, *size); let reverse_index = sub_positions.len() - 1 - *direction as usize; let (rev_pos, rev_size, _) = sub_positions[reverse_index]; chunks .keys() .enumerate() .for_each(|(other_label, other_chunk)| { // find mirrored sub chunk let other_sub_chunk = other_chunk.sub_matrix(rev_pos, rev_size); if sub_chunk == other_sub_chunk { let mut set = MSu16xNU::empty(); set.insert(other_label, 1); rules .entry((*direction, label)) .and_modify(|l| l.add_assign(set)) .or_insert(set); } }) }); rules }) } // Create a raw graph for pruning fn create_raw_graph(all_labels: &MSu16xNU, chunk_size: usize, (height, width): (usize, usize)) -> Graph { // pixel based graph dimensions let v_dim_x = (width * chunk_size) - (chunk_size - 1); let v_dim_y = (height * chunk_size) - (chunk_size - 1); let vertices_len = v_dim_x * v_dim_y; let vertices: Vec<MSu16xNU> = vec![*all_labels; vertices_len]; // create negative indexed range to offset vertex centered directional field by N let signed_chunk_size: i32 = TryFrom::try_from(chunk_size) .expect("Cannot convert chunk_size to i32"); let range = 1 - signed_chunk_size..signed_chunk_size; // calculate real cartesian space offest coordinates let range_cart_prod = range.clone() .cartesian_product(range) .filter(|i| i != &(0, 0)); // remove 0 offset for correct directional mapping let edges: Edges = (0..vertices_len) .fold(HashMap::new(), |mut acc, index| { let (x, y) = index_to_coords(index, v_dim_x); range_cart_prod .clone() .map(|(y_offset, x_offset)| (y as i32 + y_offset, x as i32 + x_offset)) .enumerate() // remove coordinates outside of graph .filter(|(_, offsets)| is_inside(*offsets, (v_dim_x, v_dim_y))) .for_each(|(direction, (y_offset, x_offset))| { let other_index = coords_to_index(x_offset as usize, y_offset as usize, v_dim_x); acc .entry(index as u32) .and_modify(|v| v.push((other_index as u32, direction as u16))) .or_insert(vec![(other_index as u32, direction as u16)]); }); acc }); Graph::new(vertices, edges, 
*all_labels) } fn propagate_overlaps(mut graph: Graph, rules: &Rules, label: usize) -> Graph { let central_vertex = (graph.vertices.len() - 1) / 2; graph.vertices.index_mut(central_vertex).choose(label); collapse::collapse(rules, &graph, None, Some(1)) } #[cfg(test)] mod tests { use super::*; use crate::utils::hash_map; use image::ImageBuffer; #[test] fn test_alias_pixels() { let pixels = vec![255, 255, 255, 0, 0, 0, 122, 122, 122, 96, 96, 96]; let img = ImageBuffer::from_vec(2, 2, pixels).unwrap(); let pixel_aliases = alias_pixels(&img); assert_eq!(pixel_aliases.len(), 4); } #[test] fn test_chunk_image() { let img = image::open("resources/test/chunk_image_test.png").unwrap().to_rgb8(); let mut pixel_aliases: PixelKeys = BiMap::new(); pixel_aliases.insert(0, Rgb::from([255, 255, 255])); pixel_aliases.insert(1, Rgb::from([0, 0, 0])); let chunk_map = chunk_image(img, 2, &pixel_aliases, true, false, false, false); let mut expected_map: IndexMap<Chunk, u16> = IndexMap::new(); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 0, 0, 0]), 1); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 0, 0]), 1); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 0, 1, 0]), 1); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 0, 0, 1]), 1); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 1, 1]), 2); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 0, 1, 1]), 2); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 1, 0, 1]), 2); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 1, 1, 0]), 2); assert_eq!(chunk_map.len(), 8); expected_map .iter() .for_each(|(chunk, frequency)| { assert_eq!(chunk_map.get(chunk).unwrap(), frequency); }); } #[test] fn test_subchunk_positions() { let sub_chunks = vec![ ((0, 0), (1, 1), 0), ((0, 0), (2, 1), 1), ((1, 0), (1, 1), 2), ((0, 0), (1, 2), 3), // ((0, 0), (2, 2), 4) --> Implicit full overlap removed ((1, 0), (1, 2), 4), ((0, 1), (1, 1), 5), ((0, 1), (2, 1), 6), ((1, 1), 
(1, 1), 7) ]; assert_eq!(sub_chunk_positions(2), sub_chunks); } #[test] fn test_overlaps() { let mut chunks_n2: IndexMap<Chunk, u16> = IndexMap::new(); chunks_n2.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 2, 3]), 1); chunks_n2.insert(DMatrix::from_row_slice(2, 2, &vec![3, 2, 0, 1]), 1); chunks_n2.insert(DMatrix::from_row_slice(2, 2, &vec![2, 0, 3, 1]), 1); let mut overlaps_n2: Rules = HashMap::new(); overlaps_n2.insert((5, 0), [0, 1, 0, 0].iter().collect()); overlaps_n2.insert((0, 1), [1, 0, 0, 0].iter().collect()); overlaps_n2.insert((6, 1), [1, 0, 0, 0].iter().collect()); overlaps_n2.insert((1, 0), [0, 1, 0, 0].iter().collect()); overlaps_n2.insert((2, 1), [1, 0, 0, 0].iter().collect()); overlaps_n2.insert((7, 0), [0, 1, 0, 0].iter().collect()); overlaps_n2.insert((2, 2), [0, 1, 0, 0].iter().collect()); overlaps_n2.insert((5, 1), [0, 0, 1, 0].iter().collect()); let result_n2 = overlaps(&chunks_n2, 2); assert_eq!(result_n2, overlaps_n2); let mut chunks_n3: IndexMap<Chunk, u16> = IndexMap::new(); chunks_n3.insert(DMatrix::from_row_slice(3, 3, &vec![0, 1, 2, 3, 4, 5, 6, 7, 8]), 1); chunks_n3.insert(DMatrix::from_row_slice(3, 3, &vec![9, 10, 11, 12, 13, 14, 15, 16, 0]), 1); let mut overlaps_n3: Rules = HashMap::new(); overlaps_n3.insert((0, 0), [0, 1].iter().collect()); overlaps_n3.insert((23, 1), [1, 0].iter().collect()); let result_n3 = overlaps(&chunks_n3, 3); assert_eq!(result_n3, overlaps_n3); let mut chunks_n4: IndexMap<Chunk, u16> = IndexMap::new(); chunks_n4.insert(DMatrix::from_row_slice( 4, 4, &vec![0, 0, 2, 3, 0, 1, 4, 5, 6, 7, 0, 0, 8, 9, 0, 1]), 1); // test overlapping with self only let mut overlaps_n4: Rules = HashMap::new(); overlaps_n4.insert((8, 0), [1, 0].iter().collect()); overlaps_n4.insert((39, 0), [1, 0].iter().collect()); let results_n4 = overlaps(&chunks_n4, 4); assert_eq!(results_n4, overlaps_n4); } #[test] fn test_create_raw_graph() { let mut chunks_n3: IndexMap<Chunk, u16> = IndexMap::new(); 
chunks_n3.insert(DMatrix::from_row_slice(1, 1, &vec![0]), 1); let edges_n3: Edges = hash_map(&[ (0, vec![(1, 12), (2, 13), (4, 16), (5, 17), (6, 18), (8, 21), (9, 22), (10, 23)]), (1, vec![(0, 11), (2, 12), (3, 13), (4, 15), (5, 16), (6, 17), (7, 18), (8, 20), (9, 21), (10, 22), (11, 23)]), (2, vec![(0, 10), (1, 11), (3, 12), (4, 14), (5, 15), (6, 16), (7, 17), (8, 19), (9, 20), (10, 21), (11, 22)]), (3, vec![(1, 10), (2, 11), (5, 14), (6, 15), (7, 16), (9, 19), (10, 20), (11, 21)]), (4, vec![(0, 7), (1, 8), (2, 9), (5, 12), (6, 13), (8, 16), (9, 17), (10, 18), (12, 21), (13, 22), (14, 23)]), ]); let all_labels: MSu16xNU = chunks_n3.values().collect(); let raw_graph = create_raw_graph(&all_labels, 3, (2, 2)); assert_eq!(raw_graph.edges.get(&0).unwrap(), edges_n3.get(&0).unwrap()); assert_eq!(raw_graph.edges.get(&1).unwrap(), edges_n3.get(&1).unwrap()); assert_eq!(raw_graph.edges.get(&2).unwrap(), edges_n3.get(&2).unwrap()); assert_eq!(raw_graph.edges.get(&3).unwrap(), edges_n3.get(&3).unwrap()); assert_eq!(raw_graph.edges.get(&4).unwrap(), edges_n3.get(&4).unwrap()); } }
chunk_image
identifier_name
olm_parser.rs
use crate::graph::graph::{Rules, Edges, Graph, Vertices}; use crate::io::{ limit_iter::Limit, sub_matrix::SubMatrix, tri_wave::TriWave, utils::{DiagonalReflection, Reflection, Rotation}, }; use crate::utils::{index_to_coords, is_inside, coords_to_index}; use crate::wfc::collapse; use bimap::BiMap; use hashbrown::HashMap; use image::{imageops, Rgb, RgbImage, Pixel}; use itertools::Itertools; use nalgebra::DMatrix; use std::ops::{IndexMut, Index, AddAssign}; use std::convert::TryFrom; use indexmap::IndexMap; use std::ops::Not; use crate::MSu16xNU; type Chunk = DMatrix<usize>; type PixelKeys = BiMap<usize, Rgb<u8>>; // TODO: handle unwrap of image::open properly pub fn parse(filename: &str, chunk_size: usize) -> (Rules, PixelKeys, MSu16xNU, IndexMap<Chunk, u16>) { let img = image::open(filename).unwrap().to_rgb8(); let pixel_aliases = alias_pixels(&img); let chunk_frequencies = chunk_image(img, chunk_size, &pixel_aliases, true, false, false, false); let overlap_rules = overlaps(&chunk_frequencies, chunk_size); if chunk_frequencies.len() > MSu16xNU::len() { println!("Chunks LEN: {}", chunk_frequencies.len()); panic!("labels multiset not large enough to store all unique chunks") } let all_labels = chunk_frequencies.values().collect(); let raw_graph = create_raw_graph(&all_labels, chunk_size, (3, 3)); let mut pruned_rules: Rules = HashMap::new(); (0..all_labels.count_non_zero()) .for_each(|label| { // pruned graph vertices returned from collapse let pruned_graph = propagate_overlaps(raw_graph.clone(), &overlap_rules, label as usize); real_vertex_indexes(chunk_size) .iter() .enumerate() .for_each(|(direction, index)| { let set = pruned_graph.vertices.index(*index); if !set.is_empty() { pruned_rules.insert((direction as u16, label as usize), *set); } }); }); (pruned_rules, pixel_aliases, all_labels, chunk_frequencies) } // todo: work out if step will be needed, currently useless const fn real_vertex_indexes(chunk_size: usize) -> [usize; 8] { let dim = (3 * chunk_size) - 
(chunk_size - 1); let step = chunk_size - 1; [ 0, // NW step + 1, // N (step + 1) * 2, // NE dim * chunk_size, // W // dim * chunk_size + step + 1 // Center (unused) dim * chunk_size + (step + 1) * 2, // E dim * chunk_size * 2, // SW dim * chunk_size * 2 + step + 1, // S dim * chunk_size * 2 + (step + 1) * 2, // SE ] } fn sub_images(image: RgbImage, chunk_size: usize) -> impl Iterator<Item=RgbImage> { let chunk_size_32: u32 = TryFrom::try_from(chunk_size) .expect("chunk_size too large, cannot convert to u32"); let height_iter = 0..(image.dimensions().1) - (chunk_size_32 - 1); let width_iter = 0..(image.dimensions().0) - (chunk_size_32 - 1); height_iter .cartesian_product(width_iter) .map(move |(y, x)| { imageops::crop_imm(&image, x, y, chunk_size_32, chunk_size_32).to_image() }) } fn alias_sub_image(image: RgbImage, pixel_aliases: &PixelKeys) -> Vec<usize> { image .pixels() .map(|p| *pixel_aliases.get_by_right(&p).unwrap()) .collect() } fn alias_pixels(image: &RgbImage) -> PixelKeys { image .pixels() .unique() .copied() .enumerate() .collect() } // returns the input image in unique chunks and frequencies of those chunks fn chunk_image( image: RgbImage, chunk_size: usize, pixel_aliases: &PixelKeys, rotate: bool, reflect_vertical: bool, reflect_horizontal: bool, reflect_diagonal: bool, ) -> IndexMap<Chunk, u16> { sub_images(image, chunk_size) .map(|sub_image| alias_sub_image(sub_image, pixel_aliases)) .fold(IndexMap::new(), |mut acc, aliases| { let chunk = DMatrix::from_row_slice(chunk_size, chunk_size, &aliases); if rotate { let mut rot_chunk = chunk.clone(); for _ in 0..3 { rot_chunk = rot_chunk.rotate_90(); push_chunk_frequency(rot_chunk.clone(), &mut acc); } } if reflect_vertical { push_chunk_frequency(chunk.reflect_vertical(), &mut acc); } if reflect_horizontal { push_chunk_frequency(chunk.reflect_horizontal(), &mut acc); } if reflect_diagonal { push_chunk_frequency(chunk.reflect_top_left(), &mut acc); push_chunk_frequency(chunk.reflect_bottom_left(), &mut acc); 
} push_chunk_frequency(chunk, &mut acc); acc }) } fn push_chunk_frequency(chunk: Chunk, frequencies: &mut IndexMap<Chunk, u16>) { frequencies.entry(chunk).and_modify(|f| *f += 1).or_insert(1); } type Position = (usize, usize); type Size = (usize, usize); type Direction = u16; fn sub_chunk_positions(chunk_size: usize) -> Vec<(Position, Size, Direction)> { let period = chunk_size * 2 - 1; let positions = Limit::new(chunk_size).zip(TriWave::new(chunk_size)).take(period); let pos_cart_prod = positions.clone().cartesian_product(positions); pos_cart_prod .map(|((y_position, y_size), (x_position, x_size))| ( (x_position, y_position), (x_size + 1, y_size + 1) )) .filter(|(_, (width, height))| width != &chunk_size || height != &chunk_size) .enumerate() .map(|(direction, (position, size))| ( position, size, direction as u16 )) .collect() } fn overlaps(chunks: &IndexMap<Chunk, u16>, chunk_size: usize) -> Rules { chunks .keys() .enumerate() .fold(HashMap::new(), |mut rules, (label, chunk)| { let sub_positions = sub_chunk_positions(chunk_size); sub_positions .iter() .for_each(|(position, size, direction)| { let sub_chunk = chunk.sub_matrix(*position, *size); let reverse_index = sub_positions.len() - 1 - *direction as usize; let (rev_pos, rev_size, _) = sub_positions[reverse_index]; chunks .keys() .enumerate() .for_each(|(other_label, other_chunk)| { // find mirrored sub chunk let other_sub_chunk = other_chunk.sub_matrix(rev_pos, rev_size); if sub_chunk == other_sub_chunk
}) }); rules }) } // Create a raw graph for pruning fn create_raw_graph(all_labels: &MSu16xNU, chunk_size: usize, (height, width): (usize, usize)) -> Graph { // pixel based graph dimensions let v_dim_x = (width * chunk_size) - (chunk_size - 1); let v_dim_y = (height * chunk_size) - (chunk_size - 1); let vertices_len = v_dim_x * v_dim_y; let vertices: Vec<MSu16xNU> = vec![*all_labels; vertices_len]; // create negative indexed range to offset vertex centered directional field by N let signed_chunk_size: i32 = TryFrom::try_from(chunk_size) .expect("Cannot convert chunk_size to i32"); let range = 1 - signed_chunk_size..signed_chunk_size; // calculate real cartesian space offest coordinates let range_cart_prod = range.clone() .cartesian_product(range) .filter(|i| i != &(0, 0)); // remove 0 offset for correct directional mapping let edges: Edges = (0..vertices_len) .fold(HashMap::new(), |mut acc, index| { let (x, y) = index_to_coords(index, v_dim_x); range_cart_prod .clone() .map(|(y_offset, x_offset)| (y as i32 + y_offset, x as i32 + x_offset)) .enumerate() // remove coordinates outside of graph .filter(|(_, offsets)| is_inside(*offsets, (v_dim_x, v_dim_y))) .for_each(|(direction, (y_offset, x_offset))| { let other_index = coords_to_index(x_offset as usize, y_offset as usize, v_dim_x); acc .entry(index as u32) .and_modify(|v| v.push((other_index as u32, direction as u16))) .or_insert(vec![(other_index as u32, direction as u16)]); }); acc }); Graph::new(vertices, edges, *all_labels) } fn propagate_overlaps(mut graph: Graph, rules: &Rules, label: usize) -> Graph { let central_vertex = (graph.vertices.len() - 1) / 2; graph.vertices.index_mut(central_vertex).choose(label); collapse::collapse(rules, &graph, None, Some(1)) } #[cfg(test)] mod tests { use super::*; use crate::utils::hash_map; use image::ImageBuffer; #[test] fn test_alias_pixels() { let pixels = vec![255, 255, 255, 0, 0, 0, 122, 122, 122, 96, 96, 96]; let img = ImageBuffer::from_vec(2, 2, pixels).unwrap(); let 
pixel_aliases = alias_pixels(&img); assert_eq!(pixel_aliases.len(), 4); } #[test] fn test_chunk_image() { let img = image::open("resources/test/chunk_image_test.png").unwrap().to_rgb8(); let mut pixel_aliases: PixelKeys = BiMap::new(); pixel_aliases.insert(0, Rgb::from([255, 255, 255])); pixel_aliases.insert(1, Rgb::from([0, 0, 0])); let chunk_map = chunk_image(img, 2, &pixel_aliases, true, false, false, false); let mut expected_map: IndexMap<Chunk, u16> = IndexMap::new(); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 0, 0, 0]), 1); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 0, 0]), 1); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 0, 1, 0]), 1); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 0, 0, 1]), 1); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 1, 1]), 2); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 0, 1, 1]), 2); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 1, 0, 1]), 2); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 1, 1, 0]), 2); assert_eq!(chunk_map.len(), 8); expected_map .iter() .for_each(|(chunk, frequency)| { assert_eq!(chunk_map.get(chunk).unwrap(), frequency); }); } #[test] fn test_subchunk_positions() { let sub_chunks = vec![ ((0, 0), (1, 1), 0), ((0, 0), (2, 1), 1), ((1, 0), (1, 1), 2), ((0, 0), (1, 2), 3), // ((0, 0), (2, 2), 4) --> Implicit full overlap removed ((1, 0), (1, 2), 4), ((0, 1), (1, 1), 5), ((0, 1), (2, 1), 6), ((1, 1), (1, 1), 7) ]; assert_eq!(sub_chunk_positions(2), sub_chunks); } #[test] fn test_overlaps() { let mut chunks_n2: IndexMap<Chunk, u16> = IndexMap::new(); chunks_n2.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 2, 3]), 1); chunks_n2.insert(DMatrix::from_row_slice(2, 2, &vec![3, 2, 0, 1]), 1); chunks_n2.insert(DMatrix::from_row_slice(2, 2, &vec![2, 0, 3, 1]), 1); let mut overlaps_n2: Rules = HashMap::new(); overlaps_n2.insert((5, 0), [0, 1, 0, 0].iter().collect()); overlaps_n2.insert((0, 1), [1, 0, 0, 
0].iter().collect()); overlaps_n2.insert((6, 1), [1, 0, 0, 0].iter().collect()); overlaps_n2.insert((1, 0), [0, 1, 0, 0].iter().collect()); overlaps_n2.insert((2, 1), [1, 0, 0, 0].iter().collect()); overlaps_n2.insert((7, 0), [0, 1, 0, 0].iter().collect()); overlaps_n2.insert((2, 2), [0, 1, 0, 0].iter().collect()); overlaps_n2.insert((5, 1), [0, 0, 1, 0].iter().collect()); let result_n2 = overlaps(&chunks_n2, 2); assert_eq!(result_n2, overlaps_n2); let mut chunks_n3: IndexMap<Chunk, u16> = IndexMap::new(); chunks_n3.insert(DMatrix::from_row_slice(3, 3, &vec![0, 1, 2, 3, 4, 5, 6, 7, 8]), 1); chunks_n3.insert(DMatrix::from_row_slice(3, 3, &vec![9, 10, 11, 12, 13, 14, 15, 16, 0]), 1); let mut overlaps_n3: Rules = HashMap::new(); overlaps_n3.insert((0, 0), [0, 1].iter().collect()); overlaps_n3.insert((23, 1), [1, 0].iter().collect()); let result_n3 = overlaps(&chunks_n3, 3); assert_eq!(result_n3, overlaps_n3); let mut chunks_n4: IndexMap<Chunk, u16> = IndexMap::new(); chunks_n4.insert(DMatrix::from_row_slice( 4, 4, &vec![0, 0, 2, 3, 0, 1, 4, 5, 6, 7, 0, 0, 8, 9, 0, 1]), 1); // test overlapping with self only let mut overlaps_n4: Rules = HashMap::new(); overlaps_n4.insert((8, 0), [1, 0].iter().collect()); overlaps_n4.insert((39, 0), [1, 0].iter().collect()); let results_n4 = overlaps(&chunks_n4, 4); assert_eq!(results_n4, overlaps_n4); } #[test] fn test_create_raw_graph() { let mut chunks_n3: IndexMap<Chunk, u16> = IndexMap::new(); chunks_n3.insert(DMatrix::from_row_slice(1, 1, &vec![0]), 1); let edges_n3: Edges = hash_map(&[ (0, vec![(1, 12), (2, 13), (4, 16), (5, 17), (6, 18), (8, 21), (9, 22), (10, 23)]), (1, vec![(0, 11), (2, 12), (3, 13), (4, 15), (5, 16), (6, 17), (7, 18), (8, 20), (9, 21), (10, 22), (11, 23)]), (2, vec![(0, 10), (1, 11), (3, 12), (4, 14), (5, 15), (6, 16), (7, 17), (8, 19), (9, 20), (10, 21), (11, 22)]), (3, vec![(1, 10), (2, 11), (5, 14), (6, 15), (7, 16), (9, 19), (10, 20), (11, 21)]), (4, vec![(0, 7), (1, 8), (2, 9), (5, 12), (6, 13), (8, 16), 
(9, 17), (10, 18), (12, 21), (13, 22), (14, 23)]), ]); let all_labels: MSu16xNU = chunks_n3.values().collect(); let raw_graph = create_raw_graph(&all_labels, 3, (2, 2)); assert_eq!(raw_graph.edges.get(&0).unwrap(), edges_n3.get(&0).unwrap()); assert_eq!(raw_graph.edges.get(&1).unwrap(), edges_n3.get(&1).unwrap()); assert_eq!(raw_graph.edges.get(&2).unwrap(), edges_n3.get(&2).unwrap()); assert_eq!(raw_graph.edges.get(&3).unwrap(), edges_n3.get(&3).unwrap()); assert_eq!(raw_graph.edges.get(&4).unwrap(), edges_n3.get(&4).unwrap()); } }
{ let mut set = MSu16xNU::empty(); set.insert(other_label, 1); rules .entry((*direction, label)) .and_modify(|l| l.add_assign(set)) .or_insert(set); }
conditional_block
olm_parser.rs
use crate::graph::graph::{Rules, Edges, Graph, Vertices}; use crate::io::{ limit_iter::Limit, sub_matrix::SubMatrix, tri_wave::TriWave, utils::{DiagonalReflection, Reflection, Rotation}, }; use crate::utils::{index_to_coords, is_inside, coords_to_index}; use crate::wfc::collapse; use bimap::BiMap; use hashbrown::HashMap; use image::{imageops, Rgb, RgbImage, Pixel}; use itertools::Itertools; use nalgebra::DMatrix; use std::ops::{IndexMut, Index, AddAssign}; use std::convert::TryFrom; use indexmap::IndexMap; use std::ops::Not; use crate::MSu16xNU; type Chunk = DMatrix<usize>; type PixelKeys = BiMap<usize, Rgb<u8>>; // TODO: handle unwrap of image::open properly pub fn parse(filename: &str, chunk_size: usize) -> (Rules, PixelKeys, MSu16xNU, IndexMap<Chunk, u16>) { let img = image::open(filename).unwrap().to_rgb8(); let pixel_aliases = alias_pixels(&img); let chunk_frequencies = chunk_image(img, chunk_size, &pixel_aliases, true, false, false, false); let overlap_rules = overlaps(&chunk_frequencies, chunk_size); if chunk_frequencies.len() > MSu16xNU::len() { println!("Chunks LEN: {}", chunk_frequencies.len()); panic!("labels multiset not large enough to store all unique chunks") } let all_labels = chunk_frequencies.values().collect(); let raw_graph = create_raw_graph(&all_labels, chunk_size, (3, 3)); let mut pruned_rules: Rules = HashMap::new(); (0..all_labels.count_non_zero()) .for_each(|label| { // pruned graph vertices returned from collapse let pruned_graph = propagate_overlaps(raw_graph.clone(), &overlap_rules, label as usize); real_vertex_indexes(chunk_size) .iter() .enumerate() .for_each(|(direction, index)| { let set = pruned_graph.vertices.index(*index); if !set.is_empty() { pruned_rules.insert((direction as u16, label as usize), *set); } }); }); (pruned_rules, pixel_aliases, all_labels, chunk_frequencies) } // todo: work out if step will be needed, currently useless const fn real_vertex_indexes(chunk_size: usize) -> [usize; 8] { let dim = (3 * chunk_size) - 
(chunk_size - 1); let step = chunk_size - 1; [ 0, // NW step + 1, // N (step + 1) * 2, // NE dim * chunk_size, // W // dim * chunk_size + step + 1 // Center (unused) dim * chunk_size + (step + 1) * 2, // E dim * chunk_size * 2, // SW dim * chunk_size * 2 + step + 1, // S dim * chunk_size * 2 + (step + 1) * 2, // SE ] } fn sub_images(image: RgbImage, chunk_size: usize) -> impl Iterator<Item=RgbImage> { let chunk_size_32: u32 = TryFrom::try_from(chunk_size) .expect("chunk_size too large, cannot convert to u32"); let height_iter = 0..(image.dimensions().1) - (chunk_size_32 - 1); let width_iter = 0..(image.dimensions().0) - (chunk_size_32 - 1); height_iter .cartesian_product(width_iter) .map(move |(y, x)| { imageops::crop_imm(&image, x, y, chunk_size_32, chunk_size_32).to_image() }) } fn alias_sub_image(image: RgbImage, pixel_aliases: &PixelKeys) -> Vec<usize> { image .pixels() .map(|p| *pixel_aliases.get_by_right(&p).unwrap()) .collect() } fn alias_pixels(image: &RgbImage) -> PixelKeys { image .pixels() .unique() .copied() .enumerate() .collect() } // returns the input image in unique chunks and frequencies of those chunks fn chunk_image( image: RgbImage, chunk_size: usize, pixel_aliases: &PixelKeys, rotate: bool, reflect_vertical: bool, reflect_horizontal: bool, reflect_diagonal: bool, ) -> IndexMap<Chunk, u16> { sub_images(image, chunk_size) .map(|sub_image| alias_sub_image(sub_image, pixel_aliases)) .fold(IndexMap::new(), |mut acc, aliases| { let chunk = DMatrix::from_row_slice(chunk_size, chunk_size, &aliases); if rotate { let mut rot_chunk = chunk.clone(); for _ in 0..3 { rot_chunk = rot_chunk.rotate_90(); push_chunk_frequency(rot_chunk.clone(), &mut acc); } } if reflect_vertical { push_chunk_frequency(chunk.reflect_vertical(), &mut acc); } if reflect_horizontal { push_chunk_frequency(chunk.reflect_horizontal(), &mut acc); } if reflect_diagonal { push_chunk_frequency(chunk.reflect_top_left(), &mut acc); push_chunk_frequency(chunk.reflect_bottom_left(), &mut acc); 
} push_chunk_frequency(chunk, &mut acc); acc }) } fn push_chunk_frequency(chunk: Chunk, frequencies: &mut IndexMap<Chunk, u16>) { frequencies.entry(chunk).and_modify(|f| *f += 1).or_insert(1); } type Position = (usize, usize); type Size = (usize, usize); type Direction = u16; fn sub_chunk_positions(chunk_size: usize) -> Vec<(Position, Size, Direction)> { let period = chunk_size * 2 - 1; let positions = Limit::new(chunk_size).zip(TriWave::new(chunk_size)).take(period); let pos_cart_prod = positions.clone().cartesian_product(positions); pos_cart_prod .map(|((y_position, y_size), (x_position, x_size))| ( (x_position, y_position), (x_size + 1, y_size + 1) )) .filter(|(_, (width, height))| width != &chunk_size || height != &chunk_size) .enumerate() .map(|(direction, (position, size))| ( position, size, direction as u16 )) .collect() } fn overlaps(chunks: &IndexMap<Chunk, u16>, chunk_size: usize) -> Rules { chunks .keys() .enumerate() .fold(HashMap::new(), |mut rules, (label, chunk)| { let sub_positions = sub_chunk_positions(chunk_size); sub_positions .iter() .for_each(|(position, size, direction)| { let sub_chunk = chunk.sub_matrix(*position, *size); let reverse_index = sub_positions.len() - 1 - *direction as usize; let (rev_pos, rev_size, _) = sub_positions[reverse_index]; chunks .keys() .enumerate() .for_each(|(other_label, other_chunk)| { // find mirrored sub chunk let other_sub_chunk = other_chunk.sub_matrix(rev_pos, rev_size); if sub_chunk == other_sub_chunk { let mut set = MSu16xNU::empty(); set.insert(other_label, 1); rules .entry((*direction, label)) .and_modify(|l| l.add_assign(set))
}) } // Create a raw graph for pruning fn create_raw_graph(all_labels: &MSu16xNU, chunk_size: usize, (height, width): (usize, usize)) -> Graph { // pixel based graph dimensions let v_dim_x = (width * chunk_size) - (chunk_size - 1); let v_dim_y = (height * chunk_size) - (chunk_size - 1); let vertices_len = v_dim_x * v_dim_y; let vertices: Vec<MSu16xNU> = vec![*all_labels; vertices_len]; // create negative indexed range to offset vertex centered directional field by N let signed_chunk_size: i32 = TryFrom::try_from(chunk_size) .expect("Cannot convert chunk_size to i32"); let range = 1 - signed_chunk_size..signed_chunk_size; // calculate real cartesian space offest coordinates let range_cart_prod = range.clone() .cartesian_product(range) .filter(|i| i != &(0, 0)); // remove 0 offset for correct directional mapping let edges: Edges = (0..vertices_len) .fold(HashMap::new(), |mut acc, index| { let (x, y) = index_to_coords(index, v_dim_x); range_cart_prod .clone() .map(|(y_offset, x_offset)| (y as i32 + y_offset, x as i32 + x_offset)) .enumerate() // remove coordinates outside of graph .filter(|(_, offsets)| is_inside(*offsets, (v_dim_x, v_dim_y))) .for_each(|(direction, (y_offset, x_offset))| { let other_index = coords_to_index(x_offset as usize, y_offset as usize, v_dim_x); acc .entry(index as u32) .and_modify(|v| v.push((other_index as u32, direction as u16))) .or_insert(vec![(other_index as u32, direction as u16)]); }); acc }); Graph::new(vertices, edges, *all_labels) } fn propagate_overlaps(mut graph: Graph, rules: &Rules, label: usize) -> Graph { let central_vertex = (graph.vertices.len() - 1) / 2; graph.vertices.index_mut(central_vertex).choose(label); collapse::collapse(rules, &graph, None, Some(1)) } #[cfg(test)] mod tests { use super::*; use crate::utils::hash_map; use image::ImageBuffer; #[test] fn test_alias_pixels() { let pixels = vec![255, 255, 255, 0, 0, 0, 122, 122, 122, 96, 96, 96]; let img = ImageBuffer::from_vec(2, 2, pixels).unwrap(); let pixel_aliases 
= alias_pixels(&img); assert_eq!(pixel_aliases.len(), 4); } #[test] fn test_chunk_image() { let img = image::open("resources/test/chunk_image_test.png").unwrap().to_rgb8(); let mut pixel_aliases: PixelKeys = BiMap::new(); pixel_aliases.insert(0, Rgb::from([255, 255, 255])); pixel_aliases.insert(1, Rgb::from([0, 0, 0])); let chunk_map = chunk_image(img, 2, &pixel_aliases, true, false, false, false); let mut expected_map: IndexMap<Chunk, u16> = IndexMap::new(); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 0, 0, 0]), 1); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 0, 0]), 1); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 0, 1, 0]), 1); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 0, 0, 1]), 1); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 1, 1]), 2); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 0, 1, 1]), 2); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 1, 0, 1]), 2); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 1, 1, 0]), 2); assert_eq!(chunk_map.len(), 8); expected_map .iter() .for_each(|(chunk, frequency)| { assert_eq!(chunk_map.get(chunk).unwrap(), frequency); }); } #[test] fn test_subchunk_positions() { let sub_chunks = vec![ ((0, 0), (1, 1), 0), ((0, 0), (2, 1), 1), ((1, 0), (1, 1), 2), ((0, 0), (1, 2), 3), // ((0, 0), (2, 2), 4) --> Implicit full overlap removed ((1, 0), (1, 2), 4), ((0, 1), (1, 1), 5), ((0, 1), (2, 1), 6), ((1, 1), (1, 1), 7) ]; assert_eq!(sub_chunk_positions(2), sub_chunks); } #[test] fn test_overlaps() { let mut chunks_n2: IndexMap<Chunk, u16> = IndexMap::new(); chunks_n2.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 2, 3]), 1); chunks_n2.insert(DMatrix::from_row_slice(2, 2, &vec![3, 2, 0, 1]), 1); chunks_n2.insert(DMatrix::from_row_slice(2, 2, &vec![2, 0, 3, 1]), 1); let mut overlaps_n2: Rules = HashMap::new(); overlaps_n2.insert((5, 0), [0, 1, 0, 0].iter().collect()); overlaps_n2.insert((0, 1), [1, 0, 0, 
0].iter().collect()); overlaps_n2.insert((6, 1), [1, 0, 0, 0].iter().collect()); overlaps_n2.insert((1, 0), [0, 1, 0, 0].iter().collect()); overlaps_n2.insert((2, 1), [1, 0, 0, 0].iter().collect()); overlaps_n2.insert((7, 0), [0, 1, 0, 0].iter().collect()); overlaps_n2.insert((2, 2), [0, 1, 0, 0].iter().collect()); overlaps_n2.insert((5, 1), [0, 0, 1, 0].iter().collect()); let result_n2 = overlaps(&chunks_n2, 2); assert_eq!(result_n2, overlaps_n2); let mut chunks_n3: IndexMap<Chunk, u16> = IndexMap::new(); chunks_n3.insert(DMatrix::from_row_slice(3, 3, &vec![0, 1, 2, 3, 4, 5, 6, 7, 8]), 1); chunks_n3.insert(DMatrix::from_row_slice(3, 3, &vec![9, 10, 11, 12, 13, 14, 15, 16, 0]), 1); let mut overlaps_n3: Rules = HashMap::new(); overlaps_n3.insert((0, 0), [0, 1].iter().collect()); overlaps_n3.insert((23, 1), [1, 0].iter().collect()); let result_n3 = overlaps(&chunks_n3, 3); assert_eq!(result_n3, overlaps_n3); let mut chunks_n4: IndexMap<Chunk, u16> = IndexMap::new(); chunks_n4.insert(DMatrix::from_row_slice( 4, 4, &vec![0, 0, 2, 3, 0, 1, 4, 5, 6, 7, 0, 0, 8, 9, 0, 1]), 1); // test overlapping with self only let mut overlaps_n4: Rules = HashMap::new(); overlaps_n4.insert((8, 0), [1, 0].iter().collect()); overlaps_n4.insert((39, 0), [1, 0].iter().collect()); let results_n4 = overlaps(&chunks_n4, 4); assert_eq!(results_n4, overlaps_n4); } #[test] fn test_create_raw_graph() { let mut chunks_n3: IndexMap<Chunk, u16> = IndexMap::new(); chunks_n3.insert(DMatrix::from_row_slice(1, 1, &vec![0]), 1); let edges_n3: Edges = hash_map(&[ (0, vec![(1, 12), (2, 13), (4, 16), (5, 17), (6, 18), (8, 21), (9, 22), (10, 23)]), (1, vec![(0, 11), (2, 12), (3, 13), (4, 15), (5, 16), (6, 17), (7, 18), (8, 20), (9, 21), (10, 22), (11, 23)]), (2, vec![(0, 10), (1, 11), (3, 12), (4, 14), (5, 15), (6, 16), (7, 17), (8, 19), (9, 20), (10, 21), (11, 22)]), (3, vec![(1, 10), (2, 11), (5, 14), (6, 15), (7, 16), (9, 19), (10, 20), (11, 21)]), (4, vec![(0, 7), (1, 8), (2, 9), (5, 12), (6, 13), (8, 16), 
(9, 17), (10, 18), (12, 21), (13, 22), (14, 23)]), ]); let all_labels: MSu16xNU = chunks_n3.values().collect(); let raw_graph = create_raw_graph(&all_labels, 3, (2, 2)); assert_eq!(raw_graph.edges.get(&0).unwrap(), edges_n3.get(&0).unwrap()); assert_eq!(raw_graph.edges.get(&1).unwrap(), edges_n3.get(&1).unwrap()); assert_eq!(raw_graph.edges.get(&2).unwrap(), edges_n3.get(&2).unwrap()); assert_eq!(raw_graph.edges.get(&3).unwrap(), edges_n3.get(&3).unwrap()); assert_eq!(raw_graph.edges.get(&4).unwrap(), edges_n3.get(&4).unwrap()); } }
.or_insert(set); } }) }); rules
random_line_split
olm_parser.rs
use crate::graph::graph::{Rules, Edges, Graph, Vertices}; use crate::io::{ limit_iter::Limit, sub_matrix::SubMatrix, tri_wave::TriWave, utils::{DiagonalReflection, Reflection, Rotation}, }; use crate::utils::{index_to_coords, is_inside, coords_to_index}; use crate::wfc::collapse; use bimap::BiMap; use hashbrown::HashMap; use image::{imageops, Rgb, RgbImage, Pixel}; use itertools::Itertools; use nalgebra::DMatrix; use std::ops::{IndexMut, Index, AddAssign}; use std::convert::TryFrom; use indexmap::IndexMap; use std::ops::Not; use crate::MSu16xNU; type Chunk = DMatrix<usize>; type PixelKeys = BiMap<usize, Rgb<u8>>; // TODO: handle unwrap of image::open properly pub fn parse(filename: &str, chunk_size: usize) -> (Rules, PixelKeys, MSu16xNU, IndexMap<Chunk, u16>) { let img = image::open(filename).unwrap().to_rgb8(); let pixel_aliases = alias_pixels(&img); let chunk_frequencies = chunk_image(img, chunk_size, &pixel_aliases, true, false, false, false); let overlap_rules = overlaps(&chunk_frequencies, chunk_size); if chunk_frequencies.len() > MSu16xNU::len() { println!("Chunks LEN: {}", chunk_frequencies.len()); panic!("labels multiset not large enough to store all unique chunks") } let all_labels = chunk_frequencies.values().collect(); let raw_graph = create_raw_graph(&all_labels, chunk_size, (3, 3)); let mut pruned_rules: Rules = HashMap::new(); (0..all_labels.count_non_zero()) .for_each(|label| { // pruned graph vertices returned from collapse let pruned_graph = propagate_overlaps(raw_graph.clone(), &overlap_rules, label as usize); real_vertex_indexes(chunk_size) .iter() .enumerate() .for_each(|(direction, index)| { let set = pruned_graph.vertices.index(*index); if !set.is_empty() { pruned_rules.insert((direction as u16, label as usize), *set); } }); }); (pruned_rules, pixel_aliases, all_labels, chunk_frequencies) } // todo: work out if step will be needed, currently useless const fn real_vertex_indexes(chunk_size: usize) -> [usize; 8] { let dim = (3 * chunk_size) - 
(chunk_size - 1); let step = chunk_size - 1; [ 0, // NW step + 1, // N (step + 1) * 2, // NE dim * chunk_size, // W // dim * chunk_size + step + 1 // Center (unused) dim * chunk_size + (step + 1) * 2, // E dim * chunk_size * 2, // SW dim * chunk_size * 2 + step + 1, // S dim * chunk_size * 2 + (step + 1) * 2, // SE ] } fn sub_images(image: RgbImage, chunk_size: usize) -> impl Iterator<Item=RgbImage> { let chunk_size_32: u32 = TryFrom::try_from(chunk_size) .expect("chunk_size too large, cannot convert to u32"); let height_iter = 0..(image.dimensions().1) - (chunk_size_32 - 1); let width_iter = 0..(image.dimensions().0) - (chunk_size_32 - 1); height_iter .cartesian_product(width_iter) .map(move |(y, x)| { imageops::crop_imm(&image, x, y, chunk_size_32, chunk_size_32).to_image() }) } fn alias_sub_image(image: RgbImage, pixel_aliases: &PixelKeys) -> Vec<usize> { image .pixels() .map(|p| *pixel_aliases.get_by_right(&p).unwrap()) .collect() } fn alias_pixels(image: &RgbImage) -> PixelKeys { image .pixels() .unique() .copied() .enumerate() .collect() } // returns the input image in unique chunks and frequencies of those chunks fn chunk_image( image: RgbImage, chunk_size: usize, pixel_aliases: &PixelKeys, rotate: bool, reflect_vertical: bool, reflect_horizontal: bool, reflect_diagonal: bool, ) -> IndexMap<Chunk, u16> { sub_images(image, chunk_size) .map(|sub_image| alias_sub_image(sub_image, pixel_aliases)) .fold(IndexMap::new(), |mut acc, aliases| { let chunk = DMatrix::from_row_slice(chunk_size, chunk_size, &aliases); if rotate { let mut rot_chunk = chunk.clone(); for _ in 0..3 { rot_chunk = rot_chunk.rotate_90(); push_chunk_frequency(rot_chunk.clone(), &mut acc); } } if reflect_vertical { push_chunk_frequency(chunk.reflect_vertical(), &mut acc); } if reflect_horizontal { push_chunk_frequency(chunk.reflect_horizontal(), &mut acc); } if reflect_diagonal { push_chunk_frequency(chunk.reflect_top_left(), &mut acc); push_chunk_frequency(chunk.reflect_bottom_left(), &mut acc); 
} push_chunk_frequency(chunk, &mut acc); acc }) } fn push_chunk_frequency(chunk: Chunk, frequencies: &mut IndexMap<Chunk, u16>) { frequencies.entry(chunk).and_modify(|f| *f += 1).or_insert(1); } type Position = (usize, usize); type Size = (usize, usize); type Direction = u16; fn sub_chunk_positions(chunk_size: usize) -> Vec<(Position, Size, Direction)> { let period = chunk_size * 2 - 1; let positions = Limit::new(chunk_size).zip(TriWave::new(chunk_size)).take(period); let pos_cart_prod = positions.clone().cartesian_product(positions); pos_cart_prod .map(|((y_position, y_size), (x_position, x_size))| ( (x_position, y_position), (x_size + 1, y_size + 1) )) .filter(|(_, (width, height))| width != &chunk_size || height != &chunk_size) .enumerate() .map(|(direction, (position, size))| ( position, size, direction as u16 )) .collect() } fn overlaps(chunks: &IndexMap<Chunk, u16>, chunk_size: usize) -> Rules { chunks .keys() .enumerate() .fold(HashMap::new(), |mut rules, (label, chunk)| { let sub_positions = sub_chunk_positions(chunk_size); sub_positions .iter() .for_each(|(position, size, direction)| { let sub_chunk = chunk.sub_matrix(*position, *size); let reverse_index = sub_positions.len() - 1 - *direction as usize; let (rev_pos, rev_size, _) = sub_positions[reverse_index]; chunks .keys() .enumerate() .for_each(|(other_label, other_chunk)| { // find mirrored sub chunk let other_sub_chunk = other_chunk.sub_matrix(rev_pos, rev_size); if sub_chunk == other_sub_chunk { let mut set = MSu16xNU::empty(); set.insert(other_label, 1); rules .entry((*direction, label)) .and_modify(|l| l.add_assign(set)) .or_insert(set); } }) }); rules }) } // Create a raw graph for pruning fn create_raw_graph(all_labels: &MSu16xNU, chunk_size: usize, (height, width): (usize, usize)) -> Graph { // pixel based graph dimensions let v_dim_x = (width * chunk_size) - (chunk_size - 1); let v_dim_y = (height * chunk_size) - (chunk_size - 1); let vertices_len = v_dim_x * v_dim_y; let vertices: 
Vec<MSu16xNU> = vec![*all_labels; vertices_len]; // create negative indexed range to offset vertex centered directional field by N let signed_chunk_size: i32 = TryFrom::try_from(chunk_size) .expect("Cannot convert chunk_size to i32"); let range = 1 - signed_chunk_size..signed_chunk_size; // calculate real cartesian space offest coordinates let range_cart_prod = range.clone() .cartesian_product(range) .filter(|i| i != &(0, 0)); // remove 0 offset for correct directional mapping let edges: Edges = (0..vertices_len) .fold(HashMap::new(), |mut acc, index| { let (x, y) = index_to_coords(index, v_dim_x); range_cart_prod .clone() .map(|(y_offset, x_offset)| (y as i32 + y_offset, x as i32 + x_offset)) .enumerate() // remove coordinates outside of graph .filter(|(_, offsets)| is_inside(*offsets, (v_dim_x, v_dim_y))) .for_each(|(direction, (y_offset, x_offset))| { let other_index = coords_to_index(x_offset as usize, y_offset as usize, v_dim_x); acc .entry(index as u32) .and_modify(|v| v.push((other_index as u32, direction as u16))) .or_insert(vec![(other_index as u32, direction as u16)]); }); acc }); Graph::new(vertices, edges, *all_labels) } fn propagate_overlaps(mut graph: Graph, rules: &Rules, label: usize) -> Graph { let central_vertex = (graph.vertices.len() - 1) / 2; graph.vertices.index_mut(central_vertex).choose(label); collapse::collapse(rules, &graph, None, Some(1)) } #[cfg(test)] mod tests { use super::*; use crate::utils::hash_map; use image::ImageBuffer; #[test] fn test_alias_pixels() { let pixels = vec![255, 255, 255, 0, 0, 0, 122, 122, 122, 96, 96, 96]; let img = ImageBuffer::from_vec(2, 2, pixels).unwrap(); let pixel_aliases = alias_pixels(&img); assert_eq!(pixel_aliases.len(), 4); } #[test] fn test_chunk_image()
#[test] fn test_subchunk_positions() { let sub_chunks = vec![ ((0, 0), (1, 1), 0), ((0, 0), (2, 1), 1), ((1, 0), (1, 1), 2), ((0, 0), (1, 2), 3), // ((0, 0), (2, 2), 4) --> Implicit full overlap removed ((1, 0), (1, 2), 4), ((0, 1), (1, 1), 5), ((0, 1), (2, 1), 6), ((1, 1), (1, 1), 7) ]; assert_eq!(sub_chunk_positions(2), sub_chunks); } #[test] fn test_overlaps() { let mut chunks_n2: IndexMap<Chunk, u16> = IndexMap::new(); chunks_n2.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 2, 3]), 1); chunks_n2.insert(DMatrix::from_row_slice(2, 2, &vec![3, 2, 0, 1]), 1); chunks_n2.insert(DMatrix::from_row_slice(2, 2, &vec![2, 0, 3, 1]), 1); let mut overlaps_n2: Rules = HashMap::new(); overlaps_n2.insert((5, 0), [0, 1, 0, 0].iter().collect()); overlaps_n2.insert((0, 1), [1, 0, 0, 0].iter().collect()); overlaps_n2.insert((6, 1), [1, 0, 0, 0].iter().collect()); overlaps_n2.insert((1, 0), [0, 1, 0, 0].iter().collect()); overlaps_n2.insert((2, 1), [1, 0, 0, 0].iter().collect()); overlaps_n2.insert((7, 0), [0, 1, 0, 0].iter().collect()); overlaps_n2.insert((2, 2), [0, 1, 0, 0].iter().collect()); overlaps_n2.insert((5, 1), [0, 0, 1, 0].iter().collect()); let result_n2 = overlaps(&chunks_n2, 2); assert_eq!(result_n2, overlaps_n2); let mut chunks_n3: IndexMap<Chunk, u16> = IndexMap::new(); chunks_n3.insert(DMatrix::from_row_slice(3, 3, &vec![0, 1, 2, 3, 4, 5, 6, 7, 8]), 1); chunks_n3.insert(DMatrix::from_row_slice(3, 3, &vec![9, 10, 11, 12, 13, 14, 15, 16, 0]), 1); let mut overlaps_n3: Rules = HashMap::new(); overlaps_n3.insert((0, 0), [0, 1].iter().collect()); overlaps_n3.insert((23, 1), [1, 0].iter().collect()); let result_n3 = overlaps(&chunks_n3, 3); assert_eq!(result_n3, overlaps_n3); let mut chunks_n4: IndexMap<Chunk, u16> = IndexMap::new(); chunks_n4.insert(DMatrix::from_row_slice( 4, 4, &vec![0, 0, 2, 3, 0, 1, 4, 5, 6, 7, 0, 0, 8, 9, 0, 1]), 1); // test overlapping with self only let mut overlaps_n4: Rules = HashMap::new(); overlaps_n4.insert((8, 0), [1, 
0].iter().collect()); overlaps_n4.insert((39, 0), [1, 0].iter().collect()); let results_n4 = overlaps(&chunks_n4, 4); assert_eq!(results_n4, overlaps_n4); } #[test] fn test_create_raw_graph() { let mut chunks_n3: IndexMap<Chunk, u16> = IndexMap::new(); chunks_n3.insert(DMatrix::from_row_slice(1, 1, &vec![0]), 1); let edges_n3: Edges = hash_map(&[ (0, vec![(1, 12), (2, 13), (4, 16), (5, 17), (6, 18), (8, 21), (9, 22), (10, 23)]), (1, vec![(0, 11), (2, 12), (3, 13), (4, 15), (5, 16), (6, 17), (7, 18), (8, 20), (9, 21), (10, 22), (11, 23)]), (2, vec![(0, 10), (1, 11), (3, 12), (4, 14), (5, 15), (6, 16), (7, 17), (8, 19), (9, 20), (10, 21), (11, 22)]), (3, vec![(1, 10), (2, 11), (5, 14), (6, 15), (7, 16), (9, 19), (10, 20), (11, 21)]), (4, vec![(0, 7), (1, 8), (2, 9), (5, 12), (6, 13), (8, 16), (9, 17), (10, 18), (12, 21), (13, 22), (14, 23)]), ]); let all_labels: MSu16xNU = chunks_n3.values().collect(); let raw_graph = create_raw_graph(&all_labels, 3, (2, 2)); assert_eq!(raw_graph.edges.get(&0).unwrap(), edges_n3.get(&0).unwrap()); assert_eq!(raw_graph.edges.get(&1).unwrap(), edges_n3.get(&1).unwrap()); assert_eq!(raw_graph.edges.get(&2).unwrap(), edges_n3.get(&2).unwrap()); assert_eq!(raw_graph.edges.get(&3).unwrap(), edges_n3.get(&3).unwrap()); assert_eq!(raw_graph.edges.get(&4).unwrap(), edges_n3.get(&4).unwrap()); } }
{ let img = image::open("resources/test/chunk_image_test.png").unwrap().to_rgb8(); let mut pixel_aliases: PixelKeys = BiMap::new(); pixel_aliases.insert(0, Rgb::from([255, 255, 255])); pixel_aliases.insert(1, Rgb::from([0, 0, 0])); let chunk_map = chunk_image(img, 2, &pixel_aliases, true, false, false, false); let mut expected_map: IndexMap<Chunk, u16> = IndexMap::new(); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 0, 0, 0]), 1); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 0, 0]), 1); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 0, 1, 0]), 1); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 0, 0, 1]), 1); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 1, 1]), 2); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 0, 1, 1]), 2); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 1, 0, 1]), 2); expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 1, 1, 0]), 2); assert_eq!(chunk_map.len(), 8); expected_map .iter() .for_each(|(chunk, frequency)| { assert_eq!(chunk_map.get(chunk).unwrap(), frequency); }); }
identifier_body
wasm.rs
use std::collections::HashMap; use std::convert::TryFrom; use crate::error::Error; /// The allowable types for any real value in wasm (u8 and others are packed) #[derive(Copy, Clone, PartialEq)] pub enum PrimitiveType { I32, I64, F32, F64, } impl From<i32> for PrimitiveType { fn from(_: i32) -> PrimitiveType { PrimitiveType::I32 } } impl From<i64> for PrimitiveType { fn from(_: i64) -> PrimitiveType { PrimitiveType::I64 } } impl From<f32> for PrimitiveType { fn from(_: f32) -> PrimitiveType { PrimitiveType::F32 } } impl From<f64> for PrimitiveType { fn from(_: f64) -> PrimitiveType { PrimitiveType::F64 } } /// Storage type for all wasm values #[derive(Copy, Clone)] pub union InternalValue { i32: i32, i64: i64, f32: f32, f64: f64, } impl From<i32> for InternalValue { fn from(x: i32) -> InternalValue { InternalValue { i32: x } } } impl From<i64> for InternalValue { fn from(x: i64) -> InternalValue { InternalValue { i64: x } } } impl From<f32> for InternalValue { fn from(x: f32) -> InternalValue { InternalValue { f32: x } } } impl From<f64> for InternalValue { fn from(x: f64) -> InternalValue { InternalValue { f64: x } } } /// Representation of all wasm values #[derive(Copy, Clone)] pub struct Value { t: PrimitiveType, v: InternalValue, } impl Value { pub fn new<T: Into<InternalValue> + Into<PrimitiveType> + Copy>(x: T) -> Self { Self { t: x.into(), v: x.into(), } } pub fn from_explicit_type(t: PrimitiveType, v: u64) -> Value { Self { t, v: InternalValue { i64: v as i64 }, } } #[inline] pub fn as_i32_unchecked(&self) -> i32 { unsafe { self.v.i32 } } #[inline] pub fn as_i64_unchecked(&self) -> i64 { unsafe { self.v.i64 } } #[inline] pub fn as_f32_unchecked(&self) -> f32 { unsafe { self.v.f32 } } #[inline] pub fn as_f64_unchecked(&self) -> f64 { unsafe { self.v.f64 } } } impl From<i32> for Value { fn from(v: i32) -> Self { Self { t: PrimitiveType::from(v), v: InternalValue::from(v), } } } impl From<i64> for Value { fn from(v: i64) -> Self { Self { t: 
PrimitiveType::from(v), v: InternalValue::from(v), } } } impl From<f32> for Value { fn from(v: f32) -> Self { Self { t: PrimitiveType::from(v), v: InternalValue::from(v), } } } impl From<f64> for Value { fn from(v: f64) -> Self { Self { t: PrimitiveType::from(v), v: InternalValue::from(v), } } } impl TryFrom<Value> for u32 { type Error = Error; fn try_from(x: Value) -> Result<u32, Error> { match x.t { PrimitiveType::I32 => Ok(unsafe { x.v.i32 as u32 }), _ => Err(Error::Misc("Cannot extract as u32 from incorrect type")), } } } impl From<&PrimitiveType> for Value { fn from(x: &PrimitiveType) -> Value { match x { PrimitiveType::I32 => Value::new(0_i32), PrimitiveType::I64 => Value::new(0_i64), PrimitiveType::F32 => Value::new(0_f32), PrimitiveType::F64 => Value::new(0_f64), } } } impl std::fmt::Display for Value { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { unsafe { match self.t { PrimitiveType::I32 => { write!(f, "(i32:{})", self.v.i32) } PrimitiveType::I64 => { write!(f, "(i64:{})", self.v.i64) } PrimitiveType::F32 => { write!(f, "(f32:{})", self.v.f32) } PrimitiveType::F64 => { write!(f, "(f64:{})", self.v.f64) } } } } } /// Represents expected runtime errors, i.e. problems with the program, not the interpreter pub enum Trap { MemoryOutOfBounds, UndefinedDivision, } pub enum ControlInfo { Branch(u32), Return, Trap(Trap), None, } /// Representation of a wasm stack. /// All functions use a new stack when called. 
#[derive(Default)] pub struct Stack { values: Vec<Value>, } impl Stack { fn new() -> Self { Self::default() } fn push_value(&mut self, v: Value) { log::debug!("Pushing {}", v); self.values.push(v); } pub fn pop_value(&mut self) -> Result<Value, Error> { log::debug!("Current stack len {}", self.values.len()); if self.values.is_empty() { Err(Error::StackViolation) } else { unsafe { Ok(self.values.pop().unwrap_unchecked()) } } } /// Return the 0-indexed offset'th value from the stack (such that 0 is the most recently pushed value) pub fn fetch_value(&self, offset: usize) -> Result<&Value, Error> { let stack_size = self.values.len(); let offset_to_fetch = stack_size - 1 - offset; match self.values.get(offset_to_fetch) { Some(n) => Ok(n), None => { log::debug!("Try to read {} stack size {}", offset_to_fetch, stack_size); Err(Error::StackViolation) } } } pub fn assert_empty(&self) -> Result<(), Error> { if self.values.is_empty() { Ok(()) } else { Err(Error::StackViolation) } } } impl std::fmt::Display for Stack { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { write!(f, "Current stack:\n[")?; for v in self.values.iter() { writeln!(f, " {}", v)?; } write!(f, "]\n\n")?; Ok(()) } } pub trait Instruction { /// A wasm instruction may modify any state of the program fn execute( &self, stack: &mut Stack, memory: &mut Memory, locals: &mut Vec<Value>, functions: &Vec<Function>, ) -> Result<ControlInfo, Error>; } pub mod inst; #[derive(Default)] struct Table { functions: Vec<usize>, } pub struct Function { r#type: FunctionType, local_types: Vec<PrimitiveType>, instructions: Vec<Box<dyn Instruction>>, } impl Function { pub fn new(r#type: FunctionType) -> Self { Self { r#type, local_types: Vec::new(), instructions: Vec::new(), } } pub fn push_inst(&mut self, i: Box<dyn Instruction>) { self.instructions.push(i); } pub fn num_params(&self) -> usize { self.r#type.num_params() } pub fn num_locals(&self) -> usize { self.local_types.len() } pub fn 
new_locals(&mut self, count: usize, t: PrimitiveType) { self.local_types.reserve(count); for _ in 0..count { self.local_types.push(t); } } fn do_return(mut stack: Stack) -> Result<Value, Error> { let ret = stack.pop_value(); stack.assert_empty()?; ret } pub fn call( &self, functions: &Vec<Function>, memory: &mut Memory, args: Vec<Value>, ) -> Result<Value, Error> { let mut stack = Stack::new(); let mut locals = Vec::with_capacity(self.num_params() + self.num_locals()); for arg in args { locals.push(arg); } for t in &self.local_types { locals.push(Value::from(t)); } for instruction in &self.instructions { match instruction.execute(&mut stack, memory, &mut locals, functions)? { ControlInfo::Return => { return Self::do_return(stack); } ControlInfo::Trap(Trap::MemoryOutOfBounds) => panic!(), //TODO: don't panic, handle traps gracefully ControlInfo::Trap(Trap::UndefinedDivision) => panic!(), _ => (), }; } Self::do_return(stack) } } #[derive(Default)] pub struct Memory { bytes: Vec<u8>, virtual_size_pages: u32, upper_limit_pages: u32, } const PAGE_SIZE: u64 = 0x10000; impl Memory { pub fn new(min: u32, max: u32) -> Self { let mut s = Self { bytes: Vec::with_capacity((PAGE_SIZE * min as u64) as usize), virtual_size_pages: min, upper_limit_pages: max, }; s.write(PAGE_SIZE * min as u64, 32, 4); // It looks like s } pub fn write(&mut self, mut value: u64, bitwidth: u8, address: u64) -> Option<()> { log::debug!( "Write to address 0x{:x} with bitwidth {} and value 0x{:x}", address, bitwidth, value ); if bitwidth % 8 != 0 { // Probably don't even need to implement this panic!(); } let bytes_to_write = bitwidth / 8; let last_write_address = address + bytes_to_write as u64; // Check for out of bounds access if last_write_address > PAGE_SIZE * self.virtual_size_pages as u64 { return None; } // Resize internal vector if needed if self.bytes.is_empty() || last_write_address > (self.bytes.len() - 1) as u64 { self.bytes.resize((last_write_address + 1) as usize, 0); } for i in 
(address..(address + bytes_to_write as u64)).rev() { self.bytes[i as usize] = (value & 0xFF) as u8; value >>= 8; } Some(()) } pub fn read( &mut self, result_type: PrimitiveType, bitwidth: u8, address: u64, ) -> Option<Value> { let bytes_to_read = (bitwidth / 8) as u64; let mut result = 0_u64; for i in address..(address + bytes_to_read) { result <<= 8; result += self.bytes[i as usize] as u64; } log::debug!( "Read from address 0x{:x} with bitwidth {} and value 0x{:x}", address, bitwidth, result ); Some(Value::from_explicit_type(result_type, result)) } } #[derive(Default, Clone)] pub struct FunctionType { pub params: Vec<PrimitiveType>, pub returns: Vec<PrimitiveType>, } impl FunctionType { pub fn new(params: Vec<PrimitiveType>, returns: Vec<PrimitiveType>) -> Self { Self { params, returns } } pub fn
(&self) -> usize { self.params.len() } pub fn params_iter(&self) -> std::slice::Iter<PrimitiveType> { self.params.iter() } } pub enum Export { Function(usize), Table(usize), Memory(usize), Global(usize), } #[derive(Default)] pub struct Module { function_types: Vec<FunctionType>, functions: Vec<Function>, exports: HashMap<String, Export>, table: Table, memory: Memory, globals: Vec<Value>, } impl Module { pub fn new() -> Self { Self::default() } pub fn call(&mut self, function_name: &str, args: Vec<Value>) -> Result<Value, Error> { let function_index = match self.exports.get(function_name) { Some(Export::Function(n)) => *n, _ => return Err(Error::Misc("On module call, given name is not a function")), }; let function = match self.functions.get(function_index) { Some(n) => n, None => { return Err(Error::Misc( "Function index given by export section is not valid", )) } }; function.call(&self.functions, &mut self.memory, args) } pub fn add_function_type(&mut self, ft: FunctionType) { self.function_types.push(ft); } pub fn get_function_type(&self, i: usize) -> FunctionType { self.function_types[i].clone() } pub fn add_function(&mut self, f: Function) { self.functions.push(f); } pub fn add_memory(&mut self, m: Memory) { self.memory = m; } pub fn add_export(&mut self, name: String, export: Export) -> Result<(), Error> { if self.exports.contains_key(&name) { return Err(Error::UnexpectedData("Expected a unique export name")); } self.exports.insert(name, export); Ok(()) } pub fn get_mut_function(&mut self, i: usize) -> &mut Function { &mut self.functions[i] } }
num_params
identifier_name
wasm.rs
use std::collections::HashMap; use std::convert::TryFrom; use crate::error::Error; /// The allowable types for any real value in wasm (u8 and others are packed) #[derive(Copy, Clone, PartialEq)] pub enum PrimitiveType { I32, I64, F32, F64, } impl From<i32> for PrimitiveType { fn from(_: i32) -> PrimitiveType { PrimitiveType::I32 } } impl From<i64> for PrimitiveType { fn from(_: i64) -> PrimitiveType { PrimitiveType::I64 } } impl From<f32> for PrimitiveType { fn from(_: f32) -> PrimitiveType { PrimitiveType::F32 } } impl From<f64> for PrimitiveType { fn from(_: f64) -> PrimitiveType { PrimitiveType::F64 } } /// Storage type for all wasm values #[derive(Copy, Clone)] pub union InternalValue { i32: i32, i64: i64, f32: f32, f64: f64, } impl From<i32> for InternalValue { fn from(x: i32) -> InternalValue { InternalValue { i32: x } } } impl From<i64> for InternalValue { fn from(x: i64) -> InternalValue { InternalValue { i64: x } } } impl From<f32> for InternalValue { fn from(x: f32) -> InternalValue { InternalValue { f32: x } } } impl From<f64> for InternalValue { fn from(x: f64) -> InternalValue { InternalValue { f64: x } } } /// Representation of all wasm values #[derive(Copy, Clone)] pub struct Value { t: PrimitiveType, v: InternalValue, } impl Value { pub fn new<T: Into<InternalValue> + Into<PrimitiveType> + Copy>(x: T) -> Self { Self { t: x.into(), v: x.into(), } } pub fn from_explicit_type(t: PrimitiveType, v: u64) -> Value { Self { t, v: InternalValue { i64: v as i64 }, } } #[inline] pub fn as_i32_unchecked(&self) -> i32 { unsafe { self.v.i32 } } #[inline] pub fn as_i64_unchecked(&self) -> i64 { unsafe { self.v.i64 } } #[inline] pub fn as_f32_unchecked(&self) -> f32 { unsafe { self.v.f32 } } #[inline] pub fn as_f64_unchecked(&self) -> f64 { unsafe { self.v.f64 } } } impl From<i32> for Value { fn from(v: i32) -> Self { Self { t: PrimitiveType::from(v), v: InternalValue::from(v), } } } impl From<i64> for Value { fn from(v: i64) -> Self { Self { t: 
PrimitiveType::from(v), v: InternalValue::from(v), } } } impl From<f32> for Value { fn from(v: f32) -> Self { Self { t: PrimitiveType::from(v), v: InternalValue::from(v), } } } impl From<f64> for Value { fn from(v: f64) -> Self { Self { t: PrimitiveType::from(v), v: InternalValue::from(v), } } } impl TryFrom<Value> for u32 { type Error = Error; fn try_from(x: Value) -> Result<u32, Error> { match x.t { PrimitiveType::I32 => Ok(unsafe { x.v.i32 as u32 }), _ => Err(Error::Misc("Cannot extract as u32 from incorrect type")), } } } impl From<&PrimitiveType> for Value { fn from(x: &PrimitiveType) -> Value { match x { PrimitiveType::I32 => Value::new(0_i32), PrimitiveType::I64 => Value::new(0_i64), PrimitiveType::F32 => Value::new(0_f32), PrimitiveType::F64 => Value::new(0_f64), } } } impl std::fmt::Display for Value { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { unsafe { match self.t { PrimitiveType::I32 => { write!(f, "(i32:{})", self.v.i32) } PrimitiveType::I64 => { write!(f, "(i64:{})", self.v.i64) } PrimitiveType::F32 => { write!(f, "(f32:{})", self.v.f32) } PrimitiveType::F64 => { write!(f, "(f64:{})", self.v.f64) } } } } } /// Represents expected runtime errors, i.e. problems with the program, not the interpreter pub enum Trap { MemoryOutOfBounds, UndefinedDivision, } pub enum ControlInfo { Branch(u32), Return, Trap(Trap), None, } /// Representation of a wasm stack. /// All functions use a new stack when called. 
#[derive(Default)] pub struct Stack { values: Vec<Value>, } impl Stack { fn new() -> Self { Self::default() } fn push_value(&mut self, v: Value) { log::debug!("Pushing {}", v); self.values.push(v); } pub fn pop_value(&mut self) -> Result<Value, Error> { log::debug!("Current stack len {}", self.values.len()); if self.values.is_empty() { Err(Error::StackViolation) } else { unsafe { Ok(self.values.pop().unwrap_unchecked()) } } } /// Return the 0-indexed offset'th value from the stack (such that 0 is the most recently pushed value) pub fn fetch_value(&self, offset: usize) -> Result<&Value, Error> { let stack_size = self.values.len(); let offset_to_fetch = stack_size - 1 - offset; match self.values.get(offset_to_fetch) { Some(n) => Ok(n), None => { log::debug!("Try to read {} stack size {}", offset_to_fetch, stack_size); Err(Error::StackViolation) } } } pub fn assert_empty(&self) -> Result<(), Error> { if self.values.is_empty() { Ok(()) } else { Err(Error::StackViolation) } } } impl std::fmt::Display for Stack { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { write!(f, "Current stack:\n[")?; for v in self.values.iter() { writeln!(f, " {}", v)?; } write!(f, "]\n\n")?; Ok(()) } } pub trait Instruction { /// A wasm instruction may modify any state of the program fn execute( &self, stack: &mut Stack, memory: &mut Memory, locals: &mut Vec<Value>, functions: &Vec<Function>, ) -> Result<ControlInfo, Error>; } pub mod inst; #[derive(Default)] struct Table { functions: Vec<usize>, } pub struct Function { r#type: FunctionType, local_types: Vec<PrimitiveType>, instructions: Vec<Box<dyn Instruction>>, } impl Function { pub fn new(r#type: FunctionType) -> Self { Self { r#type, local_types: Vec::new(), instructions: Vec::new(), } } pub fn push_inst(&mut self, i: Box<dyn Instruction>) { self.instructions.push(i); } pub fn num_params(&self) -> usize { self.r#type.num_params() } pub fn num_locals(&self) -> usize { self.local_types.len() } pub fn 
new_locals(&mut self, count: usize, t: PrimitiveType) { self.local_types.reserve(count); for _ in 0..count { self.local_types.push(t); } } fn do_return(mut stack: Stack) -> Result<Value, Error> { let ret = stack.pop_value(); stack.assert_empty()?; ret } pub fn call( &self, functions: &Vec<Function>, memory: &mut Memory, args: Vec<Value>, ) -> Result<Value, Error> { let mut stack = Stack::new(); let mut locals = Vec::with_capacity(self.num_params() + self.num_locals()); for arg in args { locals.push(arg); } for t in &self.local_types { locals.push(Value::from(t)); } for instruction in &self.instructions { match instruction.execute(&mut stack, memory, &mut locals, functions)? { ControlInfo::Return => { return Self::do_return(stack); } ControlInfo::Trap(Trap::MemoryOutOfBounds) => panic!(), //TODO: don't panic, handle traps gracefully ControlInfo::Trap(Trap::UndefinedDivision) => panic!(), _ => (), }; } Self::do_return(stack) } } #[derive(Default)] pub struct Memory { bytes: Vec<u8>, virtual_size_pages: u32, upper_limit_pages: u32, } const PAGE_SIZE: u64 = 0x10000; impl Memory { pub fn new(min: u32, max: u32) -> Self { let mut s = Self { bytes: Vec::with_capacity((PAGE_SIZE * min as u64) as usize), virtual_size_pages: min, upper_limit_pages: max, }; s.write(PAGE_SIZE * min as u64, 32, 4); // It looks like s } pub fn write(&mut self, mut value: u64, bitwidth: u8, address: u64) -> Option<()> { log::debug!( "Write to address 0x{:x} with bitwidth {} and value 0x{:x}", address, bitwidth, value ); if bitwidth % 8 != 0 { // Probably don't even need to implement this panic!(); } let bytes_to_write = bitwidth / 8; let last_write_address = address + bytes_to_write as u64; // Check for out of bounds access if last_write_address > PAGE_SIZE * self.virtual_size_pages as u64 { return None; } // Resize internal vector if needed if self.bytes.is_empty() || last_write_address > (self.bytes.len() - 1) as u64 { self.bytes.resize((last_write_address + 1) as usize, 0); } for i in 
(address..(address + bytes_to_write as u64)).rev() { self.bytes[i as usize] = (value & 0xFF) as u8; value >>= 8; } Some(()) } pub fn read( &mut self, result_type: PrimitiveType, bitwidth: u8, address: u64, ) -> Option<Value> { let bytes_to_read = (bitwidth / 8) as u64; let mut result = 0_u64; for i in address..(address + bytes_to_read) { result <<= 8; result += self.bytes[i as usize] as u64; } log::debug!( "Read from address 0x{:x} with bitwidth {} and value 0x{:x}", address, bitwidth, result ); Some(Value::from_explicit_type(result_type, result)) } } #[derive(Default, Clone)] pub struct FunctionType { pub params: Vec<PrimitiveType>, pub returns: Vec<PrimitiveType>, } impl FunctionType { pub fn new(params: Vec<PrimitiveType>, returns: Vec<PrimitiveType>) -> Self { Self { params, returns } } pub fn num_params(&self) -> usize { self.params.len() } pub fn params_iter(&self) -> std::slice::Iter<PrimitiveType> { self.params.iter() } } pub enum Export { Function(usize), Table(usize), Memory(usize), Global(usize), } #[derive(Default)] pub struct Module { function_types: Vec<FunctionType>, functions: Vec<Function>, exports: HashMap<String, Export>, table: Table, memory: Memory, globals: Vec<Value>, } impl Module { pub fn new() -> Self { Self::default() } pub fn call(&mut self, function_name: &str, args: Vec<Value>) -> Result<Value, Error> { let function_index = match self.exports.get(function_name) { Some(Export::Function(n)) => *n, _ => return Err(Error::Misc("On module call, given name is not a function")), }; let function = match self.functions.get(function_index) { Some(n) => n, None => { return Err(Error::Misc( "Function index given by export section is not valid", )) } }; function.call(&self.functions, &mut self.memory, args) }
self.function_types.push(ft); } pub fn get_function_type(&self, i: usize) -> FunctionType { self.function_types[i].clone() } pub fn add_function(&mut self, f: Function) { self.functions.push(f); } pub fn add_memory(&mut self, m: Memory) { self.memory = m; } pub fn add_export(&mut self, name: String, export: Export) -> Result<(), Error> { if self.exports.contains_key(&name) { return Err(Error::UnexpectedData("Expected a unique export name")); } self.exports.insert(name, export); Ok(()) } pub fn get_mut_function(&mut self, i: usize) -> &mut Function { &mut self.functions[i] } }
pub fn add_function_type(&mut self, ft: FunctionType) {
random_line_split
wasm.rs
use std::collections::HashMap; use std::convert::TryFrom; use crate::error::Error; /// The allowable types for any real value in wasm (u8 and others are packed) #[derive(Copy, Clone, PartialEq)] pub enum PrimitiveType { I32, I64, F32, F64, } impl From<i32> for PrimitiveType { fn from(_: i32) -> PrimitiveType { PrimitiveType::I32 } } impl From<i64> for PrimitiveType { fn from(_: i64) -> PrimitiveType { PrimitiveType::I64 } } impl From<f32> for PrimitiveType { fn from(_: f32) -> PrimitiveType { PrimitiveType::F32 } } impl From<f64> for PrimitiveType { fn from(_: f64) -> PrimitiveType { PrimitiveType::F64 } } /// Storage type for all wasm values #[derive(Copy, Clone)] pub union InternalValue { i32: i32, i64: i64, f32: f32, f64: f64, } impl From<i32> for InternalValue { fn from(x: i32) -> InternalValue { InternalValue { i32: x } } } impl From<i64> for InternalValue { fn from(x: i64) -> InternalValue { InternalValue { i64: x } } } impl From<f32> for InternalValue { fn from(x: f32) -> InternalValue { InternalValue { f32: x } } } impl From<f64> for InternalValue { fn from(x: f64) -> InternalValue { InternalValue { f64: x } } } /// Representation of all wasm values #[derive(Copy, Clone)] pub struct Value { t: PrimitiveType, v: InternalValue, } impl Value { pub fn new<T: Into<InternalValue> + Into<PrimitiveType> + Copy>(x: T) -> Self { Self { t: x.into(), v: x.into(), } } pub fn from_explicit_type(t: PrimitiveType, v: u64) -> Value { Self { t, v: InternalValue { i64: v as i64 }, } } #[inline] pub fn as_i32_unchecked(&self) -> i32
#[inline] pub fn as_i64_unchecked(&self) -> i64 { unsafe { self.v.i64 } } #[inline] pub fn as_f32_unchecked(&self) -> f32 { unsafe { self.v.f32 } } #[inline] pub fn as_f64_unchecked(&self) -> f64 { unsafe { self.v.f64 } } } impl From<i32> for Value { fn from(v: i32) -> Self { Self { t: PrimitiveType::from(v), v: InternalValue::from(v), } } } impl From<i64> for Value { fn from(v: i64) -> Self { Self { t: PrimitiveType::from(v), v: InternalValue::from(v), } } } impl From<f32> for Value { fn from(v: f32) -> Self { Self { t: PrimitiveType::from(v), v: InternalValue::from(v), } } } impl From<f64> for Value { fn from(v: f64) -> Self { Self { t: PrimitiveType::from(v), v: InternalValue::from(v), } } } impl TryFrom<Value> for u32 { type Error = Error; fn try_from(x: Value) -> Result<u32, Error> { match x.t { PrimitiveType::I32 => Ok(unsafe { x.v.i32 as u32 }), _ => Err(Error::Misc("Cannot extract as u32 from incorrect type")), } } } impl From<&PrimitiveType> for Value { fn from(x: &PrimitiveType) -> Value { match x { PrimitiveType::I32 => Value::new(0_i32), PrimitiveType::I64 => Value::new(0_i64), PrimitiveType::F32 => Value::new(0_f32), PrimitiveType::F64 => Value::new(0_f64), } } } impl std::fmt::Display for Value { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { unsafe { match self.t { PrimitiveType::I32 => { write!(f, "(i32:{})", self.v.i32) } PrimitiveType::I64 => { write!(f, "(i64:{})", self.v.i64) } PrimitiveType::F32 => { write!(f, "(f32:{})", self.v.f32) } PrimitiveType::F64 => { write!(f, "(f64:{})", self.v.f64) } } } } } /// Represents expected runtime errors, i.e. problems with the program, not the interpreter pub enum Trap { MemoryOutOfBounds, UndefinedDivision, } pub enum ControlInfo { Branch(u32), Return, Trap(Trap), None, } /// Representation of a wasm stack. /// All functions use a new stack when called. 
#[derive(Default)] pub struct Stack { values: Vec<Value>, } impl Stack { fn new() -> Self { Self::default() } fn push_value(&mut self, v: Value) { log::debug!("Pushing {}", v); self.values.push(v); } pub fn pop_value(&mut self) -> Result<Value, Error> { log::debug!("Current stack len {}", self.values.len()); if self.values.is_empty() { Err(Error::StackViolation) } else { unsafe { Ok(self.values.pop().unwrap_unchecked()) } } } /// Return the 0-indexed offset'th value from the stack (such that 0 is the most recently pushed value) pub fn fetch_value(&self, offset: usize) -> Result<&Value, Error> { let stack_size = self.values.len(); let offset_to_fetch = stack_size - 1 - offset; match self.values.get(offset_to_fetch) { Some(n) => Ok(n), None => { log::debug!("Try to read {} stack size {}", offset_to_fetch, stack_size); Err(Error::StackViolation) } } } pub fn assert_empty(&self) -> Result<(), Error> { if self.values.is_empty() { Ok(()) } else { Err(Error::StackViolation) } } } impl std::fmt::Display for Stack { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { write!(f, "Current stack:\n[")?; for v in self.values.iter() { writeln!(f, " {}", v)?; } write!(f, "]\n\n")?; Ok(()) } } pub trait Instruction { /// A wasm instruction may modify any state of the program fn execute( &self, stack: &mut Stack, memory: &mut Memory, locals: &mut Vec<Value>, functions: &Vec<Function>, ) -> Result<ControlInfo, Error>; } pub mod inst; #[derive(Default)] struct Table { functions: Vec<usize>, } pub struct Function { r#type: FunctionType, local_types: Vec<PrimitiveType>, instructions: Vec<Box<dyn Instruction>>, } impl Function { pub fn new(r#type: FunctionType) -> Self { Self { r#type, local_types: Vec::new(), instructions: Vec::new(), } } pub fn push_inst(&mut self, i: Box<dyn Instruction>) { self.instructions.push(i); } pub fn num_params(&self) -> usize { self.r#type.num_params() } pub fn num_locals(&self) -> usize { self.local_types.len() } pub fn 
new_locals(&mut self, count: usize, t: PrimitiveType) { self.local_types.reserve(count); for _ in 0..count { self.local_types.push(t); } } fn do_return(mut stack: Stack) -> Result<Value, Error> { let ret = stack.pop_value(); stack.assert_empty()?; ret } pub fn call( &self, functions: &Vec<Function>, memory: &mut Memory, args: Vec<Value>, ) -> Result<Value, Error> { let mut stack = Stack::new(); let mut locals = Vec::with_capacity(self.num_params() + self.num_locals()); for arg in args { locals.push(arg); } for t in &self.local_types { locals.push(Value::from(t)); } for instruction in &self.instructions { match instruction.execute(&mut stack, memory, &mut locals, functions)? { ControlInfo::Return => { return Self::do_return(stack); } ControlInfo::Trap(Trap::MemoryOutOfBounds) => panic!(), //TODO: don't panic, handle traps gracefully ControlInfo::Trap(Trap::UndefinedDivision) => panic!(), _ => (), }; } Self::do_return(stack) } } #[derive(Default)] pub struct Memory { bytes: Vec<u8>, virtual_size_pages: u32, upper_limit_pages: u32, } const PAGE_SIZE: u64 = 0x10000; impl Memory { pub fn new(min: u32, max: u32) -> Self { let mut s = Self { bytes: Vec::with_capacity((PAGE_SIZE * min as u64) as usize), virtual_size_pages: min, upper_limit_pages: max, }; s.write(PAGE_SIZE * min as u64, 32, 4); // It looks like s } pub fn write(&mut self, mut value: u64, bitwidth: u8, address: u64) -> Option<()> { log::debug!( "Write to address 0x{:x} with bitwidth {} and value 0x{:x}", address, bitwidth, value ); if bitwidth % 8 != 0 { // Probably don't even need to implement this panic!(); } let bytes_to_write = bitwidth / 8; let last_write_address = address + bytes_to_write as u64; // Check for out of bounds access if last_write_address > PAGE_SIZE * self.virtual_size_pages as u64 { return None; } // Resize internal vector if needed if self.bytes.is_empty() || last_write_address > (self.bytes.len() - 1) as u64 { self.bytes.resize((last_write_address + 1) as usize, 0); } for i in 
(address..(address + bytes_to_write as u64)).rev() { self.bytes[i as usize] = (value & 0xFF) as u8; value >>= 8; } Some(()) } pub fn read( &mut self, result_type: PrimitiveType, bitwidth: u8, address: u64, ) -> Option<Value> { let bytes_to_read = (bitwidth / 8) as u64; let mut result = 0_u64; for i in address..(address + bytes_to_read) { result <<= 8; result += self.bytes[i as usize] as u64; } log::debug!( "Read from address 0x{:x} with bitwidth {} and value 0x{:x}", address, bitwidth, result ); Some(Value::from_explicit_type(result_type, result)) } } #[derive(Default, Clone)] pub struct FunctionType { pub params: Vec<PrimitiveType>, pub returns: Vec<PrimitiveType>, } impl FunctionType { pub fn new(params: Vec<PrimitiveType>, returns: Vec<PrimitiveType>) -> Self { Self { params, returns } } pub fn num_params(&self) -> usize { self.params.len() } pub fn params_iter(&self) -> std::slice::Iter<PrimitiveType> { self.params.iter() } } pub enum Export { Function(usize), Table(usize), Memory(usize), Global(usize), } #[derive(Default)] pub struct Module { function_types: Vec<FunctionType>, functions: Vec<Function>, exports: HashMap<String, Export>, table: Table, memory: Memory, globals: Vec<Value>, } impl Module { pub fn new() -> Self { Self::default() } pub fn call(&mut self, function_name: &str, args: Vec<Value>) -> Result<Value, Error> { let function_index = match self.exports.get(function_name) { Some(Export::Function(n)) => *n, _ => return Err(Error::Misc("On module call, given name is not a function")), }; let function = match self.functions.get(function_index) { Some(n) => n, None => { return Err(Error::Misc( "Function index given by export section is not valid", )) } }; function.call(&self.functions, &mut self.memory, args) } pub fn add_function_type(&mut self, ft: FunctionType) { self.function_types.push(ft); } pub fn get_function_type(&self, i: usize) -> FunctionType { self.function_types[i].clone() } pub fn add_function(&mut self, f: Function) { 
self.functions.push(f); } pub fn add_memory(&mut self, m: Memory) { self.memory = m; } pub fn add_export(&mut self, name: String, export: Export) -> Result<(), Error> { if self.exports.contains_key(&name) { return Err(Error::UnexpectedData("Expected a unique export name")); } self.exports.insert(name, export); Ok(()) } pub fn get_mut_function(&mut self, i: usize) -> &mut Function { &mut self.functions[i] } }
{ unsafe { self.v.i32 } }
identifier_body
main.go
package main import ( "bufio" "bytes" "crypto/sha256" "encoding/csv" "encoding/gob" "encoding/hex" "flag" "fmt" "io" "io/ioutil" "log" "math" "os" "os/exec" "os/user" "path" "regexp" "sort" "strconv" "strings" "time" "github.com/boltdb/bolt" "github.com/eiannone/keyboard" "github.com/fatih/color" "github.com/jbrukh/bayesian" "github.com/manishrjain/keys" "github.com/pkg/errors" mathex "github.com/pkg/math" yaml "gopkg.in/yaml.v2" ) func homeDir() string { currentUser, err := user.Current() if err != nil { return "" } return currentUser.HomeDir } var ( debug = flag.Bool("debug", false, "Additional debug information if set.") journal = flag.String("j", "", "Existing journal to learn from.") output = flag.String("o", "out.ldg", "Journal file to write to.") csvFile = flag.String("csv", "", "File path of CSV file containing new transactions.") account = flag.String("a", "", "Name of bank account transactions belong to.") currency = flag.String("c", "", "Set currency if any.") ignore = flag.String("ic", "", "Comma separated list of columns to ignore in CSV.") dateFormat = flag.String("d", "01/02/2006", "Express your date format in numeric form w.r.t. Jan 02, 2006, separated by slashes (/). 
See: https://golang.org/pkg/time/") skip = flag.Int("s", 0, "Number of header lines in CSV to skip") configDir = flag.String("conf", homeDir()+"/.into-ledger", "Config directory to store various into-ledger configs in.") shortcuts = flag.String("short", "shortcuts.yaml", "Name of shortcuts file.") inverseSign = flag.Bool("inverseSign", false, "Inverse sign of transaction amounts in CSV.") reverseCSV = flag.Bool("reverseCSV", false, "Reverse order of transactions in CSV") allowDups = flag.Bool("allowDups", false, "Don't filter out duplicate transactions") tfidf = flag.Bool("tfidf", false, "Use TF-IDF classification algorithm instead of Bayesian") rtxn = regexp.MustCompile(`(\d{4}/\d{2}/\d{2})[\W]*(\w.*)`) rto = regexp.MustCompile(`\W*([:\w]+)(.*)`) rfrom = regexp.MustCompile(`\W*([:\w]+).*`) rcur = regexp.MustCompile(`(\d+\.\d+|\d+)`) racc = regexp.MustCompile(`^account[\W]+(.*)`) ralias = regexp.MustCompile(`\balias\s(.*)`) stamp = "2006/01/02" bucketName = []byte("txns") descLength = 40 catLength = 20 short *keys.Shortcuts ) type accountFlags struct { flags map[string]string } type configs struct { Accounts map[string]map[string]string // account and the corresponding config. } type txn struct { Date time.Time Desc string To string From string Cur float64 CurName string Key []byte skipClassification bool Done bool } type byTime []txn func (b byTime) Len() int { return len(b) } func (b byTime) Less(i int, j int) bool { return b[i].Date.Before(b[j].Date) } func (b byTime) Swap(i int, j int) { b[i], b[j] = b[j], b[i] } func checkf(err error, format string, args ...interface{}) { if err != nil { log.Printf(format, args...) log.Println() log.Fatalf("%+v", errors.WithStack(err)) } } func assertf(ok bool, format string, args ...interface{}) { if !ok { log.Printf(format, args...) 
log.Println() log.Fatalf("%+v", errors.Errorf("Should be true, but is false")) } } func assignForAccount(account string) { tree := strings.Split(account, ":") assertf(len(tree) > 0, "Expected at least one result. Found none for: %v", account) short.AutoAssign(tree[0], "default") prev := tree[0] for _, c := range tree[1:] { if len(c) == 0 { continue } short.AutoAssign(c, prev) prev = c } } type parser struct { db *bolt.DB data []byte txns []txn classes []bayesian.Class cl *bayesian.Classifier accounts []string } func (p *parser) parseTransactions() { out, err := exec.Command("ledger", "-f", *journal, "csv").Output() checkf(err, "Unable to convert journal to csv. Possibly an issue with your ledger installation.") r := csv.NewReader(newConverter(bytes.NewReader(out))) var t txn for { cols, err := r.Read() if err == io.EOF { break } checkf(err, "Unable to read a csv line.") t = txn{} t.Date, err = time.Parse(stamp, cols[0]) checkf(err, "Unable to parse time: %v", cols[0]) t.Desc = strings.Trim(cols[2], " \n\t") t.To = cols[3] assertf(len(t.To) > 0, "Expected TO, found empty.") if strings.HasPrefix(t.To, "Equity:") { // Don't pick up Equity. 
t.skipClassification = true } t.CurName = cols[4] t.Cur, err = strconv.ParseFloat(cols[5], 64) checkf(err, "Unable to parse amount.") p.txns = append(p.txns, t) assignForAccount(t.To) } } func (p *parser) parseAccounts() { s := bufio.NewScanner(bytes.NewReader(p.data)) var acc string for s.Scan() { m := racc.FindStringSubmatch(s.Text()) if len(m) < 2 { continue } acc = m[1] if len(acc) == 0 { continue } p.accounts = append(p.accounts, acc) assignForAccount(acc) } } func (p *parser) generateClasses() { p.classes = make([]bayesian.Class, 0, 10) tomap := make(map[string]bool) for _, t := range p.txns { if t.skipClassification { continue } tomap[t.To] = true } for _, a := range p.accounts { tomap[a] = true } // remove this account as it would appear in many relevant transactions delete(tomap, *account) for to := range tomap { p.classes = append(p.classes, bayesian.Class(to)) } assertf(len(p.classes) > 1, "Expected some categories. Found none.") if *tfidf { p.cl = bayesian.NewClassifierTfIdf(p.classes...) } else { p.cl = bayesian.NewClassifier(p.classes...) } assertf(p.cl != nil, "Expected a valid classifier. Found nil.") for _, t := range p.txns { if _, has := tomap[t.To]; !has { continue } p.cl.Learn(t.getTerms(), bayesian.Class(t.To)) } if *tfidf { p.cl.ConvertTermsFreqToTfIdf() } }
score float64 pos int } type byScore []pair func (b byScore) Len() int { return len(b) } func (b byScore) Less(i int, j int) bool { return b[i].score > b[j].score } func (b byScore) Swap(i int, j int) { b[i], b[j] = b[j], b[i] } var trimWhitespace = regexp.MustCompile(`^[\s]+|[\s}]+$`) var dedupWhitespace = regexp.MustCompile(`[\s]{2,}`) func (t *txn) isFromJournal() bool { return t.Key == nil } func (t *txn) getTerms() []string { desc := strings.ToUpper(t.Desc) desc = trimWhitespace.ReplaceAllString(desc, "") desc = dedupWhitespace.ReplaceAllString(desc, " ") terms := strings.Split(desc, " ") terms = append(terms, "FullDesc: "+desc) var cur float64 if t.isFromJournal() { cur = t.Cur } else { cur = -t.Cur // we are looking for the opposite } var kind string if cur >= 0 { kind = "credit" } else { kind = "debit" } terms = append(terms, "Kind: "+kind) terms = append(terms, "AmountClassFine: "+strconv.Itoa(getAmountClassFine(cur))) terms = append(terms, "AmountClassCoarse: "+strconv.Itoa(getAmountClassCoarse(cur))) if *debug { fmt.Printf("getTerms(%s, %.2f) = %v\n", t.Desc, t.Cur, terms) } return terms } func getAmountClassFine(amount float64) int { if amount == 0 { return 0 } log := math.Round(math.Log10(math.Abs(amount)) * 4) class := int(math.Round(math.Pow(10, log/4))) return class } func getAmountClassCoarse(amount float64) int { if amount == 0 { return 0 } log := int(math.Ceil(math.Log10(math.Abs(amount)))) class := int(math.Round(math.Pow10(log))) return class } func (p *parser) topHits(t *txn) []bayesian.Class { terms := t.getTerms() scores, _, _ := p.cl.LogScores(terms) pairs := make([]pair, 0, len(scores)) var mean, stddev float64 for pos, score := range scores { pairs = append(pairs, pair{score, pos}) mean += score } mean /= float64(len(scores)) for _, score := range scores { stddev += math.Pow(score-mean, 2) } stddev /= float64(len(scores) - 1) stddev = math.Sqrt(stddev) if *debug { fmt.Printf("stddev=%f\n", stddev) } sort.Sort(byScore(pairs)) result := 
make([]bayesian.Class, 0, 5) last := pairs[0].score for i := 0; i < mathex.Min(10, len(pairs)); i++ { pr := pairs[i] if math.Abs(pr.score-last) > stddev { break } if *debug { fmt.Printf("i=%d s=%.3g Class=%v\n", i, pr.score, p.classes[pr.pos]) } result = append(result, p.classes[pr.pos]) last = pr.score } return result } func includeAll(dir string, data []byte) []byte { final := make([]byte, len(data)) copy(final, data) b := bytes.NewBuffer(data) s := bufio.NewScanner(b) for s.Scan() { line := s.Text() if !strings.HasPrefix(line, "include ") { continue } fname := strings.Trim(line[8:], " \n") include, err := ioutil.ReadFile(path.Join(dir, fname)) checkf(err, "Unable to read file: %v", fname) final = append(final, include...) } return final } func parseDate(col string) (time.Time, bool) { tm, err := time.Parse(*dateFormat, col) if err == nil { return tm, true } return time.Time{}, false } func parseCurrency(col string) (float64, bool) { f, err := strconv.ParseFloat(col, 64) return f, err == nil } func parseDescription(col string) (string, bool) { return strings.Map(func(r rune) rune { if r == '"' { return -1 } return r }, col), true } func (p *parser) parseTransactionsFromCSV(in []byte) []txn { ignored := make(map[int]bool) if len(*ignore) > 0 { for _, i := range strings.Split(*ignore, ",") { pos, err := strconv.Atoi(i) checkf(err, "Unable to convert to integer: %v", i) ignored[pos] = true } } result := make([]txn, 0, 100) r := csv.NewReader(bytes.NewReader(in)) var t txn var skipped int for { t = txn{} cols, err := r.Read() if err == io.EOF { break } checkf(err, "Unable to read line: %v", strings.Join(cols, ", ")) if *skip > skipped { skipped++ continue } var picked []string for i, col := range cols { if ignored[i] { continue } picked = append(picked, col) if date, ok := parseDate(col); ok { t.Date = date } else if f, ok := parseCurrency(col); ok { if *inverseSign { f = -f } t.Cur = f } else if d, ok := parseDescription(col); ok { t.Desc = d } } if len(t.Desc) != 0 
&& !t.Date.IsZero() && t.Cur != 0.0 { y, m, d := t.Date.Year(), t.Date.Month(), t.Date.Day() t.Date = time.Date(y, m, d, 0, 0, 0, 0, time.UTC) // Have a unique key for each transaction in CSV, so we can uniquely identify and // persist them as we modify their category. hash := sha256.New() fmt.Fprintf(hash, "%s\t%s\t%.2f", t.Date.Format(stamp), t.Desc, t.Cur) t.Key = hash.Sum(nil) // check if it was reconciled before (in case we are restarted after a crash) p.db.View(func(tx *bolt.Tx) error { b := tx.Bucket(bucketName) v := b.Get(t.Key) if v != nil { dec := gob.NewDecoder(bytes.NewBuffer(v)) var td txn if err := dec.Decode(&td); err == nil { if t.Cur < 0 { t.To = td.To } else { t.From = td.From } t.Done = true } } return nil }) result = append(result, t) } else { fmt.Println() fmt.Printf("ERROR : Unable to parse transaction from the selected columns in CSV.\n") fmt.Printf("Selected CSV : %v\n", strings.Join(picked, ", ")) fmt.Printf("Parsed Date : %v\n", t.Date) fmt.Printf("Parsed Desc : %v\n", t.Desc) fmt.Printf("Parsed Currency : %v\n", t.Cur) log.Fatalln("Please ensure that the above CSV contains ALL the 3 required fields.") } } return result } func assignFor(opt string, cl bayesian.Class, keys map[rune]string) bool { for i := 0; i < len(opt); i++ { ch := rune(opt[i]) if _, has := keys[ch]; !has { keys[ch] = string(cl) return true } } return false } func setDefaultMappings(ks *keys.Shortcuts) { ks.BestEffortAssign('b', ".back", "default") ks.BestEffortAssign('q', ".quit", "default") ks.BestEffortAssign('a', ".show all", "default") ks.BestEffortAssign('s', ".skip", "default") } type kv struct { key rune val string } type byVal []kv func (b byVal) Len() int { return len(b) } func (b byVal) Less(i int, j int) bool { return b[i].val < b[j].val } func (b byVal) Swap(i int, j int) { b[i], b[j] = b[j], b[i] } func singleCharMode() { // disable input buffering exec.Command("stty", "-F", "/dev/tty", "cbreak", "min", "1").Run() // do not display entered characters on the 
screen exec.Command("stty", "-F", "/dev/tty", "-echo").Run() } func saneMode() { exec.Command("stty", "-F", "/dev/tty", "sane").Run() } func getCategory(t txn) (prefix, cat string) { if t.Cur > 0 { prefix = "[FROM]" cat = t.From } else { prefix = "[TO]" cat = t.To } return } func printCategory(t txn) { prefix, cat := getCategory(t) if len(cat) == 0 { return } if len(cat) > catLength { cat = cat[len(cat)-catLength:] } color.New(color.BgGreen, color.FgBlack).Printf(" %6s %-20s ", prefix, cat) } func printSummary(t txn, idx, total int) { idx++ if t.Done { color.New(color.BgGreen, color.FgBlack).Printf(" R ") } else { color.New(color.BgRed, color.FgWhite).Printf(" N ") } if total > 999 { color.New(color.BgBlue, color.FgWhite).Printf(" [%4d of %4d] ", idx, total) } else if total > 99 { color.New(color.BgBlue, color.FgWhite).Printf(" [%3d of %3d] ", idx, total) } else if total > 0 { color.New(color.BgBlue, color.FgWhite).Printf(" [%2d of %2d] ", idx, total) } else if total == 0 { // A bit of a hack, but will do. color.New(color.BgBlue, color.FgWhite).Printf(" [DUPLICATE] ") } else { log.Fatalf("Unhandled case for total: %v", total) } color.New(color.BgYellow, color.FgBlack).Printf(" %10s ", t.Date.Format(stamp)) desc := t.Desc if len(desc) > descLength { desc = desc[:descLength] } color.New(color.BgWhite, color.FgBlack).Printf(" %-40s", desc) // descLength used in Printf. 
printCategory(t) color.New(color.BgRed, color.FgWhite).Printf(" %9.2f %3s ", t.Cur, t.CurName) if *debug { fmt.Printf(" hash: %s", hex.EncodeToString(t.Key)) } fmt.Println() } func clear() { cmd := exec.Command("clear") cmd.Stdout = os.Stdout cmd.Run() fmt.Println() } func (p *parser) writeToDB(t txn) { if err := p.db.Update(func(tx *bolt.Tx) error { b := tx.Bucket(bucketName) var val bytes.Buffer enc := gob.NewEncoder(&val) checkf(enc.Encode(t), "Unable to encode txn: %v", t) return b.Put(t.Key, val.Bytes()) }); err != nil { log.Fatalf("Write to db failed with error: %v", err) } } func (p *parser) printAndGetResult(ks keys.Shortcuts, t *txn) int { label := "default" var repeat bool var category []string LOOP: if len(category) > 0 { fmt.Println() color.New(color.BgWhite, color.FgBlack).Printf("Selected [%s]", strings.Join(category, ":")) // descLength used in Printf. fmt.Println() } ks.Print(label, false) ch, key, _ := keyboard.GetSingleKey() if ch == 0 && key == keyboard.KeyEnter && len(t.To) > 0 && len(t.From) > 0 { t.Done = true p.writeToDB(*t) if repeat { return 0 } return 1 } if opt, has := ks.MapsTo(ch, label); has { switch opt { case ".back": return -1 case ".skip": t.Done = false p.db.Update(func(tx *bolt.Tx) error { b := tx.Bucket(bucketName) b.Delete(t.Key) return nil }) return 1 case ".quit": return 9999 case ".show all": return math.MaxInt16 } category = append(category, opt) if t.Cur > 0 { t.From = strings.Join(category, ":") } else { t.To = strings.Join(category, ":") } label = opt if ks.HasLabel(label) { repeat = true goto LOOP } } return 0 } func (p *parser) printTxn(t *txn, idx, total int) int { clear() printSummary(*t, idx, total) fmt.Println() if len(t.Desc) > descLength { color.New(color.BgWhite, color.FgBlack).Printf("%6s %s ", "[DESC]", t.Desc) // descLength used in Printf. 
fmt.Println() } { prefix, cat := getCategory(*t) if len(cat) > catLength { color.New(color.BgGreen, color.FgBlack).Printf("%6s %s", prefix, cat) fmt.Println() } } fmt.Println() hits := p.topHits(t) var ks keys.Shortcuts setDefaultMappings(&ks) for _, hit := range hits { ks.AutoAssign(string(hit), "default") } res := p.printAndGetResult(ks, t) if res != math.MaxInt16 { return res } clear() printSummary(*t, idx, total) res = p.printAndGetResult(*short, t) return res } func (p *parser) showAndCategorizeTxns(rtxns []txn) { txns := rtxns for { for i := 0; i < len(txns); i++ { // for i := range txns { t := &txns[i] if !t.Done { hits := p.topHits(t) if t.Cur < 0 { t.To = string(hits[0]) } else { t.From = string(hits[0]) } } printSummary(*t, i, len(txns)) } fmt.Println() fmt.Printf("Found %d transactions. Review (Y/a/n/q)? ", len(txns)) ch, _, _ := keyboard.GetSingleKey() if ch == 'q' { return } if ch == 'n' || ch == 'a' { fmt.Printf("\n\nMarking all transactions as accepted\n\n") for i := 0; i < len(txns); i++ { txns[i].Done = true p.writeToDB(txns[i]) } if ch == 'n' { return } continue } for i := 0; i < len(txns) && i >= 0; { t := &txns[i] i += p.printTxn(t, i, len(txns)) } } } func ledgerFormat(t txn) string { var b bytes.Buffer b.WriteString(fmt.Sprintf("%s %s\n", t.Date.Format(stamp), t.Desc)) b.WriteString(fmt.Sprintf("\t%-20s \t", t.To)) if len([]rune(t.CurName)) <= 1 { b.WriteString(fmt.Sprintf("%s%.2f\n", t.CurName, math.Abs(t.Cur))) } else { b.WriteString(fmt.Sprintf("%.2f %s\n", math.Abs(t.Cur), t.CurName)) } b.WriteString(fmt.Sprintf("\t%s\n\n", t.From)) return b.String() } func sanitize(a string) string { return strings.Map(func(r rune) rune { if r >= 'a' && r <= 'z' { return r } if r >= 'A' && r <= 'Z' { return r } if r >= '0' && r <= '9' { return r } switch r { case '*': fallthrough case ':': fallthrough case '/': fallthrough case '.': fallthrough case '-': return r default: return -1 } }, a) } func (p *parser) removeDuplicates(txns []txn) []txn { if 
len(txns) == 0 { return txns } sort.Stable(byTime(txns)) if *allowDups { return txns } sort.Sort(byTime(p.txns)) prev := p.txns first := txns[0].Date.Add(-24 * time.Hour) for i, t := range p.txns { if t.Date.After(first) { prev = p.txns[i:] break } } final := txns[:0] for _, t := range txns { var found bool tdesc := sanitize(t.Desc) for _, pr := range prev { if pr.Date.After(t.Date) { break } pdesc := sanitize(pr.Desc) if tdesc == pdesc && pr.Date.Equal(t.Date) && math.Abs(pr.Cur) == math.Abs(t.Cur) { printSummary(t, 0, 0) found = true break } } if !found { final = append(final, t) } } fmt.Printf("\t%d duplicates found and ignored.\n\n", len(txns)-len(final)) return final } var errc = color.New(color.BgRed, color.FgWhite).PrintfFunc() func oerr(msg string) { errc("\tERROR: " + msg + " ") fmt.Println() fmt.Println("Flags available:") flag.PrintDefaults() fmt.Println() } func reverseSlice(s []txn) { for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { s[i], s[j] = s[j], s[i] } } func main() { flag.Parse() defer saneMode() singleCharMode() checkf(os.MkdirAll(*configDir, 0755), "Unable to create directory: %v", *configDir) if len(*account) == 0 { oerr("Please specify the account transactions are coming from") return } configPath := path.Join(*configDir, "config.yaml") data, err := ioutil.ReadFile(configPath) if err == nil { var c configs checkf(yaml.Unmarshal(data, &c), "Unable to unmarshal yaml config at %v", configPath) if ac, has := c.Accounts[*account]; has { fmt.Printf("Using flags from config: %+v\n", ac) for k, v := range ac { flag.Set(k, v) } } } keyfile := path.Join(*configDir, *shortcuts) short = keys.ParseConfig(keyfile) setDefaultMappings(short) defer short.Persist(keyfile) if len(*journal) == 0 { oerr("Please specify the input ledger journal file") return } data, err = ioutil.ReadFile(*journal) checkf(err, "Unable to read file: %v", *journal) alldata := includeAll(path.Dir(*journal), data) if len(*output) == 0 { oerr("Please specify the output file") return } 
if _, err := os.Stat(*output); os.IsNotExist(err) { _, err := os.Create(*output) checkf(err, "Unable to check for output file: %v", *output) } tf := path.Join(os.TempDir(), "ledger-csv-txns") defer os.Remove(tf) db, err := bolt.Open(tf, 0600, nil) checkf(err, "Unable to open boltdb at %v", tf) defer db.Close() db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucketIfNotExists(bucketName) checkf(err, "Unable to create default bucket in boltdb.") return nil }) of, err := os.OpenFile(*output, os.O_APPEND|os.O_WRONLY, 0600) checkf(err, "Unable to open output file: %v", *output) p := parser{data: alldata, db: db} p.parseAccounts() p.parseTransactions() // Scanning done. Now train classifier. p.generateClasses() in, err := ioutil.ReadFile(*csvFile) checkf(err, "Unable to read csv file: %v", *csvFile) txns := p.parseTransactionsFromCSV(in) if *reverseCSV { reverseSlice(txns) } for i := range txns { if txns[i].Cur > 0 { txns[i].To = *account } else { txns[i].From = *account } txns[i].CurName = *currency } txns = p.removeDuplicates(txns) p.showAndCategorizeTxns(txns) _, err = of.WriteString(fmt.Sprintf("; into-ledger run at %v\n\n", time.Now())) checkf(err, "Unable to write into output file: %v", of.Name()) for _, t := range txns { if t.Done { if _, err := of.WriteString(ledgerFormat(t)); err != nil { log.Fatalf("Unable to write to output: %v", err) } } } checkf(of.Close(), "Unable to close output file: %v", of.Name()) }
type pair struct {
random_line_split
main.go
package main import ( "bufio" "bytes" "crypto/sha256" "encoding/csv" "encoding/gob" "encoding/hex" "flag" "fmt" "io" "io/ioutil" "log" "math" "os" "os/exec" "os/user" "path" "regexp" "sort" "strconv" "strings" "time" "github.com/boltdb/bolt" "github.com/eiannone/keyboard" "github.com/fatih/color" "github.com/jbrukh/bayesian" "github.com/manishrjain/keys" "github.com/pkg/errors" mathex "github.com/pkg/math" yaml "gopkg.in/yaml.v2" ) func homeDir() string { currentUser, err := user.Current() if err != nil { return "" } return currentUser.HomeDir } var ( debug = flag.Bool("debug", false, "Additional debug information if set.") journal = flag.String("j", "", "Existing journal to learn from.") output = flag.String("o", "out.ldg", "Journal file to write to.") csvFile = flag.String("csv", "", "File path of CSV file containing new transactions.") account = flag.String("a", "", "Name of bank account transactions belong to.") currency = flag.String("c", "", "Set currency if any.") ignore = flag.String("ic", "", "Comma separated list of columns to ignore in CSV.") dateFormat = flag.String("d", "01/02/2006", "Express your date format in numeric form w.r.t. Jan 02, 2006, separated by slashes (/). 
See: https://golang.org/pkg/time/") skip = flag.Int("s", 0, "Number of header lines in CSV to skip") configDir = flag.String("conf", homeDir()+"/.into-ledger", "Config directory to store various into-ledger configs in.") shortcuts = flag.String("short", "shortcuts.yaml", "Name of shortcuts file.") inverseSign = flag.Bool("inverseSign", false, "Inverse sign of transaction amounts in CSV.") reverseCSV = flag.Bool("reverseCSV", false, "Reverse order of transactions in CSV") allowDups = flag.Bool("allowDups", false, "Don't filter out duplicate transactions") tfidf = flag.Bool("tfidf", false, "Use TF-IDF classification algorithm instead of Bayesian") rtxn = regexp.MustCompile(`(\d{4}/\d{2}/\d{2})[\W]*(\w.*)`) rto = regexp.MustCompile(`\W*([:\w]+)(.*)`) rfrom = regexp.MustCompile(`\W*([:\w]+).*`) rcur = regexp.MustCompile(`(\d+\.\d+|\d+)`) racc = regexp.MustCompile(`^account[\W]+(.*)`) ralias = regexp.MustCompile(`\balias\s(.*)`) stamp = "2006/01/02" bucketName = []byte("txns") descLength = 40 catLength = 20 short *keys.Shortcuts ) type accountFlags struct { flags map[string]string } type configs struct { Accounts map[string]map[string]string // account and the corresponding config. } type txn struct { Date time.Time Desc string To string From string Cur float64 CurName string Key []byte skipClassification bool Done bool } type byTime []txn func (b byTime) Len() int { return len(b) } func (b byTime) Less(i int, j int) bool { return b[i].Date.Before(b[j].Date) } func (b byTime) Swap(i int, j int) { b[i], b[j] = b[j], b[i] } func checkf(err error, format string, args ...interface{}) { if err != nil { log.Printf(format, args...) log.Println() log.Fatalf("%+v", errors.WithStack(err)) } } func assertf(ok bool, format string, args ...interface{}) { if !ok { log.Printf(format, args...) 
log.Println() log.Fatalf("%+v", errors.Errorf("Should be true, but is false")) } } func assignForAccount(account string) { tree := strings.Split(account, ":") assertf(len(tree) > 0, "Expected at least one result. Found none for: %v", account) short.AutoAssign(tree[0], "default") prev := tree[0] for _, c := range tree[1:] { if len(c) == 0 { continue } short.AutoAssign(c, prev) prev = c } } type parser struct { db *bolt.DB data []byte txns []txn classes []bayesian.Class cl *bayesian.Classifier accounts []string } func (p *parser) parseTransactions() { out, err := exec.Command("ledger", "-f", *journal, "csv").Output() checkf(err, "Unable to convert journal to csv. Possibly an issue with your ledger installation.") r := csv.NewReader(newConverter(bytes.NewReader(out))) var t txn for { cols, err := r.Read() if err == io.EOF { break } checkf(err, "Unable to read a csv line.") t = txn{} t.Date, err = time.Parse(stamp, cols[0]) checkf(err, "Unable to parse time: %v", cols[0]) t.Desc = strings.Trim(cols[2], " \n\t") t.To = cols[3] assertf(len(t.To) > 0, "Expected TO, found empty.") if strings.HasPrefix(t.To, "Equity:") { // Don't pick up Equity. 
t.skipClassification = true } t.CurName = cols[4] t.Cur, err = strconv.ParseFloat(cols[5], 64) checkf(err, "Unable to parse amount.") p.txns = append(p.txns, t) assignForAccount(t.To) } } func (p *parser) parseAccounts() { s := bufio.NewScanner(bytes.NewReader(p.data)) var acc string for s.Scan() { m := racc.FindStringSubmatch(s.Text()) if len(m) < 2 { continue } acc = m[1] if len(acc) == 0 { continue } p.accounts = append(p.accounts, acc) assignForAccount(acc) } } func (p *parser) generateClasses() { p.classes = make([]bayesian.Class, 0, 10) tomap := make(map[string]bool) for _, t := range p.txns { if t.skipClassification { continue } tomap[t.To] = true } for _, a := range p.accounts { tomap[a] = true } // remove this account as it would appear in many relevant transactions delete(tomap, *account) for to := range tomap { p.classes = append(p.classes, bayesian.Class(to)) } assertf(len(p.classes) > 1, "Expected some categories. Found none.") if *tfidf { p.cl = bayesian.NewClassifierTfIdf(p.classes...) } else { p.cl = bayesian.NewClassifier(p.classes...) } assertf(p.cl != nil, "Expected a valid classifier. 
Found nil.") for _, t := range p.txns { if _, has := tomap[t.To]; !has { continue } p.cl.Learn(t.getTerms(), bayesian.Class(t.To)) } if *tfidf { p.cl.ConvertTermsFreqToTfIdf() } } type pair struct { score float64 pos int } type byScore []pair func (b byScore) Len() int { return len(b) } func (b byScore) Less(i int, j int) bool { return b[i].score > b[j].score } func (b byScore) Swap(i int, j int) { b[i], b[j] = b[j], b[i] } var trimWhitespace = regexp.MustCompile(`^[\s]+|[\s}]+$`) var dedupWhitespace = regexp.MustCompile(`[\s]{2,}`) func (t *txn) isFromJournal() bool { return t.Key == nil } func (t *txn) getTerms() []string { desc := strings.ToUpper(t.Desc) desc = trimWhitespace.ReplaceAllString(desc, "") desc = dedupWhitespace.ReplaceAllString(desc, " ") terms := strings.Split(desc, " ") terms = append(terms, "FullDesc: "+desc) var cur float64 if t.isFromJournal() { cur = t.Cur } else { cur = -t.Cur // we are looking for the opposite } var kind string if cur >= 0 { kind = "credit" } else { kind = "debit" } terms = append(terms, "Kind: "+kind) terms = append(terms, "AmountClassFine: "+strconv.Itoa(getAmountClassFine(cur))) terms = append(terms, "AmountClassCoarse: "+strconv.Itoa(getAmountClassCoarse(cur))) if *debug { fmt.Printf("getTerms(%s, %.2f) = %v\n", t.Desc, t.Cur, terms) } return terms } func getAmountClassFine(amount float64) int { if amount == 0 { return 0 } log := math.Round(math.Log10(math.Abs(amount)) * 4) class := int(math.Round(math.Pow(10, log/4))) return class } func getAmountClassCoarse(amount float64) int { if amount == 0 { return 0 } log := int(math.Ceil(math.Log10(math.Abs(amount)))) class := int(math.Round(math.Pow10(log))) return class } func (p *parser) topHits(t *txn) []bayesian.Class { terms := t.getTerms() scores, _, _ := p.cl.LogScores(terms) pairs := make([]pair, 0, len(scores)) var mean, stddev float64 for pos, score := range scores { pairs = append(pairs, pair{score, pos}) mean += score } mean /= float64(len(scores)) for _, score := 
range scores { stddev += math.Pow(score-mean, 2) } stddev /= float64(len(scores) - 1) stddev = math.Sqrt(stddev) if *debug { fmt.Printf("stddev=%f\n", stddev) } sort.Sort(byScore(pairs)) result := make([]bayesian.Class, 0, 5) last := pairs[0].score for i := 0; i < mathex.Min(10, len(pairs)); i++ { pr := pairs[i] if math.Abs(pr.score-last) > stddev { break } if *debug { fmt.Printf("i=%d s=%.3g Class=%v\n", i, pr.score, p.classes[pr.pos]) } result = append(result, p.classes[pr.pos]) last = pr.score } return result } func includeAll(dir string, data []byte) []byte { final := make([]byte, len(data)) copy(final, data) b := bytes.NewBuffer(data) s := bufio.NewScanner(b) for s.Scan() { line := s.Text() if !strings.HasPrefix(line, "include ") { continue } fname := strings.Trim(line[8:], " \n") include, err := ioutil.ReadFile(path.Join(dir, fname)) checkf(err, "Unable to read file: %v", fname) final = append(final, include...) } return final } func parseDate(col string) (time.Time, bool) { tm, err := time.Parse(*dateFormat, col) if err == nil { return tm, true } return time.Time{}, false } func parseCurrency(col string) (float64, bool) { f, err := strconv.ParseFloat(col, 64) return f, err == nil } func parseDescription(col string) (string, bool) { return strings.Map(func(r rune) rune { if r == '"' { return -1 } return r }, col), true } func (p *parser) parseTransactionsFromCSV(in []byte) []txn { ignored := make(map[int]bool) if len(*ignore) > 0 { for _, i := range strings.Split(*ignore, ",") { pos, err := strconv.Atoi(i) checkf(err, "Unable to convert to integer: %v", i) ignored[pos] = true } } result := make([]txn, 0, 100) r := csv.NewReader(bytes.NewReader(in)) var t txn var skipped int for { t = txn{} cols, err := r.Read() if err == io.EOF { break } checkf(err, "Unable to read line: %v", strings.Join(cols, ", ")) if *skip > skipped { skipped++ continue } var picked []string for i, col := range cols { if ignored[i] { continue } picked = append(picked, col) if date, ok := 
parseDate(col); ok { t.Date = date } else if f, ok := parseCurrency(col); ok { if *inverseSign { f = -f } t.Cur = f } else if d, ok := parseDescription(col); ok { t.Desc = d } } if len(t.Desc) != 0 && !t.Date.IsZero() && t.Cur != 0.0 { y, m, d := t.Date.Year(), t.Date.Month(), t.Date.Day() t.Date = time.Date(y, m, d, 0, 0, 0, 0, time.UTC) // Have a unique key for each transaction in CSV, so we can uniquely identify and // persist them as we modify their category. hash := sha256.New() fmt.Fprintf(hash, "%s\t%s\t%.2f", t.Date.Format(stamp), t.Desc, t.Cur) t.Key = hash.Sum(nil) // check if it was reconciled before (in case we are restarted after a crash) p.db.View(func(tx *bolt.Tx) error { b := tx.Bucket(bucketName) v := b.Get(t.Key) if v != nil { dec := gob.NewDecoder(bytes.NewBuffer(v)) var td txn if err := dec.Decode(&td); err == nil { if t.Cur < 0 { t.To = td.To } else { t.From = td.From } t.Done = true } } return nil }) result = append(result, t) } else { fmt.Println() fmt.Printf("ERROR : Unable to parse transaction from the selected columns in CSV.\n") fmt.Printf("Selected CSV : %v\n", strings.Join(picked, ", ")) fmt.Printf("Parsed Date : %v\n", t.Date) fmt.Printf("Parsed Desc : %v\n", t.Desc) fmt.Printf("Parsed Currency : %v\n", t.Cur) log.Fatalln("Please ensure that the above CSV contains ALL the 3 required fields.") } } return result } func assignFor(opt string, cl bayesian.Class, keys map[rune]string) bool { for i := 0; i < len(opt); i++ { ch := rune(opt[i]) if _, has := keys[ch]; !has { keys[ch] = string(cl) return true } } return false } func setDefaultMappings(ks *keys.Shortcuts) { ks.BestEffortAssign('b', ".back", "default") ks.BestEffortAssign('q', ".quit", "default") ks.BestEffortAssign('a', ".show all", "default") ks.BestEffortAssign('s', ".skip", "default") } type kv struct { key rune val string } type byVal []kv func (b byVal) Len() int { return len(b) } func (b byVal) Less(i int, j int) bool { return b[i].val < b[j].val } func (b byVal) Swap(i int, 
j int) { b[i], b[j] = b[j], b[i] } func singleCharMode() { // disable input buffering exec.Command("stty", "-F", "/dev/tty", "cbreak", "min", "1").Run() // do not display entered characters on the screen exec.Command("stty", "-F", "/dev/tty", "-echo").Run() } func saneMode() { exec.Command("stty", "-F", "/dev/tty", "sane").Run() } func
(t txn) (prefix, cat string) { if t.Cur > 0 { prefix = "[FROM]" cat = t.From } else { prefix = "[TO]" cat = t.To } return } func printCategory(t txn) { prefix, cat := getCategory(t) if len(cat) == 0 { return } if len(cat) > catLength { cat = cat[len(cat)-catLength:] } color.New(color.BgGreen, color.FgBlack).Printf(" %6s %-20s ", prefix, cat) } func printSummary(t txn, idx, total int) { idx++ if t.Done { color.New(color.BgGreen, color.FgBlack).Printf(" R ") } else { color.New(color.BgRed, color.FgWhite).Printf(" N ") } if total > 999 { color.New(color.BgBlue, color.FgWhite).Printf(" [%4d of %4d] ", idx, total) } else if total > 99 { color.New(color.BgBlue, color.FgWhite).Printf(" [%3d of %3d] ", idx, total) } else if total > 0 { color.New(color.BgBlue, color.FgWhite).Printf(" [%2d of %2d] ", idx, total) } else if total == 0 { // A bit of a hack, but will do. color.New(color.BgBlue, color.FgWhite).Printf(" [DUPLICATE] ") } else { log.Fatalf("Unhandled case for total: %v", total) } color.New(color.BgYellow, color.FgBlack).Printf(" %10s ", t.Date.Format(stamp)) desc := t.Desc if len(desc) > descLength { desc = desc[:descLength] } color.New(color.BgWhite, color.FgBlack).Printf(" %-40s", desc) // descLength used in Printf. 
printCategory(t) color.New(color.BgRed, color.FgWhite).Printf(" %9.2f %3s ", t.Cur, t.CurName) if *debug { fmt.Printf(" hash: %s", hex.EncodeToString(t.Key)) } fmt.Println() } func clear() { cmd := exec.Command("clear") cmd.Stdout = os.Stdout cmd.Run() fmt.Println() } func (p *parser) writeToDB(t txn) { if err := p.db.Update(func(tx *bolt.Tx) error { b := tx.Bucket(bucketName) var val bytes.Buffer enc := gob.NewEncoder(&val) checkf(enc.Encode(t), "Unable to encode txn: %v", t) return b.Put(t.Key, val.Bytes()) }); err != nil { log.Fatalf("Write to db failed with error: %v", err) } } func (p *parser) printAndGetResult(ks keys.Shortcuts, t *txn) int { label := "default" var repeat bool var category []string LOOP: if len(category) > 0 { fmt.Println() color.New(color.BgWhite, color.FgBlack).Printf("Selected [%s]", strings.Join(category, ":")) // descLength used in Printf. fmt.Println() } ks.Print(label, false) ch, key, _ := keyboard.GetSingleKey() if ch == 0 && key == keyboard.KeyEnter && len(t.To) > 0 && len(t.From) > 0 { t.Done = true p.writeToDB(*t) if repeat { return 0 } return 1 } if opt, has := ks.MapsTo(ch, label); has { switch opt { case ".back": return -1 case ".skip": t.Done = false p.db.Update(func(tx *bolt.Tx) error { b := tx.Bucket(bucketName) b.Delete(t.Key) return nil }) return 1 case ".quit": return 9999 case ".show all": return math.MaxInt16 } category = append(category, opt) if t.Cur > 0 { t.From = strings.Join(category, ":") } else { t.To = strings.Join(category, ":") } label = opt if ks.HasLabel(label) { repeat = true goto LOOP } } return 0 } func (p *parser) printTxn(t *txn, idx, total int) int { clear() printSummary(*t, idx, total) fmt.Println() if len(t.Desc) > descLength { color.New(color.BgWhite, color.FgBlack).Printf("%6s %s ", "[DESC]", t.Desc) // descLength used in Printf. 
fmt.Println() } { prefix, cat := getCategory(*t) if len(cat) > catLength { color.New(color.BgGreen, color.FgBlack).Printf("%6s %s", prefix, cat) fmt.Println() } } fmt.Println() hits := p.topHits(t) var ks keys.Shortcuts setDefaultMappings(&ks) for _, hit := range hits { ks.AutoAssign(string(hit), "default") } res := p.printAndGetResult(ks, t) if res != math.MaxInt16 { return res } clear() printSummary(*t, idx, total) res = p.printAndGetResult(*short, t) return res } func (p *parser) showAndCategorizeTxns(rtxns []txn) { txns := rtxns for { for i := 0; i < len(txns); i++ { // for i := range txns { t := &txns[i] if !t.Done { hits := p.topHits(t) if t.Cur < 0 { t.To = string(hits[0]) } else { t.From = string(hits[0]) } } printSummary(*t, i, len(txns)) } fmt.Println() fmt.Printf("Found %d transactions. Review (Y/a/n/q)? ", len(txns)) ch, _, _ := keyboard.GetSingleKey() if ch == 'q' { return } if ch == 'n' || ch == 'a' { fmt.Printf("\n\nMarking all transactions as accepted\n\n") for i := 0; i < len(txns); i++ { txns[i].Done = true p.writeToDB(txns[i]) } if ch == 'n' { return } continue } for i := 0; i < len(txns) && i >= 0; { t := &txns[i] i += p.printTxn(t, i, len(txns)) } } } func ledgerFormat(t txn) string { var b bytes.Buffer b.WriteString(fmt.Sprintf("%s %s\n", t.Date.Format(stamp), t.Desc)) b.WriteString(fmt.Sprintf("\t%-20s \t", t.To)) if len([]rune(t.CurName)) <= 1 { b.WriteString(fmt.Sprintf("%s%.2f\n", t.CurName, math.Abs(t.Cur))) } else { b.WriteString(fmt.Sprintf("%.2f %s\n", math.Abs(t.Cur), t.CurName)) } b.WriteString(fmt.Sprintf("\t%s\n\n", t.From)) return b.String() } func sanitize(a string) string { return strings.Map(func(r rune) rune { if r >= 'a' && r <= 'z' { return r } if r >= 'A' && r <= 'Z' { return r } if r >= '0' && r <= '9' { return r } switch r { case '*': fallthrough case ':': fallthrough case '/': fallthrough case '.': fallthrough case '-': return r default: return -1 } }, a) } func (p *parser) removeDuplicates(txns []txn) []txn { if 
len(txns) == 0 { return txns } sort.Stable(byTime(txns)) if *allowDups { return txns } sort.Sort(byTime(p.txns)) prev := p.txns first := txns[0].Date.Add(-24 * time.Hour) for i, t := range p.txns { if t.Date.After(first) { prev = p.txns[i:] break } } final := txns[:0] for _, t := range txns { var found bool tdesc := sanitize(t.Desc) for _, pr := range prev { if pr.Date.After(t.Date) { break } pdesc := sanitize(pr.Desc) if tdesc == pdesc && pr.Date.Equal(t.Date) && math.Abs(pr.Cur) == math.Abs(t.Cur) { printSummary(t, 0, 0) found = true break } } if !found { final = append(final, t) } } fmt.Printf("\t%d duplicates found and ignored.\n\n", len(txns)-len(final)) return final } var errc = color.New(color.BgRed, color.FgWhite).PrintfFunc() func oerr(msg string) { errc("\tERROR: " + msg + " ") fmt.Println() fmt.Println("Flags available:") flag.PrintDefaults() fmt.Println() } func reverseSlice(s []txn) { for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { s[i], s[j] = s[j], s[i] } } func main() { flag.Parse() defer saneMode() singleCharMode() checkf(os.MkdirAll(*configDir, 0755), "Unable to create directory: %v", *configDir) if len(*account) == 0 { oerr("Please specify the account transactions are coming from") return } configPath := path.Join(*configDir, "config.yaml") data, err := ioutil.ReadFile(configPath) if err == nil { var c configs checkf(yaml.Unmarshal(data, &c), "Unable to unmarshal yaml config at %v", configPath) if ac, has := c.Accounts[*account]; has { fmt.Printf("Using flags from config: %+v\n", ac) for k, v := range ac { flag.Set(k, v) } } } keyfile := path.Join(*configDir, *shortcuts) short = keys.ParseConfig(keyfile) setDefaultMappings(short) defer short.Persist(keyfile) if len(*journal) == 0 { oerr("Please specify the input ledger journal file") return } data, err = ioutil.ReadFile(*journal) checkf(err, "Unable to read file: %v", *journal) alldata := includeAll(path.Dir(*journal), data) if len(*output) == 0 { oerr("Please specify the output file") return } 
if _, err := os.Stat(*output); os.IsNotExist(err) { _, err := os.Create(*output) checkf(err, "Unable to check for output file: %v", *output) } tf := path.Join(os.TempDir(), "ledger-csv-txns") defer os.Remove(tf) db, err := bolt.Open(tf, 0600, nil) checkf(err, "Unable to open boltdb at %v", tf) defer db.Close() db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucketIfNotExists(bucketName) checkf(err, "Unable to create default bucket in boltdb.") return nil }) of, err := os.OpenFile(*output, os.O_APPEND|os.O_WRONLY, 0600) checkf(err, "Unable to open output file: %v", *output) p := parser{data: alldata, db: db} p.parseAccounts() p.parseTransactions() // Scanning done. Now train classifier. p.generateClasses() in, err := ioutil.ReadFile(*csvFile) checkf(err, "Unable to read csv file: %v", *csvFile) txns := p.parseTransactionsFromCSV(in) if *reverseCSV { reverseSlice(txns) } for i := range txns { if txns[i].Cur > 0 { txns[i].To = *account } else { txns[i].From = *account } txns[i].CurName = *currency } txns = p.removeDuplicates(txns) p.showAndCategorizeTxns(txns) _, err = of.WriteString(fmt.Sprintf("; into-ledger run at %v\n\n", time.Now())) checkf(err, "Unable to write into output file: %v", of.Name()) for _, t := range txns { if t.Done { if _, err := of.WriteString(ledgerFormat(t)); err != nil { log.Fatalf("Unable to write to output: %v", err) } } } checkf(of.Close(), "Unable to close output file: %v", of.Name()) }
getCategory
identifier_name
main.go
package main import ( "bufio" "bytes" "crypto/sha256" "encoding/csv" "encoding/gob" "encoding/hex" "flag" "fmt" "io" "io/ioutil" "log" "math" "os" "os/exec" "os/user" "path" "regexp" "sort" "strconv" "strings" "time" "github.com/boltdb/bolt" "github.com/eiannone/keyboard" "github.com/fatih/color" "github.com/jbrukh/bayesian" "github.com/manishrjain/keys" "github.com/pkg/errors" mathex "github.com/pkg/math" yaml "gopkg.in/yaml.v2" ) func homeDir() string { currentUser, err := user.Current() if err != nil
return currentUser.HomeDir } var ( debug = flag.Bool("debug", false, "Additional debug information if set.") journal = flag.String("j", "", "Existing journal to learn from.") output = flag.String("o", "out.ldg", "Journal file to write to.") csvFile = flag.String("csv", "", "File path of CSV file containing new transactions.") account = flag.String("a", "", "Name of bank account transactions belong to.") currency = flag.String("c", "", "Set currency if any.") ignore = flag.String("ic", "", "Comma separated list of columns to ignore in CSV.") dateFormat = flag.String("d", "01/02/2006", "Express your date format in numeric form w.r.t. Jan 02, 2006, separated by slashes (/). See: https://golang.org/pkg/time/") skip = flag.Int("s", 0, "Number of header lines in CSV to skip") configDir = flag.String("conf", homeDir()+"/.into-ledger", "Config directory to store various into-ledger configs in.") shortcuts = flag.String("short", "shortcuts.yaml", "Name of shortcuts file.") inverseSign = flag.Bool("inverseSign", false, "Inverse sign of transaction amounts in CSV.") reverseCSV = flag.Bool("reverseCSV", false, "Reverse order of transactions in CSV") allowDups = flag.Bool("allowDups", false, "Don't filter out duplicate transactions") tfidf = flag.Bool("tfidf", false, "Use TF-IDF classification algorithm instead of Bayesian") rtxn = regexp.MustCompile(`(\d{4}/\d{2}/\d{2})[\W]*(\w.*)`) rto = regexp.MustCompile(`\W*([:\w]+)(.*)`) rfrom = regexp.MustCompile(`\W*([:\w]+).*`) rcur = regexp.MustCompile(`(\d+\.\d+|\d+)`) racc = regexp.MustCompile(`^account[\W]+(.*)`) ralias = regexp.MustCompile(`\balias\s(.*)`) stamp = "2006/01/02" bucketName = []byte("txns") descLength = 40 catLength = 20 short *keys.Shortcuts ) type accountFlags struct { flags map[string]string } type configs struct { Accounts map[string]map[string]string // account and the corresponding config. 
} type txn struct { Date time.Time Desc string To string From string Cur float64 CurName string Key []byte skipClassification bool Done bool } type byTime []txn func (b byTime) Len() int { return len(b) } func (b byTime) Less(i int, j int) bool { return b[i].Date.Before(b[j].Date) } func (b byTime) Swap(i int, j int) { b[i], b[j] = b[j], b[i] } func checkf(err error, format string, args ...interface{}) { if err != nil { log.Printf(format, args...) log.Println() log.Fatalf("%+v", errors.WithStack(err)) } } func assertf(ok bool, format string, args ...interface{}) { if !ok { log.Printf(format, args...) log.Println() log.Fatalf("%+v", errors.Errorf("Should be true, but is false")) } } func assignForAccount(account string) { tree := strings.Split(account, ":") assertf(len(tree) > 0, "Expected at least one result. Found none for: %v", account) short.AutoAssign(tree[0], "default") prev := tree[0] for _, c := range tree[1:] { if len(c) == 0 { continue } short.AutoAssign(c, prev) prev = c } } type parser struct { db *bolt.DB data []byte txns []txn classes []bayesian.Class cl *bayesian.Classifier accounts []string } func (p *parser) parseTransactions() { out, err := exec.Command("ledger", "-f", *journal, "csv").Output() checkf(err, "Unable to convert journal to csv. Possibly an issue with your ledger installation.") r := csv.NewReader(newConverter(bytes.NewReader(out))) var t txn for { cols, err := r.Read() if err == io.EOF { break } checkf(err, "Unable to read a csv line.") t = txn{} t.Date, err = time.Parse(stamp, cols[0]) checkf(err, "Unable to parse time: %v", cols[0]) t.Desc = strings.Trim(cols[2], " \n\t") t.To = cols[3] assertf(len(t.To) > 0, "Expected TO, found empty.") if strings.HasPrefix(t.To, "Equity:") { // Don't pick up Equity. 
t.skipClassification = true } t.CurName = cols[4] t.Cur, err = strconv.ParseFloat(cols[5], 64) checkf(err, "Unable to parse amount.") p.txns = append(p.txns, t) assignForAccount(t.To) } } func (p *parser) parseAccounts() { s := bufio.NewScanner(bytes.NewReader(p.data)) var acc string for s.Scan() { m := racc.FindStringSubmatch(s.Text()) if len(m) < 2 { continue } acc = m[1] if len(acc) == 0 { continue } p.accounts = append(p.accounts, acc) assignForAccount(acc) } } func (p *parser) generateClasses() { p.classes = make([]bayesian.Class, 0, 10) tomap := make(map[string]bool) for _, t := range p.txns { if t.skipClassification { continue } tomap[t.To] = true } for _, a := range p.accounts { tomap[a] = true } // remove this account as it would appear in many relevant transactions delete(tomap, *account) for to := range tomap { p.classes = append(p.classes, bayesian.Class(to)) } assertf(len(p.classes) > 1, "Expected some categories. Found none.") if *tfidf { p.cl = bayesian.NewClassifierTfIdf(p.classes...) } else { p.cl = bayesian.NewClassifier(p.classes...) } assertf(p.cl != nil, "Expected a valid classifier. 
Found nil.") for _, t := range p.txns { if _, has := tomap[t.To]; !has { continue } p.cl.Learn(t.getTerms(), bayesian.Class(t.To)) } if *tfidf { p.cl.ConvertTermsFreqToTfIdf() } } type pair struct { score float64 pos int } type byScore []pair func (b byScore) Len() int { return len(b) } func (b byScore) Less(i int, j int) bool { return b[i].score > b[j].score } func (b byScore) Swap(i int, j int) { b[i], b[j] = b[j], b[i] } var trimWhitespace = regexp.MustCompile(`^[\s]+|[\s}]+$`) var dedupWhitespace = regexp.MustCompile(`[\s]{2,}`) func (t *txn) isFromJournal() bool { return t.Key == nil } func (t *txn) getTerms() []string { desc := strings.ToUpper(t.Desc) desc = trimWhitespace.ReplaceAllString(desc, "") desc = dedupWhitespace.ReplaceAllString(desc, " ") terms := strings.Split(desc, " ") terms = append(terms, "FullDesc: "+desc) var cur float64 if t.isFromJournal() { cur = t.Cur } else { cur = -t.Cur // we are looking for the opposite } var kind string if cur >= 0 { kind = "credit" } else { kind = "debit" } terms = append(terms, "Kind: "+kind) terms = append(terms, "AmountClassFine: "+strconv.Itoa(getAmountClassFine(cur))) terms = append(terms, "AmountClassCoarse: "+strconv.Itoa(getAmountClassCoarse(cur))) if *debug { fmt.Printf("getTerms(%s, %.2f) = %v\n", t.Desc, t.Cur, terms) } return terms } func getAmountClassFine(amount float64) int { if amount == 0 { return 0 } log := math.Round(math.Log10(math.Abs(amount)) * 4) class := int(math.Round(math.Pow(10, log/4))) return class } func getAmountClassCoarse(amount float64) int { if amount == 0 { return 0 } log := int(math.Ceil(math.Log10(math.Abs(amount)))) class := int(math.Round(math.Pow10(log))) return class } func (p *parser) topHits(t *txn) []bayesian.Class { terms := t.getTerms() scores, _, _ := p.cl.LogScores(terms) pairs := make([]pair, 0, len(scores)) var mean, stddev float64 for pos, score := range scores { pairs = append(pairs, pair{score, pos}) mean += score } mean /= float64(len(scores)) for _, score := 
range scores { stddev += math.Pow(score-mean, 2) } stddev /= float64(len(scores) - 1) stddev = math.Sqrt(stddev) if *debug { fmt.Printf("stddev=%f\n", stddev) } sort.Sort(byScore(pairs)) result := make([]bayesian.Class, 0, 5) last := pairs[0].score for i := 0; i < mathex.Min(10, len(pairs)); i++ { pr := pairs[i] if math.Abs(pr.score-last) > stddev { break } if *debug { fmt.Printf("i=%d s=%.3g Class=%v\n", i, pr.score, p.classes[pr.pos]) } result = append(result, p.classes[pr.pos]) last = pr.score } return result } func includeAll(dir string, data []byte) []byte { final := make([]byte, len(data)) copy(final, data) b := bytes.NewBuffer(data) s := bufio.NewScanner(b) for s.Scan() { line := s.Text() if !strings.HasPrefix(line, "include ") { continue } fname := strings.Trim(line[8:], " \n") include, err := ioutil.ReadFile(path.Join(dir, fname)) checkf(err, "Unable to read file: %v", fname) final = append(final, include...) } return final } func parseDate(col string) (time.Time, bool) { tm, err := time.Parse(*dateFormat, col) if err == nil { return tm, true } return time.Time{}, false } func parseCurrency(col string) (float64, bool) { f, err := strconv.ParseFloat(col, 64) return f, err == nil } func parseDescription(col string) (string, bool) { return strings.Map(func(r rune) rune { if r == '"' { return -1 } return r }, col), true } func (p *parser) parseTransactionsFromCSV(in []byte) []txn { ignored := make(map[int]bool) if len(*ignore) > 0 { for _, i := range strings.Split(*ignore, ",") { pos, err := strconv.Atoi(i) checkf(err, "Unable to convert to integer: %v", i) ignored[pos] = true } } result := make([]txn, 0, 100) r := csv.NewReader(bytes.NewReader(in)) var t txn var skipped int for { t = txn{} cols, err := r.Read() if err == io.EOF { break } checkf(err, "Unable to read line: %v", strings.Join(cols, ", ")) if *skip > skipped { skipped++ continue } var picked []string for i, col := range cols { if ignored[i] { continue } picked = append(picked, col) if date, ok := 
parseDate(col); ok { t.Date = date } else if f, ok := parseCurrency(col); ok { if *inverseSign { f = -f } t.Cur = f } else if d, ok := parseDescription(col); ok { t.Desc = d } } if len(t.Desc) != 0 && !t.Date.IsZero() && t.Cur != 0.0 { y, m, d := t.Date.Year(), t.Date.Month(), t.Date.Day() t.Date = time.Date(y, m, d, 0, 0, 0, 0, time.UTC) // Have a unique key for each transaction in CSV, so we can uniquely identify and // persist them as we modify their category. hash := sha256.New() fmt.Fprintf(hash, "%s\t%s\t%.2f", t.Date.Format(stamp), t.Desc, t.Cur) t.Key = hash.Sum(nil) // check if it was reconciled before (in case we are restarted after a crash) p.db.View(func(tx *bolt.Tx) error { b := tx.Bucket(bucketName) v := b.Get(t.Key) if v != nil { dec := gob.NewDecoder(bytes.NewBuffer(v)) var td txn if err := dec.Decode(&td); err == nil { if t.Cur < 0 { t.To = td.To } else { t.From = td.From } t.Done = true } } return nil }) result = append(result, t) } else { fmt.Println() fmt.Printf("ERROR : Unable to parse transaction from the selected columns in CSV.\n") fmt.Printf("Selected CSV : %v\n", strings.Join(picked, ", ")) fmt.Printf("Parsed Date : %v\n", t.Date) fmt.Printf("Parsed Desc : %v\n", t.Desc) fmt.Printf("Parsed Currency : %v\n", t.Cur) log.Fatalln("Please ensure that the above CSV contains ALL the 3 required fields.") } } return result } func assignFor(opt string, cl bayesian.Class, keys map[rune]string) bool { for i := 0; i < len(opt); i++ { ch := rune(opt[i]) if _, has := keys[ch]; !has { keys[ch] = string(cl) return true } } return false } func setDefaultMappings(ks *keys.Shortcuts) { ks.BestEffortAssign('b', ".back", "default") ks.BestEffortAssign('q', ".quit", "default") ks.BestEffortAssign('a', ".show all", "default") ks.BestEffortAssign('s', ".skip", "default") } type kv struct { key rune val string } type byVal []kv func (b byVal) Len() int { return len(b) } func (b byVal) Less(i int, j int) bool { return b[i].val < b[j].val } func (b byVal) Swap(i int, 
j int) { b[i], b[j] = b[j], b[i] } func singleCharMode() { // disable input buffering exec.Command("stty", "-F", "/dev/tty", "cbreak", "min", "1").Run() // do not display entered characters on the screen exec.Command("stty", "-F", "/dev/tty", "-echo").Run() } func saneMode() { exec.Command("stty", "-F", "/dev/tty", "sane").Run() } func getCategory(t txn) (prefix, cat string) { if t.Cur > 0 { prefix = "[FROM]" cat = t.From } else { prefix = "[TO]" cat = t.To } return } func printCategory(t txn) { prefix, cat := getCategory(t) if len(cat) == 0 { return } if len(cat) > catLength { cat = cat[len(cat)-catLength:] } color.New(color.BgGreen, color.FgBlack).Printf(" %6s %-20s ", prefix, cat) } func printSummary(t txn, idx, total int) { idx++ if t.Done { color.New(color.BgGreen, color.FgBlack).Printf(" R ") } else { color.New(color.BgRed, color.FgWhite).Printf(" N ") } if total > 999 { color.New(color.BgBlue, color.FgWhite).Printf(" [%4d of %4d] ", idx, total) } else if total > 99 { color.New(color.BgBlue, color.FgWhite).Printf(" [%3d of %3d] ", idx, total) } else if total > 0 { color.New(color.BgBlue, color.FgWhite).Printf(" [%2d of %2d] ", idx, total) } else if total == 0 { // A bit of a hack, but will do. color.New(color.BgBlue, color.FgWhite).Printf(" [DUPLICATE] ") } else { log.Fatalf("Unhandled case for total: %v", total) } color.New(color.BgYellow, color.FgBlack).Printf(" %10s ", t.Date.Format(stamp)) desc := t.Desc if len(desc) > descLength { desc = desc[:descLength] } color.New(color.BgWhite, color.FgBlack).Printf(" %-40s", desc) // descLength used in Printf. 
printCategory(t) color.New(color.BgRed, color.FgWhite).Printf(" %9.2f %3s ", t.Cur, t.CurName) if *debug { fmt.Printf(" hash: %s", hex.EncodeToString(t.Key)) } fmt.Println() } func clear() { cmd := exec.Command("clear") cmd.Stdout = os.Stdout cmd.Run() fmt.Println() } func (p *parser) writeToDB(t txn) { if err := p.db.Update(func(tx *bolt.Tx) error { b := tx.Bucket(bucketName) var val bytes.Buffer enc := gob.NewEncoder(&val) checkf(enc.Encode(t), "Unable to encode txn: %v", t) return b.Put(t.Key, val.Bytes()) }); err != nil { log.Fatalf("Write to db failed with error: %v", err) } } func (p *parser) printAndGetResult(ks keys.Shortcuts, t *txn) int { label := "default" var repeat bool var category []string LOOP: if len(category) > 0 { fmt.Println() color.New(color.BgWhite, color.FgBlack).Printf("Selected [%s]", strings.Join(category, ":")) // descLength used in Printf. fmt.Println() } ks.Print(label, false) ch, key, _ := keyboard.GetSingleKey() if ch == 0 && key == keyboard.KeyEnter && len(t.To) > 0 && len(t.From) > 0 { t.Done = true p.writeToDB(*t) if repeat { return 0 } return 1 } if opt, has := ks.MapsTo(ch, label); has { switch opt { case ".back": return -1 case ".skip": t.Done = false p.db.Update(func(tx *bolt.Tx) error { b := tx.Bucket(bucketName) b.Delete(t.Key) return nil }) return 1 case ".quit": return 9999 case ".show all": return math.MaxInt16 } category = append(category, opt) if t.Cur > 0 { t.From = strings.Join(category, ":") } else { t.To = strings.Join(category, ":") } label = opt if ks.HasLabel(label) { repeat = true goto LOOP } } return 0 } func (p *parser) printTxn(t *txn, idx, total int) int { clear() printSummary(*t, idx, total) fmt.Println() if len(t.Desc) > descLength { color.New(color.BgWhite, color.FgBlack).Printf("%6s %s ", "[DESC]", t.Desc) // descLength used in Printf. 
fmt.Println() } { prefix, cat := getCategory(*t) if len(cat) > catLength { color.New(color.BgGreen, color.FgBlack).Printf("%6s %s", prefix, cat) fmt.Println() } } fmt.Println() hits := p.topHits(t) var ks keys.Shortcuts setDefaultMappings(&ks) for _, hit := range hits { ks.AutoAssign(string(hit), "default") } res := p.printAndGetResult(ks, t) if res != math.MaxInt16 { return res } clear() printSummary(*t, idx, total) res = p.printAndGetResult(*short, t) return res } func (p *parser) showAndCategorizeTxns(rtxns []txn) { txns := rtxns for { for i := 0; i < len(txns); i++ { // for i := range txns { t := &txns[i] if !t.Done { hits := p.topHits(t) if t.Cur < 0 { t.To = string(hits[0]) } else { t.From = string(hits[0]) } } printSummary(*t, i, len(txns)) } fmt.Println() fmt.Printf("Found %d transactions. Review (Y/a/n/q)? ", len(txns)) ch, _, _ := keyboard.GetSingleKey() if ch == 'q' { return } if ch == 'n' || ch == 'a' { fmt.Printf("\n\nMarking all transactions as accepted\n\n") for i := 0; i < len(txns); i++ { txns[i].Done = true p.writeToDB(txns[i]) } if ch == 'n' { return } continue } for i := 0; i < len(txns) && i >= 0; { t := &txns[i] i += p.printTxn(t, i, len(txns)) } } } func ledgerFormat(t txn) string { var b bytes.Buffer b.WriteString(fmt.Sprintf("%s %s\n", t.Date.Format(stamp), t.Desc)) b.WriteString(fmt.Sprintf("\t%-20s \t", t.To)) if len([]rune(t.CurName)) <= 1 { b.WriteString(fmt.Sprintf("%s%.2f\n", t.CurName, math.Abs(t.Cur))) } else { b.WriteString(fmt.Sprintf("%.2f %s\n", math.Abs(t.Cur), t.CurName)) } b.WriteString(fmt.Sprintf("\t%s\n\n", t.From)) return b.String() } func sanitize(a string) string { return strings.Map(func(r rune) rune { if r >= 'a' && r <= 'z' { return r } if r >= 'A' && r <= 'Z' { return r } if r >= '0' && r <= '9' { return r } switch r { case '*': fallthrough case ':': fallthrough case '/': fallthrough case '.': fallthrough case '-': return r default: return -1 } }, a) } func (p *parser) removeDuplicates(txns []txn) []txn { if 
len(txns) == 0 { return txns } sort.Stable(byTime(txns)) if *allowDups { return txns } sort.Sort(byTime(p.txns)) prev := p.txns first := txns[0].Date.Add(-24 * time.Hour) for i, t := range p.txns { if t.Date.After(first) { prev = p.txns[i:] break } } final := txns[:0] for _, t := range txns { var found bool tdesc := sanitize(t.Desc) for _, pr := range prev { if pr.Date.After(t.Date) { break } pdesc := sanitize(pr.Desc) if tdesc == pdesc && pr.Date.Equal(t.Date) && math.Abs(pr.Cur) == math.Abs(t.Cur) { printSummary(t, 0, 0) found = true break } } if !found { final = append(final, t) } } fmt.Printf("\t%d duplicates found and ignored.\n\n", len(txns)-len(final)) return final } var errc = color.New(color.BgRed, color.FgWhite).PrintfFunc() func oerr(msg string) { errc("\tERROR: " + msg + " ") fmt.Println() fmt.Println("Flags available:") flag.PrintDefaults() fmt.Println() } func reverseSlice(s []txn) { for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { s[i], s[j] = s[j], s[i] } } func main() { flag.Parse() defer saneMode() singleCharMode() checkf(os.MkdirAll(*configDir, 0755), "Unable to create directory: %v", *configDir) if len(*account) == 0 { oerr("Please specify the account transactions are coming from") return } configPath := path.Join(*configDir, "config.yaml") data, err := ioutil.ReadFile(configPath) if err == nil { var c configs checkf(yaml.Unmarshal(data, &c), "Unable to unmarshal yaml config at %v", configPath) if ac, has := c.Accounts[*account]; has { fmt.Printf("Using flags from config: %+v\n", ac) for k, v := range ac { flag.Set(k, v) } } } keyfile := path.Join(*configDir, *shortcuts) short = keys.ParseConfig(keyfile) setDefaultMappings(short) defer short.Persist(keyfile) if len(*journal) == 0 { oerr("Please specify the input ledger journal file") return } data, err = ioutil.ReadFile(*journal) checkf(err, "Unable to read file: %v", *journal) alldata := includeAll(path.Dir(*journal), data) if len(*output) == 0 { oerr("Please specify the output file") return } 
if _, err := os.Stat(*output); os.IsNotExist(err) { _, err := os.Create(*output) checkf(err, "Unable to check for output file: %v", *output) } tf := path.Join(os.TempDir(), "ledger-csv-txns") defer os.Remove(tf) db, err := bolt.Open(tf, 0600, nil) checkf(err, "Unable to open boltdb at %v", tf) defer db.Close() db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucketIfNotExists(bucketName) checkf(err, "Unable to create default bucket in boltdb.") return nil }) of, err := os.OpenFile(*output, os.O_APPEND|os.O_WRONLY, 0600) checkf(err, "Unable to open output file: %v", *output) p := parser{data: alldata, db: db} p.parseAccounts() p.parseTransactions() // Scanning done. Now train classifier. p.generateClasses() in, err := ioutil.ReadFile(*csvFile) checkf(err, "Unable to read csv file: %v", *csvFile) txns := p.parseTransactionsFromCSV(in) if *reverseCSV { reverseSlice(txns) } for i := range txns { if txns[i].Cur > 0 { txns[i].To = *account } else { txns[i].From = *account } txns[i].CurName = *currency } txns = p.removeDuplicates(txns) p.showAndCategorizeTxns(txns) _, err = of.WriteString(fmt.Sprintf("; into-ledger run at %v\n\n", time.Now())) checkf(err, "Unable to write into output file: %v", of.Name()) for _, t := range txns { if t.Done { if _, err := of.WriteString(ledgerFormat(t)); err != nil { log.Fatalf("Unable to write to output: %v", err) } } } checkf(of.Close(), "Unable to close output file: %v", of.Name()) }
{ return "" }
conditional_block
main.go
package main import ( "bufio" "bytes" "crypto/sha256" "encoding/csv" "encoding/gob" "encoding/hex" "flag" "fmt" "io" "io/ioutil" "log" "math" "os" "os/exec" "os/user" "path" "regexp" "sort" "strconv" "strings" "time" "github.com/boltdb/bolt" "github.com/eiannone/keyboard" "github.com/fatih/color" "github.com/jbrukh/bayesian" "github.com/manishrjain/keys" "github.com/pkg/errors" mathex "github.com/pkg/math" yaml "gopkg.in/yaml.v2" ) func homeDir() string { currentUser, err := user.Current() if err != nil { return "" } return currentUser.HomeDir } var ( debug = flag.Bool("debug", false, "Additional debug information if set.") journal = flag.String("j", "", "Existing journal to learn from.") output = flag.String("o", "out.ldg", "Journal file to write to.") csvFile = flag.String("csv", "", "File path of CSV file containing new transactions.") account = flag.String("a", "", "Name of bank account transactions belong to.") currency = flag.String("c", "", "Set currency if any.") ignore = flag.String("ic", "", "Comma separated list of columns to ignore in CSV.") dateFormat = flag.String("d", "01/02/2006", "Express your date format in numeric form w.r.t. Jan 02, 2006, separated by slashes (/). 
See: https://golang.org/pkg/time/") skip = flag.Int("s", 0, "Number of header lines in CSV to skip") configDir = flag.String("conf", homeDir()+"/.into-ledger", "Config directory to store various into-ledger configs in.") shortcuts = flag.String("short", "shortcuts.yaml", "Name of shortcuts file.") inverseSign = flag.Bool("inverseSign", false, "Inverse sign of transaction amounts in CSV.") reverseCSV = flag.Bool("reverseCSV", false, "Reverse order of transactions in CSV") allowDups = flag.Bool("allowDups", false, "Don't filter out duplicate transactions") tfidf = flag.Bool("tfidf", false, "Use TF-IDF classification algorithm instead of Bayesian") rtxn = regexp.MustCompile(`(\d{4}/\d{2}/\d{2})[\W]*(\w.*)`) rto = regexp.MustCompile(`\W*([:\w]+)(.*)`) rfrom = regexp.MustCompile(`\W*([:\w]+).*`) rcur = regexp.MustCompile(`(\d+\.\d+|\d+)`) racc = regexp.MustCompile(`^account[\W]+(.*)`) ralias = regexp.MustCompile(`\balias\s(.*)`) stamp = "2006/01/02" bucketName = []byte("txns") descLength = 40 catLength = 20 short *keys.Shortcuts ) type accountFlags struct { flags map[string]string } type configs struct { Accounts map[string]map[string]string // account and the corresponding config. } type txn struct { Date time.Time Desc string To string From string Cur float64 CurName string Key []byte skipClassification bool Done bool } type byTime []txn func (b byTime) Len() int { return len(b) } func (b byTime) Less(i int, j int) bool { return b[i].Date.Before(b[j].Date) } func (b byTime) Swap(i int, j int) { b[i], b[j] = b[j], b[i] } func checkf(err error, format string, args ...interface{}) { if err != nil { log.Printf(format, args...) log.Println() log.Fatalf("%+v", errors.WithStack(err)) } } func assertf(ok bool, format string, args ...interface{}) { if !ok { log.Printf(format, args...) 
log.Println() log.Fatalf("%+v", errors.Errorf("Should be true, but is false")) } } func assignForAccount(account string) { tree := strings.Split(account, ":") assertf(len(tree) > 0, "Expected at least one result. Found none for: %v", account) short.AutoAssign(tree[0], "default") prev := tree[0] for _, c := range tree[1:] { if len(c) == 0 { continue } short.AutoAssign(c, prev) prev = c } } type parser struct { db *bolt.DB data []byte txns []txn classes []bayesian.Class cl *bayesian.Classifier accounts []string } func (p *parser) parseTransactions() { out, err := exec.Command("ledger", "-f", *journal, "csv").Output() checkf(err, "Unable to convert journal to csv. Possibly an issue with your ledger installation.") r := csv.NewReader(newConverter(bytes.NewReader(out))) var t txn for { cols, err := r.Read() if err == io.EOF { break } checkf(err, "Unable to read a csv line.") t = txn{} t.Date, err = time.Parse(stamp, cols[0]) checkf(err, "Unable to parse time: %v", cols[0]) t.Desc = strings.Trim(cols[2], " \n\t") t.To = cols[3] assertf(len(t.To) > 0, "Expected TO, found empty.") if strings.HasPrefix(t.To, "Equity:") { // Don't pick up Equity. 
t.skipClassification = true } t.CurName = cols[4] t.Cur, err = strconv.ParseFloat(cols[5], 64) checkf(err, "Unable to parse amount.") p.txns = append(p.txns, t) assignForAccount(t.To) } } func (p *parser) parseAccounts() { s := bufio.NewScanner(bytes.NewReader(p.data)) var acc string for s.Scan() { m := racc.FindStringSubmatch(s.Text()) if len(m) < 2 { continue } acc = m[1] if len(acc) == 0 { continue } p.accounts = append(p.accounts, acc) assignForAccount(acc) } } func (p *parser) generateClasses() { p.classes = make([]bayesian.Class, 0, 10) tomap := make(map[string]bool) for _, t := range p.txns { if t.skipClassification { continue } tomap[t.To] = true } for _, a := range p.accounts { tomap[a] = true } // remove this account as it would appear in many relevant transactions delete(tomap, *account) for to := range tomap { p.classes = append(p.classes, bayesian.Class(to)) } assertf(len(p.classes) > 1, "Expected some categories. Found none.") if *tfidf { p.cl = bayesian.NewClassifierTfIdf(p.classes...) } else { p.cl = bayesian.NewClassifier(p.classes...) } assertf(p.cl != nil, "Expected a valid classifier. 
Found nil.") for _, t := range p.txns { if _, has := tomap[t.To]; !has { continue } p.cl.Learn(t.getTerms(), bayesian.Class(t.To)) } if *tfidf { p.cl.ConvertTermsFreqToTfIdf() } } type pair struct { score float64 pos int } type byScore []pair func (b byScore) Len() int { return len(b) } func (b byScore) Less(i int, j int) bool { return b[i].score > b[j].score } func (b byScore) Swap(i int, j int) { b[i], b[j] = b[j], b[i] } var trimWhitespace = regexp.MustCompile(`^[\s]+|[\s}]+$`) var dedupWhitespace = regexp.MustCompile(`[\s]{2,}`) func (t *txn) isFromJournal() bool { return t.Key == nil } func (t *txn) getTerms() []string { desc := strings.ToUpper(t.Desc) desc = trimWhitespace.ReplaceAllString(desc, "") desc = dedupWhitespace.ReplaceAllString(desc, " ") terms := strings.Split(desc, " ") terms = append(terms, "FullDesc: "+desc) var cur float64 if t.isFromJournal() { cur = t.Cur } else { cur = -t.Cur // we are looking for the opposite } var kind string if cur >= 0 { kind = "credit" } else { kind = "debit" } terms = append(terms, "Kind: "+kind) terms = append(terms, "AmountClassFine: "+strconv.Itoa(getAmountClassFine(cur))) terms = append(terms, "AmountClassCoarse: "+strconv.Itoa(getAmountClassCoarse(cur))) if *debug { fmt.Printf("getTerms(%s, %.2f) = %v\n", t.Desc, t.Cur, terms) } return terms } func getAmountClassFine(amount float64) int { if amount == 0 { return 0 } log := math.Round(math.Log10(math.Abs(amount)) * 4) class := int(math.Round(math.Pow(10, log/4))) return class } func getAmountClassCoarse(amount float64) int { if amount == 0 { return 0 } log := int(math.Ceil(math.Log10(math.Abs(amount)))) class := int(math.Round(math.Pow10(log))) return class } func (p *parser) topHits(t *txn) []bayesian.Class { terms := t.getTerms() scores, _, _ := p.cl.LogScores(terms) pairs := make([]pair, 0, len(scores)) var mean, stddev float64 for pos, score := range scores { pairs = append(pairs, pair{score, pos}) mean += score } mean /= float64(len(scores)) for _, score := 
range scores { stddev += math.Pow(score-mean, 2) } stddev /= float64(len(scores) - 1) stddev = math.Sqrt(stddev) if *debug { fmt.Printf("stddev=%f\n", stddev) } sort.Sort(byScore(pairs)) result := make([]bayesian.Class, 0, 5) last := pairs[0].score for i := 0; i < mathex.Min(10, len(pairs)); i++ { pr := pairs[i] if math.Abs(pr.score-last) > stddev { break } if *debug { fmt.Printf("i=%d s=%.3g Class=%v\n", i, pr.score, p.classes[pr.pos]) } result = append(result, p.classes[pr.pos]) last = pr.score } return result } func includeAll(dir string, data []byte) []byte { final := make([]byte, len(data)) copy(final, data) b := bytes.NewBuffer(data) s := bufio.NewScanner(b) for s.Scan() { line := s.Text() if !strings.HasPrefix(line, "include ") { continue } fname := strings.Trim(line[8:], " \n") include, err := ioutil.ReadFile(path.Join(dir, fname)) checkf(err, "Unable to read file: %v", fname) final = append(final, include...) } return final } func parseDate(col string) (time.Time, bool) { tm, err := time.Parse(*dateFormat, col) if err == nil { return tm, true } return time.Time{}, false } func parseCurrency(col string) (float64, bool) { f, err := strconv.ParseFloat(col, 64) return f, err == nil } func parseDescription(col string) (string, bool) { return strings.Map(func(r rune) rune { if r == '"' { return -1 } return r }, col), true } func (p *parser) parseTransactionsFromCSV(in []byte) []txn { ignored := make(map[int]bool) if len(*ignore) > 0 { for _, i := range strings.Split(*ignore, ",") { pos, err := strconv.Atoi(i) checkf(err, "Unable to convert to integer: %v", i) ignored[pos] = true } } result := make([]txn, 0, 100) r := csv.NewReader(bytes.NewReader(in)) var t txn var skipped int for { t = txn{} cols, err := r.Read() if err == io.EOF { break } checkf(err, "Unable to read line: %v", strings.Join(cols, ", ")) if *skip > skipped { skipped++ continue } var picked []string for i, col := range cols { if ignored[i] { continue } picked = append(picked, col) if date, ok := 
parseDate(col); ok { t.Date = date } else if f, ok := parseCurrency(col); ok { if *inverseSign { f = -f } t.Cur = f } else if d, ok := parseDescription(col); ok { t.Desc = d } } if len(t.Desc) != 0 && !t.Date.IsZero() && t.Cur != 0.0 { y, m, d := t.Date.Year(), t.Date.Month(), t.Date.Day() t.Date = time.Date(y, m, d, 0, 0, 0, 0, time.UTC) // Have a unique key for each transaction in CSV, so we can uniquely identify and // persist them as we modify their category. hash := sha256.New() fmt.Fprintf(hash, "%s\t%s\t%.2f", t.Date.Format(stamp), t.Desc, t.Cur) t.Key = hash.Sum(nil) // check if it was reconciled before (in case we are restarted after a crash) p.db.View(func(tx *bolt.Tx) error { b := tx.Bucket(bucketName) v := b.Get(t.Key) if v != nil { dec := gob.NewDecoder(bytes.NewBuffer(v)) var td txn if err := dec.Decode(&td); err == nil { if t.Cur < 0 { t.To = td.To } else { t.From = td.From } t.Done = true } } return nil }) result = append(result, t) } else { fmt.Println() fmt.Printf("ERROR : Unable to parse transaction from the selected columns in CSV.\n") fmt.Printf("Selected CSV : %v\n", strings.Join(picked, ", ")) fmt.Printf("Parsed Date : %v\n", t.Date) fmt.Printf("Parsed Desc : %v\n", t.Desc) fmt.Printf("Parsed Currency : %v\n", t.Cur) log.Fatalln("Please ensure that the above CSV contains ALL the 3 required fields.") } } return result } func assignFor(opt string, cl bayesian.Class, keys map[rune]string) bool { for i := 0; i < len(opt); i++ { ch := rune(opt[i]) if _, has := keys[ch]; !has { keys[ch] = string(cl) return true } } return false } func setDefaultMappings(ks *keys.Shortcuts) { ks.BestEffortAssign('b', ".back", "default") ks.BestEffortAssign('q', ".quit", "default") ks.BestEffortAssign('a', ".show all", "default") ks.BestEffortAssign('s', ".skip", "default") } type kv struct { key rune val string } type byVal []kv func (b byVal) Len() int
func (b byVal) Less(i int, j int) bool { return b[i].val < b[j].val } func (b byVal) Swap(i int, j int) { b[i], b[j] = b[j], b[i] } func singleCharMode() { // disable input buffering exec.Command("stty", "-F", "/dev/tty", "cbreak", "min", "1").Run() // do not display entered characters on the screen exec.Command("stty", "-F", "/dev/tty", "-echo").Run() } func saneMode() { exec.Command("stty", "-F", "/dev/tty", "sane").Run() } func getCategory(t txn) (prefix, cat string) { if t.Cur > 0 { prefix = "[FROM]" cat = t.From } else { prefix = "[TO]" cat = t.To } return } func printCategory(t txn) { prefix, cat := getCategory(t) if len(cat) == 0 { return } if len(cat) > catLength { cat = cat[len(cat)-catLength:] } color.New(color.BgGreen, color.FgBlack).Printf(" %6s %-20s ", prefix, cat) } func printSummary(t txn, idx, total int) { idx++ if t.Done { color.New(color.BgGreen, color.FgBlack).Printf(" R ") } else { color.New(color.BgRed, color.FgWhite).Printf(" N ") } if total > 999 { color.New(color.BgBlue, color.FgWhite).Printf(" [%4d of %4d] ", idx, total) } else if total > 99 { color.New(color.BgBlue, color.FgWhite).Printf(" [%3d of %3d] ", idx, total) } else if total > 0 { color.New(color.BgBlue, color.FgWhite).Printf(" [%2d of %2d] ", idx, total) } else if total == 0 { // A bit of a hack, but will do. color.New(color.BgBlue, color.FgWhite).Printf(" [DUPLICATE] ") } else { log.Fatalf("Unhandled case for total: %v", total) } color.New(color.BgYellow, color.FgBlack).Printf(" %10s ", t.Date.Format(stamp)) desc := t.Desc if len(desc) > descLength { desc = desc[:descLength] } color.New(color.BgWhite, color.FgBlack).Printf(" %-40s", desc) // descLength used in Printf. 
printCategory(t) color.New(color.BgRed, color.FgWhite).Printf(" %9.2f %3s ", t.Cur, t.CurName) if *debug { fmt.Printf(" hash: %s", hex.EncodeToString(t.Key)) } fmt.Println() } func clear() { cmd := exec.Command("clear") cmd.Stdout = os.Stdout cmd.Run() fmt.Println() } func (p *parser) writeToDB(t txn) { if err := p.db.Update(func(tx *bolt.Tx) error { b := tx.Bucket(bucketName) var val bytes.Buffer enc := gob.NewEncoder(&val) checkf(enc.Encode(t), "Unable to encode txn: %v", t) return b.Put(t.Key, val.Bytes()) }); err != nil { log.Fatalf("Write to db failed with error: %v", err) } } func (p *parser) printAndGetResult(ks keys.Shortcuts, t *txn) int { label := "default" var repeat bool var category []string LOOP: if len(category) > 0 { fmt.Println() color.New(color.BgWhite, color.FgBlack).Printf("Selected [%s]", strings.Join(category, ":")) // descLength used in Printf. fmt.Println() } ks.Print(label, false) ch, key, _ := keyboard.GetSingleKey() if ch == 0 && key == keyboard.KeyEnter && len(t.To) > 0 && len(t.From) > 0 { t.Done = true p.writeToDB(*t) if repeat { return 0 } return 1 } if opt, has := ks.MapsTo(ch, label); has { switch opt { case ".back": return -1 case ".skip": t.Done = false p.db.Update(func(tx *bolt.Tx) error { b := tx.Bucket(bucketName) b.Delete(t.Key) return nil }) return 1 case ".quit": return 9999 case ".show all": return math.MaxInt16 } category = append(category, opt) if t.Cur > 0 { t.From = strings.Join(category, ":") } else { t.To = strings.Join(category, ":") } label = opt if ks.HasLabel(label) { repeat = true goto LOOP } } return 0 } func (p *parser) printTxn(t *txn, idx, total int) int { clear() printSummary(*t, idx, total) fmt.Println() if len(t.Desc) > descLength { color.New(color.BgWhite, color.FgBlack).Printf("%6s %s ", "[DESC]", t.Desc) // descLength used in Printf. 
fmt.Println() } { prefix, cat := getCategory(*t) if len(cat) > catLength { color.New(color.BgGreen, color.FgBlack).Printf("%6s %s", prefix, cat) fmt.Println() } } fmt.Println() hits := p.topHits(t) var ks keys.Shortcuts setDefaultMappings(&ks) for _, hit := range hits { ks.AutoAssign(string(hit), "default") } res := p.printAndGetResult(ks, t) if res != math.MaxInt16 { return res } clear() printSummary(*t, idx, total) res = p.printAndGetResult(*short, t) return res } func (p *parser) showAndCategorizeTxns(rtxns []txn) { txns := rtxns for { for i := 0; i < len(txns); i++ { // for i := range txns { t := &txns[i] if !t.Done { hits := p.topHits(t) if t.Cur < 0 { t.To = string(hits[0]) } else { t.From = string(hits[0]) } } printSummary(*t, i, len(txns)) } fmt.Println() fmt.Printf("Found %d transactions. Review (Y/a/n/q)? ", len(txns)) ch, _, _ := keyboard.GetSingleKey() if ch == 'q' { return } if ch == 'n' || ch == 'a' { fmt.Printf("\n\nMarking all transactions as accepted\n\n") for i := 0; i < len(txns); i++ { txns[i].Done = true p.writeToDB(txns[i]) } if ch == 'n' { return } continue } for i := 0; i < len(txns) && i >= 0; { t := &txns[i] i += p.printTxn(t, i, len(txns)) } } } func ledgerFormat(t txn) string { var b bytes.Buffer b.WriteString(fmt.Sprintf("%s %s\n", t.Date.Format(stamp), t.Desc)) b.WriteString(fmt.Sprintf("\t%-20s \t", t.To)) if len([]rune(t.CurName)) <= 1 { b.WriteString(fmt.Sprintf("%s%.2f\n", t.CurName, math.Abs(t.Cur))) } else { b.WriteString(fmt.Sprintf("%.2f %s\n", math.Abs(t.Cur), t.CurName)) } b.WriteString(fmt.Sprintf("\t%s\n\n", t.From)) return b.String() } func sanitize(a string) string { return strings.Map(func(r rune) rune { if r >= 'a' && r <= 'z' { return r } if r >= 'A' && r <= 'Z' { return r } if r >= '0' && r <= '9' { return r } switch r { case '*': fallthrough case ':': fallthrough case '/': fallthrough case '.': fallthrough case '-': return r default: return -1 } }, a) } func (p *parser) removeDuplicates(txns []txn) []txn { if 
len(txns) == 0 { return txns } sort.Stable(byTime(txns)) if *allowDups { return txns } sort.Sort(byTime(p.txns)) prev := p.txns first := txns[0].Date.Add(-24 * time.Hour) for i, t := range p.txns { if t.Date.After(first) { prev = p.txns[i:] break } } final := txns[:0] for _, t := range txns { var found bool tdesc := sanitize(t.Desc) for _, pr := range prev { if pr.Date.After(t.Date) { break } pdesc := sanitize(pr.Desc) if tdesc == pdesc && pr.Date.Equal(t.Date) && math.Abs(pr.Cur) == math.Abs(t.Cur) { printSummary(t, 0, 0) found = true break } } if !found { final = append(final, t) } } fmt.Printf("\t%d duplicates found and ignored.\n\n", len(txns)-len(final)) return final } var errc = color.New(color.BgRed, color.FgWhite).PrintfFunc() func oerr(msg string) { errc("\tERROR: " + msg + " ") fmt.Println() fmt.Println("Flags available:") flag.PrintDefaults() fmt.Println() } func reverseSlice(s []txn) { for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { s[i], s[j] = s[j], s[i] } } func main() { flag.Parse() defer saneMode() singleCharMode() checkf(os.MkdirAll(*configDir, 0755), "Unable to create directory: %v", *configDir) if len(*account) == 0 { oerr("Please specify the account transactions are coming from") return } configPath := path.Join(*configDir, "config.yaml") data, err := ioutil.ReadFile(configPath) if err == nil { var c configs checkf(yaml.Unmarshal(data, &c), "Unable to unmarshal yaml config at %v", configPath) if ac, has := c.Accounts[*account]; has { fmt.Printf("Using flags from config: %+v\n", ac) for k, v := range ac { flag.Set(k, v) } } } keyfile := path.Join(*configDir, *shortcuts) short = keys.ParseConfig(keyfile) setDefaultMappings(short) defer short.Persist(keyfile) if len(*journal) == 0 { oerr("Please specify the input ledger journal file") return } data, err = ioutil.ReadFile(*journal) checkf(err, "Unable to read file: %v", *journal) alldata := includeAll(path.Dir(*journal), data) if len(*output) == 0 { oerr("Please specify the output file") return } 
if _, err := os.Stat(*output); os.IsNotExist(err) { _, err := os.Create(*output) checkf(err, "Unable to check for output file: %v", *output) } tf := path.Join(os.TempDir(), "ledger-csv-txns") defer os.Remove(tf) db, err := bolt.Open(tf, 0600, nil) checkf(err, "Unable to open boltdb at %v", tf) defer db.Close() db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucketIfNotExists(bucketName) checkf(err, "Unable to create default bucket in boltdb.") return nil }) of, err := os.OpenFile(*output, os.O_APPEND|os.O_WRONLY, 0600) checkf(err, "Unable to open output file: %v", *output) p := parser{data: alldata, db: db} p.parseAccounts() p.parseTransactions() // Scanning done. Now train classifier. p.generateClasses() in, err := ioutil.ReadFile(*csvFile) checkf(err, "Unable to read csv file: %v", *csvFile) txns := p.parseTransactionsFromCSV(in) if *reverseCSV { reverseSlice(txns) } for i := range txns { if txns[i].Cur > 0 { txns[i].To = *account } else { txns[i].From = *account } txns[i].CurName = *currency } txns = p.removeDuplicates(txns) p.showAndCategorizeTxns(txns) _, err = of.WriteString(fmt.Sprintf("; into-ledger run at %v\n\n", time.Now())) checkf(err, "Unable to write into output file: %v", of.Name()) for _, t := range txns { if t.Done { if _, err := of.WriteString(ledgerFormat(t)); err != nil { log.Fatalf("Unable to write to output: %v", err) } } } checkf(of.Close(), "Unable to close output file: %v", of.Name()) }
{ return len(b) }
identifier_body
cargo_test.rs
use crate::core::compiler::{Compilation, CompileKind, Doctest, Metadata, Unit, UnitOutput}; use crate::core::shell::Verbosity; use crate::core::{TargetKind, Workspace}; use crate::ops; use crate::util::errors::CargoResult; use crate::util::{add_path_args, CargoTestError, Config, Test}; use cargo_util::{ProcessBuilder, ProcessError}; use std::ffi::OsString; use std::path::{Path, PathBuf}; pub struct TestOptions { pub compile_opts: ops::CompileOptions, pub no_run: bool, pub no_fail_fast: bool, } pub fn run_tests( ws: &Workspace<'_>, options: &TestOptions, test_args: &[&str], ) -> CargoResult<Option<CargoTestError>> { let compilation = compile_tests(ws, options)?; if options.no_run { if !options.compile_opts.build_config.emit_json() { display_no_run_information(ws, test_args, &compilation, "unittests")?; } return Ok(None); } let (test, mut errors) = run_unit_tests(ws.config(), options, test_args, &compilation)?; // If we have an error and want to fail fast, then return. if !errors.is_empty() && !options.no_fail_fast { return Ok(Some(CargoTestError::new(test, errors))); } let (doctest, docerrors) = run_doc_tests(ws, options, test_args, &compilation)?; let test = if docerrors.is_empty()
else { doctest }; errors.extend(docerrors); if errors.is_empty() { Ok(None) } else { Ok(Some(CargoTestError::new(test, errors))) } } pub fn run_benches( ws: &Workspace<'_>, options: &TestOptions, args: &[&str], ) -> CargoResult<Option<CargoTestError>> { let compilation = compile_tests(ws, options)?; if options.no_run { if !options.compile_opts.build_config.emit_json() { display_no_run_information(ws, args, &compilation, "benches")?; } return Ok(None); } let mut args = args.to_vec(); args.push("--bench"); let (test, errors) = run_unit_tests(ws.config(), options, &args, &compilation)?; match errors.len() { 0 => Ok(None), _ => Ok(Some(CargoTestError::new(test, errors))), } } fn compile_tests<'a>(ws: &Workspace<'a>, options: &TestOptions) -> CargoResult<Compilation<'a>> { let mut compilation = ops::compile(ws, &options.compile_opts)?; compilation.tests.sort(); Ok(compilation) } /// Runs the unit and integration tests of a package. fn run_unit_tests( config: &Config, options: &TestOptions, test_args: &[&str], compilation: &Compilation<'_>, ) -> CargoResult<(Test, Vec<ProcessError>)> { let cwd = config.cwd(); let mut errors = Vec::new(); for UnitOutput { unit, path, script_meta, } in compilation.tests.iter() { let (exe_display, cmd) = cmd_builds( config, cwd, unit, path, script_meta, test_args, compilation, "unittests", )?; config .shell() .concise(|shell| shell.status("Running", &exe_display))?; config .shell() .verbose(|shell| shell.status("Running", &cmd))?; let result = cmd.exec(); if let Err(e) = result { let e = e.downcast::<ProcessError>()?; errors.push(( unit.target.kind().clone(), unit.target.name().to_string(), unit.pkg.name().to_string(), e, )); if !options.no_fail_fast { break; } } } if errors.len() == 1 { let (kind, name, pkg_name, e) = errors.pop().unwrap(); Ok(( Test::UnitTest { kind, name, pkg_name, }, vec![e], )) } else { Ok(( Test::Multiple, errors.into_iter().map(|(_, _, _, e)| e).collect(), )) } } fn run_doc_tests( ws: &Workspace<'_>, options: 
&TestOptions, test_args: &[&str], compilation: &Compilation<'_>, ) -> CargoResult<(Test, Vec<ProcessError>)> { let config = ws.config(); let mut errors = Vec::new(); let doctest_xcompile = config.cli_unstable().doctest_xcompile; let doctest_in_workspace = config.cli_unstable().doctest_in_workspace; for doctest_info in &compilation.to_doc_test { let Doctest { args, unstable_opts, unit, linker, script_meta, env, } = doctest_info; if !doctest_xcompile { match unit.kind { CompileKind::Host => {} CompileKind::Target(target) => { if target.short_name() != compilation.host { // Skip doctests, -Zdoctest-xcompile not enabled. config.shell().verbose(|shell| { shell.note(format!( "skipping doctests for {} ({}), \ cross-compilation doctests are not yet supported\n\ See https://doc.rust-lang.org/nightly/cargo/reference/unstable.html#doctest-xcompile \ for more information.", unit.pkg, unit.target.description_named() )) })?; continue; } } } } config.shell().status("Doc-tests", unit.target.name())?; let mut p = compilation.rustdoc_process(unit, *script_meta)?; for (var, value) in env { p.env(var, value); } p.arg("--crate-name").arg(&unit.target.crate_name()); p.arg("--test"); if doctest_in_workspace { add_path_args(ws, unit, &mut p); // FIXME(swatinem): remove the `unstable-options` once rustdoc stabilizes the `test-run-directory` option p.arg("-Z").arg("unstable-options"); p.arg("--test-run-directory") .arg(unit.pkg.root().to_path_buf()); } else { p.arg(unit.target.src_path().path().unwrap()); } if let CompileKind::Target(target) = unit.kind { // use `rustc_target()` to properly handle JSON target paths p.arg("--target").arg(target.rustc_target()); } if doctest_xcompile { p.arg("-Zunstable-options"); p.arg("--enable-per-target-ignores"); if let Some((runtool, runtool_args)) = compilation.target_runner(unit.kind) { p.arg("--runtool").arg(runtool); for arg in runtool_args { p.arg("--runtool-arg").arg(arg); } } if let Some(linker) = linker { let mut joined = 
OsString::from("linker="); joined.push(linker); p.arg("-C").arg(joined); } } for &rust_dep in &[ &compilation.deps_output[&unit.kind], &compilation.deps_output[&CompileKind::Host], ] { let mut arg = OsString::from("dependency="); arg.push(rust_dep); p.arg("-L").arg(arg); } for native_dep in compilation.native_dirs.iter() { p.arg("-L").arg(native_dep); } for arg in test_args { p.arg("--test-args").arg(arg); } if config.shell().verbosity() == Verbosity::Quiet { p.arg("--test-args").arg("--quiet"); } p.args(args); if *unstable_opts { p.arg("-Zunstable-options"); } config .shell() .verbose(|shell| shell.status("Running", p.to_string()))?; if let Err(e) = p.exec() { let e = e.downcast::<ProcessError>()?; errors.push(e); if !options.no_fail_fast { return Ok((Test::Doc, errors)); } } } Ok((Test::Doc, errors)) } fn display_no_run_information( ws: &Workspace<'_>, test_args: &[&str], compilation: &Compilation<'_>, exec_type: &str, ) -> CargoResult<()> { let config = ws.config(); let cwd = config.cwd(); for UnitOutput { unit, path, script_meta, } in compilation.tests.iter() { let (exe_display, cmd) = cmd_builds( config, cwd, unit, path, script_meta, test_args, compilation, exec_type, )?; config .shell() .concise(|shell| shell.status("Executable", &exe_display))?; config .shell() .verbose(|shell| shell.status("Executable", &cmd))?; } return Ok(()); } fn cmd_builds( config: &Config, cwd: &Path, unit: &Unit, path: &PathBuf, script_meta: &Option<Metadata>, test_args: &[&str], compilation: &Compilation<'_>, exec_type: &str, ) -> CargoResult<(String, ProcessBuilder)> { let test_path = unit.target.src_path().path().unwrap(); let short_test_path = test_path .strip_prefix(unit.pkg.root()) .unwrap_or(test_path) .display(); let exe_display = match unit.target.kind() { TargetKind::Test | TargetKind::Bench => format!( "{} ({})", short_test_path, path.strip_prefix(cwd).unwrap_or(path).display() ), _ => format!( "{} {} ({})", exec_type, short_test_path, 
path.strip_prefix(cwd).unwrap_or(path).display() ), }; let mut cmd = compilation.target_process(path, unit.kind, &unit.pkg, *script_meta)?; cmd.args(test_args); if unit.target.harness() && config.shell().verbosity() == Verbosity::Quiet { cmd.arg("--quiet"); } Ok((exe_display, cmd)) }
{ test }
conditional_block
cargo_test.rs
use crate::core::compiler::{Compilation, CompileKind, Doctest, Metadata, Unit, UnitOutput}; use crate::core::shell::Verbosity; use crate::core::{TargetKind, Workspace}; use crate::ops; use crate::util::errors::CargoResult; use crate::util::{add_path_args, CargoTestError, Config, Test}; use cargo_util::{ProcessBuilder, ProcessError}; use std::ffi::OsString; use std::path::{Path, PathBuf}; pub struct TestOptions { pub compile_opts: ops::CompileOptions, pub no_run: bool, pub no_fail_fast: bool, } pub fn run_tests( ws: &Workspace<'_>, options: &TestOptions, test_args: &[&str], ) -> CargoResult<Option<CargoTestError>> { let compilation = compile_tests(ws, options)?; if options.no_run { if !options.compile_opts.build_config.emit_json() { display_no_run_information(ws, test_args, &compilation, "unittests")?; } return Ok(None); } let (test, mut errors) = run_unit_tests(ws.config(), options, test_args, &compilation)?; // If we have an error and want to fail fast, then return. if !errors.is_empty() && !options.no_fail_fast { return Ok(Some(CargoTestError::new(test, errors))); } let (doctest, docerrors) = run_doc_tests(ws, options, test_args, &compilation)?; let test = if docerrors.is_empty() { test } else { doctest }; errors.extend(docerrors); if errors.is_empty() { Ok(None) } else { Ok(Some(CargoTestError::new(test, errors))) } } pub fn run_benches( ws: &Workspace<'_>, options: &TestOptions, args: &[&str], ) -> CargoResult<Option<CargoTestError>> { let compilation = compile_tests(ws, options)?; if options.no_run { if !options.compile_opts.build_config.emit_json() { display_no_run_information(ws, args, &compilation, "benches")?; } return Ok(None); } let mut args = args.to_vec(); args.push("--bench"); let (test, errors) = run_unit_tests(ws.config(), options, &args, &compilation)?; match errors.len() {
_ => Ok(Some(CargoTestError::new(test, errors))), } } fn compile_tests<'a>(ws: &Workspace<'a>, options: &TestOptions) -> CargoResult<Compilation<'a>> { let mut compilation = ops::compile(ws, &options.compile_opts)?; compilation.tests.sort(); Ok(compilation) } /// Runs the unit and integration tests of a package. fn run_unit_tests( config: &Config, options: &TestOptions, test_args: &[&str], compilation: &Compilation<'_>, ) -> CargoResult<(Test, Vec<ProcessError>)> { let cwd = config.cwd(); let mut errors = Vec::new(); for UnitOutput { unit, path, script_meta, } in compilation.tests.iter() { let (exe_display, cmd) = cmd_builds( config, cwd, unit, path, script_meta, test_args, compilation, "unittests", )?; config .shell() .concise(|shell| shell.status("Running", &exe_display))?; config .shell() .verbose(|shell| shell.status("Running", &cmd))?; let result = cmd.exec(); if let Err(e) = result { let e = e.downcast::<ProcessError>()?; errors.push(( unit.target.kind().clone(), unit.target.name().to_string(), unit.pkg.name().to_string(), e, )); if !options.no_fail_fast { break; } } } if errors.len() == 1 { let (kind, name, pkg_name, e) = errors.pop().unwrap(); Ok(( Test::UnitTest { kind, name, pkg_name, }, vec![e], )) } else { Ok(( Test::Multiple, errors.into_iter().map(|(_, _, _, e)| e).collect(), )) } } fn run_doc_tests( ws: &Workspace<'_>, options: &TestOptions, test_args: &[&str], compilation: &Compilation<'_>, ) -> CargoResult<(Test, Vec<ProcessError>)> { let config = ws.config(); let mut errors = Vec::new(); let doctest_xcompile = config.cli_unstable().doctest_xcompile; let doctest_in_workspace = config.cli_unstable().doctest_in_workspace; for doctest_info in &compilation.to_doc_test { let Doctest { args, unstable_opts, unit, linker, script_meta, env, } = doctest_info; if !doctest_xcompile { match unit.kind { CompileKind::Host => {} CompileKind::Target(target) => { if target.short_name() != compilation.host { // Skip doctests, -Zdoctest-xcompile not enabled. 
config.shell().verbose(|shell| { shell.note(format!( "skipping doctests for {} ({}), \ cross-compilation doctests are not yet supported\n\ See https://doc.rust-lang.org/nightly/cargo/reference/unstable.html#doctest-xcompile \ for more information.", unit.pkg, unit.target.description_named() )) })?; continue; } } } } config.shell().status("Doc-tests", unit.target.name())?; let mut p = compilation.rustdoc_process(unit, *script_meta)?; for (var, value) in env { p.env(var, value); } p.arg("--crate-name").arg(&unit.target.crate_name()); p.arg("--test"); if doctest_in_workspace { add_path_args(ws, unit, &mut p); // FIXME(swatinem): remove the `unstable-options` once rustdoc stabilizes the `test-run-directory` option p.arg("-Z").arg("unstable-options"); p.arg("--test-run-directory") .arg(unit.pkg.root().to_path_buf()); } else { p.arg(unit.target.src_path().path().unwrap()); } if let CompileKind::Target(target) = unit.kind { // use `rustc_target()` to properly handle JSON target paths p.arg("--target").arg(target.rustc_target()); } if doctest_xcompile { p.arg("-Zunstable-options"); p.arg("--enable-per-target-ignores"); if let Some((runtool, runtool_args)) = compilation.target_runner(unit.kind) { p.arg("--runtool").arg(runtool); for arg in runtool_args { p.arg("--runtool-arg").arg(arg); } } if let Some(linker) = linker { let mut joined = OsString::from("linker="); joined.push(linker); p.arg("-C").arg(joined); } } for &rust_dep in &[ &compilation.deps_output[&unit.kind], &compilation.deps_output[&CompileKind::Host], ] { let mut arg = OsString::from("dependency="); arg.push(rust_dep); p.arg("-L").arg(arg); } for native_dep in compilation.native_dirs.iter() { p.arg("-L").arg(native_dep); } for arg in test_args { p.arg("--test-args").arg(arg); } if config.shell().verbosity() == Verbosity::Quiet { p.arg("--test-args").arg("--quiet"); } p.args(args); if *unstable_opts { p.arg("-Zunstable-options"); } config .shell() .verbose(|shell| shell.status("Running", p.to_string()))?; if 
let Err(e) = p.exec() { let e = e.downcast::<ProcessError>()?; errors.push(e); if !options.no_fail_fast { return Ok((Test::Doc, errors)); } } } Ok((Test::Doc, errors)) } fn display_no_run_information( ws: &Workspace<'_>, test_args: &[&str], compilation: &Compilation<'_>, exec_type: &str, ) -> CargoResult<()> { let config = ws.config(); let cwd = config.cwd(); for UnitOutput { unit, path, script_meta, } in compilation.tests.iter() { let (exe_display, cmd) = cmd_builds( config, cwd, unit, path, script_meta, test_args, compilation, exec_type, )?; config .shell() .concise(|shell| shell.status("Executable", &exe_display))?; config .shell() .verbose(|shell| shell.status("Executable", &cmd))?; } return Ok(()); } fn cmd_builds( config: &Config, cwd: &Path, unit: &Unit, path: &PathBuf, script_meta: &Option<Metadata>, test_args: &[&str], compilation: &Compilation<'_>, exec_type: &str, ) -> CargoResult<(String, ProcessBuilder)> { let test_path = unit.target.src_path().path().unwrap(); let short_test_path = test_path .strip_prefix(unit.pkg.root()) .unwrap_or(test_path) .display(); let exe_display = match unit.target.kind() { TargetKind::Test | TargetKind::Bench => format!( "{} ({})", short_test_path, path.strip_prefix(cwd).unwrap_or(path).display() ), _ => format!( "{} {} ({})", exec_type, short_test_path, path.strip_prefix(cwd).unwrap_or(path).display() ), }; let mut cmd = compilation.target_process(path, unit.kind, &unit.pkg, *script_meta)?; cmd.args(test_args); if unit.target.harness() && config.shell().verbosity() == Verbosity::Quiet { cmd.arg("--quiet"); } Ok((exe_display, cmd)) }
0 => Ok(None),
random_line_split
cargo_test.rs
use crate::core::compiler::{Compilation, CompileKind, Doctest, Metadata, Unit, UnitOutput}; use crate::core::shell::Verbosity; use crate::core::{TargetKind, Workspace}; use crate::ops; use crate::util::errors::CargoResult; use crate::util::{add_path_args, CargoTestError, Config, Test}; use cargo_util::{ProcessBuilder, ProcessError}; use std::ffi::OsString; use std::path::{Path, PathBuf}; pub struct TestOptions { pub compile_opts: ops::CompileOptions, pub no_run: bool, pub no_fail_fast: bool, } pub fn run_tests( ws: &Workspace<'_>, options: &TestOptions, test_args: &[&str], ) -> CargoResult<Option<CargoTestError>> { let compilation = compile_tests(ws, options)?; if options.no_run { if !options.compile_opts.build_config.emit_json() { display_no_run_information(ws, test_args, &compilation, "unittests")?; } return Ok(None); } let (test, mut errors) = run_unit_tests(ws.config(), options, test_args, &compilation)?; // If we have an error and want to fail fast, then return. if !errors.is_empty() && !options.no_fail_fast { return Ok(Some(CargoTestError::new(test, errors))); } let (doctest, docerrors) = run_doc_tests(ws, options, test_args, &compilation)?; let test = if docerrors.is_empty() { test } else { doctest }; errors.extend(docerrors); if errors.is_empty() { Ok(None) } else { Ok(Some(CargoTestError::new(test, errors))) } } pub fn run_benches( ws: &Workspace<'_>, options: &TestOptions, args: &[&str], ) -> CargoResult<Option<CargoTestError>> { let compilation = compile_tests(ws, options)?; if options.no_run { if !options.compile_opts.build_config.emit_json() { display_no_run_information(ws, args, &compilation, "benches")?; } return Ok(None); } let mut args = args.to_vec(); args.push("--bench"); let (test, errors) = run_unit_tests(ws.config(), options, &args, &compilation)?; match errors.len() { 0 => Ok(None), _ => Ok(Some(CargoTestError::new(test, errors))), } } fn compile_tests<'a>(ws: &Workspace<'a>, options: &TestOptions) -> CargoResult<Compilation<'a>> { let mut 
compilation = ops::compile(ws, &options.compile_opts)?; compilation.tests.sort(); Ok(compilation) } /// Runs the unit and integration tests of a package. fn run_unit_tests( config: &Config, options: &TestOptions, test_args: &[&str], compilation: &Compilation<'_>, ) -> CargoResult<(Test, Vec<ProcessError>)> { let cwd = config.cwd(); let mut errors = Vec::new(); for UnitOutput { unit, path, script_meta, } in compilation.tests.iter() { let (exe_display, cmd) = cmd_builds( config, cwd, unit, path, script_meta, test_args, compilation, "unittests", )?; config .shell() .concise(|shell| shell.status("Running", &exe_display))?; config .shell() .verbose(|shell| shell.status("Running", &cmd))?; let result = cmd.exec(); if let Err(e) = result { let e = e.downcast::<ProcessError>()?; errors.push(( unit.target.kind().clone(), unit.target.name().to_string(), unit.pkg.name().to_string(), e, )); if !options.no_fail_fast { break; } } } if errors.len() == 1 { let (kind, name, pkg_name, e) = errors.pop().unwrap(); Ok(( Test::UnitTest { kind, name, pkg_name, }, vec![e], )) } else { Ok(( Test::Multiple, errors.into_iter().map(|(_, _, _, e)| e).collect(), )) } } fn
( ws: &Workspace<'_>, options: &TestOptions, test_args: &[&str], compilation: &Compilation<'_>, ) -> CargoResult<(Test, Vec<ProcessError>)> { let config = ws.config(); let mut errors = Vec::new(); let doctest_xcompile = config.cli_unstable().doctest_xcompile; let doctest_in_workspace = config.cli_unstable().doctest_in_workspace; for doctest_info in &compilation.to_doc_test { let Doctest { args, unstable_opts, unit, linker, script_meta, env, } = doctest_info; if !doctest_xcompile { match unit.kind { CompileKind::Host => {} CompileKind::Target(target) => { if target.short_name() != compilation.host { // Skip doctests, -Zdoctest-xcompile not enabled. config.shell().verbose(|shell| { shell.note(format!( "skipping doctests for {} ({}), \ cross-compilation doctests are not yet supported\n\ See https://doc.rust-lang.org/nightly/cargo/reference/unstable.html#doctest-xcompile \ for more information.", unit.pkg, unit.target.description_named() )) })?; continue; } } } } config.shell().status("Doc-tests", unit.target.name())?; let mut p = compilation.rustdoc_process(unit, *script_meta)?; for (var, value) in env { p.env(var, value); } p.arg("--crate-name").arg(&unit.target.crate_name()); p.arg("--test"); if doctest_in_workspace { add_path_args(ws, unit, &mut p); // FIXME(swatinem): remove the `unstable-options` once rustdoc stabilizes the `test-run-directory` option p.arg("-Z").arg("unstable-options"); p.arg("--test-run-directory") .arg(unit.pkg.root().to_path_buf()); } else { p.arg(unit.target.src_path().path().unwrap()); } if let CompileKind::Target(target) = unit.kind { // use `rustc_target()` to properly handle JSON target paths p.arg("--target").arg(target.rustc_target()); } if doctest_xcompile { p.arg("-Zunstable-options"); p.arg("--enable-per-target-ignores"); if let Some((runtool, runtool_args)) = compilation.target_runner(unit.kind) { p.arg("--runtool").arg(runtool); for arg in runtool_args { p.arg("--runtool-arg").arg(arg); } } if let Some(linker) = linker { let mut 
joined = OsString::from("linker="); joined.push(linker); p.arg("-C").arg(joined); } } for &rust_dep in &[ &compilation.deps_output[&unit.kind], &compilation.deps_output[&CompileKind::Host], ] { let mut arg = OsString::from("dependency="); arg.push(rust_dep); p.arg("-L").arg(arg); } for native_dep in compilation.native_dirs.iter() { p.arg("-L").arg(native_dep); } for arg in test_args { p.arg("--test-args").arg(arg); } if config.shell().verbosity() == Verbosity::Quiet { p.arg("--test-args").arg("--quiet"); } p.args(args); if *unstable_opts { p.arg("-Zunstable-options"); } config .shell() .verbose(|shell| shell.status("Running", p.to_string()))?; if let Err(e) = p.exec() { let e = e.downcast::<ProcessError>()?; errors.push(e); if !options.no_fail_fast { return Ok((Test::Doc, errors)); } } } Ok((Test::Doc, errors)) } fn display_no_run_information( ws: &Workspace<'_>, test_args: &[&str], compilation: &Compilation<'_>, exec_type: &str, ) -> CargoResult<()> { let config = ws.config(); let cwd = config.cwd(); for UnitOutput { unit, path, script_meta, } in compilation.tests.iter() { let (exe_display, cmd) = cmd_builds( config, cwd, unit, path, script_meta, test_args, compilation, exec_type, )?; config .shell() .concise(|shell| shell.status("Executable", &exe_display))?; config .shell() .verbose(|shell| shell.status("Executable", &cmd))?; } return Ok(()); } fn cmd_builds( config: &Config, cwd: &Path, unit: &Unit, path: &PathBuf, script_meta: &Option<Metadata>, test_args: &[&str], compilation: &Compilation<'_>, exec_type: &str, ) -> CargoResult<(String, ProcessBuilder)> { let test_path = unit.target.src_path().path().unwrap(); let short_test_path = test_path .strip_prefix(unit.pkg.root()) .unwrap_or(test_path) .display(); let exe_display = match unit.target.kind() { TargetKind::Test | TargetKind::Bench => format!( "{} ({})", short_test_path, path.strip_prefix(cwd).unwrap_or(path).display() ), _ => format!( "{} {} ({})", exec_type, short_test_path, 
path.strip_prefix(cwd).unwrap_or(path).display() ), }; let mut cmd = compilation.target_process(path, unit.kind, &unit.pkg, *script_meta)?; cmd.args(test_args); if unit.target.harness() && config.shell().verbosity() == Verbosity::Quiet { cmd.arg("--quiet"); } Ok((exe_display, cmd)) }
run_doc_tests
identifier_name
lib.rs
/*! This crate allows you to easily write text. Usage: ```no_run # extern crate glium; # extern crate glium_text; # extern crate cgmath; # fn main() { # let display: glium::Display = unsafe { std::mem::uninitialized() }; // The `TextSystem` contains the shaders and elements used for text display. let system = glium_text::TextSystem::new(&display); // Creating a `FontTexture`, which a regular `Texture` which contains the font. // Note that loading the systems fonts is not covered by this library. let font = glium_text::FontTexture::new(&display, std::fs::File::open(&std::path::Path::new("my_font.ttf")).unwrap(), 24).unwrap(); // Creating a `TextDisplay` which contains the elements required to draw a specific sentence. let text = glium_text::TextDisplay::new(&system, &font, "Hello world!"); // Finally, drawing the text is done like this: let matrix = [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]; glium_text::draw(&text, &system, &mut display.draw(), matrix, (1.0, 1.0, 0.0, 1.0)); # } ``` */ #![warn(missing_docs)] extern crate libc; extern crate freetype_sys as freetype; #[macro_use] extern crate glium; use glium::DrawParameters; use glium::backend::Context; use glium::backend::Facade; use std::borrow::Cow; use std::default::Default; use std::io::Read; use std::ops::Deref; use std::rc::Rc; /// Texture which contains the characters of the font. pub struct FontTexture { texture: glium::texture::Texture2d, character_infos: Vec<(char, CharacterInfos)>, em_pixels: u32, } /// Object that contains the elements shared by all `TextDisplay` objects. /// /// Required to create a `TextDisplay`. pub struct TextSystem { context: Rc<Context>, program: glium::Program, } /// Object that will allow you to draw a text. 
pub struct TextDisplay<F> where F: Deref<Target=FontTexture> { context: Rc<Context>, texture: F, vertex_buffer: Option<glium::VertexBuffer<VertexFormat>>, index_buffer: Option<glium::IndexBuffer<u16>>, char_pos_x: Vec<f32>, is_empty: bool, } // structure containing informations about a character of a font #[derive(Copy, Clone, Debug)] struct CharacterInfos { // coordinates of the character top-left hand corner on the font's texture tex_coords: (f32, f32), // width and height of character in texture units tex_size: (f32, f32), // size of the character in EMs size: (f32, f32), // number of EMs between the bottom of the character and the base line of text height_over_line: f32, // number of EMs at the left of the character left_padding: f32, // number of EMs at the right of the character right_padding: f32, } struct TextureData { data: Vec<f32>, width: u32, height: u32, } impl<'a> glium::texture::Texture2dDataSource<'a> for &'a TextureData { type Data = f32; fn into_raw(self) -> glium::texture::RawImage2d<'a, f32> { glium::texture::RawImage2d { data: Cow::Borrowed(&self.data), width: self.width, height: self.height, format: glium::texture::ClientFormat::F32, } } } #[derive(Copy, Clone)] struct VertexFormat { position: [f32; 2], tex_coords: [f32; 2], } implement_vertex!(VertexFormat, position, tex_coords); impl FontTexture { /// Creates a new texture representing a font stored in a `FontTexture`. 
pub fn new<R, F>(facade: &F, font: R, font_size: u32) -> Result<FontTexture, ()> where R: Read, F: Facade { // building the freetype library // FIXME: call FT_Done_Library let library = unsafe { // taken from https://github.com/PistonDevelopers/freetype-rs/blob/master/src/library.rs extern "C" fn alloc_library(_memory: freetype::FT_Memory, size: libc::c_long) -> *mut libc::c_void { unsafe { libc::malloc(size as libc::size_t) } } extern "C" fn free_library(_memory: freetype::FT_Memory, block: *mut libc::c_void) { unsafe { libc::free(block) } } extern "C" fn realloc_library(_memory: freetype::FT_Memory, _cur_size: libc::c_long, new_size: libc::c_long, block: *mut libc::c_void) -> *mut libc::c_void { unsafe { libc::realloc(block, new_size as libc::size_t) } } static mut MEMORY: freetype::FT_MemoryRec = freetype::FT_MemoryRec { user: 0 as *mut libc::c_void, alloc: alloc_library, free: free_library, realloc: realloc_library, }; let mut raw = ::std::ptr::null_mut(); if freetype::FT_New_Library(&mut MEMORY, &mut raw) != freetype::FT_Err_Ok { return Err(()); } freetype::FT_Add_Default_Modules(raw); raw }; // building the freetype face object let font: Vec<u8> = font.bytes().map(|c| c.unwrap()).collect(); let face: freetype::FT_Face = unsafe { let mut face = ::std::ptr::null_mut(); let err = freetype::FT_New_Memory_Face(library, font.as_ptr(), font.len() as freetype::FT_Long, 0, &mut face); if err == freetype::FT_Err_Ok { face } else { return Err(()); } }; // computing the list of characters in the font let characters_list = unsafe { // TODO: unresolved symbol /*if freetype::FT_Select_CharMap(face, freetype::FT_ENCODING_UNICODE) != 0 { return Err(()); }*/ let mut result = Vec::new(); let mut g: freetype::FT_UInt = std::mem::uninitialized(); let mut c = freetype::FT_Get_First_Char(face, &mut g); while g != 0 { result.push(std::mem::transmute(c as u32)); // TODO: better solution? 
c = freetype::FT_Get_Next_Char(face, c, &mut g); } result }; // building the infos let (texture_data, chr_infos, em_pixels) = unsafe { build_font_image(face, characters_list, font_size) }; // we load the texture in the display let texture = glium::texture::Texture2d::new(facade, &texture_data).unwrap();
texture: texture, character_infos: chr_infos, em_pixels: em_pixels, }) } /// Return the size of an em-unit for the generated font texture. /// This is needed for a pixel-perfect display: the text geometry is scaled so that /// 1em == 1 unit. We must scale the geometry up by em_pixels to match the screen pixels. pub fn em_pixels(&self) -> u32 { self.em_pixels } } /*impl glium::uniforms::AsUniformValue for FontTexture { fn as_uniform_value(&self) -> glium::uniforms::UniformValue { glium::uniforms::AsUniformValue::as_uniform_value(&self.texture) } }*/ impl TextSystem { /// Builds a new text system that must be used to build `TextDisplay` objects. pub fn new<F>(facade: &F) -> TextSystem where F: Facade { TextSystem { context: facade.get_context().clone(), program: program!(facade, 140 => { vertex: " #version 140 uniform mat4 matrix; in vec2 position; in vec2 tex_coords; out vec2 v_tex_coords; void main() { gl_Position = matrix * vec4(position, 0.0, 1.0); v_tex_coords = tex_coords; } ", fragment: " #version 140 in vec2 v_tex_coords; out vec4 f_color; uniform vec4 color; uniform sampler2D tex; void main() { vec4 c = vec4(color.rgb, color.a * texture(tex, v_tex_coords)); if (c.a <= 0.01) { discard; } else { f_color = c; } } " }, 110 => { vertex: " #version 110 attribute vec2 position; attribute vec2 tex_coords; varying vec2 v_tex_coords; uniform mat4 matrix; void main() { gl_Position = matrix * vec4(position.x, position.y, 0.0, 1.0); v_tex_coords = tex_coords; } ", fragment: " #version 110 varying vec2 v_tex_coords; uniform vec4 color; uniform sampler2D tex; void main() { gl_FragColor = vec4(color.rgb, color.a * texture2D(tex, v_tex_coords)); if (gl_FragColor.a <= 0.01) { discard; } } " }, ).unwrap() } } } impl<F> TextDisplay<F> where F: Deref<Target=FontTexture> { /// Builds a new text display that allows you to draw text. 
pub fn new(system: &TextSystem, texture: F, text: &str) -> TextDisplay<F> { let mut text_display = TextDisplay { context: system.context.clone(), texture: texture, vertex_buffer: None, index_buffer: None, char_pos_x: vec![], is_empty: true, }; text_display.set_text(text); text_display } /// Return the x-positions (in em-units) of the breaks between characters. /// When a character starts at n-th byte, then get_char_pos_x()[n] is the x-pos of the character. /// The last value of the array is the x-pos of the end of the string pub fn get_char_pos_x(&self) -> &[f32] { &self.char_pos_x } /// Modifies the text on this display. pub fn set_text(&mut self, text: &str) { self.is_empty = true; self.char_pos_x = vec![0.]; self.vertex_buffer = None; self.index_buffer = None; // returning if no text if text.len() == 0 { return; } // these arrays will contain the vertex buffer and index buffer data let mut vertex_buffer_data = Vec::with_capacity(text.len() * 4 * 4); let mut index_buffer_data = Vec::with_capacity(text.len() * 6); // iterating over the characters of the string let mut pos_x = 0.; for character in text.chars() { // FIXME: wrong, but only thing stable let infos = match self.texture.character_infos .iter().find(|&&(chr, _)| chr == character) { Some(infos) => infos, None => continue // character not found in the font, ignoring it }; let infos = infos.1; self.is_empty = false; // adding the quad in the index buffer { let first_vertex_offset = vertex_buffer_data.len() as u16; index_buffer_data.push(first_vertex_offset); index_buffer_data.push(first_vertex_offset + 1); index_buffer_data.push(first_vertex_offset + 2); index_buffer_data.push(first_vertex_offset + 2); index_buffer_data.push(first_vertex_offset + 1); index_buffer_data.push(first_vertex_offset + 3); } // pos_x += infos.left_padding; // calculating coords let left_coord = pos_x; let right_coord = left_coord + infos.size.0; let top_coord = infos.height_over_line; let bottom_coord = infos.height_over_line - 
infos.size.1; // top-left vertex vertex_buffer_data.push(VertexFormat { position: [left_coord, top_coord], tex_coords: [infos.tex_coords.0, infos.tex_coords.1], }); // top-right vertex vertex_buffer_data.push(VertexFormat { position: [right_coord, top_coord], tex_coords: [infos.tex_coords.0 + infos.tex_size.0, infos.tex_coords.1], }); // bottom-left vertex vertex_buffer_data.push(VertexFormat { position: [left_coord, bottom_coord], tex_coords: [infos.tex_coords.0, infos.tex_coords.1 + infos.tex_size.1], }); // bottom-right vertex vertex_buffer_data.push(VertexFormat { position: [right_coord, bottom_coord], tex_coords: [ infos.tex_coords.0 + infos.tex_size.0, infos.tex_coords.1 + infos.tex_size.1 ], }); // going to next char pos_x = right_coord + infos.right_padding; for _ in 0..character.len_utf8() { self.char_pos_x.push(pos_x); } } if !vertex_buffer_data.len() != 0 { // building the vertex buffer self.vertex_buffer = Some(glium::VertexBuffer::new(&self.context, &vertex_buffer_data).unwrap()); // building the index buffer self.index_buffer = Some(glium::IndexBuffer::new(&self.context, glium::index::PrimitiveType::TrianglesList, &index_buffer_data).unwrap()); } } } /// /// ## About the matrix /// /// The matrix must be column-major post-muliplying (which is the usual way to do in OpenGL). /// /// One unit in height corresponds to a line of text, but the text can go above or under. /// The bottom of the line is at `0.0`, the top is at `1.0`. /// You need to adapt your matrix by taking these into consideration. pub fn draw<F, S: ?Sized, M>(text: &TextDisplay<F>, system: &TextSystem, target: &mut S, matrix: M, color: (f32, f32, f32, f32)) where S: glium::Surface, M: Into<[[f32; 4]; 4]>, F: Deref<Target=FontTexture> { let matrix = matrix.into(); let &TextDisplay { ref vertex_buffer, ref index_buffer, ref texture, is_empty, .. 
} = text; let color = [color.0, color.1, color.2, color.3]; // returning if nothing to draw if is_empty || vertex_buffer.is_none() || index_buffer.is_none() { return; } let vertex_buffer = vertex_buffer.as_ref().unwrap(); let index_buffer = index_buffer.as_ref().unwrap(); let uniforms = uniform! { matrix: matrix, color: color, tex: glium::uniforms::Sampler(&texture.texture, glium::uniforms::SamplerBehavior { magnify_filter: glium::uniforms::MagnifySamplerFilter::Linear, minify_filter: glium::uniforms::MinifySamplerFilter::Linear, .. Default::default() }) }; let params = { use glium::BlendingFunction::Addition; use glium::LinearBlendingFactor::*; let blending_function = Addition { source: SourceAlpha, destination: OneMinusSourceAlpha }; let blend = glium::Blend { color: blending_function, alpha: blending_function, constant_value: (1.0, 1.0, 1.0, 1.0), }; DrawParameters { blend: blend, .. Default::default() } }; target.draw(vertex_buffer, index_buffer, &system.program, &uniforms, &params).unwrap(); } unsafe fn build_font_image(face: freetype::FT_Face, characters_list: Vec<char>, font_size: u32) -> (TextureData, Vec<(char, CharacterInfos)>, u32) { use std::iter; // a margin around each character to prevent artifacts const MARGIN: u32 = 2; // setting the right pixel size if freetype::FT_Set_Pixel_Sizes(face, font_size, font_size) != 0 { panic!(); } // this variable will store the texture data // we set an arbitrary capacity that we think will match what we will need let mut texture_data: Vec<f32> = Vec::with_capacity(characters_list.len() * font_size as usize * font_size as usize); // the width is chosen more or less arbitrarily, because we can store everything as long as // the texture is at least as wide as the widest character // we just try to estimate a width so that width ~= height let texture_width = get_nearest_po2(std::cmp::max(font_size * 2 as u32, ((((characters_list.len() as u32) * font_size * font_size) as f32).sqrt()) as u32)); // we store the position of 
the "cursor" in the destination texture // this cursor points to the top-left pixel of the next character to write on the texture let mut cursor_offset = (0u32, 0u32); // number of rows to skip at next carriage return let mut rows_to_skip = 0u32; // now looping through the list of characters, filling the texture and returning the informations let mut em_pixels = font_size; let mut characters_infos: Vec<(char, CharacterInfos)> = characters_list.into_iter().filter_map(|character| { // loading wanted glyph in the font face if freetype::FT_Load_Glyph(face, freetype::FT_Get_Char_Index(face, character as freetype::FT_ULong), freetype::FT_LOAD_RENDER) != 0 { return None; } let bitmap = &(*(*face).glyph).bitmap; // adding a left margin before our character to prevent artifacts cursor_offset.0 += MARGIN; // computing em_pixels // FIXME: this is hacky if character == 'M' { // println!("M [{}x{}] bitmap: {:?}", bitmap.width, bitmap.rows, std::slice::from_raw_parts(bitmap.buffer, (bitmap.rows * bitmap.width) as usize)); em_pixels = bitmap.rows as u32; } // carriage return our cursor if we don't have enough room to write the next caracter // we add a margin to prevent artifacts if cursor_offset.0 + (bitmap.width as u32) + MARGIN >= texture_width { assert!(bitmap.width as u32 <= texture_width); // if this fails, we should increase texture_width cursor_offset.0 = 0; cursor_offset.1 += rows_to_skip; rows_to_skip = 0; } // if the texture data buffer has not enough lines, adding some if rows_to_skip < MARGIN + bitmap.rows as u32 { let diff = MARGIN + (bitmap.rows as u32) - rows_to_skip; rows_to_skip = MARGIN + bitmap.rows as u32; texture_data.extend(iter::repeat(0.0).take((diff * texture_width) as usize)); } // copying the data to the texture let offset_x_before_copy = cursor_offset.0; if bitmap.rows >= 1 { let destination = &mut texture_data[(cursor_offset.0 + cursor_offset.1 * texture_width) as usize ..]; let source = std::mem::transmute(bitmap.buffer); let source = 
std::slice::from_raw_parts(source, destination.len()); for y in 0 .. bitmap.rows as u32 { let source = &source[(y * bitmap.width as u32) as usize ..]; let destination = &mut destination[(y * texture_width) as usize ..]; for x in 0 .. bitmap.width { // the values in source are bytes between 0 and 255, but we want floats between 0 and 1 let val: u8 = *source.get(x as usize).unwrap(); let val = (val as f32) / (std::u8::MAX as f32); let dest = destination.get_mut(x as usize).unwrap(); *dest = val; } } cursor_offset.0 += bitmap.width as u32; debug_assert!(cursor_offset.0 <= texture_width); } // filling infos about that character // tex_size and tex_coords are in pixels for the moment ; they will be divided // by the texture dimensions later let left_padding = (*(*face).glyph).bitmap_left; Some((character, CharacterInfos { tex_size: (bitmap.width as f32, bitmap.rows as f32), tex_coords: (offset_x_before_copy as f32, cursor_offset.1 as f32), size: (bitmap.width as f32, bitmap.rows as f32), left_padding: left_padding as f32, right_padding: ((*(*face).glyph).advance.x as i32 - bitmap.width * 64 - left_padding * 64) as f32 / 64.0, height_over_line: (*(*face).glyph).bitmap_top as f32, })) }).collect(); // adding blank lines at the end until the height of the texture is a power of two { let current_height = texture_data.len() as u32 / texture_width; let requested_height = get_nearest_po2(current_height); texture_data.extend(iter::repeat(0.0).take((texture_width * (requested_height - current_height)) as usize)); } // now our texture is finished // we know its final dimensions, so we can divide all the pixels values into (0,1) range assert!((texture_data.len() as u32 % texture_width) == 0); let texture_height = (texture_data.len() as u32 / texture_width) as f32; let float_texture_width = texture_width as f32; for chr in characters_infos.iter_mut() { chr.1.tex_size.0 /= float_texture_width; chr.1.tex_size.1 /= texture_height; chr.1.tex_coords.0 /= float_texture_width; 
chr.1.tex_coords.1 /= texture_height; chr.1.size.0 /= em_pixels as f32; chr.1.size.1 /= em_pixels as f32; chr.1.left_padding /= em_pixels as f32; chr.1.right_padding /= em_pixels as f32; chr.1.height_over_line /= em_pixels as f32; } // returning (TextureData { data: texture_data, width: texture_width, height: texture_height as u32, }, characters_infos, em_pixels) } /// Function that will calculate the nearest power of two. fn get_nearest_po2(mut x: u32) -> u32 { assert!(x > 0); x -= 1; x = x | (x >> 1); x = x | (x >> 2); x = x | (x >> 4); x = x | (x >> 8); x = x | (x >> 16); x + 1 }
Ok(FontTexture {
random_line_split
lib.rs
/*! This crate allows you to easily write text. Usage: ```no_run # extern crate glium; # extern crate glium_text; # extern crate cgmath; # fn main() { # let display: glium::Display = unsafe { std::mem::uninitialized() }; // The `TextSystem` contains the shaders and elements used for text display. let system = glium_text::TextSystem::new(&display); // Creating a `FontTexture`, which a regular `Texture` which contains the font. // Note that loading the systems fonts is not covered by this library. let font = glium_text::FontTexture::new(&display, std::fs::File::open(&std::path::Path::new("my_font.ttf")).unwrap(), 24).unwrap(); // Creating a `TextDisplay` which contains the elements required to draw a specific sentence. let text = glium_text::TextDisplay::new(&system, &font, "Hello world!"); // Finally, drawing the text is done like this: let matrix = [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]; glium_text::draw(&text, &system, &mut display.draw(), matrix, (1.0, 1.0, 0.0, 1.0)); # } ``` */ #![warn(missing_docs)] extern crate libc; extern crate freetype_sys as freetype; #[macro_use] extern crate glium; use glium::DrawParameters; use glium::backend::Context; use glium::backend::Facade; use std::borrow::Cow; use std::default::Default; use std::io::Read; use std::ops::Deref; use std::rc::Rc; /// Texture which contains the characters of the font. pub struct FontTexture { texture: glium::texture::Texture2d, character_infos: Vec<(char, CharacterInfos)>, em_pixels: u32, } /// Object that contains the elements shared by all `TextDisplay` objects. /// /// Required to create a `TextDisplay`. pub struct TextSystem { context: Rc<Context>, program: glium::Program, } /// Object that will allow you to draw a text. 
pub struct TextDisplay<F> where F: Deref<Target=FontTexture> { context: Rc<Context>, texture: F, vertex_buffer: Option<glium::VertexBuffer<VertexFormat>>, index_buffer: Option<glium::IndexBuffer<u16>>, char_pos_x: Vec<f32>, is_empty: bool, } // structure containing informations about a character of a font #[derive(Copy, Clone, Debug)] struct CharacterInfos { // coordinates of the character top-left hand corner on the font's texture tex_coords: (f32, f32), // width and height of character in texture units tex_size: (f32, f32), // size of the character in EMs size: (f32, f32), // number of EMs between the bottom of the character and the base line of text height_over_line: f32, // number of EMs at the left of the character left_padding: f32, // number of EMs at the right of the character right_padding: f32, } struct TextureData { data: Vec<f32>, width: u32, height: u32, } impl<'a> glium::texture::Texture2dDataSource<'a> for &'a TextureData { type Data = f32; fn into_raw(self) -> glium::texture::RawImage2d<'a, f32> { glium::texture::RawImage2d { data: Cow::Borrowed(&self.data), width: self.width, height: self.height, format: glium::texture::ClientFormat::F32, } } } #[derive(Copy, Clone)] struct VertexFormat { position: [f32; 2], tex_coords: [f32; 2], } implement_vertex!(VertexFormat, position, tex_coords); impl FontTexture { /// Creates a new texture representing a font stored in a `FontTexture`. 
pub fn new<R, F>(facade: &F, font: R, font_size: u32) -> Result<FontTexture, ()> where R: Read, F: Facade { // building the freetype library // FIXME: call FT_Done_Library let library = unsafe { // taken from https://github.com/PistonDevelopers/freetype-rs/blob/master/src/library.rs extern "C" fn alloc_library(_memory: freetype::FT_Memory, size: libc::c_long) -> *mut libc::c_void { unsafe { libc::malloc(size as libc::size_t) } } extern "C" fn free_library(_memory: freetype::FT_Memory, block: *mut libc::c_void) { unsafe { libc::free(block) } } extern "C" fn realloc_library(_memory: freetype::FT_Memory, _cur_size: libc::c_long, new_size: libc::c_long, block: *mut libc::c_void) -> *mut libc::c_void { unsafe { libc::realloc(block, new_size as libc::size_t) } } static mut MEMORY: freetype::FT_MemoryRec = freetype::FT_MemoryRec { user: 0 as *mut libc::c_void, alloc: alloc_library, free: free_library, realloc: realloc_library, }; let mut raw = ::std::ptr::null_mut(); if freetype::FT_New_Library(&mut MEMORY, &mut raw) != freetype::FT_Err_Ok { return Err(()); } freetype::FT_Add_Default_Modules(raw); raw }; // building the freetype face object let font: Vec<u8> = font.bytes().map(|c| c.unwrap()).collect(); let face: freetype::FT_Face = unsafe { let mut face = ::std::ptr::null_mut(); let err = freetype::FT_New_Memory_Face(library, font.as_ptr(), font.len() as freetype::FT_Long, 0, &mut face); if err == freetype::FT_Err_Ok { face } else { return Err(()); } }; // computing the list of characters in the font let characters_list = unsafe { // TODO: unresolved symbol /*if freetype::FT_Select_CharMap(face, freetype::FT_ENCODING_UNICODE) != 0 { return Err(()); }*/ let mut result = Vec::new(); let mut g: freetype::FT_UInt = std::mem::uninitialized(); let mut c = freetype::FT_Get_First_Char(face, &mut g); while g != 0 { result.push(std::mem::transmute(c as u32)); // TODO: better solution? 
c = freetype::FT_Get_Next_Char(face, c, &mut g); } result }; // building the infos let (texture_data, chr_infos, em_pixels) = unsafe { build_font_image(face, characters_list, font_size) }; // we load the texture in the display let texture = glium::texture::Texture2d::new(facade, &texture_data).unwrap(); Ok(FontTexture { texture: texture, character_infos: chr_infos, em_pixels: em_pixels, }) } /// Return the size of an em-unit for the generated font texture. /// This is needed for a pixel-perfect display: the text geometry is scaled so that /// 1em == 1 unit. We must scale the geometry up by em_pixels to match the screen pixels. pub fn em_pixels(&self) -> u32 { self.em_pixels } } /*impl glium::uniforms::AsUniformValue for FontTexture { fn as_uniform_value(&self) -> glium::uniforms::UniformValue { glium::uniforms::AsUniformValue::as_uniform_value(&self.texture) } }*/ impl TextSystem { /// Builds a new text system that must be used to build `TextDisplay` objects. pub fn new<F>(facade: &F) -> TextSystem where F: Facade { TextSystem { context: facade.get_context().clone(), program: program!(facade, 140 => { vertex: " #version 140 uniform mat4 matrix; in vec2 position; in vec2 tex_coords; out vec2 v_tex_coords; void main() { gl_Position = matrix * vec4(position, 0.0, 1.0); v_tex_coords = tex_coords; } ", fragment: " #version 140 in vec2 v_tex_coords; out vec4 f_color; uniform vec4 color; uniform sampler2D tex; void main() { vec4 c = vec4(color.rgb, color.a * texture(tex, v_tex_coords)); if (c.a <= 0.01) { discard; } else { f_color = c; } } " }, 110 => { vertex: " #version 110 attribute vec2 position; attribute vec2 tex_coords; varying vec2 v_tex_coords; uniform mat4 matrix; void main() { gl_Position = matrix * vec4(position.x, position.y, 0.0, 1.0); v_tex_coords = tex_coords; } ", fragment: " #version 110 varying vec2 v_tex_coords; uniform vec4 color; uniform sampler2D tex; void main() { gl_FragColor = vec4(color.rgb, color.a * texture2D(tex, v_tex_coords)); if 
(gl_FragColor.a <= 0.01) { discard; } } " }, ).unwrap() } } } impl<F> TextDisplay<F> where F: Deref<Target=FontTexture> { /// Builds a new text display that allows you to draw text. pub fn new(system: &TextSystem, texture: F, text: &str) -> TextDisplay<F>
/// Return the x-positions (in em-units) of the breaks between characters. /// When a character starts at n-th byte, then get_char_pos_x()[n] is the x-pos of the character. /// The last value of the array is the x-pos of the end of the string pub fn get_char_pos_x(&self) -> &[f32] { &self.char_pos_x } /// Modifies the text on this display. pub fn set_text(&mut self, text: &str) { self.is_empty = true; self.char_pos_x = vec![0.]; self.vertex_buffer = None; self.index_buffer = None; // returning if no text if text.len() == 0 { return; } // these arrays will contain the vertex buffer and index buffer data let mut vertex_buffer_data = Vec::with_capacity(text.len() * 4 * 4); let mut index_buffer_data = Vec::with_capacity(text.len() * 6); // iterating over the characters of the string let mut pos_x = 0.; for character in text.chars() { // FIXME: wrong, but only thing stable let infos = match self.texture.character_infos .iter().find(|&&(chr, _)| chr == character) { Some(infos) => infos, None => continue // character not found in the font, ignoring it }; let infos = infos.1; self.is_empty = false; // adding the quad in the index buffer { let first_vertex_offset = vertex_buffer_data.len() as u16; index_buffer_data.push(first_vertex_offset); index_buffer_data.push(first_vertex_offset + 1); index_buffer_data.push(first_vertex_offset + 2); index_buffer_data.push(first_vertex_offset + 2); index_buffer_data.push(first_vertex_offset + 1); index_buffer_data.push(first_vertex_offset + 3); } // pos_x += infos.left_padding; // calculating coords let left_coord = pos_x; let right_coord = left_coord + infos.size.0; let top_coord = infos.height_over_line; let bottom_coord = infos.height_over_line - infos.size.1; // top-left vertex vertex_buffer_data.push(VertexFormat { position: [left_coord, top_coord], tex_coords: [infos.tex_coords.0, infos.tex_coords.1], }); // top-right vertex vertex_buffer_data.push(VertexFormat { position: [right_coord, top_coord], tex_coords: [infos.tex_coords.0 
+ infos.tex_size.0, infos.tex_coords.1], }); // bottom-left vertex vertex_buffer_data.push(VertexFormat { position: [left_coord, bottom_coord], tex_coords: [infos.tex_coords.0, infos.tex_coords.1 + infos.tex_size.1], }); // bottom-right vertex vertex_buffer_data.push(VertexFormat { position: [right_coord, bottom_coord], tex_coords: [ infos.tex_coords.0 + infos.tex_size.0, infos.tex_coords.1 + infos.tex_size.1 ], }); // going to next char pos_x = right_coord + infos.right_padding; for _ in 0..character.len_utf8() { self.char_pos_x.push(pos_x); } } if !vertex_buffer_data.len() != 0 { // building the vertex buffer self.vertex_buffer = Some(glium::VertexBuffer::new(&self.context, &vertex_buffer_data).unwrap()); // building the index buffer self.index_buffer = Some(glium::IndexBuffer::new(&self.context, glium::index::PrimitiveType::TrianglesList, &index_buffer_data).unwrap()); } } } /// /// ## About the matrix /// /// The matrix must be column-major post-muliplying (which is the usual way to do in OpenGL). /// /// One unit in height corresponds to a line of text, but the text can go above or under. /// The bottom of the line is at `0.0`, the top is at `1.0`. /// You need to adapt your matrix by taking these into consideration. pub fn draw<F, S: ?Sized, M>(text: &TextDisplay<F>, system: &TextSystem, target: &mut S, matrix: M, color: (f32, f32, f32, f32)) where S: glium::Surface, M: Into<[[f32; 4]; 4]>, F: Deref<Target=FontTexture> { let matrix = matrix.into(); let &TextDisplay { ref vertex_buffer, ref index_buffer, ref texture, is_empty, .. } = text; let color = [color.0, color.1, color.2, color.3]; // returning if nothing to draw if is_empty || vertex_buffer.is_none() || index_buffer.is_none() { return; } let vertex_buffer = vertex_buffer.as_ref().unwrap(); let index_buffer = index_buffer.as_ref().unwrap(); let uniforms = uniform! 
{ matrix: matrix, color: color, tex: glium::uniforms::Sampler(&texture.texture, glium::uniforms::SamplerBehavior { magnify_filter: glium::uniforms::MagnifySamplerFilter::Linear, minify_filter: glium::uniforms::MinifySamplerFilter::Linear, .. Default::default() }) }; let params = { use glium::BlendingFunction::Addition; use glium::LinearBlendingFactor::*; let blending_function = Addition { source: SourceAlpha, destination: OneMinusSourceAlpha }; let blend = glium::Blend { color: blending_function, alpha: blending_function, constant_value: (1.0, 1.0, 1.0, 1.0), }; DrawParameters { blend: blend, .. Default::default() } }; target.draw(vertex_buffer, index_buffer, &system.program, &uniforms, &params).unwrap(); } unsafe fn build_font_image(face: freetype::FT_Face, characters_list: Vec<char>, font_size: u32) -> (TextureData, Vec<(char, CharacterInfos)>, u32) { use std::iter; // a margin around each character to prevent artifacts const MARGIN: u32 = 2; // setting the right pixel size if freetype::FT_Set_Pixel_Sizes(face, font_size, font_size) != 0 { panic!(); } // this variable will store the texture data // we set an arbitrary capacity that we think will match what we will need let mut texture_data: Vec<f32> = Vec::with_capacity(characters_list.len() * font_size as usize * font_size as usize); // the width is chosen more or less arbitrarily, because we can store everything as long as // the texture is at least as wide as the widest character // we just try to estimate a width so that width ~= height let texture_width = get_nearest_po2(std::cmp::max(font_size * 2 as u32, ((((characters_list.len() as u32) * font_size * font_size) as f32).sqrt()) as u32)); // we store the position of the "cursor" in the destination texture // this cursor points to the top-left pixel of the next character to write on the texture let mut cursor_offset = (0u32, 0u32); // number of rows to skip at next carriage return let mut rows_to_skip = 0u32; // now looping through the list of characters, 
filling the texture and returning the informations let mut em_pixels = font_size; let mut characters_infos: Vec<(char, CharacterInfos)> = characters_list.into_iter().filter_map(|character| { // loading wanted glyph in the font face if freetype::FT_Load_Glyph(face, freetype::FT_Get_Char_Index(face, character as freetype::FT_ULong), freetype::FT_LOAD_RENDER) != 0 { return None; } let bitmap = &(*(*face).glyph).bitmap; // adding a left margin before our character to prevent artifacts cursor_offset.0 += MARGIN; // computing em_pixels // FIXME: this is hacky if character == 'M' { // println!("M [{}x{}] bitmap: {:?}", bitmap.width, bitmap.rows, std::slice::from_raw_parts(bitmap.buffer, (bitmap.rows * bitmap.width) as usize)); em_pixels = bitmap.rows as u32; } // carriage return our cursor if we don't have enough room to write the next caracter // we add a margin to prevent artifacts if cursor_offset.0 + (bitmap.width as u32) + MARGIN >= texture_width { assert!(bitmap.width as u32 <= texture_width); // if this fails, we should increase texture_width cursor_offset.0 = 0; cursor_offset.1 += rows_to_skip; rows_to_skip = 0; } // if the texture data buffer has not enough lines, adding some if rows_to_skip < MARGIN + bitmap.rows as u32 { let diff = MARGIN + (bitmap.rows as u32) - rows_to_skip; rows_to_skip = MARGIN + bitmap.rows as u32; texture_data.extend(iter::repeat(0.0).take((diff * texture_width) as usize)); } // copying the data to the texture let offset_x_before_copy = cursor_offset.0; if bitmap.rows >= 1 { let destination = &mut texture_data[(cursor_offset.0 + cursor_offset.1 * texture_width) as usize ..]; let source = std::mem::transmute(bitmap.buffer); let source = std::slice::from_raw_parts(source, destination.len()); for y in 0 .. bitmap.rows as u32 { let source = &source[(y * bitmap.width as u32) as usize ..]; let destination = &mut destination[(y * texture_width) as usize ..]; for x in 0 .. 
bitmap.width { // the values in source are bytes between 0 and 255, but we want floats between 0 and 1 let val: u8 = *source.get(x as usize).unwrap(); let val = (val as f32) / (std::u8::MAX as f32); let dest = destination.get_mut(x as usize).unwrap(); *dest = val; } } cursor_offset.0 += bitmap.width as u32; debug_assert!(cursor_offset.0 <= texture_width); } // filling infos about that character // tex_size and tex_coords are in pixels for the moment ; they will be divided // by the texture dimensions later let left_padding = (*(*face).glyph).bitmap_left; Some((character, CharacterInfos { tex_size: (bitmap.width as f32, bitmap.rows as f32), tex_coords: (offset_x_before_copy as f32, cursor_offset.1 as f32), size: (bitmap.width as f32, bitmap.rows as f32), left_padding: left_padding as f32, right_padding: ((*(*face).glyph).advance.x as i32 - bitmap.width * 64 - left_padding * 64) as f32 / 64.0, height_over_line: (*(*face).glyph).bitmap_top as f32, })) }).collect(); // adding blank lines at the end until the height of the texture is a power of two { let current_height = texture_data.len() as u32 / texture_width; let requested_height = get_nearest_po2(current_height); texture_data.extend(iter::repeat(0.0).take((texture_width * (requested_height - current_height)) as usize)); } // now our texture is finished // we know its final dimensions, so we can divide all the pixels values into (0,1) range assert!((texture_data.len() as u32 % texture_width) == 0); let texture_height = (texture_data.len() as u32 / texture_width) as f32; let float_texture_width = texture_width as f32; for chr in characters_infos.iter_mut() { chr.1.tex_size.0 /= float_texture_width; chr.1.tex_size.1 /= texture_height; chr.1.tex_coords.0 /= float_texture_width; chr.1.tex_coords.1 /= texture_height; chr.1.size.0 /= em_pixels as f32; chr.1.size.1 /= em_pixels as f32; chr.1.left_padding /= em_pixels as f32; chr.1.right_padding /= em_pixels as f32; chr.1.height_over_line /= em_pixels as f32; } // returning 
(TextureData { data: texture_data, width: texture_width, height: texture_height as u32, }, characters_infos, em_pixels) } /// Function that will calculate the nearest power of two. fn get_nearest_po2(mut x: u32) -> u32 { assert!(x > 0); x -= 1; x = x | (x >> 1); x = x | (x >> 2); x = x | (x >> 4); x = x | (x >> 8); x = x | (x >> 16); x + 1 }
{ let mut text_display = TextDisplay { context: system.context.clone(), texture: texture, vertex_buffer: None, index_buffer: None, char_pos_x: vec![], is_empty: true, }; text_display.set_text(text); text_display }
identifier_body
lib.rs
/*! This crate allows you to easily write text. Usage: ```no_run # extern crate glium; # extern crate glium_text; # extern crate cgmath; # fn main() { # let display: glium::Display = unsafe { std::mem::uninitialized() }; // The `TextSystem` contains the shaders and elements used for text display. let system = glium_text::TextSystem::new(&display); // Creating a `FontTexture`, which a regular `Texture` which contains the font. // Note that loading the systems fonts is not covered by this library. let font = glium_text::FontTexture::new(&display, std::fs::File::open(&std::path::Path::new("my_font.ttf")).unwrap(), 24).unwrap(); // Creating a `TextDisplay` which contains the elements required to draw a specific sentence. let text = glium_text::TextDisplay::new(&system, &font, "Hello world!"); // Finally, drawing the text is done like this: let matrix = [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]; glium_text::draw(&text, &system, &mut display.draw(), matrix, (1.0, 1.0, 0.0, 1.0)); # } ``` */ #![warn(missing_docs)] extern crate libc; extern crate freetype_sys as freetype; #[macro_use] extern crate glium; use glium::DrawParameters; use glium::backend::Context; use glium::backend::Facade; use std::borrow::Cow; use std::default::Default; use std::io::Read; use std::ops::Deref; use std::rc::Rc; /// Texture which contains the characters of the font. pub struct FontTexture { texture: glium::texture::Texture2d, character_infos: Vec<(char, CharacterInfos)>, em_pixels: u32, } /// Object that contains the elements shared by all `TextDisplay` objects. /// /// Required to create a `TextDisplay`. pub struct TextSystem { context: Rc<Context>, program: glium::Program, } /// Object that will allow you to draw a text. 
pub struct TextDisplay<F> where F: Deref<Target=FontTexture> { context: Rc<Context>, texture: F, vertex_buffer: Option<glium::VertexBuffer<VertexFormat>>, index_buffer: Option<glium::IndexBuffer<u16>>, char_pos_x: Vec<f32>, is_empty: bool, } // structure containing informations about a character of a font #[derive(Copy, Clone, Debug)] struct CharacterInfos { // coordinates of the character top-left hand corner on the font's texture tex_coords: (f32, f32), // width and height of character in texture units tex_size: (f32, f32), // size of the character in EMs size: (f32, f32), // number of EMs between the bottom of the character and the base line of text height_over_line: f32, // number of EMs at the left of the character left_padding: f32, // number of EMs at the right of the character right_padding: f32, } struct TextureData { data: Vec<f32>, width: u32, height: u32, } impl<'a> glium::texture::Texture2dDataSource<'a> for &'a TextureData { type Data = f32; fn into_raw(self) -> glium::texture::RawImage2d<'a, f32> { glium::texture::RawImage2d { data: Cow::Borrowed(&self.data), width: self.width, height: self.height, format: glium::texture::ClientFormat::F32, } } } #[derive(Copy, Clone)] struct VertexFormat { position: [f32; 2], tex_coords: [f32; 2], } implement_vertex!(VertexFormat, position, tex_coords); impl FontTexture { /// Creates a new texture representing a font stored in a `FontTexture`. 
pub fn new<R, F>(facade: &F, font: R, font_size: u32) -> Result<FontTexture, ()> where R: Read, F: Facade { // building the freetype library // FIXME: call FT_Done_Library let library = unsafe { // taken from https://github.com/PistonDevelopers/freetype-rs/blob/master/src/library.rs extern "C" fn alloc_library(_memory: freetype::FT_Memory, size: libc::c_long) -> *mut libc::c_void { unsafe { libc::malloc(size as libc::size_t) } } extern "C" fn free_library(_memory: freetype::FT_Memory, block: *mut libc::c_void) { unsafe { libc::free(block) } } extern "C" fn realloc_library(_memory: freetype::FT_Memory, _cur_size: libc::c_long, new_size: libc::c_long, block: *mut libc::c_void) -> *mut libc::c_void { unsafe { libc::realloc(block, new_size as libc::size_t) } } static mut MEMORY: freetype::FT_MemoryRec = freetype::FT_MemoryRec { user: 0 as *mut libc::c_void, alloc: alloc_library, free: free_library, realloc: realloc_library, }; let mut raw = ::std::ptr::null_mut(); if freetype::FT_New_Library(&mut MEMORY, &mut raw) != freetype::FT_Err_Ok { return Err(()); } freetype::FT_Add_Default_Modules(raw); raw }; // building the freetype face object let font: Vec<u8> = font.bytes().map(|c| c.unwrap()).collect(); let face: freetype::FT_Face = unsafe { let mut face = ::std::ptr::null_mut(); let err = freetype::FT_New_Memory_Face(library, font.as_ptr(), font.len() as freetype::FT_Long, 0, &mut face); if err == freetype::FT_Err_Ok { face } else { return Err(()); } }; // computing the list of characters in the font let characters_list = unsafe { // TODO: unresolved symbol /*if freetype::FT_Select_CharMap(face, freetype::FT_ENCODING_UNICODE) != 0 { return Err(()); }*/ let mut result = Vec::new(); let mut g: freetype::FT_UInt = std::mem::uninitialized(); let mut c = freetype::FT_Get_First_Char(face, &mut g); while g != 0 { result.push(std::mem::transmute(c as u32)); // TODO: better solution? 
c = freetype::FT_Get_Next_Char(face, c, &mut g); } result }; // building the infos let (texture_data, chr_infos, em_pixels) = unsafe { build_font_image(face, characters_list, font_size) }; // we load the texture in the display let texture = glium::texture::Texture2d::new(facade, &texture_data).unwrap(); Ok(FontTexture { texture: texture, character_infos: chr_infos, em_pixels: em_pixels, }) } /// Return the size of an em-unit for the generated font texture. /// This is needed for a pixel-perfect display: the text geometry is scaled so that /// 1em == 1 unit. We must scale the geometry up by em_pixels to match the screen pixels. pub fn em_pixels(&self) -> u32 { self.em_pixels } } /*impl glium::uniforms::AsUniformValue for FontTexture { fn as_uniform_value(&self) -> glium::uniforms::UniformValue { glium::uniforms::AsUniformValue::as_uniform_value(&self.texture) } }*/ impl TextSystem { /// Builds a new text system that must be used to build `TextDisplay` objects. pub fn new<F>(facade: &F) -> TextSystem where F: Facade { TextSystem { context: facade.get_context().clone(), program: program!(facade, 140 => { vertex: " #version 140 uniform mat4 matrix; in vec2 position; in vec2 tex_coords; out vec2 v_tex_coords; void main() { gl_Position = matrix * vec4(position, 0.0, 1.0); v_tex_coords = tex_coords; } ", fragment: " #version 140 in vec2 v_tex_coords; out vec4 f_color; uniform vec4 color; uniform sampler2D tex; void main() { vec4 c = vec4(color.rgb, color.a * texture(tex, v_tex_coords)); if (c.a <= 0.01) { discard; } else { f_color = c; } } " }, 110 => { vertex: " #version 110 attribute vec2 position; attribute vec2 tex_coords; varying vec2 v_tex_coords; uniform mat4 matrix; void main() { gl_Position = matrix * vec4(position.x, position.y, 0.0, 1.0); v_tex_coords = tex_coords; } ", fragment: " #version 110 varying vec2 v_tex_coords; uniform vec4 color; uniform sampler2D tex; void main() { gl_FragColor = vec4(color.rgb, color.a * texture2D(tex, v_tex_coords)); if 
(gl_FragColor.a <= 0.01) { discard; } } " }, ).unwrap() } } } impl<F> TextDisplay<F> where F: Deref<Target=FontTexture> { /// Builds a new text display that allows you to draw text. pub fn new(system: &TextSystem, texture: F, text: &str) -> TextDisplay<F> { let mut text_display = TextDisplay { context: system.context.clone(), texture: texture, vertex_buffer: None, index_buffer: None, char_pos_x: vec![], is_empty: true, }; text_display.set_text(text); text_display } /// Return the x-positions (in em-units) of the breaks between characters. /// When a character starts at n-th byte, then get_char_pos_x()[n] is the x-pos of the character. /// The last value of the array is the x-pos of the end of the string pub fn get_char_pos_x(&self) -> &[f32] { &self.char_pos_x } /// Modifies the text on this display. pub fn set_text(&mut self, text: &str) { self.is_empty = true; self.char_pos_x = vec![0.]; self.vertex_buffer = None; self.index_buffer = None; // returning if no text if text.len() == 0 { return; } // these arrays will contain the vertex buffer and index buffer data let mut vertex_buffer_data = Vec::with_capacity(text.len() * 4 * 4); let mut index_buffer_data = Vec::with_capacity(text.len() * 6); // iterating over the characters of the string let mut pos_x = 0.; for character in text.chars() { // FIXME: wrong, but only thing stable let infos = match self.texture.character_infos .iter().find(|&&(chr, _)| chr == character) { Some(infos) => infos, None => continue // character not found in the font, ignoring it }; let infos = infos.1; self.is_empty = false; // adding the quad in the index buffer { let first_vertex_offset = vertex_buffer_data.len() as u16; index_buffer_data.push(first_vertex_offset); index_buffer_data.push(first_vertex_offset + 1); index_buffer_data.push(first_vertex_offset + 2); index_buffer_data.push(first_vertex_offset + 2); index_buffer_data.push(first_vertex_offset + 1); index_buffer_data.push(first_vertex_offset + 3); } // pos_x += 
infos.left_padding; // calculating coords let left_coord = pos_x; let right_coord = left_coord + infos.size.0; let top_coord = infos.height_over_line; let bottom_coord = infos.height_over_line - infos.size.1; // top-left vertex vertex_buffer_data.push(VertexFormat { position: [left_coord, top_coord], tex_coords: [infos.tex_coords.0, infos.tex_coords.1], }); // top-right vertex vertex_buffer_data.push(VertexFormat { position: [right_coord, top_coord], tex_coords: [infos.tex_coords.0 + infos.tex_size.0, infos.tex_coords.1], }); // bottom-left vertex vertex_buffer_data.push(VertexFormat { position: [left_coord, bottom_coord], tex_coords: [infos.tex_coords.0, infos.tex_coords.1 + infos.tex_size.1], }); // bottom-right vertex vertex_buffer_data.push(VertexFormat { position: [right_coord, bottom_coord], tex_coords: [ infos.tex_coords.0 + infos.tex_size.0, infos.tex_coords.1 + infos.tex_size.1 ], }); // going to next char pos_x = right_coord + infos.right_padding; for _ in 0..character.len_utf8() { self.char_pos_x.push(pos_x); } } if !vertex_buffer_data.len() != 0 { // building the vertex buffer self.vertex_buffer = Some(glium::VertexBuffer::new(&self.context, &vertex_buffer_data).unwrap()); // building the index buffer self.index_buffer = Some(glium::IndexBuffer::new(&self.context, glium::index::PrimitiveType::TrianglesList, &index_buffer_data).unwrap()); } } } /// /// ## About the matrix /// /// The matrix must be column-major post-muliplying (which is the usual way to do in OpenGL). /// /// One unit in height corresponds to a line of text, but the text can go above or under. /// The bottom of the line is at `0.0`, the top is at `1.0`. /// You need to adapt your matrix by taking these into consideration. pub fn
<F, S: ?Sized, M>(text: &TextDisplay<F>, system: &TextSystem, target: &mut S, matrix: M, color: (f32, f32, f32, f32)) where S: glium::Surface, M: Into<[[f32; 4]; 4]>, F: Deref<Target=FontTexture> { let matrix = matrix.into(); let &TextDisplay { ref vertex_buffer, ref index_buffer, ref texture, is_empty, .. } = text; let color = [color.0, color.1, color.2, color.3]; // returning if nothing to draw if is_empty || vertex_buffer.is_none() || index_buffer.is_none() { return; } let vertex_buffer = vertex_buffer.as_ref().unwrap(); let index_buffer = index_buffer.as_ref().unwrap(); let uniforms = uniform! { matrix: matrix, color: color, tex: glium::uniforms::Sampler(&texture.texture, glium::uniforms::SamplerBehavior { magnify_filter: glium::uniforms::MagnifySamplerFilter::Linear, minify_filter: glium::uniforms::MinifySamplerFilter::Linear, .. Default::default() }) }; let params = { use glium::BlendingFunction::Addition; use glium::LinearBlendingFactor::*; let blending_function = Addition { source: SourceAlpha, destination: OneMinusSourceAlpha }; let blend = glium::Blend { color: blending_function, alpha: blending_function, constant_value: (1.0, 1.0, 1.0, 1.0), }; DrawParameters { blend: blend, .. 
Default::default() } }; target.draw(vertex_buffer, index_buffer, &system.program, &uniforms, &params).unwrap(); } unsafe fn build_font_image(face: freetype::FT_Face, characters_list: Vec<char>, font_size: u32) -> (TextureData, Vec<(char, CharacterInfos)>, u32) { use std::iter; // a margin around each character to prevent artifacts const MARGIN: u32 = 2; // setting the right pixel size if freetype::FT_Set_Pixel_Sizes(face, font_size, font_size) != 0 { panic!(); } // this variable will store the texture data // we set an arbitrary capacity that we think will match what we will need let mut texture_data: Vec<f32> = Vec::with_capacity(characters_list.len() * font_size as usize * font_size as usize); // the width is chosen more or less arbitrarily, because we can store everything as long as // the texture is at least as wide as the widest character // we just try to estimate a width so that width ~= height let texture_width = get_nearest_po2(std::cmp::max(font_size * 2 as u32, ((((characters_list.len() as u32) * font_size * font_size) as f32).sqrt()) as u32)); // we store the position of the "cursor" in the destination texture // this cursor points to the top-left pixel of the next character to write on the texture let mut cursor_offset = (0u32, 0u32); // number of rows to skip at next carriage return let mut rows_to_skip = 0u32; // now looping through the list of characters, filling the texture and returning the informations let mut em_pixels = font_size; let mut characters_infos: Vec<(char, CharacterInfos)> = characters_list.into_iter().filter_map(|character| { // loading wanted glyph in the font face if freetype::FT_Load_Glyph(face, freetype::FT_Get_Char_Index(face, character as freetype::FT_ULong), freetype::FT_LOAD_RENDER) != 0 { return None; } let bitmap = &(*(*face).glyph).bitmap; // adding a left margin before our character to prevent artifacts cursor_offset.0 += MARGIN; // computing em_pixels // FIXME: this is hacky if character == 'M' { // println!("M [{}x{}] 
bitmap: {:?}", bitmap.width, bitmap.rows, std::slice::from_raw_parts(bitmap.buffer, (bitmap.rows * bitmap.width) as usize)); em_pixels = bitmap.rows as u32; } // carriage return our cursor if we don't have enough room to write the next caracter // we add a margin to prevent artifacts if cursor_offset.0 + (bitmap.width as u32) + MARGIN >= texture_width { assert!(bitmap.width as u32 <= texture_width); // if this fails, we should increase texture_width cursor_offset.0 = 0; cursor_offset.1 += rows_to_skip; rows_to_skip = 0; } // if the texture data buffer has not enough lines, adding some if rows_to_skip < MARGIN + bitmap.rows as u32 { let diff = MARGIN + (bitmap.rows as u32) - rows_to_skip; rows_to_skip = MARGIN + bitmap.rows as u32; texture_data.extend(iter::repeat(0.0).take((diff * texture_width) as usize)); } // copying the data to the texture let offset_x_before_copy = cursor_offset.0; if bitmap.rows >= 1 { let destination = &mut texture_data[(cursor_offset.0 + cursor_offset.1 * texture_width) as usize ..]; let source = std::mem::transmute(bitmap.buffer); let source = std::slice::from_raw_parts(source, destination.len()); for y in 0 .. bitmap.rows as u32 { let source = &source[(y * bitmap.width as u32) as usize ..]; let destination = &mut destination[(y * texture_width) as usize ..]; for x in 0 .. 
bitmap.width { // the values in source are bytes between 0 and 255, but we want floats between 0 and 1 let val: u8 = *source.get(x as usize).unwrap(); let val = (val as f32) / (std::u8::MAX as f32); let dest = destination.get_mut(x as usize).unwrap(); *dest = val; } } cursor_offset.0 += bitmap.width as u32; debug_assert!(cursor_offset.0 <= texture_width); } // filling infos about that character // tex_size and tex_coords are in pixels for the moment ; they will be divided // by the texture dimensions later let left_padding = (*(*face).glyph).bitmap_left; Some((character, CharacterInfos { tex_size: (bitmap.width as f32, bitmap.rows as f32), tex_coords: (offset_x_before_copy as f32, cursor_offset.1 as f32), size: (bitmap.width as f32, bitmap.rows as f32), left_padding: left_padding as f32, right_padding: ((*(*face).glyph).advance.x as i32 - bitmap.width * 64 - left_padding * 64) as f32 / 64.0, height_over_line: (*(*face).glyph).bitmap_top as f32, })) }).collect(); // adding blank lines at the end until the height of the texture is a power of two { let current_height = texture_data.len() as u32 / texture_width; let requested_height = get_nearest_po2(current_height); texture_data.extend(iter::repeat(0.0).take((texture_width * (requested_height - current_height)) as usize)); } // now our texture is finished // we know its final dimensions, so we can divide all the pixels values into (0,1) range assert!((texture_data.len() as u32 % texture_width) == 0); let texture_height = (texture_data.len() as u32 / texture_width) as f32; let float_texture_width = texture_width as f32; for chr in characters_infos.iter_mut() { chr.1.tex_size.0 /= float_texture_width; chr.1.tex_size.1 /= texture_height; chr.1.tex_coords.0 /= float_texture_width; chr.1.tex_coords.1 /= texture_height; chr.1.size.0 /= em_pixels as f32; chr.1.size.1 /= em_pixels as f32; chr.1.left_padding /= em_pixels as f32; chr.1.right_padding /= em_pixels as f32; chr.1.height_over_line /= em_pixels as f32; } // returning 
(TextureData { data: texture_data, width: texture_width, height: texture_height as u32, }, characters_infos, em_pixels) } /// Function that will calculate the nearest power of two. fn get_nearest_po2(mut x: u32) -> u32 { assert!(x > 0); x -= 1; x = x | (x >> 1); x = x | (x >> 2); x = x | (x >> 4); x = x | (x >> 8); x = x | (x >> 16); x + 1 }
draw
identifier_name
lib.rs
/*! This crate allows you to easily write text. Usage: ```no_run # extern crate glium; # extern crate glium_text; # extern crate cgmath; # fn main() { # let display: glium::Display = unsafe { std::mem::uninitialized() }; // The `TextSystem` contains the shaders and elements used for text display. let system = glium_text::TextSystem::new(&display); // Creating a `FontTexture`, which a regular `Texture` which contains the font. // Note that loading the systems fonts is not covered by this library. let font = glium_text::FontTexture::new(&display, std::fs::File::open(&std::path::Path::new("my_font.ttf")).unwrap(), 24).unwrap(); // Creating a `TextDisplay` which contains the elements required to draw a specific sentence. let text = glium_text::TextDisplay::new(&system, &font, "Hello world!"); // Finally, drawing the text is done like this: let matrix = [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]; glium_text::draw(&text, &system, &mut display.draw(), matrix, (1.0, 1.0, 0.0, 1.0)); # } ``` */ #![warn(missing_docs)] extern crate libc; extern crate freetype_sys as freetype; #[macro_use] extern crate glium; use glium::DrawParameters; use glium::backend::Context; use glium::backend::Facade; use std::borrow::Cow; use std::default::Default; use std::io::Read; use std::ops::Deref; use std::rc::Rc; /// Texture which contains the characters of the font. pub struct FontTexture { texture: glium::texture::Texture2d, character_infos: Vec<(char, CharacterInfos)>, em_pixels: u32, } /// Object that contains the elements shared by all `TextDisplay` objects. /// /// Required to create a `TextDisplay`. pub struct TextSystem { context: Rc<Context>, program: glium::Program, } /// Object that will allow you to draw a text. 
pub struct TextDisplay<F> where F: Deref<Target=FontTexture> { context: Rc<Context>, texture: F, vertex_buffer: Option<glium::VertexBuffer<VertexFormat>>, index_buffer: Option<glium::IndexBuffer<u16>>, char_pos_x: Vec<f32>, is_empty: bool, } // structure containing informations about a character of a font #[derive(Copy, Clone, Debug)] struct CharacterInfos { // coordinates of the character top-left hand corner on the font's texture tex_coords: (f32, f32), // width and height of character in texture units tex_size: (f32, f32), // size of the character in EMs size: (f32, f32), // number of EMs between the bottom of the character and the base line of text height_over_line: f32, // number of EMs at the left of the character left_padding: f32, // number of EMs at the right of the character right_padding: f32, } struct TextureData { data: Vec<f32>, width: u32, height: u32, } impl<'a> glium::texture::Texture2dDataSource<'a> for &'a TextureData { type Data = f32; fn into_raw(self) -> glium::texture::RawImage2d<'a, f32> { glium::texture::RawImage2d { data: Cow::Borrowed(&self.data), width: self.width, height: self.height, format: glium::texture::ClientFormat::F32, } } } #[derive(Copy, Clone)] struct VertexFormat { position: [f32; 2], tex_coords: [f32; 2], } implement_vertex!(VertexFormat, position, tex_coords); impl FontTexture { /// Creates a new texture representing a font stored in a `FontTexture`. 
pub fn new<R, F>(facade: &F, font: R, font_size: u32) -> Result<FontTexture, ()> where R: Read, F: Facade { // building the freetype library // FIXME: call FT_Done_Library let library = unsafe { // taken from https://github.com/PistonDevelopers/freetype-rs/blob/master/src/library.rs extern "C" fn alloc_library(_memory: freetype::FT_Memory, size: libc::c_long) -> *mut libc::c_void { unsafe { libc::malloc(size as libc::size_t) } } extern "C" fn free_library(_memory: freetype::FT_Memory, block: *mut libc::c_void) { unsafe { libc::free(block) } } extern "C" fn realloc_library(_memory: freetype::FT_Memory, _cur_size: libc::c_long, new_size: libc::c_long, block: *mut libc::c_void) -> *mut libc::c_void { unsafe { libc::realloc(block, new_size as libc::size_t) } } static mut MEMORY: freetype::FT_MemoryRec = freetype::FT_MemoryRec { user: 0 as *mut libc::c_void, alloc: alloc_library, free: free_library, realloc: realloc_library, }; let mut raw = ::std::ptr::null_mut(); if freetype::FT_New_Library(&mut MEMORY, &mut raw) != freetype::FT_Err_Ok { return Err(()); } freetype::FT_Add_Default_Modules(raw); raw }; // building the freetype face object let font: Vec<u8> = font.bytes().map(|c| c.unwrap()).collect(); let face: freetype::FT_Face = unsafe { let mut face = ::std::ptr::null_mut(); let err = freetype::FT_New_Memory_Face(library, font.as_ptr(), font.len() as freetype::FT_Long, 0, &mut face); if err == freetype::FT_Err_Ok { face } else { return Err(()); } }; // computing the list of characters in the font let characters_list = unsafe { // TODO: unresolved symbol /*if freetype::FT_Select_CharMap(face, freetype::FT_ENCODING_UNICODE) != 0 { return Err(()); }*/ let mut result = Vec::new(); let mut g: freetype::FT_UInt = std::mem::uninitialized(); let mut c = freetype::FT_Get_First_Char(face, &mut g); while g != 0 { result.push(std::mem::transmute(c as u32)); // TODO: better solution? 
c = freetype::FT_Get_Next_Char(face, c, &mut g); } result }; // building the infos let (texture_data, chr_infos, em_pixels) = unsafe { build_font_image(face, characters_list, font_size) }; // we load the texture in the display let texture = glium::texture::Texture2d::new(facade, &texture_data).unwrap(); Ok(FontTexture { texture: texture, character_infos: chr_infos, em_pixels: em_pixels, }) } /// Return the size of an em-unit for the generated font texture. /// This is needed for a pixel-perfect display: the text geometry is scaled so that /// 1em == 1 unit. We must scale the geometry up by em_pixels to match the screen pixels. pub fn em_pixels(&self) -> u32 { self.em_pixels } } /*impl glium::uniforms::AsUniformValue for FontTexture { fn as_uniform_value(&self) -> glium::uniforms::UniformValue { glium::uniforms::AsUniformValue::as_uniform_value(&self.texture) } }*/ impl TextSystem { /// Builds a new text system that must be used to build `TextDisplay` objects. pub fn new<F>(facade: &F) -> TextSystem where F: Facade { TextSystem { context: facade.get_context().clone(), program: program!(facade, 140 => { vertex: " #version 140 uniform mat4 matrix; in vec2 position; in vec2 tex_coords; out vec2 v_tex_coords; void main() { gl_Position = matrix * vec4(position, 0.0, 1.0); v_tex_coords = tex_coords; } ", fragment: " #version 140 in vec2 v_tex_coords; out vec4 f_color; uniform vec4 color; uniform sampler2D tex; void main() { vec4 c = vec4(color.rgb, color.a * texture(tex, v_tex_coords)); if (c.a <= 0.01) { discard; } else { f_color = c; } } " }, 110 => { vertex: " #version 110 attribute vec2 position; attribute vec2 tex_coords; varying vec2 v_tex_coords; uniform mat4 matrix; void main() { gl_Position = matrix * vec4(position.x, position.y, 0.0, 1.0); v_tex_coords = tex_coords; } ", fragment: " #version 110 varying vec2 v_tex_coords; uniform vec4 color; uniform sampler2D tex; void main() { gl_FragColor = vec4(color.rgb, color.a * texture2D(tex, v_tex_coords)); if 
(gl_FragColor.a <= 0.01) { discard; } } " }, ).unwrap() } } } impl<F> TextDisplay<F> where F: Deref<Target=FontTexture> { /// Builds a new text display that allows you to draw text. pub fn new(system: &TextSystem, texture: F, text: &str) -> TextDisplay<F> { let mut text_display = TextDisplay { context: system.context.clone(), texture: texture, vertex_buffer: None, index_buffer: None, char_pos_x: vec![], is_empty: true, }; text_display.set_text(text); text_display } /// Return the x-positions (in em-units) of the breaks between characters. /// When a character starts at n-th byte, then get_char_pos_x()[n] is the x-pos of the character. /// The last value of the array is the x-pos of the end of the string pub fn get_char_pos_x(&self) -> &[f32] { &self.char_pos_x } /// Modifies the text on this display. pub fn set_text(&mut self, text: &str) { self.is_empty = true; self.char_pos_x = vec![0.]; self.vertex_buffer = None; self.index_buffer = None; // returning if no text if text.len() == 0 { return; } // these arrays will contain the vertex buffer and index buffer data let mut vertex_buffer_data = Vec::with_capacity(text.len() * 4 * 4); let mut index_buffer_data = Vec::with_capacity(text.len() * 6); // iterating over the characters of the string let mut pos_x = 0.; for character in text.chars() { // FIXME: wrong, but only thing stable let infos = match self.texture.character_infos .iter().find(|&&(chr, _)| chr == character) { Some(infos) => infos, None => continue // character not found in the font, ignoring it }; let infos = infos.1; self.is_empty = false; // adding the quad in the index buffer { let first_vertex_offset = vertex_buffer_data.len() as u16; index_buffer_data.push(first_vertex_offset); index_buffer_data.push(first_vertex_offset + 1); index_buffer_data.push(first_vertex_offset + 2); index_buffer_data.push(first_vertex_offset + 2); index_buffer_data.push(first_vertex_offset + 1); index_buffer_data.push(first_vertex_offset + 3); } // pos_x += 
infos.left_padding; // calculating coords let left_coord = pos_x; let right_coord = left_coord + infos.size.0; let top_coord = infos.height_over_line; let bottom_coord = infos.height_over_line - infos.size.1; // top-left vertex vertex_buffer_data.push(VertexFormat { position: [left_coord, top_coord], tex_coords: [infos.tex_coords.0, infos.tex_coords.1], }); // top-right vertex vertex_buffer_data.push(VertexFormat { position: [right_coord, top_coord], tex_coords: [infos.tex_coords.0 + infos.tex_size.0, infos.tex_coords.1], }); // bottom-left vertex vertex_buffer_data.push(VertexFormat { position: [left_coord, bottom_coord], tex_coords: [infos.tex_coords.0, infos.tex_coords.1 + infos.tex_size.1], }); // bottom-right vertex vertex_buffer_data.push(VertexFormat { position: [right_coord, bottom_coord], tex_coords: [ infos.tex_coords.0 + infos.tex_size.0, infos.tex_coords.1 + infos.tex_size.1 ], }); // going to next char pos_x = right_coord + infos.right_padding; for _ in 0..character.len_utf8() { self.char_pos_x.push(pos_x); } } if !vertex_buffer_data.len() != 0
} } /// /// ## About the matrix /// /// The matrix must be column-major post-muliplying (which is the usual way to do in OpenGL). /// /// One unit in height corresponds to a line of text, but the text can go above or under. /// The bottom of the line is at `0.0`, the top is at `1.0`. /// You need to adapt your matrix by taking these into consideration. pub fn draw<F, S: ?Sized, M>(text: &TextDisplay<F>, system: &TextSystem, target: &mut S, matrix: M, color: (f32, f32, f32, f32)) where S: glium::Surface, M: Into<[[f32; 4]; 4]>, F: Deref<Target=FontTexture> { let matrix = matrix.into(); let &TextDisplay { ref vertex_buffer, ref index_buffer, ref texture, is_empty, .. } = text; let color = [color.0, color.1, color.2, color.3]; // returning if nothing to draw if is_empty || vertex_buffer.is_none() || index_buffer.is_none() { return; } let vertex_buffer = vertex_buffer.as_ref().unwrap(); let index_buffer = index_buffer.as_ref().unwrap(); let uniforms = uniform! { matrix: matrix, color: color, tex: glium::uniforms::Sampler(&texture.texture, glium::uniforms::SamplerBehavior { magnify_filter: glium::uniforms::MagnifySamplerFilter::Linear, minify_filter: glium::uniforms::MinifySamplerFilter::Linear, .. Default::default() }) }; let params = { use glium::BlendingFunction::Addition; use glium::LinearBlendingFactor::*; let blending_function = Addition { source: SourceAlpha, destination: OneMinusSourceAlpha }; let blend = glium::Blend { color: blending_function, alpha: blending_function, constant_value: (1.0, 1.0, 1.0, 1.0), }; DrawParameters { blend: blend, .. 
Default::default() } }; target.draw(vertex_buffer, index_buffer, &system.program, &uniforms, &params).unwrap(); } unsafe fn build_font_image(face: freetype::FT_Face, characters_list: Vec<char>, font_size: u32) -> (TextureData, Vec<(char, CharacterInfos)>, u32) { use std::iter; // a margin around each character to prevent artifacts const MARGIN: u32 = 2; // setting the right pixel size if freetype::FT_Set_Pixel_Sizes(face, font_size, font_size) != 0 { panic!(); } // this variable will store the texture data // we set an arbitrary capacity that we think will match what we will need let mut texture_data: Vec<f32> = Vec::with_capacity(characters_list.len() * font_size as usize * font_size as usize); // the width is chosen more or less arbitrarily, because we can store everything as long as // the texture is at least as wide as the widest character // we just try to estimate a width so that width ~= height let texture_width = get_nearest_po2(std::cmp::max(font_size * 2 as u32, ((((characters_list.len() as u32) * font_size * font_size) as f32).sqrt()) as u32)); // we store the position of the "cursor" in the destination texture // this cursor points to the top-left pixel of the next character to write on the texture let mut cursor_offset = (0u32, 0u32); // number of rows to skip at next carriage return let mut rows_to_skip = 0u32; // now looping through the list of characters, filling the texture and returning the informations let mut em_pixels = font_size; let mut characters_infos: Vec<(char, CharacterInfos)> = characters_list.into_iter().filter_map(|character| { // loading wanted glyph in the font face if freetype::FT_Load_Glyph(face, freetype::FT_Get_Char_Index(face, character as freetype::FT_ULong), freetype::FT_LOAD_RENDER) != 0 { return None; } let bitmap = &(*(*face).glyph).bitmap; // adding a left margin before our character to prevent artifacts cursor_offset.0 += MARGIN; // computing em_pixels // FIXME: this is hacky if character == 'M' { // println!("M [{}x{}] 
bitmap: {:?}", bitmap.width, bitmap.rows, std::slice::from_raw_parts(bitmap.buffer, (bitmap.rows * bitmap.width) as usize)); em_pixels = bitmap.rows as u32; } // carriage return our cursor if we don't have enough room to write the next caracter // we add a margin to prevent artifacts if cursor_offset.0 + (bitmap.width as u32) + MARGIN >= texture_width { assert!(bitmap.width as u32 <= texture_width); // if this fails, we should increase texture_width cursor_offset.0 = 0; cursor_offset.1 += rows_to_skip; rows_to_skip = 0; } // if the texture data buffer has not enough lines, adding some if rows_to_skip < MARGIN + bitmap.rows as u32 { let diff = MARGIN + (bitmap.rows as u32) - rows_to_skip; rows_to_skip = MARGIN + bitmap.rows as u32; texture_data.extend(iter::repeat(0.0).take((diff * texture_width) as usize)); } // copying the data to the texture let offset_x_before_copy = cursor_offset.0; if bitmap.rows >= 1 { let destination = &mut texture_data[(cursor_offset.0 + cursor_offset.1 * texture_width) as usize ..]; let source = std::mem::transmute(bitmap.buffer); let source = std::slice::from_raw_parts(source, destination.len()); for y in 0 .. bitmap.rows as u32 { let source = &source[(y * bitmap.width as u32) as usize ..]; let destination = &mut destination[(y * texture_width) as usize ..]; for x in 0 .. 
bitmap.width { // the values in source are bytes between 0 and 255, but we want floats between 0 and 1 let val: u8 = *source.get(x as usize).unwrap(); let val = (val as f32) / (std::u8::MAX as f32); let dest = destination.get_mut(x as usize).unwrap(); *dest = val; } } cursor_offset.0 += bitmap.width as u32; debug_assert!(cursor_offset.0 <= texture_width); } // filling infos about that character // tex_size and tex_coords are in pixels for the moment ; they will be divided // by the texture dimensions later let left_padding = (*(*face).glyph).bitmap_left; Some((character, CharacterInfos { tex_size: (bitmap.width as f32, bitmap.rows as f32), tex_coords: (offset_x_before_copy as f32, cursor_offset.1 as f32), size: (bitmap.width as f32, bitmap.rows as f32), left_padding: left_padding as f32, right_padding: ((*(*face).glyph).advance.x as i32 - bitmap.width * 64 - left_padding * 64) as f32 / 64.0, height_over_line: (*(*face).glyph).bitmap_top as f32, })) }).collect(); // adding blank lines at the end until the height of the texture is a power of two { let current_height = texture_data.len() as u32 / texture_width; let requested_height = get_nearest_po2(current_height); texture_data.extend(iter::repeat(0.0).take((texture_width * (requested_height - current_height)) as usize)); } // now our texture is finished // we know its final dimensions, so we can divide all the pixels values into (0,1) range assert!((texture_data.len() as u32 % texture_width) == 0); let texture_height = (texture_data.len() as u32 / texture_width) as f32; let float_texture_width = texture_width as f32; for chr in characters_infos.iter_mut() { chr.1.tex_size.0 /= float_texture_width; chr.1.tex_size.1 /= texture_height; chr.1.tex_coords.0 /= float_texture_width; chr.1.tex_coords.1 /= texture_height; chr.1.size.0 /= em_pixels as f32; chr.1.size.1 /= em_pixels as f32; chr.1.left_padding /= em_pixels as f32; chr.1.right_padding /= em_pixels as f32; chr.1.height_over_line /= em_pixels as f32; } // returning 
(TextureData { data: texture_data, width: texture_width, height: texture_height as u32, }, characters_infos, em_pixels) } /// Function that will calculate the nearest power of two. fn get_nearest_po2(mut x: u32) -> u32 { assert!(x > 0); x -= 1; x = x | (x >> 1); x = x | (x >> 2); x = x | (x >> 4); x = x | (x >> 8); x = x | (x >> 16); x + 1 }
{ // building the vertex buffer self.vertex_buffer = Some(glium::VertexBuffer::new(&self.context, &vertex_buffer_data).unwrap()); // building the index buffer self.index_buffer = Some(glium::IndexBuffer::new(&self.context, glium::index::PrimitiveType::TrianglesList, &index_buffer_data).unwrap()); }
conditional_block
hf2.ts
import * as webusb from "webusb" import * as U from "./pxtutils" const controlTransferGetReport = 0x01; const controlTransferSetReport = 0x09; const controlTransferOutReport = 0x200; const controlTransferInReport = 0x100; // see https://github.com/microsoft/uf2/blob/master/hf2.md for full spec export const HF2_CMD_BININFO = 0x0001 // no arguments export const HF2_MODE_BOOTLOADER = 0x01 export const HF2_MODE_USERSPACE = 0x02 /* struct HF2_BININFO_Result { uint32_t mode; uint32_t flash_page_size; uint32_t flash_num_pages; uint32_t max_message_size; }; */ export const HF2_CMD_INFO = 0x0002 // no arguments // results is utf8 character array export const HF2_CMD_RESET_INTO_APP = 0x0003// no arguments, no result export const HF2_CMD_RESET_INTO_BOOTLOADER = 0x0004 // no arguments, no result export const HF2_CMD_START_FLASH = 0x0005 // no arguments, no result export const HF2_CMD_WRITE_FLASH_PAGE = 0x0006 /* struct HF2_WRITE_FLASH_PAGE_Command { uint32_t target_addr; uint32_t data[flash_page_size]; }; */ // no result export const HF2_CMD_CHKSUM_PAGES = 0x0007
/* struct HF2_CHKSUM_PAGES_Command { uint32_t target_addr; uint32_t num_pages; }; struct HF2_CHKSUM_PAGES_Result { uint16_t chksums[num_pages]; }; */ export const HF2_CMD_READ_WORDS = 0x0008 /* struct HF2_READ_WORDS_Command { uint32_t target_addr; uint32_t num_words; }; struct HF2_READ_WORDS_Result { uint32_t words[num_words]; }; */ export const HF2_CMD_WRITE_WORDS = 0x0009 /* struct HF2_WRITE_WORDS_Command { uint32_t target_addr; uint32_t num_words; uint32_t words[num_words]; }; */ // no result export const HF2_CMD_DMESG = 0x0010 // no arguments // results is utf8 character array export const HF2_FLAG_SERIAL_OUT = 0x80 export const HF2_FLAG_SERIAL_ERR = 0xC0 export const HF2_FLAG_CMDPKT_LAST = 0x40 export const HF2_FLAG_CMDPKT_BODY = 0x00 export const HF2_FLAG_MASK = 0xC0 export const HF2_SIZE_MASK = 63 export const HF2_STATUS_OK = 0x00 export const HF2_STATUS_INVALID_CMD = 0x01 export const HF2_STATUS_EXEC_ERR = 0x02 export const HF2_STATUS_EVENT = 0x80 // the eventId is overlayed on the tag+status; the mask corresponds // to the HF2_STATUS_EVENT above export const HF2_EV_MASK = 0x800000 export const HF2_CMD_JDS_CONFIG = 0x0020 export const HF2_CMD_JDS_SEND = 0x0021 export const HF2_EV_JDS_PACKET = 0x800020 export class Transport { dev: USBDevice; iface: USBInterface; altIface: USBAlternateInterface; epIn: USBEndpoint; epOut: USBEndpoint; readLoopStarted = false; ready = false; onData = (v: Uint8Array) => { }; onError = (e: Error) => { console.error("HF2 error: " + (e ? 
e.stack : e)) }; log(msg: string, v?: any) { if (v != undefined) console.log("HF2: " + msg, v) else console.log("HF2: " + msg) } private clearDev() { if (this.dev) { this.dev = null this.epIn = null this.epOut = null } } disconnectAsync() { this.ready = false if (!this.dev) return Promise.resolve() this.log("close device") return this.dev.close() .catch(e => { // just ignore errors closing, most likely device just disconnected }) .then(() => { this.clearDev() return U.delay(500) }) } private recvPacketAsync(): Promise<Uint8Array> { let final = (res: USBInTransferResult) => { if (res.status != "ok") this.error("USB IN transfer failed") let arr = new Uint8Array(res.data.buffer) if (arr.length == 0) return this.recvPacketAsync() return arr } if (!this.dev) return Promise.reject(new Error("Disconnected")) if (!this.epIn) { return this.dev.controlTransferIn({ requestType: "class", recipient: "interface", request: controlTransferGetReport, value: controlTransferInReport, index: this.iface.interfaceNumber }, 64).then(final) } return this.dev.transferIn(this.epIn.endpointNumber, 64) .then(final) } error(msg: string) { throw new Error(`USB error on device ${this.dev ? 
this.dev.productName : "n/a"} (${msg})`) } private async readLoop() { if (this.readLoopStarted) return this.readLoopStarted = true this.log("start read loop") while (true) { if (!this.ready) { break //await U.delay(300) //continue } try { const buf = await this.recvPacketAsync() if (buf[0]) { // we've got data; retry reading immedietly after processing it this.onData(buf) } else { // throttle down if no data coming await U.delay(5) } } catch (err) { if (this.dev) this.onError(err) await U.delay(300) } } } sendPacketAsync(pkt: Uint8Array) { if (!this.dev) return Promise.reject(new Error("Disconnected")) U.assert(pkt.length <= 64) if (!this.epOut) { return this.dev.controlTransferOut({ requestType: "class", recipient: "interface", request: controlTransferSetReport, value: controlTransferOutReport, index: this.iface.interfaceNumber }, pkt).then(res => { if (res.status != "ok") this.error("USB CTRL OUT transfer failed") }) } return this.dev.transferOut(this.epOut.endpointNumber, pkt) .then(res => { if (res.status != "ok") this.error("USB OUT transfer failed") }) } async init() { const usb = new webusb.USB({ devicesFound: async devices => { for (const device of devices) { if (device.deviceVersionMajor == 42) { for (const iface of device.configuration.interfaces) { const alt = iface.alternates[0] if (alt.interfaceClass == 0xff && alt.interfaceSubclass == 42) { this.dev = device this.iface = iface this.altIface = alt return device } } } } return undefined } }) this.dev = await usb.requestDevice({ filters: [{}] }) this.log("connect device: " + this.dev.manufacturerName + " " + this.dev.productName) await this.dev.open() await this.dev.selectConfiguration(1) if (this.altIface.endpoints.length) { this.epIn = this.altIface.endpoints.filter(e => e.direction == "in")[0] this.epOut = this.altIface.endpoints.filter(e => e.direction == "out")[0] U.assert(this.epIn.packetSize == 64); U.assert(this.epOut.packetSize == 64); } this.log("claim interface") await 
this.dev.claimInterface(this.iface.interfaceNumber) this.log("all connected") this.ready = true this.readLoop() } } export class Proto { eventHandlers: U.SMap<(buf: Uint8Array) => void> = {} msgs = new U.PromiseBuffer<Uint8Array>() cmdSeq = (Math.random() * 0xffff) | 0; private lock = new U.PromiseQueue(); constructor(public io: Transport) { let frames: Uint8Array[] = [] io.onData = buf => { let tp = buf[0] & HF2_FLAG_MASK let len = buf[0] & 63 //console.log(`msg tp=${tp} len=${len}`) let frame = new Uint8Array(len) U.memcpy(frame, 0, buf, 1, len) if (tp & HF2_FLAG_SERIAL_OUT) { this.onSerial(frame, tp == HF2_FLAG_SERIAL_ERR) return } frames.push(frame) if (tp == HF2_FLAG_CMDPKT_BODY) { return } else { U.assert(tp == HF2_FLAG_CMDPKT_LAST) let total = 0 for (let f of frames) total += f.length let r = new Uint8Array(total) let ptr = 0 for (let f of frames) { U.memcpy(r, ptr, f) ptr += f.length } frames = [] if (r[2] & HF2_STATUS_EVENT) { // asynchronous event this.handleEvent(r) } else { this.msgs.push(r) } } } } error(m: string) { return this.io.error(m) } talkAsync(cmd: number, data?: Uint8Array) { let len = 8 if (data) len += data.length let pkt = new Uint8Array(len) let seq = ++this.cmdSeq & 0xffff U.write32(pkt, 0, cmd); U.write16(pkt, 4, seq); U.write16(pkt, 6, 0); if (data) U.memcpy(pkt, 8, data, 0, data.length) let numSkipped = 0 let handleReturnAsync = (): Promise<Uint8Array> => this.msgs.shiftAsync(1000) // we wait up to a second .then(res => { if (U.read16(res, 0) != seq) { if (numSkipped < 3) { numSkipped++ this.io.log(`message out of sync, (${seq} vs ${U.read16(res, 0)}); will re-try`) return handleReturnAsync() } this.error("out of sync") } let info = "" if (res[3]) info = "; info=" + res[3] switch (res[2]) { case HF2_STATUS_OK: return res.slice(4) case HF2_STATUS_INVALID_CMD: this.error("invalid command" + info) break case HF2_STATUS_EXEC_ERR: this.error("execution error" + info) break default: this.error("error " + res[2] + info) break } return null 
}) return this.lock.enqueue("talk", () => this.sendMsgAsync(pkt) .then(handleReturnAsync)) } private sendMsgAsync(buf: Uint8Array, serial: number = 0) { // Util.assert(buf.length <= this.maxMsgSize) let frame = new Uint8Array(64) let loop = (pos: number): Promise<void> => { let len = buf.length - pos if (len <= 0) return Promise.resolve() if (len > 63) { len = 63 frame[0] = HF2_FLAG_CMDPKT_BODY; } else { frame[0] = HF2_FLAG_CMDPKT_LAST; } if (serial) frame[0] = serial == 1 ? HF2_FLAG_SERIAL_OUT : HF2_FLAG_SERIAL_ERR; frame[0] |= len; for (let i = 0; i < len; ++i) frame[i + 1] = buf[pos + i] return this.io.sendPacketAsync(frame) .then(() => loop(pos + len)) } return loop(0) } onEvent(id: number, f: (buf: Uint8Array) => void) { U.assert(!!(id & HF2_EV_MASK)) this.eventHandlers[id + ""] = f } onJDMessage(f: (buf: Uint8Array) => void) { this.talkAsync(HF2_CMD_JDS_CONFIG, U.encodeU32LE([1])) this.onEvent(HF2_EV_JDS_PACKET, f) } sendJDMessageAsync(buf: Uint8Array) { return this.talkAsync(HF2_CMD_JDS_SEND, buf) } handleEvent(buf: Uint8Array) { let evid = U.read32(buf, 0) let f = this.eventHandlers[evid + ""] if (f) { f(buf.slice(4)) } else { this.io.log("unhandled event: " + evid.toString(16)) } } onSerial(data: Uint8Array, iserr: boolean) { console.log("SERIAL:", U.bufferToString(data)) } async init() { await this.io.init() const buf = await this.talkAsync(HF2_CMD_INFO) this.io.log("Connected to: " + U.bufferToString(buf)) } }
random_line_split
hf2.ts
import * as webusb from "webusb" import * as U from "./pxtutils" const controlTransferGetReport = 0x01; const controlTransferSetReport = 0x09; const controlTransferOutReport = 0x200; const controlTransferInReport = 0x100; // see https://github.com/microsoft/uf2/blob/master/hf2.md for full spec export const HF2_CMD_BININFO = 0x0001 // no arguments export const HF2_MODE_BOOTLOADER = 0x01 export const HF2_MODE_USERSPACE = 0x02 /* struct HF2_BININFO_Result { uint32_t mode; uint32_t flash_page_size; uint32_t flash_num_pages; uint32_t max_message_size; }; */ export const HF2_CMD_INFO = 0x0002 // no arguments // results is utf8 character array export const HF2_CMD_RESET_INTO_APP = 0x0003// no arguments, no result export const HF2_CMD_RESET_INTO_BOOTLOADER = 0x0004 // no arguments, no result export const HF2_CMD_START_FLASH = 0x0005 // no arguments, no result export const HF2_CMD_WRITE_FLASH_PAGE = 0x0006 /* struct HF2_WRITE_FLASH_PAGE_Command { uint32_t target_addr; uint32_t data[flash_page_size]; }; */ // no result export const HF2_CMD_CHKSUM_PAGES = 0x0007 /* struct HF2_CHKSUM_PAGES_Command { uint32_t target_addr; uint32_t num_pages; }; struct HF2_CHKSUM_PAGES_Result { uint16_t chksums[num_pages]; }; */ export const HF2_CMD_READ_WORDS = 0x0008 /* struct HF2_READ_WORDS_Command { uint32_t target_addr; uint32_t num_words; }; struct HF2_READ_WORDS_Result { uint32_t words[num_words]; }; */ export const HF2_CMD_WRITE_WORDS = 0x0009 /* struct HF2_WRITE_WORDS_Command { uint32_t target_addr; uint32_t num_words; uint32_t words[num_words]; }; */ // no result export const HF2_CMD_DMESG = 0x0010 // no arguments // results is utf8 character array export const HF2_FLAG_SERIAL_OUT = 0x80 export const HF2_FLAG_SERIAL_ERR = 0xC0 export const HF2_FLAG_CMDPKT_LAST = 0x40 export const HF2_FLAG_CMDPKT_BODY = 0x00 export const HF2_FLAG_MASK = 0xC0 export const HF2_SIZE_MASK = 63 export const HF2_STATUS_OK = 0x00 export const HF2_STATUS_INVALID_CMD = 0x01 export const HF2_STATUS_EXEC_ERR = 
0x02 export const HF2_STATUS_EVENT = 0x80 // the eventId is overlayed on the tag+status; the mask corresponds // to the HF2_STATUS_EVENT above export const HF2_EV_MASK = 0x800000 export const HF2_CMD_JDS_CONFIG = 0x0020 export const HF2_CMD_JDS_SEND = 0x0021 export const HF2_EV_JDS_PACKET = 0x800020 export class Transport { dev: USBDevice; iface: USBInterface; altIface: USBAlternateInterface; epIn: USBEndpoint; epOut: USBEndpoint; readLoopStarted = false; ready = false; onData = (v: Uint8Array) => { }; onError = (e: Error) => { console.error("HF2 error: " + (e ? e.stack : e)) }; log(msg: string, v?: any) { if (v != undefined) console.log("HF2: " + msg, v) else console.log("HF2: " + msg) } private clearDev() { if (this.dev) { this.dev = null this.epIn = null this.epOut = null } } disconnectAsync()
private recvPacketAsync(): Promise<Uint8Array> { let final = (res: USBInTransferResult) => { if (res.status != "ok") this.error("USB IN transfer failed") let arr = new Uint8Array(res.data.buffer) if (arr.length == 0) return this.recvPacketAsync() return arr } if (!this.dev) return Promise.reject(new Error("Disconnected")) if (!this.epIn) { return this.dev.controlTransferIn({ requestType: "class", recipient: "interface", request: controlTransferGetReport, value: controlTransferInReport, index: this.iface.interfaceNumber }, 64).then(final) } return this.dev.transferIn(this.epIn.endpointNumber, 64) .then(final) } error(msg: string) { throw new Error(`USB error on device ${this.dev ? this.dev.productName : "n/a"} (${msg})`) } private async readLoop() { if (this.readLoopStarted) return this.readLoopStarted = true this.log("start read loop") while (true) { if (!this.ready) { break //await U.delay(300) //continue } try { const buf = await this.recvPacketAsync() if (buf[0]) { // we've got data; retry reading immedietly after processing it this.onData(buf) } else { // throttle down if no data coming await U.delay(5) } } catch (err) { if (this.dev) this.onError(err) await U.delay(300) } } } sendPacketAsync(pkt: Uint8Array) { if (!this.dev) return Promise.reject(new Error("Disconnected")) U.assert(pkt.length <= 64) if (!this.epOut) { return this.dev.controlTransferOut({ requestType: "class", recipient: "interface", request: controlTransferSetReport, value: controlTransferOutReport, index: this.iface.interfaceNumber }, pkt).then(res => { if (res.status != "ok") this.error("USB CTRL OUT transfer failed") }) } return this.dev.transferOut(this.epOut.endpointNumber, pkt) .then(res => { if (res.status != "ok") this.error("USB OUT transfer failed") }) } async init() { const usb = new webusb.USB({ devicesFound: async devices => { for (const device of devices) { if (device.deviceVersionMajor == 42) { for (const iface of device.configuration.interfaces) { const alt = 
iface.alternates[0] if (alt.interfaceClass == 0xff && alt.interfaceSubclass == 42) { this.dev = device this.iface = iface this.altIface = alt return device } } } } return undefined } }) this.dev = await usb.requestDevice({ filters: [{}] }) this.log("connect device: " + this.dev.manufacturerName + " " + this.dev.productName) await this.dev.open() await this.dev.selectConfiguration(1) if (this.altIface.endpoints.length) { this.epIn = this.altIface.endpoints.filter(e => e.direction == "in")[0] this.epOut = this.altIface.endpoints.filter(e => e.direction == "out")[0] U.assert(this.epIn.packetSize == 64); U.assert(this.epOut.packetSize == 64); } this.log("claim interface") await this.dev.claimInterface(this.iface.interfaceNumber) this.log("all connected") this.ready = true this.readLoop() } } export class Proto { eventHandlers: U.SMap<(buf: Uint8Array) => void> = {} msgs = new U.PromiseBuffer<Uint8Array>() cmdSeq = (Math.random() * 0xffff) | 0; private lock = new U.PromiseQueue(); constructor(public io: Transport) { let frames: Uint8Array[] = [] io.onData = buf => { let tp = buf[0] & HF2_FLAG_MASK let len = buf[0] & 63 //console.log(`msg tp=${tp} len=${len}`) let frame = new Uint8Array(len) U.memcpy(frame, 0, buf, 1, len) if (tp & HF2_FLAG_SERIAL_OUT) { this.onSerial(frame, tp == HF2_FLAG_SERIAL_ERR) return } frames.push(frame) if (tp == HF2_FLAG_CMDPKT_BODY) { return } else { U.assert(tp == HF2_FLAG_CMDPKT_LAST) let total = 0 for (let f of frames) total += f.length let r = new Uint8Array(total) let ptr = 0 for (let f of frames) { U.memcpy(r, ptr, f) ptr += f.length } frames = [] if (r[2] & HF2_STATUS_EVENT) { // asynchronous event this.handleEvent(r) } else { this.msgs.push(r) } } } } error(m: string) { return this.io.error(m) } talkAsync(cmd: number, data?: Uint8Array) { let len = 8 if (data) len += data.length let pkt = new Uint8Array(len) let seq = ++this.cmdSeq & 0xffff U.write32(pkt, 0, cmd); U.write16(pkt, 4, seq); U.write16(pkt, 6, 0); if (data) U.memcpy(pkt, 8, 
data, 0, data.length) let numSkipped = 0 let handleReturnAsync = (): Promise<Uint8Array> => this.msgs.shiftAsync(1000) // we wait up to a second .then(res => { if (U.read16(res, 0) != seq) { if (numSkipped < 3) { numSkipped++ this.io.log(`message out of sync, (${seq} vs ${U.read16(res, 0)}); will re-try`) return handleReturnAsync() } this.error("out of sync") } let info = "" if (res[3]) info = "; info=" + res[3] switch (res[2]) { case HF2_STATUS_OK: return res.slice(4) case HF2_STATUS_INVALID_CMD: this.error("invalid command" + info) break case HF2_STATUS_EXEC_ERR: this.error("execution error" + info) break default: this.error("error " + res[2] + info) break } return null }) return this.lock.enqueue("talk", () => this.sendMsgAsync(pkt) .then(handleReturnAsync)) } private sendMsgAsync(buf: Uint8Array, serial: number = 0) { // Util.assert(buf.length <= this.maxMsgSize) let frame = new Uint8Array(64) let loop = (pos: number): Promise<void> => { let len = buf.length - pos if (len <= 0) return Promise.resolve() if (len > 63) { len = 63 frame[0] = HF2_FLAG_CMDPKT_BODY; } else { frame[0] = HF2_FLAG_CMDPKT_LAST; } if (serial) frame[0] = serial == 1 ? 
HF2_FLAG_SERIAL_OUT : HF2_FLAG_SERIAL_ERR; frame[0] |= len; for (let i = 0; i < len; ++i) frame[i + 1] = buf[pos + i] return this.io.sendPacketAsync(frame) .then(() => loop(pos + len)) } return loop(0) } onEvent(id: number, f: (buf: Uint8Array) => void) { U.assert(!!(id & HF2_EV_MASK)) this.eventHandlers[id + ""] = f } onJDMessage(f: (buf: Uint8Array) => void) { this.talkAsync(HF2_CMD_JDS_CONFIG, U.encodeU32LE([1])) this.onEvent(HF2_EV_JDS_PACKET, f) } sendJDMessageAsync(buf: Uint8Array) { return this.talkAsync(HF2_CMD_JDS_SEND, buf) } handleEvent(buf: Uint8Array) { let evid = U.read32(buf, 0) let f = this.eventHandlers[evid + ""] if (f) { f(buf.slice(4)) } else { this.io.log("unhandled event: " + evid.toString(16)) } } onSerial(data: Uint8Array, iserr: boolean) { console.log("SERIAL:", U.bufferToString(data)) } async init() { await this.io.init() const buf = await this.talkAsync(HF2_CMD_INFO) this.io.log("Connected to: " + U.bufferToString(buf)) } }
{ this.ready = false if (!this.dev) return Promise.resolve() this.log("close device") return this.dev.close() .catch(e => { // just ignore errors closing, most likely device just disconnected }) .then(() => { this.clearDev() return U.delay(500) }) }
identifier_body
hf2.ts
import * as webusb from "webusb" import * as U from "./pxtutils" const controlTransferGetReport = 0x01; const controlTransferSetReport = 0x09; const controlTransferOutReport = 0x200; const controlTransferInReport = 0x100; // see https://github.com/microsoft/uf2/blob/master/hf2.md for full spec export const HF2_CMD_BININFO = 0x0001 // no arguments export const HF2_MODE_BOOTLOADER = 0x01 export const HF2_MODE_USERSPACE = 0x02 /* struct HF2_BININFO_Result { uint32_t mode; uint32_t flash_page_size; uint32_t flash_num_pages; uint32_t max_message_size; }; */ export const HF2_CMD_INFO = 0x0002 // no arguments // results is utf8 character array export const HF2_CMD_RESET_INTO_APP = 0x0003// no arguments, no result export const HF2_CMD_RESET_INTO_BOOTLOADER = 0x0004 // no arguments, no result export const HF2_CMD_START_FLASH = 0x0005 // no arguments, no result export const HF2_CMD_WRITE_FLASH_PAGE = 0x0006 /* struct HF2_WRITE_FLASH_PAGE_Command { uint32_t target_addr; uint32_t data[flash_page_size]; }; */ // no result export const HF2_CMD_CHKSUM_PAGES = 0x0007 /* struct HF2_CHKSUM_PAGES_Command { uint32_t target_addr; uint32_t num_pages; }; struct HF2_CHKSUM_PAGES_Result { uint16_t chksums[num_pages]; }; */ export const HF2_CMD_READ_WORDS = 0x0008 /* struct HF2_READ_WORDS_Command { uint32_t target_addr; uint32_t num_words; }; struct HF2_READ_WORDS_Result { uint32_t words[num_words]; }; */ export const HF2_CMD_WRITE_WORDS = 0x0009 /* struct HF2_WRITE_WORDS_Command { uint32_t target_addr; uint32_t num_words; uint32_t words[num_words]; }; */ // no result export const HF2_CMD_DMESG = 0x0010 // no arguments // results is utf8 character array export const HF2_FLAG_SERIAL_OUT = 0x80 export const HF2_FLAG_SERIAL_ERR = 0xC0 export const HF2_FLAG_CMDPKT_LAST = 0x40 export const HF2_FLAG_CMDPKT_BODY = 0x00 export const HF2_FLAG_MASK = 0xC0 export const HF2_SIZE_MASK = 63 export const HF2_STATUS_OK = 0x00 export const HF2_STATUS_INVALID_CMD = 0x01 export const HF2_STATUS_EXEC_ERR = 
0x02 export const HF2_STATUS_EVENT = 0x80 // the eventId is overlayed on the tag+status; the mask corresponds // to the HF2_STATUS_EVENT above export const HF2_EV_MASK = 0x800000 export const HF2_CMD_JDS_CONFIG = 0x0020 export const HF2_CMD_JDS_SEND = 0x0021 export const HF2_EV_JDS_PACKET = 0x800020 export class Transport { dev: USBDevice; iface: USBInterface; altIface: USBAlternateInterface; epIn: USBEndpoint; epOut: USBEndpoint; readLoopStarted = false; ready = false; onData = (v: Uint8Array) => { }; onError = (e: Error) => { console.error("HF2 error: " + (e ? e.stack : e)) }; log(msg: string, v?: any) { if (v != undefined) console.log("HF2: " + msg, v) else console.log("HF2: " + msg) } private clearDev() { if (this.dev) { this.dev = null this.epIn = null this.epOut = null } } disconnectAsync() { this.ready = false if (!this.dev) return Promise.resolve() this.log("close device") return this.dev.close() .catch(e => { // just ignore errors closing, most likely device just disconnected }) .then(() => { this.clearDev() return U.delay(500) }) } private recvPacketAsync(): Promise<Uint8Array> { let final = (res: USBInTransferResult) => { if (res.status != "ok") this.error("USB IN transfer failed") let arr = new Uint8Array(res.data.buffer) if (arr.length == 0) return this.recvPacketAsync() return arr } if (!this.dev) return Promise.reject(new Error("Disconnected")) if (!this.epIn) { return this.dev.controlTransferIn({ requestType: "class", recipient: "interface", request: controlTransferGetReport, value: controlTransferInReport, index: this.iface.interfaceNumber }, 64).then(final) } return this.dev.transferIn(this.epIn.endpointNumber, 64) .then(final) } error(msg: string) { throw new Error(`USB error on device ${this.dev ? 
this.dev.productName : "n/a"} (${msg})`) } private async readLoop() { if (this.readLoopStarted) return this.readLoopStarted = true this.log("start read loop") while (true) { if (!this.ready) { break //await U.delay(300) //continue } try { const buf = await this.recvPacketAsync() if (buf[0]) { // we've got data; retry reading immedietly after processing it this.onData(buf) } else { // throttle down if no data coming await U.delay(5) } } catch (err) { if (this.dev) this.onError(err) await U.delay(300) } } } sendPacketAsync(pkt: Uint8Array) { if (!this.dev) return Promise.reject(new Error("Disconnected")) U.assert(pkt.length <= 64) if (!this.epOut) { return this.dev.controlTransferOut({ requestType: "class", recipient: "interface", request: controlTransferSetReport, value: controlTransferOutReport, index: this.iface.interfaceNumber }, pkt).then(res => { if (res.status != "ok") this.error("USB CTRL OUT transfer failed") }) } return this.dev.transferOut(this.epOut.endpointNumber, pkt) .then(res => { if (res.status != "ok") this.error("USB OUT transfer failed") }) } async init() { const usb = new webusb.USB({ devicesFound: async devices => { for (const device of devices) { if (device.deviceVersionMajor == 42) { for (const iface of device.configuration.interfaces) { const alt = iface.alternates[0] if (alt.interfaceClass == 0xff && alt.interfaceSubclass == 42) { this.dev = device this.iface = iface this.altIface = alt return device } } } } return undefined } }) this.dev = await usb.requestDevice({ filters: [{}] }) this.log("connect device: " + this.dev.manufacturerName + " " + this.dev.productName) await this.dev.open() await this.dev.selectConfiguration(1) if (this.altIface.endpoints.length) { this.epIn = this.altIface.endpoints.filter(e => e.direction == "in")[0] this.epOut = this.altIface.endpoints.filter(e => e.direction == "out")[0] U.assert(this.epIn.packetSize == 64); U.assert(this.epOut.packetSize == 64); } this.log("claim interface") await 
this.dev.claimInterface(this.iface.interfaceNumber) this.log("all connected") this.ready = true this.readLoop() } } export class Proto { eventHandlers: U.SMap<(buf: Uint8Array) => void> = {} msgs = new U.PromiseBuffer<Uint8Array>() cmdSeq = (Math.random() * 0xffff) | 0; private lock = new U.PromiseQueue(); constructor(public io: Transport) { let frames: Uint8Array[] = [] io.onData = buf => { let tp = buf[0] & HF2_FLAG_MASK let len = buf[0] & 63 //console.log(`msg tp=${tp} len=${len}`) let frame = new Uint8Array(len) U.memcpy(frame, 0, buf, 1, len) if (tp & HF2_FLAG_SERIAL_OUT) { this.onSerial(frame, tp == HF2_FLAG_SERIAL_ERR) return } frames.push(frame) if (tp == HF2_FLAG_CMDPKT_BODY) { return } else { U.assert(tp == HF2_FLAG_CMDPKT_LAST) let total = 0 for (let f of frames) total += f.length let r = new Uint8Array(total) let ptr = 0 for (let f of frames) { U.memcpy(r, ptr, f) ptr += f.length } frames = [] if (r[2] & HF2_STATUS_EVENT) { // asynchronous event this.handleEvent(r) } else
} } } error(m: string) { return this.io.error(m) } talkAsync(cmd: number, data?: Uint8Array) { let len = 8 if (data) len += data.length let pkt = new Uint8Array(len) let seq = ++this.cmdSeq & 0xffff U.write32(pkt, 0, cmd); U.write16(pkt, 4, seq); U.write16(pkt, 6, 0); if (data) U.memcpy(pkt, 8, data, 0, data.length) let numSkipped = 0 let handleReturnAsync = (): Promise<Uint8Array> => this.msgs.shiftAsync(1000) // we wait up to a second .then(res => { if (U.read16(res, 0) != seq) { if (numSkipped < 3) { numSkipped++ this.io.log(`message out of sync, (${seq} vs ${U.read16(res, 0)}); will re-try`) return handleReturnAsync() } this.error("out of sync") } let info = "" if (res[3]) info = "; info=" + res[3] switch (res[2]) { case HF2_STATUS_OK: return res.slice(4) case HF2_STATUS_INVALID_CMD: this.error("invalid command" + info) break case HF2_STATUS_EXEC_ERR: this.error("execution error" + info) break default: this.error("error " + res[2] + info) break } return null }) return this.lock.enqueue("talk", () => this.sendMsgAsync(pkt) .then(handleReturnAsync)) } private sendMsgAsync(buf: Uint8Array, serial: number = 0) { // Util.assert(buf.length <= this.maxMsgSize) let frame = new Uint8Array(64) let loop = (pos: number): Promise<void> => { let len = buf.length - pos if (len <= 0) return Promise.resolve() if (len > 63) { len = 63 frame[0] = HF2_FLAG_CMDPKT_BODY; } else { frame[0] = HF2_FLAG_CMDPKT_LAST; } if (serial) frame[0] = serial == 1 ? 
HF2_FLAG_SERIAL_OUT : HF2_FLAG_SERIAL_ERR; frame[0] |= len; for (let i = 0; i < len; ++i) frame[i + 1] = buf[pos + i] return this.io.sendPacketAsync(frame) .then(() => loop(pos + len)) } return loop(0) } onEvent(id: number, f: (buf: Uint8Array) => void) { U.assert(!!(id & HF2_EV_MASK)) this.eventHandlers[id + ""] = f } onJDMessage(f: (buf: Uint8Array) => void) { this.talkAsync(HF2_CMD_JDS_CONFIG, U.encodeU32LE([1])) this.onEvent(HF2_EV_JDS_PACKET, f) } sendJDMessageAsync(buf: Uint8Array) { return this.talkAsync(HF2_CMD_JDS_SEND, buf) } handleEvent(buf: Uint8Array) { let evid = U.read32(buf, 0) let f = this.eventHandlers[evid + ""] if (f) { f(buf.slice(4)) } else { this.io.log("unhandled event: " + evid.toString(16)) } } onSerial(data: Uint8Array, iserr: boolean) { console.log("SERIAL:", U.bufferToString(data)) } async init() { await this.io.init() const buf = await this.talkAsync(HF2_CMD_INFO) this.io.log("Connected to: " + U.bufferToString(buf)) } }
{ this.msgs.push(r) }
conditional_block
hf2.ts
import * as webusb from "webusb" import * as U from "./pxtutils" const controlTransferGetReport = 0x01; const controlTransferSetReport = 0x09; const controlTransferOutReport = 0x200; const controlTransferInReport = 0x100; // see https://github.com/microsoft/uf2/blob/master/hf2.md for full spec export const HF2_CMD_BININFO = 0x0001 // no arguments export const HF2_MODE_BOOTLOADER = 0x01 export const HF2_MODE_USERSPACE = 0x02 /* struct HF2_BININFO_Result { uint32_t mode; uint32_t flash_page_size; uint32_t flash_num_pages; uint32_t max_message_size; }; */ export const HF2_CMD_INFO = 0x0002 // no arguments // results is utf8 character array export const HF2_CMD_RESET_INTO_APP = 0x0003// no arguments, no result export const HF2_CMD_RESET_INTO_BOOTLOADER = 0x0004 // no arguments, no result export const HF2_CMD_START_FLASH = 0x0005 // no arguments, no result export const HF2_CMD_WRITE_FLASH_PAGE = 0x0006 /* struct HF2_WRITE_FLASH_PAGE_Command { uint32_t target_addr; uint32_t data[flash_page_size]; }; */ // no result export const HF2_CMD_CHKSUM_PAGES = 0x0007 /* struct HF2_CHKSUM_PAGES_Command { uint32_t target_addr; uint32_t num_pages; }; struct HF2_CHKSUM_PAGES_Result { uint16_t chksums[num_pages]; }; */ export const HF2_CMD_READ_WORDS = 0x0008 /* struct HF2_READ_WORDS_Command { uint32_t target_addr; uint32_t num_words; }; struct HF2_READ_WORDS_Result { uint32_t words[num_words]; }; */ export const HF2_CMD_WRITE_WORDS = 0x0009 /* struct HF2_WRITE_WORDS_Command { uint32_t target_addr; uint32_t num_words; uint32_t words[num_words]; }; */ // no result export const HF2_CMD_DMESG = 0x0010 // no arguments // results is utf8 character array export const HF2_FLAG_SERIAL_OUT = 0x80 export const HF2_FLAG_SERIAL_ERR = 0xC0 export const HF2_FLAG_CMDPKT_LAST = 0x40 export const HF2_FLAG_CMDPKT_BODY = 0x00 export const HF2_FLAG_MASK = 0xC0 export const HF2_SIZE_MASK = 63 export const HF2_STATUS_OK = 0x00 export const HF2_STATUS_INVALID_CMD = 0x01 export const HF2_STATUS_EXEC_ERR = 
0x02 export const HF2_STATUS_EVENT = 0x80 // the eventId is overlayed on the tag+status; the mask corresponds // to the HF2_STATUS_EVENT above export const HF2_EV_MASK = 0x800000 export const HF2_CMD_JDS_CONFIG = 0x0020 export const HF2_CMD_JDS_SEND = 0x0021 export const HF2_EV_JDS_PACKET = 0x800020 export class Transport { dev: USBDevice; iface: USBInterface; altIface: USBAlternateInterface; epIn: USBEndpoint; epOut: USBEndpoint; readLoopStarted = false; ready = false; onData = (v: Uint8Array) => { }; onError = (e: Error) => { console.error("HF2 error: " + (e ? e.stack : e)) }; log(msg: string, v?: any) { if (v != undefined) console.log("HF2: " + msg, v) else console.log("HF2: " + msg) } private clearDev() { if (this.dev) { this.dev = null this.epIn = null this.epOut = null } } disconnectAsync() { this.ready = false if (!this.dev) return Promise.resolve() this.log("close device") return this.dev.close() .catch(e => { // just ignore errors closing, most likely device just disconnected }) .then(() => { this.clearDev() return U.delay(500) }) } private recvPacketAsync(): Promise<Uint8Array> { let final = (res: USBInTransferResult) => { if (res.status != "ok") this.error("USB IN transfer failed") let arr = new Uint8Array(res.data.buffer) if (arr.length == 0) return this.recvPacketAsync() return arr } if (!this.dev) return Promise.reject(new Error("Disconnected")) if (!this.epIn) { return this.dev.controlTransferIn({ requestType: "class", recipient: "interface", request: controlTransferGetReport, value: controlTransferInReport, index: this.iface.interfaceNumber }, 64).then(final) } return this.dev.transferIn(this.epIn.endpointNumber, 64) .then(final) }
(msg: string) { throw new Error(`USB error on device ${this.dev ? this.dev.productName : "n/a"} (${msg})`) } private async readLoop() { if (this.readLoopStarted) return this.readLoopStarted = true this.log("start read loop") while (true) { if (!this.ready) { break //await U.delay(300) //continue } try { const buf = await this.recvPacketAsync() if (buf[0]) { // we've got data; retry reading immedietly after processing it this.onData(buf) } else { // throttle down if no data coming await U.delay(5) } } catch (err) { if (this.dev) this.onError(err) await U.delay(300) } } } sendPacketAsync(pkt: Uint8Array) { if (!this.dev) return Promise.reject(new Error("Disconnected")) U.assert(pkt.length <= 64) if (!this.epOut) { return this.dev.controlTransferOut({ requestType: "class", recipient: "interface", request: controlTransferSetReport, value: controlTransferOutReport, index: this.iface.interfaceNumber }, pkt).then(res => { if (res.status != "ok") this.error("USB CTRL OUT transfer failed") }) } return this.dev.transferOut(this.epOut.endpointNumber, pkt) .then(res => { if (res.status != "ok") this.error("USB OUT transfer failed") }) } async init() { const usb = new webusb.USB({ devicesFound: async devices => { for (const device of devices) { if (device.deviceVersionMajor == 42) { for (const iface of device.configuration.interfaces) { const alt = iface.alternates[0] if (alt.interfaceClass == 0xff && alt.interfaceSubclass == 42) { this.dev = device this.iface = iface this.altIface = alt return device } } } } return undefined } }) this.dev = await usb.requestDevice({ filters: [{}] }) this.log("connect device: " + this.dev.manufacturerName + " " + this.dev.productName) await this.dev.open() await this.dev.selectConfiguration(1) if (this.altIface.endpoints.length) { this.epIn = this.altIface.endpoints.filter(e => e.direction == "in")[0] this.epOut = this.altIface.endpoints.filter(e => e.direction == "out")[0] U.assert(this.epIn.packetSize == 64); U.assert(this.epOut.packetSize == 
64); } this.log("claim interface") await this.dev.claimInterface(this.iface.interfaceNumber) this.log("all connected") this.ready = true this.readLoop() } } export class Proto { eventHandlers: U.SMap<(buf: Uint8Array) => void> = {} msgs = new U.PromiseBuffer<Uint8Array>() cmdSeq = (Math.random() * 0xffff) | 0; private lock = new U.PromiseQueue(); constructor(public io: Transport) { let frames: Uint8Array[] = [] io.onData = buf => { let tp = buf[0] & HF2_FLAG_MASK let len = buf[0] & 63 //console.log(`msg tp=${tp} len=${len}`) let frame = new Uint8Array(len) U.memcpy(frame, 0, buf, 1, len) if (tp & HF2_FLAG_SERIAL_OUT) { this.onSerial(frame, tp == HF2_FLAG_SERIAL_ERR) return } frames.push(frame) if (tp == HF2_FLAG_CMDPKT_BODY) { return } else { U.assert(tp == HF2_FLAG_CMDPKT_LAST) let total = 0 for (let f of frames) total += f.length let r = new Uint8Array(total) let ptr = 0 for (let f of frames) { U.memcpy(r, ptr, f) ptr += f.length } frames = [] if (r[2] & HF2_STATUS_EVENT) { // asynchronous event this.handleEvent(r) } else { this.msgs.push(r) } } } } error(m: string) { return this.io.error(m) } talkAsync(cmd: number, data?: Uint8Array) { let len = 8 if (data) len += data.length let pkt = new Uint8Array(len) let seq = ++this.cmdSeq & 0xffff U.write32(pkt, 0, cmd); U.write16(pkt, 4, seq); U.write16(pkt, 6, 0); if (data) U.memcpy(pkt, 8, data, 0, data.length) let numSkipped = 0 let handleReturnAsync = (): Promise<Uint8Array> => this.msgs.shiftAsync(1000) // we wait up to a second .then(res => { if (U.read16(res, 0) != seq) { if (numSkipped < 3) { numSkipped++ this.io.log(`message out of sync, (${seq} vs ${U.read16(res, 0)}); will re-try`) return handleReturnAsync() } this.error("out of sync") } let info = "" if (res[3]) info = "; info=" + res[3] switch (res[2]) { case HF2_STATUS_OK: return res.slice(4) case HF2_STATUS_INVALID_CMD: this.error("invalid command" + info) break case HF2_STATUS_EXEC_ERR: this.error("execution error" + info) break default: this.error("error 
" + res[2] + info) break } return null }) return this.lock.enqueue("talk", () => this.sendMsgAsync(pkt) .then(handleReturnAsync)) } private sendMsgAsync(buf: Uint8Array, serial: number = 0) { // Util.assert(buf.length <= this.maxMsgSize) let frame = new Uint8Array(64) let loop = (pos: number): Promise<void> => { let len = buf.length - pos if (len <= 0) return Promise.resolve() if (len > 63) { len = 63 frame[0] = HF2_FLAG_CMDPKT_BODY; } else { frame[0] = HF2_FLAG_CMDPKT_LAST; } if (serial) frame[0] = serial == 1 ? HF2_FLAG_SERIAL_OUT : HF2_FLAG_SERIAL_ERR; frame[0] |= len; for (let i = 0; i < len; ++i) frame[i + 1] = buf[pos + i] return this.io.sendPacketAsync(frame) .then(() => loop(pos + len)) } return loop(0) } onEvent(id: number, f: (buf: Uint8Array) => void) { U.assert(!!(id & HF2_EV_MASK)) this.eventHandlers[id + ""] = f } onJDMessage(f: (buf: Uint8Array) => void) { this.talkAsync(HF2_CMD_JDS_CONFIG, U.encodeU32LE([1])) this.onEvent(HF2_EV_JDS_PACKET, f) } sendJDMessageAsync(buf: Uint8Array) { return this.talkAsync(HF2_CMD_JDS_SEND, buf) } handleEvent(buf: Uint8Array) { let evid = U.read32(buf, 0) let f = this.eventHandlers[evid + ""] if (f) { f(buf.slice(4)) } else { this.io.log("unhandled event: " + evid.toString(16)) } } onSerial(data: Uint8Array, iserr: boolean) { console.log("SERIAL:", U.bufferToString(data)) } async init() { await this.io.init() const buf = await this.talkAsync(HF2_CMD_INFO) this.io.log("Connected to: " + U.bufferToString(buf)) } }
error
identifier_name
lib.rs
//! # MCAI Worker SDK //! //! This library is an SDK to communicate via message broker with [StepFlow](https://hexdocs.pm/step_flow/readme.html). //! It's used for every worker as an abstraction. //! It manage itself requirements, message parsing, direct messaging. //! //! ## Worker implementation //! //! 1. Create a Rust project //! 2. Add MCAI Worker SDK as a dependency in Cargo.toml: `mcai_worker_sdk = "^1.0"` //! 1. Update the main file with the example provided here to implement [MessageEvent](trait.MessageEvent.html) trait, //! and call the [`start_worker`](fn.start_worker.html) to start the worker itself. //! //! ```rust //! use mcai_worker_sdk::{ //! MessageEvent, //! Version, //! worker::Parameter, //! }; //! use serde_derive::Deserialize; //! use schemars::JsonSchema; //! //! #[derive(Debug)] //! struct WorkerNameEvent {} //! //! #[derive(Debug, Deserialize, JsonSchema)] //! struct WorkerParameters {} //! //! impl MessageEvent<WorkerParameters> for WorkerNameEvent { //! fn get_name(&self) -> String {"sample_worker".to_string()} //! fn get_short_description(&self) -> String {"Short description".to_string()} //! fn get_description(&self) -> String {"Long description".to_string()} //! fn get_version(&self) -> Version { Version::new(0, 0, 1) } //! } //! static WORKER_NAME_EVENT: WorkerNameEvent = WorkerNameEvent {}; //! //! // uncomment it to start the worker //! // fn main() { //! // mcai_worker_sdk::start_worker(&WORKER_NAME_EVENT); //! // } //! ``` //! //! ## Runtime configuration //! //! ### AMQP connection //! //! | Variable | Description | //! |-----------------|-------------| //! | `AMQP_HOSTNAME` | IP or host of AMQP server (default: `localhost`) | //! | `AMQP_PORT` | AMQP server port (default: `5672`) | //! | `AMQP_TLS` | enable secure connection using AMQPS (default: `false`, enable with `true` or `1` or `TRUE` or `True`) | //! | `AMQP_USERNAME` | Username used to connect to AMQP server (default: `guest`) | //! 
| `AMQP_PASSWORD` | Password used to connect to AMQP server (default: `guest`) | //! | `AMQP_VHOST` | AMQP virtual host (default: `/`) | //! | `AMQP_QUEUE` | AMQP queue name used to receive job orders (default: `job_undefined`) | //! //! ### Vault connection //! //! | Variable | Description | //! |--------------------|-------------| //! | `BACKEND_HOSTNAME` | URL used to connect to backend server (default: `http://127.0.0.1:4000/api`) | //! | `BACKEND_USERNAME` | Username used to connect to backend server | //! | `BACKEND_PASSWORD` | Password used to connect to backend server | //! //! ## Start worker locally //! //! MCAI Worker SDK can be launched locally - without RabbitMQ. //! It can process some message for different purpose (functional tests, message order examples, etc.). //! //! To start worker in this mode, setup the environment variable `SOURCE_ORDERS` with path(s) to json orders. //! It can take multiple orders, joined with `:` on unix platform, `;` on windows os. //! //! ### Examples: //! //! ```bash //! RUST_LOG=info SOURCE_ORDERS=./examples/success_order.json:./examples/error_order.json cargo run --example worker //! 
``` #[macro_use] extern crate log; #[macro_use] extern crate serde_derive; #[macro_use] extern crate serde_json; #[cfg(feature = "media")] #[macro_use] extern crate yaserde_derive; mod channels; mod config; mod error; pub mod job; pub mod message; pub mod parameter; pub mod worker; /// Re-export from lapin Channel pub use lapin::Channel; pub use log::{debug, error, info, trace, warn}; pub use schemars::JsonSchema; /// Re-export from semver: pub use semver::Version; pub use error::{MessageError, Result}; #[cfg(feature = "media")] pub use message::media::{ audio::AudioFormat, ebu_ttml_live::{ Body, Div, EbuTtmlLive, Frames, Head, Paragraph, Span, Styling, TimeExpression, TimeUnit, Title, }, filters::{AudioFilter, GenericFilter, VideoFilter}, video::{RegionOfInterest, Scaling, VideoFormat}, StreamDescriptor, }; pub use message::publish_job_progression; pub use parameter::container::ParametersContainer; pub use parameter::{Parameter, ParameterValue, Requirement}; #[cfg(feature = "media")] pub use stainless_ffmpeg::{format_context::FormatContext, frame::Frame}; use crate::worker::docker; use chrono::prelude::*; use config::*; use env_logger::Builder; use futures_executor::LocalPool; use futures_util::{future::FutureExt, stream::StreamExt, task::LocalSpawnExt}; use job::JobResult; use lapin::{options::*, types::FieldTable, Connection, ConnectionProperties}; use serde::de::DeserializeOwned; #[cfg(feature = "media")] use serde::Serialize; use std::str::FromStr; #[cfg(feature = "media")] use std::sync::{mpsc::Sender, Mutex}; use std::{cell::RefCell, fs, io::Write, rc::Rc, sync::Arc, thread, time}; #[cfg(feature = "media")] use yaserde::YaSerialize; /// Exposed Channel type pub type McaiChannel = Arc<Channel>; #[cfg(feature = "media")] #[derive(Debug)] pub struct ProcessResult { end_of_process: bool, json_content: Option<String>, xml_content: Option<String>, } #[cfg(feature = "media")] impl ProcessResult { pub fn empty() -> Self { ProcessResult { end_of_process: false, 
json_content: None, xml_content: None, } } pub fn end_of_process() -> Self { ProcessResult { end_of_process: true, json_content: None, xml_content: None, } } pub fn new_json<S: Serialize>(content: S) -> Self { let content = serde_json::to_string(&content).unwrap(); ProcessResult { end_of_process: false, json_content: Some(content), xml_content: None, } } pub fn new_xml<Y: YaSerialize>(content: Y) -> Self { let content = yaserde::ser::to_string(&content).unwrap(); ProcessResult { end_of_process: false, json_content: None, xml_content: Some(content), } } } #[cfg(feature = "media")] pub enum ProcessFrame { AudioVideo(Frame), EbuTtmlLive(Box<EbuTtmlLive>), Data(Vec<u8>), } #[cfg(feature = "media")] impl ProcessFrame { pub fn get_pts(&self) -> i64 { match self { ProcessFrame::AudioVideo(frame) => frame.get_pts(), ProcessFrame::EbuTtmlLive(_) | ProcessFrame::Data(_) => { // improvement: support pts to terminate 0 } } } } /// # Trait to describe a worker /// Implement this trait to implement a worker pub trait MessageEvent<P: DeserializeOwned + JsonSchema> { fn get_name(&self) -> String; fn get_short_description(&self) -> String; fn get_description(&self) -> String; fn get_version(&self) -> semver::Version; fn init(&mut self) -> Result<()> { Ok(()) } #[cfg(feature = "media")] fn init_process( &mut self, _parameters: P, _format_context: Arc<Mutex<FormatContext>>, _response_sender: Arc<Mutex<Sender<ProcessResult>>>, ) -> Result<Vec<StreamDescriptor>> { Ok(vec![]) } #[cfg(feature = "media")] fn process_frame( &mut self, _job_result: JobResult, _stream_index: usize, _frame: ProcessFrame, ) -> Result<ProcessResult> { Err(MessageError::NotImplemented()) } #[cfg(feature = "media")] fn ending_process(&mut self) -> Result<()> { Ok(()) } /// Not called when the "media" feature is enabled fn process( &self, _channel: Option<McaiChannel>, _parameters: P, _job_result: JobResult, ) -> Result<JobResult> where Self: std::marker::Sized, { Err(MessageError::NotImplemented()) } } /// 
Function to start a worker pub fn start_worker<P: DeserializeOwned + JsonSchema, ME: MessageEvent<P>>(mut message_event: ME) where ME: std::marker::Sync, { let mut builder = Builder::from_default_env(); let amqp_queue = get_amqp_queue(); let instance_id = docker::get_instance_id("/proc/self/cgroup"); let container_id = instance_id.clone(); builder .format(move |stream, record| { writeln!( stream, "{} - {} - {} - {} - {} - {}", Utc::now(), &container_id, get_amqp_queue(), record.target().parse::<i64>().unwrap_or(-1), record.level(), record.args(), ) }) .init(); let worker_configuration = worker::WorkerConfiguration::new(&amqp_queue, &message_event, &instance_id); if let Err(configuration_error) = worker_configuration { error!("{:?}", configuration_error); return; } let worker_configuration = worker_configuration.unwrap(); info!( "Worker: {}, version: {} (MCAI Worker SDK {})", worker_configuration.get_worker_name(), worker_configuration.get_worker_version(), worker_configuration.get_sdk_version(), ); if let Ok(enabled) = std::env::var("DESCRIBE") { if enabled == "1" || bool::from_str(&enabled.to_lowercase()).unwrap_or(false) { match serde_json::to_string_pretty(&worker_configuration) { Ok(serialized_configuration) => { println!("{}", serialized_configuration); return; } Err(error) => error!("Could not serialize worker configuration: {:?}", error), } } } if let Err(message) = message_event.init() { error!("{:?}", message); return; } let message_event_ref = Rc::new(RefCell::new(message_event)); info!("Worker initialized, ready to receive jobs"); if let Some(source_orders) = get_source_orders() { warn!("Worker will process source orders"); for source_order in &source_orders { info!("Start to process order: {:?}", source_order); let count = None; let channel = None; let message_data = fs::read_to_string(source_order).unwrap(); let result = message::parse_and_process_message( message_event_ref.clone(), &message_data, count, channel, message::publish_job_progression, ); 
match result { Ok(mut job_result) =>
Err(message) => { error!("{:?}", message); } } } return; } loop { let amqp_uri = get_amqp_uri(); let mut executor = LocalPool::new(); let spawner = executor.spawner(); executor.run_until(async { let conn = Connection::connect_uri( amqp_uri, ConnectionProperties::default().with_default_executor(8), ) .wait() .unwrap(); info!("Connected"); let channel = Arc::new(channels::declare_consumer_channel( &conn, &worker_configuration, )); let consumer = channel .clone() .basic_consume( &amqp_queue, "amqp_worker", BasicConsumeOptions::default(), FieldTable::default(), ) .await .unwrap(); let status_consumer = channel .clone() .basic_consume( &worker_configuration.get_direct_messaging_queue_name(), "status_amqp_worker", BasicConsumeOptions::default(), FieldTable::default(), ) .await .unwrap(); let status_response_channel = channel.clone(); let status_worker_configuration = worker_configuration.clone(); let _consumer = spawner.spawn_local(async move { status_consumer .for_each(move |delivery| { let (_channel, delivery) = delivery.expect("error caught in in consumer"); worker::system_information::send_real_time_information( delivery, &status_response_channel, &status_worker_configuration, ) .map(|_| ()) }) .await }); info!("Start to consume on queue {:?}", amqp_queue); let clone_channel = channel.clone(); let message_event = message_event_ref.clone(); consumer .for_each(move |delivery| { let (_channel, delivery) = delivery.expect("error caught in in consumer"); message::process_message(message_event.clone(), delivery, clone_channel.clone()) .map(|_| ()) }) .await }); let sleep_duration = time::Duration::new(1, 0); thread::sleep(sleep_duration); info!("Reconnection..."); } } #[test] fn empty_message_event_impl() { #[derive(Debug)] struct CustomEvent {} #[derive(JsonSchema, Deserialize)] struct CustomParameters {} impl MessageEvent<CustomParameters> for CustomEvent { fn get_name(&self) -> String { "custom".to_string() } fn get_short_description(&self) -> String { "short 
description".to_string() } fn get_description(&self) -> String { "long description".to_string() } fn get_version(&self) -> semver::Version { semver::Version::new(1, 2, 3) } } let custom_event = CustomEvent {}; let parameters = CustomParameters {}; let job = job::Job { job_id: 1234, parameters: vec![], }; let job_result = job::JobResult::new(job.job_id); let result = custom_event.process(None, parameters, job_result); assert!(result == Err(MessageError::NotImplemented())); }
{ job_result.update_execution_duration(); info!(target: &job_result.get_job_id().to_string(), "Process succeeded: {:?}", job_result) }
conditional_block
lib.rs
//! # MCAI Worker SDK //! //! This library is an SDK to communicate via message broker with [StepFlow](https://hexdocs.pm/step_flow/readme.html). //! It's used for every worker as an abstraction. //! It manage itself requirements, message parsing, direct messaging. //! //! ## Worker implementation //! //! 1. Create a Rust project //! 2. Add MCAI Worker SDK as a dependency in Cargo.toml: `mcai_worker_sdk = "^1.0"` //! 1. Update the main file with the example provided here to implement [MessageEvent](trait.MessageEvent.html) trait, //! and call the [`start_worker`](fn.start_worker.html) to start the worker itself. //! //! ```rust //! use mcai_worker_sdk::{ //! MessageEvent, //! Version, //! worker::Parameter, //! }; //! use serde_derive::Deserialize; //! use schemars::JsonSchema; //! //! #[derive(Debug)] //! struct WorkerNameEvent {} //! //! #[derive(Debug, Deserialize, JsonSchema)] //! struct WorkerParameters {} //! //! impl MessageEvent<WorkerParameters> for WorkerNameEvent { //! fn get_name(&self) -> String {"sample_worker".to_string()} //! fn get_short_description(&self) -> String {"Short description".to_string()} //! fn get_description(&self) -> String {"Long description".to_string()} //! fn get_version(&self) -> Version { Version::new(0, 0, 1) } //! } //! static WORKER_NAME_EVENT: WorkerNameEvent = WorkerNameEvent {}; //! //! // uncomment it to start the worker //! // fn main() {
//! // } //! ``` //! //! ## Runtime configuration //! //! ### AMQP connection //! //! | Variable | Description | //! |-----------------|-------------| //! | `AMQP_HOSTNAME` | IP or host of AMQP server (default: `localhost`) | //! | `AMQP_PORT` | AMQP server port (default: `5672`) | //! | `AMQP_TLS` | enable secure connection using AMQPS (default: `false`, enable with `true` or `1` or `TRUE` or `True`) | //! | `AMQP_USERNAME` | Username used to connect to AMQP server (default: `guest`) | //! | `AMQP_PASSWORD` | Password used to connect to AMQP server (default: `guest`) | //! | `AMQP_VHOST` | AMQP virtual host (default: `/`) | //! | `AMQP_QUEUE` | AMQP queue name used to receive job orders (default: `job_undefined`) | //! //! ### Vault connection //! //! | Variable | Description | //! |--------------------|-------------| //! | `BACKEND_HOSTNAME` | URL used to connect to backend server (default: `http://127.0.0.1:4000/api`) | //! | `BACKEND_USERNAME` | Username used to connect to backend server | //! | `BACKEND_PASSWORD` | Password used to connect to backend server | //! //! ## Start worker locally //! //! MCAI Worker SDK can be launched locally - without RabbitMQ. //! It can process some message for different purpose (functional tests, message order examples, etc.). //! //! To start worker in this mode, setup the environment variable `SOURCE_ORDERS` with path(s) to json orders. //! It can take multiple orders, joined with `:` on unix platform, `;` on windows os. //! //! ### Examples: //! //! ```bash //! RUST_LOG=info SOURCE_ORDERS=./examples/success_order.json:./examples/error_order.json cargo run --example worker //! 
``` #[macro_use] extern crate log; #[macro_use] extern crate serde_derive; #[macro_use] extern crate serde_json; #[cfg(feature = "media")] #[macro_use] extern crate yaserde_derive; mod channels; mod config; mod error; pub mod job; pub mod message; pub mod parameter; pub mod worker; /// Re-export from lapin Channel pub use lapin::Channel; pub use log::{debug, error, info, trace, warn}; pub use schemars::JsonSchema; /// Re-export from semver: pub use semver::Version; pub use error::{MessageError, Result}; #[cfg(feature = "media")] pub use message::media::{ audio::AudioFormat, ebu_ttml_live::{ Body, Div, EbuTtmlLive, Frames, Head, Paragraph, Span, Styling, TimeExpression, TimeUnit, Title, }, filters::{AudioFilter, GenericFilter, VideoFilter}, video::{RegionOfInterest, Scaling, VideoFormat}, StreamDescriptor, }; pub use message::publish_job_progression; pub use parameter::container::ParametersContainer; pub use parameter::{Parameter, ParameterValue, Requirement}; #[cfg(feature = "media")] pub use stainless_ffmpeg::{format_context::FormatContext, frame::Frame}; use crate::worker::docker; use chrono::prelude::*; use config::*; use env_logger::Builder; use futures_executor::LocalPool; use futures_util::{future::FutureExt, stream::StreamExt, task::LocalSpawnExt}; use job::JobResult; use lapin::{options::*, types::FieldTable, Connection, ConnectionProperties}; use serde::de::DeserializeOwned; #[cfg(feature = "media")] use serde::Serialize; use std::str::FromStr; #[cfg(feature = "media")] use std::sync::{mpsc::Sender, Mutex}; use std::{cell::RefCell, fs, io::Write, rc::Rc, sync::Arc, thread, time}; #[cfg(feature = "media")] use yaserde::YaSerialize; /// Exposed Channel type pub type McaiChannel = Arc<Channel>; #[cfg(feature = "media")] #[derive(Debug)] pub struct ProcessResult { end_of_process: bool, json_content: Option<String>, xml_content: Option<String>, } #[cfg(feature = "media")] impl ProcessResult { pub fn empty() -> Self { ProcessResult { end_of_process: false, 
json_content: None, xml_content: None, } } pub fn end_of_process() -> Self { ProcessResult { end_of_process: true, json_content: None, xml_content: None, } } pub fn new_json<S: Serialize>(content: S) -> Self { let content = serde_json::to_string(&content).unwrap(); ProcessResult { end_of_process: false, json_content: Some(content), xml_content: None, } } pub fn new_xml<Y: YaSerialize>(content: Y) -> Self { let content = yaserde::ser::to_string(&content).unwrap(); ProcessResult { end_of_process: false, json_content: None, xml_content: Some(content), } } } #[cfg(feature = "media")] pub enum ProcessFrame { AudioVideo(Frame), EbuTtmlLive(Box<EbuTtmlLive>), Data(Vec<u8>), } #[cfg(feature = "media")] impl ProcessFrame { pub fn get_pts(&self) -> i64 { match self { ProcessFrame::AudioVideo(frame) => frame.get_pts(), ProcessFrame::EbuTtmlLive(_) | ProcessFrame::Data(_) => { // improvement: support pts to terminate 0 } } } } /// # Trait to describe a worker /// Implement this trait to implement a worker pub trait MessageEvent<P: DeserializeOwned + JsonSchema> { fn get_name(&self) -> String; fn get_short_description(&self) -> String; fn get_description(&self) -> String; fn get_version(&self) -> semver::Version; fn init(&mut self) -> Result<()> { Ok(()) } #[cfg(feature = "media")] fn init_process( &mut self, _parameters: P, _format_context: Arc<Mutex<FormatContext>>, _response_sender: Arc<Mutex<Sender<ProcessResult>>>, ) -> Result<Vec<StreamDescriptor>> { Ok(vec![]) } #[cfg(feature = "media")] fn process_frame( &mut self, _job_result: JobResult, _stream_index: usize, _frame: ProcessFrame, ) -> Result<ProcessResult> { Err(MessageError::NotImplemented()) } #[cfg(feature = "media")] fn ending_process(&mut self) -> Result<()> { Ok(()) } /// Not called when the "media" feature is enabled fn process( &self, _channel: Option<McaiChannel>, _parameters: P, _job_result: JobResult, ) -> Result<JobResult> where Self: std::marker::Sized, { Err(MessageError::NotImplemented()) } } /// 
Function to start a worker pub fn start_worker<P: DeserializeOwned + JsonSchema, ME: MessageEvent<P>>(mut message_event: ME) where ME: std::marker::Sync, { let mut builder = Builder::from_default_env(); let amqp_queue = get_amqp_queue(); let instance_id = docker::get_instance_id("/proc/self/cgroup"); let container_id = instance_id.clone(); builder .format(move |stream, record| { writeln!( stream, "{} - {} - {} - {} - {} - {}", Utc::now(), &container_id, get_amqp_queue(), record.target().parse::<i64>().unwrap_or(-1), record.level(), record.args(), ) }) .init(); let worker_configuration = worker::WorkerConfiguration::new(&amqp_queue, &message_event, &instance_id); if let Err(configuration_error) = worker_configuration { error!("{:?}", configuration_error); return; } let worker_configuration = worker_configuration.unwrap(); info!( "Worker: {}, version: {} (MCAI Worker SDK {})", worker_configuration.get_worker_name(), worker_configuration.get_worker_version(), worker_configuration.get_sdk_version(), ); if let Ok(enabled) = std::env::var("DESCRIBE") { if enabled == "1" || bool::from_str(&enabled.to_lowercase()).unwrap_or(false) { match serde_json::to_string_pretty(&worker_configuration) { Ok(serialized_configuration) => { println!("{}", serialized_configuration); return; } Err(error) => error!("Could not serialize worker configuration: {:?}", error), } } } if let Err(message) = message_event.init() { error!("{:?}", message); return; } let message_event_ref = Rc::new(RefCell::new(message_event)); info!("Worker initialized, ready to receive jobs"); if let Some(source_orders) = get_source_orders() { warn!("Worker will process source orders"); for source_order in &source_orders { info!("Start to process order: {:?}", source_order); let count = None; let channel = None; let message_data = fs::read_to_string(source_order).unwrap(); let result = message::parse_and_process_message( message_event_ref.clone(), &message_data, count, channel, message::publish_job_progression, ); 
match result { Ok(mut job_result) => { job_result.update_execution_duration(); info!(target: &job_result.get_job_id().to_string(), "Process succeeded: {:?}", job_result) } Err(message) => { error!("{:?}", message); } } } return; } loop { let amqp_uri = get_amqp_uri(); let mut executor = LocalPool::new(); let spawner = executor.spawner(); executor.run_until(async { let conn = Connection::connect_uri( amqp_uri, ConnectionProperties::default().with_default_executor(8), ) .wait() .unwrap(); info!("Connected"); let channel = Arc::new(channels::declare_consumer_channel( &conn, &worker_configuration, )); let consumer = channel .clone() .basic_consume( &amqp_queue, "amqp_worker", BasicConsumeOptions::default(), FieldTable::default(), ) .await .unwrap(); let status_consumer = channel .clone() .basic_consume( &worker_configuration.get_direct_messaging_queue_name(), "status_amqp_worker", BasicConsumeOptions::default(), FieldTable::default(), ) .await .unwrap(); let status_response_channel = channel.clone(); let status_worker_configuration = worker_configuration.clone(); let _consumer = spawner.spawn_local(async move { status_consumer .for_each(move |delivery| { let (_channel, delivery) = delivery.expect("error caught in in consumer"); worker::system_information::send_real_time_information( delivery, &status_response_channel, &status_worker_configuration, ) .map(|_| ()) }) .await }); info!("Start to consume on queue {:?}", amqp_queue); let clone_channel = channel.clone(); let message_event = message_event_ref.clone(); consumer .for_each(move |delivery| { let (_channel, delivery) = delivery.expect("error caught in in consumer"); message::process_message(message_event.clone(), delivery, clone_channel.clone()) .map(|_| ()) }) .await }); let sleep_duration = time::Duration::new(1, 0); thread::sleep(sleep_duration); info!("Reconnection..."); } } #[test] fn empty_message_event_impl() { #[derive(Debug)] struct CustomEvent {} #[derive(JsonSchema, Deserialize)] struct CustomParameters 
{} impl MessageEvent<CustomParameters> for CustomEvent { fn get_name(&self) -> String { "custom".to_string() } fn get_short_description(&self) -> String { "short description".to_string() } fn get_description(&self) -> String { "long description".to_string() } fn get_version(&self) -> semver::Version { semver::Version::new(1, 2, 3) } } let custom_event = CustomEvent {}; let parameters = CustomParameters {}; let job = job::Job { job_id: 1234, parameters: vec![], }; let job_result = job::JobResult::new(job.job_id); let result = custom_event.process(None, parameters, job_result); assert!(result == Err(MessageError::NotImplemented())); }
//! // mcai_worker_sdk::start_worker(&WORKER_NAME_EVENT);
random_line_split
lib.rs
//! # MCAI Worker SDK //! //! This library is an SDK to communicate via message broker with [StepFlow](https://hexdocs.pm/step_flow/readme.html). //! It's used for every worker as an abstraction. //! It manage itself requirements, message parsing, direct messaging. //! //! ## Worker implementation //! //! 1. Create a Rust project //! 2. Add MCAI Worker SDK as a dependency in Cargo.toml: `mcai_worker_sdk = "^1.0"` //! 1. Update the main file with the example provided here to implement [MessageEvent](trait.MessageEvent.html) trait, //! and call the [`start_worker`](fn.start_worker.html) to start the worker itself. //! //! ```rust //! use mcai_worker_sdk::{ //! MessageEvent, //! Version, //! worker::Parameter, //! }; //! use serde_derive::Deserialize; //! use schemars::JsonSchema; //! //! #[derive(Debug)] //! struct WorkerNameEvent {} //! //! #[derive(Debug, Deserialize, JsonSchema)] //! struct WorkerParameters {} //! //! impl MessageEvent<WorkerParameters> for WorkerNameEvent { //! fn get_name(&self) -> String {"sample_worker".to_string()} //! fn get_short_description(&self) -> String {"Short description".to_string()} //! fn get_description(&self) -> String {"Long description".to_string()} //! fn get_version(&self) -> Version { Version::new(0, 0, 1) } //! } //! static WORKER_NAME_EVENT: WorkerNameEvent = WorkerNameEvent {}; //! //! // uncomment it to start the worker //! // fn main() { //! // mcai_worker_sdk::start_worker(&WORKER_NAME_EVENT); //! // } //! ``` //! //! ## Runtime configuration //! //! ### AMQP connection //! //! | Variable | Description | //! |-----------------|-------------| //! | `AMQP_HOSTNAME` | IP or host of AMQP server (default: `localhost`) | //! | `AMQP_PORT` | AMQP server port (default: `5672`) | //! | `AMQP_TLS` | enable secure connection using AMQPS (default: `false`, enable with `true` or `1` or `TRUE` or `True`) | //! | `AMQP_USERNAME` | Username used to connect to AMQP server (default: `guest`) | //! 
| `AMQP_PASSWORD` | Password used to connect to AMQP server (default: `guest`) | //! | `AMQP_VHOST` | AMQP virtual host (default: `/`) | //! | `AMQP_QUEUE` | AMQP queue name used to receive job orders (default: `job_undefined`) | //! //! ### Vault connection //! //! | Variable | Description | //! |--------------------|-------------| //! | `BACKEND_HOSTNAME` | URL used to connect to backend server (default: `http://127.0.0.1:4000/api`) | //! | `BACKEND_USERNAME` | Username used to connect to backend server | //! | `BACKEND_PASSWORD` | Password used to connect to backend server | //! //! ## Start worker locally //! //! MCAI Worker SDK can be launched locally - without RabbitMQ. //! It can process some message for different purpose (functional tests, message order examples, etc.). //! //! To start worker in this mode, setup the environment variable `SOURCE_ORDERS` with path(s) to json orders. //! It can take multiple orders, joined with `:` on unix platform, `;` on windows os. //! //! ### Examples: //! //! ```bash //! RUST_LOG=info SOURCE_ORDERS=./examples/success_order.json:./examples/error_order.json cargo run --example worker //! 
``` #[macro_use] extern crate log; #[macro_use] extern crate serde_derive; #[macro_use] extern crate serde_json; #[cfg(feature = "media")] #[macro_use] extern crate yaserde_derive; mod channels; mod config; mod error; pub mod job; pub mod message; pub mod parameter; pub mod worker; /// Re-export from lapin Channel pub use lapin::Channel; pub use log::{debug, error, info, trace, warn}; pub use schemars::JsonSchema; /// Re-export from semver: pub use semver::Version; pub use error::{MessageError, Result}; #[cfg(feature = "media")] pub use message::media::{ audio::AudioFormat, ebu_ttml_live::{ Body, Div, EbuTtmlLive, Frames, Head, Paragraph, Span, Styling, TimeExpression, TimeUnit, Title, }, filters::{AudioFilter, GenericFilter, VideoFilter}, video::{RegionOfInterest, Scaling, VideoFormat}, StreamDescriptor, }; pub use message::publish_job_progression; pub use parameter::container::ParametersContainer; pub use parameter::{Parameter, ParameterValue, Requirement}; #[cfg(feature = "media")] pub use stainless_ffmpeg::{format_context::FormatContext, frame::Frame}; use crate::worker::docker; use chrono::prelude::*; use config::*; use env_logger::Builder; use futures_executor::LocalPool; use futures_util::{future::FutureExt, stream::StreamExt, task::LocalSpawnExt}; use job::JobResult; use lapin::{options::*, types::FieldTable, Connection, ConnectionProperties}; use serde::de::DeserializeOwned; #[cfg(feature = "media")] use serde::Serialize; use std::str::FromStr; #[cfg(feature = "media")] use std::sync::{mpsc::Sender, Mutex}; use std::{cell::RefCell, fs, io::Write, rc::Rc, sync::Arc, thread, time}; #[cfg(feature = "media")] use yaserde::YaSerialize; /// Exposed Channel type pub type McaiChannel = Arc<Channel>; #[cfg(feature = "media")] #[derive(Debug)] pub struct ProcessResult { end_of_process: bool, json_content: Option<String>, xml_content: Option<String>, } #[cfg(feature = "media")] impl ProcessResult { pub fn empty() -> Self { ProcessResult { end_of_process: false, 
json_content: None, xml_content: None, } } pub fn end_of_process() -> Self { ProcessResult { end_of_process: true, json_content: None, xml_content: None, } } pub fn new_json<S: Serialize>(content: S) -> Self { let content = serde_json::to_string(&content).unwrap(); ProcessResult { end_of_process: false, json_content: Some(content), xml_content: None, } } pub fn new_xml<Y: YaSerialize>(content: Y) -> Self { let content = yaserde::ser::to_string(&content).unwrap(); ProcessResult { end_of_process: false, json_content: None, xml_content: Some(content), } } } #[cfg(feature = "media")] pub enum ProcessFrame { AudioVideo(Frame), EbuTtmlLive(Box<EbuTtmlLive>), Data(Vec<u8>), } #[cfg(feature = "media")] impl ProcessFrame { pub fn get_pts(&self) -> i64 { match self { ProcessFrame::AudioVideo(frame) => frame.get_pts(), ProcessFrame::EbuTtmlLive(_) | ProcessFrame::Data(_) => { // improvement: support pts to terminate 0 } } } } /// # Trait to describe a worker /// Implement this trait to implement a worker pub trait MessageEvent<P: DeserializeOwned + JsonSchema> { fn get_name(&self) -> String; fn get_short_description(&self) -> String; fn get_description(&self) -> String; fn get_version(&self) -> semver::Version; fn init(&mut self) -> Result<()> { Ok(()) } #[cfg(feature = "media")] fn init_process( &mut self, _parameters: P, _format_context: Arc<Mutex<FormatContext>>, _response_sender: Arc<Mutex<Sender<ProcessResult>>>, ) -> Result<Vec<StreamDescriptor>> { Ok(vec![]) } #[cfg(feature = "media")] fn
( &mut self, _job_result: JobResult, _stream_index: usize, _frame: ProcessFrame, ) -> Result<ProcessResult> { Err(MessageError::NotImplemented()) } #[cfg(feature = "media")] fn ending_process(&mut self) -> Result<()> { Ok(()) } /// Not called when the "media" feature is enabled fn process( &self, _channel: Option<McaiChannel>, _parameters: P, _job_result: JobResult, ) -> Result<JobResult> where Self: std::marker::Sized, { Err(MessageError::NotImplemented()) } } /// Function to start a worker pub fn start_worker<P: DeserializeOwned + JsonSchema, ME: MessageEvent<P>>(mut message_event: ME) where ME: std::marker::Sync, { let mut builder = Builder::from_default_env(); let amqp_queue = get_amqp_queue(); let instance_id = docker::get_instance_id("/proc/self/cgroup"); let container_id = instance_id.clone(); builder .format(move |stream, record| { writeln!( stream, "{} - {} - {} - {} - {} - {}", Utc::now(), &container_id, get_amqp_queue(), record.target().parse::<i64>().unwrap_or(-1), record.level(), record.args(), ) }) .init(); let worker_configuration = worker::WorkerConfiguration::new(&amqp_queue, &message_event, &instance_id); if let Err(configuration_error) = worker_configuration { error!("{:?}", configuration_error); return; } let worker_configuration = worker_configuration.unwrap(); info!( "Worker: {}, version: {} (MCAI Worker SDK {})", worker_configuration.get_worker_name(), worker_configuration.get_worker_version(), worker_configuration.get_sdk_version(), ); if let Ok(enabled) = std::env::var("DESCRIBE") { if enabled == "1" || bool::from_str(&enabled.to_lowercase()).unwrap_or(false) { match serde_json::to_string_pretty(&worker_configuration) { Ok(serialized_configuration) => { println!("{}", serialized_configuration); return; } Err(error) => error!("Could not serialize worker configuration: {:?}", error), } } } if let Err(message) = message_event.init() { error!("{:?}", message); return; } let message_event_ref = Rc::new(RefCell::new(message_event)); info!("Worker 
initialized, ready to receive jobs"); if let Some(source_orders) = get_source_orders() { warn!("Worker will process source orders"); for source_order in &source_orders { info!("Start to process order: {:?}", source_order); let count = None; let channel = None; let message_data = fs::read_to_string(source_order).unwrap(); let result = message::parse_and_process_message( message_event_ref.clone(), &message_data, count, channel, message::publish_job_progression, ); match result { Ok(mut job_result) => { job_result.update_execution_duration(); info!(target: &job_result.get_job_id().to_string(), "Process succeeded: {:?}", job_result) } Err(message) => { error!("{:?}", message); } } } return; } loop { let amqp_uri = get_amqp_uri(); let mut executor = LocalPool::new(); let spawner = executor.spawner(); executor.run_until(async { let conn = Connection::connect_uri( amqp_uri, ConnectionProperties::default().with_default_executor(8), ) .wait() .unwrap(); info!("Connected"); let channel = Arc::new(channels::declare_consumer_channel( &conn, &worker_configuration, )); let consumer = channel .clone() .basic_consume( &amqp_queue, "amqp_worker", BasicConsumeOptions::default(), FieldTable::default(), ) .await .unwrap(); let status_consumer = channel .clone() .basic_consume( &worker_configuration.get_direct_messaging_queue_name(), "status_amqp_worker", BasicConsumeOptions::default(), FieldTable::default(), ) .await .unwrap(); let status_response_channel = channel.clone(); let status_worker_configuration = worker_configuration.clone(); let _consumer = spawner.spawn_local(async move { status_consumer .for_each(move |delivery| { let (_channel, delivery) = delivery.expect("error caught in in consumer"); worker::system_information::send_real_time_information( delivery, &status_response_channel, &status_worker_configuration, ) .map(|_| ()) }) .await }); info!("Start to consume on queue {:?}", amqp_queue); let clone_channel = channel.clone(); let message_event = message_event_ref.clone(); 
consumer .for_each(move |delivery| { let (_channel, delivery) = delivery.expect("error caught in in consumer"); message::process_message(message_event.clone(), delivery, clone_channel.clone()) .map(|_| ()) }) .await }); let sleep_duration = time::Duration::new(1, 0); thread::sleep(sleep_duration); info!("Reconnection..."); } } #[test] fn empty_message_event_impl() { #[derive(Debug)] struct CustomEvent {} #[derive(JsonSchema, Deserialize)] struct CustomParameters {} impl MessageEvent<CustomParameters> for CustomEvent { fn get_name(&self) -> String { "custom".to_string() } fn get_short_description(&self) -> String { "short description".to_string() } fn get_description(&self) -> String { "long description".to_string() } fn get_version(&self) -> semver::Version { semver::Version::new(1, 2, 3) } } let custom_event = CustomEvent {}; let parameters = CustomParameters {}; let job = job::Job { job_id: 1234, parameters: vec![], }; let job_result = job::JobResult::new(job.job_id); let result = custom_event.process(None, parameters, job_result); assert!(result == Err(MessageError::NotImplemented())); }
process_frame
identifier_name
lib.rs
//! # MCAI Worker SDK //! //! This library is an SDK to communicate via message broker with [StepFlow](https://hexdocs.pm/step_flow/readme.html). //! It's used for every worker as an abstraction. //! It manage itself requirements, message parsing, direct messaging. //! //! ## Worker implementation //! //! 1. Create a Rust project //! 2. Add MCAI Worker SDK as a dependency in Cargo.toml: `mcai_worker_sdk = "^1.0"` //! 1. Update the main file with the example provided here to implement [MessageEvent](trait.MessageEvent.html) trait, //! and call the [`start_worker`](fn.start_worker.html) to start the worker itself. //! //! ```rust //! use mcai_worker_sdk::{ //! MessageEvent, //! Version, //! worker::Parameter, //! }; //! use serde_derive::Deserialize; //! use schemars::JsonSchema; //! //! #[derive(Debug)] //! struct WorkerNameEvent {} //! //! #[derive(Debug, Deserialize, JsonSchema)] //! struct WorkerParameters {} //! //! impl MessageEvent<WorkerParameters> for WorkerNameEvent { //! fn get_name(&self) -> String {"sample_worker".to_string()} //! fn get_short_description(&self) -> String {"Short description".to_string()} //! fn get_description(&self) -> String {"Long description".to_string()} //! fn get_version(&self) -> Version { Version::new(0, 0, 1) } //! } //! static WORKER_NAME_EVENT: WorkerNameEvent = WorkerNameEvent {}; //! //! // uncomment it to start the worker //! // fn main() { //! // mcai_worker_sdk::start_worker(&WORKER_NAME_EVENT); //! // } //! ``` //! //! ## Runtime configuration //! //! ### AMQP connection //! //! | Variable | Description | //! |-----------------|-------------| //! | `AMQP_HOSTNAME` | IP or host of AMQP server (default: `localhost`) | //! | `AMQP_PORT` | AMQP server port (default: `5672`) | //! | `AMQP_TLS` | enable secure connection using AMQPS (default: `false`, enable with `true` or `1` or `TRUE` or `True`) | //! | `AMQP_USERNAME` | Username used to connect to AMQP server (default: `guest`) | //! 
| `AMQP_PASSWORD` | Password used to connect to AMQP server (default: `guest`) | //! | `AMQP_VHOST` | AMQP virtual host (default: `/`) | //! | `AMQP_QUEUE` | AMQP queue name used to receive job orders (default: `job_undefined`) | //! //! ### Vault connection //! //! | Variable | Description | //! |--------------------|-------------| //! | `BACKEND_HOSTNAME` | URL used to connect to backend server (default: `http://127.0.0.1:4000/api`) | //! | `BACKEND_USERNAME` | Username used to connect to backend server | //! | `BACKEND_PASSWORD` | Password used to connect to backend server | //! //! ## Start worker locally //! //! MCAI Worker SDK can be launched locally - without RabbitMQ. //! It can process some message for different purpose (functional tests, message order examples, etc.). //! //! To start worker in this mode, setup the environment variable `SOURCE_ORDERS` with path(s) to json orders. //! It can take multiple orders, joined with `:` on unix platform, `;` on windows os. //! //! ### Examples: //! //! ```bash //! RUST_LOG=info SOURCE_ORDERS=./examples/success_order.json:./examples/error_order.json cargo run --example worker //! 
``` #[macro_use] extern crate log; #[macro_use] extern crate serde_derive; #[macro_use] extern crate serde_json; #[cfg(feature = "media")] #[macro_use] extern crate yaserde_derive; mod channels; mod config; mod error; pub mod job; pub mod message; pub mod parameter; pub mod worker; /// Re-export from lapin Channel pub use lapin::Channel; pub use log::{debug, error, info, trace, warn}; pub use schemars::JsonSchema; /// Re-export from semver: pub use semver::Version; pub use error::{MessageError, Result}; #[cfg(feature = "media")] pub use message::media::{ audio::AudioFormat, ebu_ttml_live::{ Body, Div, EbuTtmlLive, Frames, Head, Paragraph, Span, Styling, TimeExpression, TimeUnit, Title, }, filters::{AudioFilter, GenericFilter, VideoFilter}, video::{RegionOfInterest, Scaling, VideoFormat}, StreamDescriptor, }; pub use message::publish_job_progression; pub use parameter::container::ParametersContainer; pub use parameter::{Parameter, ParameterValue, Requirement}; #[cfg(feature = "media")] pub use stainless_ffmpeg::{format_context::FormatContext, frame::Frame}; use crate::worker::docker; use chrono::prelude::*; use config::*; use env_logger::Builder; use futures_executor::LocalPool; use futures_util::{future::FutureExt, stream::StreamExt, task::LocalSpawnExt}; use job::JobResult; use lapin::{options::*, types::FieldTable, Connection, ConnectionProperties}; use serde::de::DeserializeOwned; #[cfg(feature = "media")] use serde::Serialize; use std::str::FromStr; #[cfg(feature = "media")] use std::sync::{mpsc::Sender, Mutex}; use std::{cell::RefCell, fs, io::Write, rc::Rc, sync::Arc, thread, time}; #[cfg(feature = "media")] use yaserde::YaSerialize; /// Exposed Channel type pub type McaiChannel = Arc<Channel>; #[cfg(feature = "media")] #[derive(Debug)] pub struct ProcessResult { end_of_process: bool, json_content: Option<String>, xml_content: Option<String>, } #[cfg(feature = "media")] impl ProcessResult { pub fn empty() -> Self { ProcessResult { end_of_process: false, 
json_content: None, xml_content: None, } } pub fn end_of_process() -> Self { ProcessResult { end_of_process: true, json_content: None, xml_content: None, } } pub fn new_json<S: Serialize>(content: S) -> Self { let content = serde_json::to_string(&content).unwrap(); ProcessResult { end_of_process: false, json_content: Some(content), xml_content: None, } } pub fn new_xml<Y: YaSerialize>(content: Y) -> Self { let content = yaserde::ser::to_string(&content).unwrap(); ProcessResult { end_of_process: false, json_content: None, xml_content: Some(content), } } } #[cfg(feature = "media")] pub enum ProcessFrame { AudioVideo(Frame), EbuTtmlLive(Box<EbuTtmlLive>), Data(Vec<u8>), } #[cfg(feature = "media")] impl ProcessFrame { pub fn get_pts(&self) -> i64 { match self { ProcessFrame::AudioVideo(frame) => frame.get_pts(), ProcessFrame::EbuTtmlLive(_) | ProcessFrame::Data(_) => { // improvement: support pts to terminate 0 } } } } /// # Trait to describe a worker /// Implement this trait to implement a worker pub trait MessageEvent<P: DeserializeOwned + JsonSchema> { fn get_name(&self) -> String; fn get_short_description(&self) -> String; fn get_description(&self) -> String; fn get_version(&self) -> semver::Version; fn init(&mut self) -> Result<()> { Ok(()) } #[cfg(feature = "media")] fn init_process( &mut self, _parameters: P, _format_context: Arc<Mutex<FormatContext>>, _response_sender: Arc<Mutex<Sender<ProcessResult>>>, ) -> Result<Vec<StreamDescriptor>> { Ok(vec![]) } #[cfg(feature = "media")] fn process_frame( &mut self, _job_result: JobResult, _stream_index: usize, _frame: ProcessFrame, ) -> Result<ProcessResult> { Err(MessageError::NotImplemented()) } #[cfg(feature = "media")] fn ending_process(&mut self) -> Result<()> { Ok(()) } /// Not called when the "media" feature is enabled fn process( &self, _channel: Option<McaiChannel>, _parameters: P, _job_result: JobResult, ) -> Result<JobResult> where Self: std::marker::Sized, { Err(MessageError::NotImplemented()) } } /// 
Function to start a worker pub fn start_worker<P: DeserializeOwned + JsonSchema, ME: MessageEvent<P>>(mut message_event: ME) where ME: std::marker::Sync, { let mut builder = Builder::from_default_env(); let amqp_queue = get_amqp_queue(); let instance_id = docker::get_instance_id("/proc/self/cgroup"); let container_id = instance_id.clone(); builder .format(move |stream, record| { writeln!( stream, "{} - {} - {} - {} - {} - {}", Utc::now(), &container_id, get_amqp_queue(), record.target().parse::<i64>().unwrap_or(-1), record.level(), record.args(), ) }) .init(); let worker_configuration = worker::WorkerConfiguration::new(&amqp_queue, &message_event, &instance_id); if let Err(configuration_error) = worker_configuration { error!("{:?}", configuration_error); return; } let worker_configuration = worker_configuration.unwrap(); info!( "Worker: {}, version: {} (MCAI Worker SDK {})", worker_configuration.get_worker_name(), worker_configuration.get_worker_version(), worker_configuration.get_sdk_version(), ); if let Ok(enabled) = std::env::var("DESCRIBE") { if enabled == "1" || bool::from_str(&enabled.to_lowercase()).unwrap_or(false) { match serde_json::to_string_pretty(&worker_configuration) { Ok(serialized_configuration) => { println!("{}", serialized_configuration); return; } Err(error) => error!("Could not serialize worker configuration: {:?}", error), } } } if let Err(message) = message_event.init() { error!("{:?}", message); return; } let message_event_ref = Rc::new(RefCell::new(message_event)); info!("Worker initialized, ready to receive jobs"); if let Some(source_orders) = get_source_orders() { warn!("Worker will process source orders"); for source_order in &source_orders { info!("Start to process order: {:?}", source_order); let count = None; let channel = None; let message_data = fs::read_to_string(source_order).unwrap(); let result = message::parse_and_process_message( message_event_ref.clone(), &message_data, count, channel, message::publish_job_progression, ); 
match result { Ok(mut job_result) => { job_result.update_execution_duration(); info!(target: &job_result.get_job_id().to_string(), "Process succeeded: {:?}", job_result) } Err(message) => { error!("{:?}", message); } } } return; } loop { let amqp_uri = get_amqp_uri(); let mut executor = LocalPool::new(); let spawner = executor.spawner(); executor.run_until(async { let conn = Connection::connect_uri( amqp_uri, ConnectionProperties::default().with_default_executor(8), ) .wait() .unwrap(); info!("Connected"); let channel = Arc::new(channels::declare_consumer_channel( &conn, &worker_configuration, )); let consumer = channel .clone() .basic_consume( &amqp_queue, "amqp_worker", BasicConsumeOptions::default(), FieldTable::default(), ) .await .unwrap(); let status_consumer = channel .clone() .basic_consume( &worker_configuration.get_direct_messaging_queue_name(), "status_amqp_worker", BasicConsumeOptions::default(), FieldTable::default(), ) .await .unwrap(); let status_response_channel = channel.clone(); let status_worker_configuration = worker_configuration.clone(); let _consumer = spawner.spawn_local(async move { status_consumer .for_each(move |delivery| { let (_channel, delivery) = delivery.expect("error caught in in consumer"); worker::system_information::send_real_time_information( delivery, &status_response_channel, &status_worker_configuration, ) .map(|_| ()) }) .await }); info!("Start to consume on queue {:?}", amqp_queue); let clone_channel = channel.clone(); let message_event = message_event_ref.clone(); consumer .for_each(move |delivery| { let (_channel, delivery) = delivery.expect("error caught in in consumer"); message::process_message(message_event.clone(), delivery, clone_channel.clone()) .map(|_| ()) }) .await }); let sleep_duration = time::Duration::new(1, 0); thread::sleep(sleep_duration); info!("Reconnection..."); } } #[test] fn empty_message_event_impl() { #[derive(Debug)] struct CustomEvent {} #[derive(JsonSchema, Deserialize)] struct CustomParameters 
{} impl MessageEvent<CustomParameters> for CustomEvent { fn get_name(&self) -> String { "custom".to_string() } fn get_short_description(&self) -> String { "short description".to_string() } fn get_description(&self) -> String
fn get_version(&self) -> semver::Version { semver::Version::new(1, 2, 3) } } let custom_event = CustomEvent {}; let parameters = CustomParameters {}; let job = job::Job { job_id: 1234, parameters: vec![], }; let job_result = job::JobResult::new(job.job_id); let result = custom_event.process(None, parameters, job_result); assert!(result == Err(MessageError::NotImplemented())); }
{ "long description".to_string() }
identifier_body
esp.py
# Copyright (c) 2011-2014 by California Institute of Technology # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the California Institute of Technology nor # the names of its contributors may be used to endorse or promote # products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH # OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF # USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT # OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. r"""Equality Set Projection (ESP). Non-vertex polytope projection method from - https://web.archive.org/web/20150103142532/ https://www-control.eng.cam.ac.uk/~cnj22/research/projection.html - https://infoscience.epfl.ch/record/169768 Very unstable, can not handle complex polytopes. Reference ========= \cite{Jones04} """ # Created by P. 
Nilsson, 8/2/11 import pickle import numpy as np from scipy import io as sio from scipy import linalg from polytope import solvers class Ridge(object): """A ridge. Attributes: - `E_r`: Equality set of a facet - `ar, br`: Affine hull of the facet s.t. P_{E_0} = P intersection {x | ar x = br}. """ def __init__(self, E, a, b): self.E_r = E self.ar = a self.br = b class Ridge_Facet(object): """A ridge facet. Attributes: - `E_r`: Equality set of a ridge - `ar,br`: Affine hull of the ridge s.t. P_{E_f} intersection {x | ar x = br} defines the ridge, where E_f is the equality set of the facet. - `E_0`: Equality set of a facet - `af,bf`: Affine hull of the facet. """ def __init__(self, E_r, ar, br, E_0, af, bf): self.E_r = E_r self.ar = ar self.br = br self.E_0 = E_0 self.af = af self.bf = bf def esp(CC, DD, bb, centered=False, abs_tol=1e-10, verbose=0): """Project polytope [C D] x <= b onto C coordinates. Projects the polytope [C D] x <= b onto the coordinates that correspond to C. The projection of the polytope P = {[C D]x <= b} where C is M x D and D is M x K is defined as proj(P) = {x in R^d | exist y in R^k s.t Cx + Dy < b} """ if 'glpk' not in solvers.installed_solvers: raise Exception( "projection_esp error:" " Equality set projection requires `cvxopt.glpk` to run.") # Remove zero columns and rows nonzerorows = np.nonzero( np.sum(np.abs(np.hstack([CC, DD])), axis=1) > abs_tol)[0] nonzeroxcols = np.nonzero(np.sum(np.abs(CC), axis=0) > abs_tol)[0] nonzeroycols = np.nonzero(np.sum(np.abs(DD), axis=0) > abs_tol)[0] C = CC[nonzerorows, :].copy() D = DD[nonzerorows, :].copy() C = C[:, nonzeroxcols] D = D[:, nonzeroycols] b = bb[nonzerorows].copy() # Make sure origo is inside polytope if not centered: xc0, yc0, trans = cheby_center(C, D, b) if trans: b = b - np.dot(C, xc0).flatten() - np.dot(D, yc0).flatten() else: b = b else: trans = False d = C.shape[1] k = D.shape[1] if verbose > 0: print("Projecting from dim " + str(d + k) + " to " + str(d)) if k == 0: # Not projecting 
return C, bb, [] if d == 1: # Projection to 1D c = np.zeros(d + k) c[0] = 1 G = np.hstack([C, D]) sol = solvers.lpsolve(c, G, b, solver='glpk') if sol['status'] != "optimal": raise Exception( "esp: projection to 1D is not full-dimensional, " "LP returned status " + str(sol['status'])) min_sol = np.array(sol['x']).flatten() min_dual_sol = np.array(sol['z']).flatten() sol = solvers.lpsolve(-c, G, b, solver='glpk') if sol['status'] != "optimal": raise Exception( "esp: projection to 1D is not full-dimensional, " + "LP returned status " + str(sol['status'])) max_sol = np.array(sol['x']).flatten() max_dual_sol = np.array(sol['z']).flatten() # min, max x_min = min_sol[0] x_max = max_sol[0] y_min = min_sol[range(1, k + 1)] y_max = max_sol[range(1, k + 1)] if is_dual_degenerate(c, G, b, None, None, min_sol, min_dual_sol): # Min case, relax constraint a little to avoid infeasibility E_min = unique_equalityset( C, D, b, np.array([1.]), x_min + abs_tol / 3, abs_tol=abs_tol) else: E_min = np.nonzero(np.abs(np.dot(G, min_sol) - b) < abs_tol)[0] if is_dual_degenerate(c, G, b, None, None, max_sol, max_dual_sol): # Max case, relax constraint a little to avoid infeasibility E_max = unique_equalityset( C, D, b, np.array([1.]), x_max - abs_tol / 3, abs_tol=abs_tol) else: E_max = np.nonzero(np.abs(np.dot(G, max_sol) - b) < abs_tol)[0] G = np.array([[1.], [-1.]]) g = np.array([x_max, -x_min]) # Relocate if trans: g = g + np.dot(G, xc0) # Return zero cols/rows E_max = nonzerorows[E_max] E_min = nonzerorows[E_min] if verbose > 0: print( "Returning projection from dim " + str(d + k) + " to dim 1 \n") return G, g, [E_max, E_min] E = [] L = [] E_0, af, bf = shoot(C, D, b, abs_tol=abs_tol) ridge_list = ridge(C, D, b, E_0, af, bf, abs_tol=abs_tol, verbose=verbose) for i in range(len(ridge_list)): r = ridge_list[i] L.append(Ridge_Facet(r.E_r, r.ar, r.br, E_0, af, bf)) G = af.T g = bf if verbose > 0: print("\nStarting eq set " + str(E_0) + "\nStarting ridges ") for rr in L: print(str(rr.E_r)) 
E.append(E_0) while len(L) > 0: rid_fac1 = L[0] if verbose > 0: print("\nLooking for neighbors to " + str(rid_fac1.E_0) + " and " + str(rid_fac1.E_r) + " ..") E_adj, a_adj, b_adj = adjacent(C, D, b, rid_fac1, abs_tol=abs_tol) if verbose > 0: print("found neighbor " + str(E_adj) + ". \n\nLooking for ridges of neighbor..") ridge_list = ridge( C, D, b, E_adj, a_adj, b_adj, abs_tol=abs_tol, verbose=verbose) if verbose > 0: print("found " + str(len(ridge_list)) + " ridges\n") found_org = False for i in range(len(ridge_list)): r = ridge_list[i] E_r = r.E_r ar = r.ar br = r.br found = False for j in range(len(L)): rid_fac2 = L[j] A_r = rid_fac2.E_r if len(A_r) != len(E_r): continue t1 = np.sort(np.array(A_r)) t2 = np.sort(np.array(E_r)) if np.sum(np.abs(t1 - t2)) < abs_tol: found = True break if found: if verbose > 0: print("Ridge " + str(E_r) + " already visited, removing from L..") if rid_fac2 == rid_fac1: found_org = True L.remove(rid_fac2) else: if verbose > 0: print("Adding ridge-facet " + str(E_adj) + " " + str(E_r) + "") L.append(Ridge_Facet(E_r, ar, br, E_adj, a_adj, b_adj)) if not found_org: print("Expected ridge " + str(rid_fac1.E_r)) print("but got ridges ") for rid in ridge_list: print(rid.E_r) raise Exception( "esp: ridge did not return neighboring ridge as expected") G = np.vstack([G, a_adj]) g = np.hstack([g, b_adj]) E.append(E_adj) # Restore center if trans: g = g + np.dot(G, xc0) # Return zero rows for Ef in E: Ef = nonzerorows[Ef] return G, g, E def shoot(C, D, b, maxiter=1000, abs_tol=1e-7): """Return random equality set of P that projects on a projection facet. Returns randomly selected equality set E_0 of P such that the projection of the equality set is a facet of the projection. 
@param C: Matrix defining the polytope Cx+Dy <= b @param D: Matrix defining the polytope Cx+Dy <= b @param b: Vector defining the polytope Cx+Dy <= b @return: `E_0,af,bf`: Equality set and affine hull """ d = C.shape[1] k = D.shape[1] iter = 0 while True: if iter > maxiter: raise Exception( "shoot: could not find starting equality set") gamma = np.random.rand(d) - 0.5 c = np.zeros(k + 1) c[0] = -1 G = np.hstack([np.array([np.dot(C, gamma)]).T, D]) sol = solvers.lpsolve(c, G, b, solver='glpk') opt_sol = np.array(sol['x']).flatten() opt_dual = np.array(sol['z']).flatten() r_opt = opt_sol[0] y_opt = np.array(opt_sol[range(1, len(opt_sol))]).flatten() x_opt = r_opt * gamma E_0 = np.nonzero( np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol)[0] DE0 = D[E_0, :] CE0 = C[E_0, :] b0 = b[E_0] if rank(np.dot(null_space(DE0.T).T, CE0)) == 1: break iter += 1 af, bf = proj_aff(CE0, DE0, b0, abs_tol=abs_tol) if is_dual_degenerate(c, G, b, None, None, opt_sol, opt_dual, abs_tol=abs_tol): E_0 = unique_equalityset(C, D, b, af, bf, abs_tol=abs_tol) af, bf = proj_aff(C[E_0, :], D[E_0, :], b[E_0]) if len(bf) > 1: raise Exception("shoot: wrong dimension of affine hull") return E_0, af.flatten(), bf def ridge(C, D, b, E, af, bf, abs_tol=1e-7, verbose=0): """Compute all ridges of a facet in the projection. 
Input: `C,D,b`: Original polytope data `E,af,bf`: Equality set and affine hull of a facet in the projection Output: `ridge_list`: A list containing all the ridges of the facet as Ridge objects """ d = C.shape[1] k = D.shape[1] Er_list = [] q = C.shape[0] E_c = np.setdiff1d(range(q), E) # E slices C_E = C[E, :] D_E = D[E, :] b_E = b[E, :] # E_c slices C_Ec = C[E_c, :] D_Ec = D[E_c, :] b_Ec = b[E_c] # dots S = C_Ec - np.dot(np.dot(D_Ec, linalg.pinv(D_E)), C_E) L = np.dot(D_Ec, null_space(D_E)) t = b_Ec - np.dot(D_Ec, np.dot(linalg.pinv(D_E), b_E)) if rank(np.hstack([C_E, D_E])) < k + 1: if verbose > 1: print("Doing recursive ESP call") u, s, v = linalg.svd(np.array([af]), full_matrices=1) sigma = s[0] v = v.T * u[0, 0] # Correct sign V_hat = v[:, [0]] V_tilde = v[:, range(1, v.shape[1])] Cnew = np.dot(S, V_tilde) Dnew = L bnew = t - np.dot(S, V_hat).flatten() * bf / sigma Anew = np.hstack([Cnew, Dnew]) xc2, yc2, cen2 = cheby_center(Cnew, Dnew, bnew) bnew = bnew - np.dot(Cnew, xc2).flatten() - np.dot(Dnew, yc2).flatten() Gt, gt, E_t = esp( Cnew, Dnew, bnew, centered=True, abs_tol=abs_tol, verbose=0) if (len(E_t[0]) == 0) or (len(E_t[1]) == 0): raise Exception( "ridge: recursive call did not return any equality sets") for i in range(len(E_t)): E_f = E_t[i] er = np.sort(np.hstack([E, E_c[E_f]])) ar = np.dot(Gt[i, :], V_tilde.T).flatten() br0 = gt[i].flatten() # Make orthogonal to facet ar = ar - af * np.dot(af.flatten(), ar.flatten()) br = br0 - bf * np.dot(af.flatten(), ar.flatten()) # Normalize and make ridge equation point outwards norm = np.sqrt(np.sum(ar * ar)) ar = ar * np.sign(br) / norm br = br * np.sign(br) / norm # Restore center br = br + np.dot(Gt[i, :], xc2) / norm if len(ar) > d: raise Exception("ridge: wrong length of new ridge!") Er_list.append(Ridge(er, ar, br)) else: if verbose > 0: print("Doing direct calculation of ridges") X = np.arange(S.shape[0]) while len(X) > 0: i = X[0] X = np.setdiff1d(X, i) if np.linalg.norm(S[i, :]) < abs_tol:
Si = S[i, :] Si = Si / np.linalg.norm(Si) if np.linalg.norm(af - np.dot(Si, af) * Si) > abs_tol: test1 = null_space( np.vstack([ np.hstack([af, bf]), np.hstack([S[i, :], t[i]])]), nonempty=True) test2 = np.hstack([S, np.array([t]).T]) test = np.dot(test1.T, test2.T) test = np.sum(np.abs(test), 0) Q_i = np.nonzero(test > abs_tol)[0] Q = np.nonzero(test < abs_tol)[0] X = np.setdiff1d(X, Q) # Have Q_i Sq = S[Q_i, :] tq = t[Q_i] c = np.zeros(d + 1) c[0] = 1 Gup = np.hstack([-np.ones([Sq.shape[0], 1]), Sq]) Gdo = np.hstack([-1, np.zeros(Sq.shape[1])]) G = np.vstack([Gup, Gdo]) h = np.hstack([tq, 1]) Al = np.zeros([2, 1]) Ar = np.vstack([af, S[i, :]]) A = np.hstack([Al, Ar]) bb = np.hstack([bf, t[i]]) sol = solvers._solve_lp_using_cvxopt( c, G, h, A=A, b=bb) if sol['status'] == 'optimal': tau = sol['x'][0] if tau < -abs_tol: ar = np.array([S[i, :]]).flatten() br = t[i].flatten() # Make orthogonal to facet ar = ar - af * np.dot(af.flatten(), ar.flatten()) br = br - bf * np.dot(af.flatten(), ar.flatten()) # Normalize and make ridge equation point outwards norm = np.sqrt(np.sum(ar * ar)) ar = ar / norm br = br / norm # accumulate Er_list.append( Ridge(np.sort(np.hstack([E, E_c[Q]])), ar, br)) return Er_list def adjacent(C, D, b, rid_fac, abs_tol=1e-7): """Compute the (unique) adjacent facet. @param rid_fac: A Ridge_Facet object containing the parameters for a facet and one of its ridges. 
@return: (E_adj,a_adj,b_adj): The equality set and parameters for the adjacent facet such that:: P_{E_adj} = P intersection {x | a_adj x = b_adj} """ E = rid_fac.E_0 af = rid_fac.af bf = rid_fac.bf # E_r = rid_fac.E_r ar = rid_fac.ar br = rid_fac.br # shape d = C.shape[1] k = D.shape[1] # E_r slices C_er = C[E_r, :] D_er = D[E_r, :] b_er = b[E_r] # stack c = -np.hstack([ar, np.zeros(k)]) G = np.hstack([C_er, D_er]) h = b_er A = np.hstack([af, np.zeros(k)]) sol = solvers._solve_lp_using_cvxopt( c, G, h, A=A.T, b=bf * (1 - 0.01)) if sol['status'] != "optimal": print(G) print(h) print(af) print(bf) print(ar) print(br) print(np.dot(af, ar)) data = {} data["C"] = C data["D"] = D data["b"] = b sio.savemat("matlabdata", data) with open('polytope.p', 'wb') as f: pickle.dump(data, f) raise Exception( "adjacent: Lp returned status " + str(sol['status'])) opt_sol = np.array(sol['x']).flatten() dual_opt_sol = np.array(sol['z']).flatten() x_opt = opt_sol[range(d)] y_opt = opt_sol[range(d, d + k)] if is_dual_degenerate( c.flatten(), G, h, A, bf * (1 - 0.01), opt_sol, dual_opt_sol, abs_tol=abs_tol): # If degenerate, compute affine hull and take preimage E_temp = np.nonzero(np.abs(np.dot(G, opt_sol) - h) < abs_tol)[0] a_temp, b_temp = proj_aff( C_er[E_temp, :], D_er[E_temp, :], b_er[E_temp], expected_dim=1, abs_tol=abs_tol) E_adj = unique_equalityset(C, D, b, a_temp, b_temp, abs_tol=abs_tol) if len(E_adj) == 0: data = {} data["C"] = C data["D"] = D data["b"] = b data["Er"] = E_r + 1 data["ar"] = ar data["br"] = br data["Ef"] = E + 1 data["af"] = af data["bf"] = bf sio.savemat("matlabdata", data) raise Exception( "adjacent: equality set computation returned empty set") else: r = np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol E_adj = np.nonzero(r)[0] C_eadj = C[E_adj, :] D_eadj = D[E_adj, :] b_eadj = b[E_adj] af_adj, bf_adj = proj_aff(C_eadj, D_eadj, b_eadj, abs_tol=abs_tol) return E_adj, af_adj, bf_adj def proj_aff(Ce, De, be, expected_dim=None, abs_tol=1e-7): """Affine 
projection. Compute the set aff = {x | Ce x + De y = be} on the form aff = ({x | a x = b} intersection {Ce x + De y < be}). Input: Polytope parameters Ce, De and be Output: Constants a and b """ # Remove zero columns ind = np.nonzero(np.sum(np.abs(De), axis=0) > abs_tol)[0] D = De[:, ind] if D.shape[1] == 0: a = Ce b = be a_n, b_n = normalize(a, b) if expected_dim is not None: if expected_dim != b_n.size: raise Exception( "proj_aff: wrong dimension calculated in 1") return a_n.flatten(), b_n sh = np.shape(D.T) m = sh[0] n = sh[1] nDe = null_space(D.T) a = np.dot(nDe.T, Ce) b = np.dot(nDe.T, be) a_n, b_n = normalize(a, b) if expected_dim is not None: if expected_dim != b_n.size: raise Exception("proj_aff: wrong dimension calculated in 2") return a_n, b_n def is_dual_degenerate(c, G, h, A, b, x_opt, z_opt, abs_tol=1e-7): """Return `True` if pair of dual problems is dual degenerate. Checks if the pair of dual problems:: (P): min c'x (D): max h'z + b'y s.t Gx <= h s.t G'z + A'y = c Ax = b z <= 0 is dual degenerate, i.e. if (P) has several optimal solutions. Optimal solutions x* and z* are required. Input: `G,h,A,b`: Parameters of (P) `x_opt`: One optimal solution to (P) `z_opt`: The optimal solution to (D) corresponding to _inequality constraints_ in (P) Output: `dual`: Boolean indicating whether (P) has many optimal solutions. 
""" D = - G d = - h.flatten() mu = - z_opt.flatten() # mu >= 0 # Active constraints I = np.nonzero(np.abs(np.dot(D, x_opt).flatten() - d) < abs_tol)[0] # Positive elements in dual opt J = np.nonzero(mu > abs_tol)[0] # i, j i = mu < abs_tol # Zero elements in dual opt i = i.astype(int) j = np.zeros(len(mu), dtype=int) j[I] = 1 # 1 if active # Indices where active constraints have 0 dual opt L = np.nonzero(i + j == 2)[0] # sizes nI = len(I) nJ = len(J) nL = len(L) # constraints DI = D[I, :] # Active constraints DJ = D[J, :] # Constraints with positive lagrange mult DL = D[L, :] # Active constraints with zero dual opt dual = 0 if A is None: test = DI else: test = np.vstack([DI, A]) if rank(test) < np.amin(DI.shape): return True else: if len(L) > 0: if A is None: Ae = DJ else: Ae = np.vstack([DJ, A]) be = np.zeros(Ae.shape[0]) Ai = - DL bi = np.zeros(nL) sol = solvers._solve_lp_using_cvxopt( c= - np.sum(DL, axis=0), G=Ai, h=bi, A=Ae, b=be) if sol['status'] == "dual infeasible": # Dual infeasible -> primal unbounded -> value>epsilon return True if sol['primal objective'] > abs_tol: return True return False def unique_equalityset(C, D, b, af, bf, abs_tol=1e-7, verbose=0): """Return equality set E with the following property: P_E = {x | af x = bf} intersection P where P is the polytope C x + D y < b The inequalities have to be satisfied with equality everywhere on the face defined by af and bf. 
""" if D is not None: A = np.hstack([C, D]) a = np.hstack([af, np.zeros(D.shape[1])]) else: A = C a = af E = [] for i in range(A.shape[0]): A_i = np.array(A[i, :]) b_i = b[i] sol = solvers._solve_lp_using_cvxopt( c=A_i, G=A, h=b, A=a.T, b=bf) if sol['status'] != "optimal": raise Exception( "unique_equalityset: LP returned status " + str(sol['status'])) if np.abs(sol['primal objective'] - b_i) < abs_tol: # Constraint is active everywhere E.append(i) if len(E) == 0: raise Exception("unique_equalityset: empty E") return np.array(E) def unique_equalityset2(C, D, b, opt_sol, abs_tol=1e-7): A = np.hstack([C, D]) E0 = np.nonzero(np.abs(np.dot(A, opt_sol) - b) < abs_tol)[0] af, bf = proj_aff(C[E0, :], D[E0, :], b[E0], expected_dim=1) # stack ineq = np.hstack([af, np.zeros(D.shape[1])]) G = np.vstack([A, np.vstack([ineq, -ineq])]) h = np.hstack([b, np.hstack([bf, -bf])]) # shape m = G.shape[0] n = G.shape[1] # ht e = 1e-3 v = np.vstack([np.zeros([1, n]), np.eye(n)]).T v = v - np.array([np.mean(v, axis=1)]).T v = v * e ht = h + np.amin(-np.dot(G, v), axis=1) # stack H1 = np.hstack([G, -np.eye(m)]) H2 = np.hstack([G, np.zeros([m, m])]) H3 = np.hstack([np.zeros([m, n]), -np.eye(m)]) H = np.vstack([H1, np.vstack([H2, H3])]) h = np.hstack([ht, np.hstack([h, np.zeros(m)])]) c = np.hstack([np.zeros(n), np.ones(m)]) sol = solvers.lpsolve(c, H, h, solver='glpk') if not sol['status'] == "optimal": raise Exception( "unique_equalityset: LP returned status " + str(sol['status'])) opt_sol2 = np.array(sol['x']).flatten() x = opt_sol2[range(n)] s = opt_sol2[range(n, len(opt_sol2))] E = np.nonzero(s > abs_tol)[0] print(E) E = np.sort(E[np.nonzero(E < C.shape[0])]) # Check that they define the same projection at, bt = proj_aff(C[E, :], D[E, :], b[E]) if bt.size != 1 or np.sum(np.abs(at - af)) + np.abs(bt - bf) > abs_tol: raise Exception("unique_equalityset2: affine hulls not the same") return E def cheby_center(C, D, b): """Calculate Chebyshev center for the polytope `C x + D y <= b`. 
Input: `C, D, b`: Polytope parameters Output: `x_0, y_0`: The chebyshev centra `boolean`: True if a point could be found, False otherwise. """ d = C.shape[1] k = D.shape[1] A = np.hstack([C, D]) dim = np.shape(A)[1] c = - np.r_[np.zeros(dim), 1] norm2 = np.sqrt(np.sum(A * A, axis=1)) G = np.c_[A, norm2] sol = solvers.lpsolve(c, G, h=b, solver='glpk') if sol['status'] == "optimal": opt = np.array(sol['x'][0:-1]).flatten() return opt[range(d)], opt[range(d, d + k)], True else: return np.zeros(d), np.zeros(k), False def normalize(AA, bb, abs_tol=1e-7): """Normalize `A x = b` such that `A'A = 1` and `b > 0`. Also, remove duplicate lines. """ if AA.size == 0: return AA, bb dim = AA.size / bb.size A = AA.copy().reshape(bb.size, dim) b = bb.copy().reshape(bb.size, 1) # Remove zero lines keepind = np.nonzero( np.sum(np.abs(np.hstack([A, b])), axis=1) > abs_tol)[0] A = A[keepind, :] b = b[keepind] # Normalize anorm = np.sqrt(np.sum(A * A, axis=1)) for i in range(len(anorm)): A[i, :] = A[i, :] * np.sign(b[i, 0]) / anorm[i] b[i, 0] = np.sign(b[i, 0]) * b[i, 0] / anorm[i] # Remove duplicate rows keep_row = [] for i in range(len(anorm)): unique = True for j in range(i + 1, len(anorm)): test = (np.sum(np.abs(A[i, :] - A[j, :])) + np.abs(b[i, 0] - b[j, 0])) if test < abs_tol: unique = False break if unique: keep_row.append(i) A_n = A[keep_row, :] b_n = b[keep_row, 0] # Return flat A if only one row if A_n.size == dim: A_n = A_n.flatten() return A_n, b_n.flatten() def rank(A, eps=1e-15): u, s, vh = linalg.svd(A) m = A.shape[0] n = A.shape[1] tol = np.amax([m, n]) * np.amax(s) * eps return np.sum(s > tol) def null_space(A, eps=1e-15, nonempty=False): """Returns the null space N_A to matrix A such that A N_A = 0.""" u, s, v = linalg.svd(A, full_matrices=1) m = A.shape[0] n = A.shape[1] tol = np.amax([m, n]) * np.amax(s) * eps rank = np.sum(s > tol) N_space = v[range(rank, n), :].T if nonempty and (len(N_space) == 0): N_space = v[range(np.amax(n - 1, 1), n), :] return N_space
continue
conditional_block
esp.py
# Copyright (c) 2011-2014 by California Institute of Technology # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the California Institute of Technology nor # the names of its contributors may be used to endorse or promote # products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH # OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF # USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT # OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. r"""Equality Set Projection (ESP). Non-vertex polytope projection method from - https://web.archive.org/web/20150103142532/ https://www-control.eng.cam.ac.uk/~cnj22/research/projection.html - https://infoscience.epfl.ch/record/169768 Very unstable, can not handle complex polytopes. Reference ========= \cite{Jones04} """ # Created by P. 
Nilsson, 8/2/11 import pickle import numpy as np from scipy import io as sio from scipy import linalg from polytope import solvers class Ridge(object): """A ridge. Attributes: - `E_r`: Equality set of a facet - `ar, br`: Affine hull of the facet s.t. P_{E_0} = P intersection {x | ar x = br}. """ def __init__(self, E, a, b):
class Ridge_Facet(object): """A ridge facet. Attributes: - `E_r`: Equality set of a ridge - `ar,br`: Affine hull of the ridge s.t. P_{E_f} intersection {x | ar x = br} defines the ridge, where E_f is the equality set of the facet. - `E_0`: Equality set of a facet - `af,bf`: Affine hull of the facet. """ def __init__(self, E_r, ar, br, E_0, af, bf): self.E_r = E_r self.ar = ar self.br = br self.E_0 = E_0 self.af = af self.bf = bf def esp(CC, DD, bb, centered=False, abs_tol=1e-10, verbose=0): """Project polytope [C D] x <= b onto C coordinates. Projects the polytope [C D] x <= b onto the coordinates that correspond to C. The projection of the polytope P = {[C D]x <= b} where C is M x D and D is M x K is defined as proj(P) = {x in R^d | exist y in R^k s.t Cx + Dy < b} """ if 'glpk' not in solvers.installed_solvers: raise Exception( "projection_esp error:" " Equality set projection requires `cvxopt.glpk` to run.") # Remove zero columns and rows nonzerorows = np.nonzero( np.sum(np.abs(np.hstack([CC, DD])), axis=1) > abs_tol)[0] nonzeroxcols = np.nonzero(np.sum(np.abs(CC), axis=0) > abs_tol)[0] nonzeroycols = np.nonzero(np.sum(np.abs(DD), axis=0) > abs_tol)[0] C = CC[nonzerorows, :].copy() D = DD[nonzerorows, :].copy() C = C[:, nonzeroxcols] D = D[:, nonzeroycols] b = bb[nonzerorows].copy() # Make sure origo is inside polytope if not centered: xc0, yc0, trans = cheby_center(C, D, b) if trans: b = b - np.dot(C, xc0).flatten() - np.dot(D, yc0).flatten() else: b = b else: trans = False d = C.shape[1] k = D.shape[1] if verbose > 0: print("Projecting from dim " + str(d + k) + " to " + str(d)) if k == 0: # Not projecting return C, bb, [] if d == 1: # Projection to 1D c = np.zeros(d + k) c[0] = 1 G = np.hstack([C, D]) sol = solvers.lpsolve(c, G, b, solver='glpk') if sol['status'] != "optimal": raise Exception( "esp: projection to 1D is not full-dimensional, " "LP returned status " + str(sol['status'])) min_sol = np.array(sol['x']).flatten() min_dual_sol = 
np.array(sol['z']).flatten() sol = solvers.lpsolve(-c, G, b, solver='glpk') if sol['status'] != "optimal": raise Exception( "esp: projection to 1D is not full-dimensional, " + "LP returned status " + str(sol['status'])) max_sol = np.array(sol['x']).flatten() max_dual_sol = np.array(sol['z']).flatten() # min, max x_min = min_sol[0] x_max = max_sol[0] y_min = min_sol[range(1, k + 1)] y_max = max_sol[range(1, k + 1)] if is_dual_degenerate(c, G, b, None, None, min_sol, min_dual_sol): # Min case, relax constraint a little to avoid infeasibility E_min = unique_equalityset( C, D, b, np.array([1.]), x_min + abs_tol / 3, abs_tol=abs_tol) else: E_min = np.nonzero(np.abs(np.dot(G, min_sol) - b) < abs_tol)[0] if is_dual_degenerate(c, G, b, None, None, max_sol, max_dual_sol): # Max case, relax constraint a little to avoid infeasibility E_max = unique_equalityset( C, D, b, np.array([1.]), x_max - abs_tol / 3, abs_tol=abs_tol) else: E_max = np.nonzero(np.abs(np.dot(G, max_sol) - b) < abs_tol)[0] G = np.array([[1.], [-1.]]) g = np.array([x_max, -x_min]) # Relocate if trans: g = g + np.dot(G, xc0) # Return zero cols/rows E_max = nonzerorows[E_max] E_min = nonzerorows[E_min] if verbose > 0: print( "Returning projection from dim " + str(d + k) + " to dim 1 \n") return G, g, [E_max, E_min] E = [] L = [] E_0, af, bf = shoot(C, D, b, abs_tol=abs_tol) ridge_list = ridge(C, D, b, E_0, af, bf, abs_tol=abs_tol, verbose=verbose) for i in range(len(ridge_list)): r = ridge_list[i] L.append(Ridge_Facet(r.E_r, r.ar, r.br, E_0, af, bf)) G = af.T g = bf if verbose > 0: print("\nStarting eq set " + str(E_0) + "\nStarting ridges ") for rr in L: print(str(rr.E_r)) E.append(E_0) while len(L) > 0: rid_fac1 = L[0] if verbose > 0: print("\nLooking for neighbors to " + str(rid_fac1.E_0) + " and " + str(rid_fac1.E_r) + " ..") E_adj, a_adj, b_adj = adjacent(C, D, b, rid_fac1, abs_tol=abs_tol) if verbose > 0: print("found neighbor " + str(E_adj) + ". 
\n\nLooking for ridges of neighbor..") ridge_list = ridge( C, D, b, E_adj, a_adj, b_adj, abs_tol=abs_tol, verbose=verbose) if verbose > 0: print("found " + str(len(ridge_list)) + " ridges\n") found_org = False for i in range(len(ridge_list)): r = ridge_list[i] E_r = r.E_r ar = r.ar br = r.br found = False for j in range(len(L)): rid_fac2 = L[j] A_r = rid_fac2.E_r if len(A_r) != len(E_r): continue t1 = np.sort(np.array(A_r)) t2 = np.sort(np.array(E_r)) if np.sum(np.abs(t1 - t2)) < abs_tol: found = True break if found: if verbose > 0: print("Ridge " + str(E_r) + " already visited, removing from L..") if rid_fac2 == rid_fac1: found_org = True L.remove(rid_fac2) else: if verbose > 0: print("Adding ridge-facet " + str(E_adj) + " " + str(E_r) + "") L.append(Ridge_Facet(E_r, ar, br, E_adj, a_adj, b_adj)) if not found_org: print("Expected ridge " + str(rid_fac1.E_r)) print("but got ridges ") for rid in ridge_list: print(rid.E_r) raise Exception( "esp: ridge did not return neighboring ridge as expected") G = np.vstack([G, a_adj]) g = np.hstack([g, b_adj]) E.append(E_adj) # Restore center if trans: g = g + np.dot(G, xc0) # Return zero rows for Ef in E: Ef = nonzerorows[Ef] return G, g, E def shoot(C, D, b, maxiter=1000, abs_tol=1e-7): """Return random equality set of P that projects on a projection facet. Returns randomly selected equality set E_0 of P such that the projection of the equality set is a facet of the projection. 
@param C: Matrix defining the polytope Cx+Dy <= b @param D: Matrix defining the polytope Cx+Dy <= b @param b: Vector defining the polytope Cx+Dy <= b @return: `E_0,af,bf`: Equality set and affine hull """ d = C.shape[1] k = D.shape[1] iter = 0 while True: if iter > maxiter: raise Exception( "shoot: could not find starting equality set") gamma = np.random.rand(d) - 0.5 c = np.zeros(k + 1) c[0] = -1 G = np.hstack([np.array([np.dot(C, gamma)]).T, D]) sol = solvers.lpsolve(c, G, b, solver='glpk') opt_sol = np.array(sol['x']).flatten() opt_dual = np.array(sol['z']).flatten() r_opt = opt_sol[0] y_opt = np.array(opt_sol[range(1, len(opt_sol))]).flatten() x_opt = r_opt * gamma E_0 = np.nonzero( np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol)[0] DE0 = D[E_0, :] CE0 = C[E_0, :] b0 = b[E_0] if rank(np.dot(null_space(DE0.T).T, CE0)) == 1: break iter += 1 af, bf = proj_aff(CE0, DE0, b0, abs_tol=abs_tol) if is_dual_degenerate(c, G, b, None, None, opt_sol, opt_dual, abs_tol=abs_tol): E_0 = unique_equalityset(C, D, b, af, bf, abs_tol=abs_tol) af, bf = proj_aff(C[E_0, :], D[E_0, :], b[E_0]) if len(bf) > 1: raise Exception("shoot: wrong dimension of affine hull") return E_0, af.flatten(), bf def ridge(C, D, b, E, af, bf, abs_tol=1e-7, verbose=0): """Compute all ridges of a facet in the projection. 
Input: `C,D,b`: Original polytope data `E,af,bf`: Equality set and affine hull of a facet in the projection Output: `ridge_list`: A list containing all the ridges of the facet as Ridge objects """ d = C.shape[1] k = D.shape[1] Er_list = [] q = C.shape[0] E_c = np.setdiff1d(range(q), E) # E slices C_E = C[E, :] D_E = D[E, :] b_E = b[E, :] # E_c slices C_Ec = C[E_c, :] D_Ec = D[E_c, :] b_Ec = b[E_c] # dots S = C_Ec - np.dot(np.dot(D_Ec, linalg.pinv(D_E)), C_E) L = np.dot(D_Ec, null_space(D_E)) t = b_Ec - np.dot(D_Ec, np.dot(linalg.pinv(D_E), b_E)) if rank(np.hstack([C_E, D_E])) < k + 1: if verbose > 1: print("Doing recursive ESP call") u, s, v = linalg.svd(np.array([af]), full_matrices=1) sigma = s[0] v = v.T * u[0, 0] # Correct sign V_hat = v[:, [0]] V_tilde = v[:, range(1, v.shape[1])] Cnew = np.dot(S, V_tilde) Dnew = L bnew = t - np.dot(S, V_hat).flatten() * bf / sigma Anew = np.hstack([Cnew, Dnew]) xc2, yc2, cen2 = cheby_center(Cnew, Dnew, bnew) bnew = bnew - np.dot(Cnew, xc2).flatten() - np.dot(Dnew, yc2).flatten() Gt, gt, E_t = esp( Cnew, Dnew, bnew, centered=True, abs_tol=abs_tol, verbose=0) if (len(E_t[0]) == 0) or (len(E_t[1]) == 0): raise Exception( "ridge: recursive call did not return any equality sets") for i in range(len(E_t)): E_f = E_t[i] er = np.sort(np.hstack([E, E_c[E_f]])) ar = np.dot(Gt[i, :], V_tilde.T).flatten() br0 = gt[i].flatten() # Make orthogonal to facet ar = ar - af * np.dot(af.flatten(), ar.flatten()) br = br0 - bf * np.dot(af.flatten(), ar.flatten()) # Normalize and make ridge equation point outwards norm = np.sqrt(np.sum(ar * ar)) ar = ar * np.sign(br) / norm br = br * np.sign(br) / norm # Restore center br = br + np.dot(Gt[i, :], xc2) / norm if len(ar) > d: raise Exception("ridge: wrong length of new ridge!") Er_list.append(Ridge(er, ar, br)) else: if verbose > 0: print("Doing direct calculation of ridges") X = np.arange(S.shape[0]) while len(X) > 0: i = X[0] X = np.setdiff1d(X, i) if np.linalg.norm(S[i, :]) < abs_tol: continue Si = 
S[i, :] Si = Si / np.linalg.norm(Si) if np.linalg.norm(af - np.dot(Si, af) * Si) > abs_tol: test1 = null_space( np.vstack([ np.hstack([af, bf]), np.hstack([S[i, :], t[i]])]), nonempty=True) test2 = np.hstack([S, np.array([t]).T]) test = np.dot(test1.T, test2.T) test = np.sum(np.abs(test), 0) Q_i = np.nonzero(test > abs_tol)[0] Q = np.nonzero(test < abs_tol)[0] X = np.setdiff1d(X, Q) # Have Q_i Sq = S[Q_i, :] tq = t[Q_i] c = np.zeros(d + 1) c[0] = 1 Gup = np.hstack([-np.ones([Sq.shape[0], 1]), Sq]) Gdo = np.hstack([-1, np.zeros(Sq.shape[1])]) G = np.vstack([Gup, Gdo]) h = np.hstack([tq, 1]) Al = np.zeros([2, 1]) Ar = np.vstack([af, S[i, :]]) A = np.hstack([Al, Ar]) bb = np.hstack([bf, t[i]]) sol = solvers._solve_lp_using_cvxopt( c, G, h, A=A, b=bb) if sol['status'] == 'optimal': tau = sol['x'][0] if tau < -abs_tol: ar = np.array([S[i, :]]).flatten() br = t[i].flatten() # Make orthogonal to facet ar = ar - af * np.dot(af.flatten(), ar.flatten()) br = br - bf * np.dot(af.flatten(), ar.flatten()) # Normalize and make ridge equation point outwards norm = np.sqrt(np.sum(ar * ar)) ar = ar / norm br = br / norm # accumulate Er_list.append( Ridge(np.sort(np.hstack([E, E_c[Q]])), ar, br)) return Er_list def adjacent(C, D, b, rid_fac, abs_tol=1e-7): """Compute the (unique) adjacent facet. @param rid_fac: A Ridge_Facet object containing the parameters for a facet and one of its ridges. 
@return: (E_adj,a_adj,b_adj): The equality set and parameters for the adjacent facet such that:: P_{E_adj} = P intersection {x | a_adj x = b_adj} """ E = rid_fac.E_0 af = rid_fac.af bf = rid_fac.bf # E_r = rid_fac.E_r ar = rid_fac.ar br = rid_fac.br # shape d = C.shape[1] k = D.shape[1] # E_r slices C_er = C[E_r, :] D_er = D[E_r, :] b_er = b[E_r] # stack c = -np.hstack([ar, np.zeros(k)]) G = np.hstack([C_er, D_er]) h = b_er A = np.hstack([af, np.zeros(k)]) sol = solvers._solve_lp_using_cvxopt( c, G, h, A=A.T, b=bf * (1 - 0.01)) if sol['status'] != "optimal": print(G) print(h) print(af) print(bf) print(ar) print(br) print(np.dot(af, ar)) data = {} data["C"] = C data["D"] = D data["b"] = b sio.savemat("matlabdata", data) with open('polytope.p', 'wb') as f: pickle.dump(data, f) raise Exception( "adjacent: Lp returned status " + str(sol['status'])) opt_sol = np.array(sol['x']).flatten() dual_opt_sol = np.array(sol['z']).flatten() x_opt = opt_sol[range(d)] y_opt = opt_sol[range(d, d + k)] if is_dual_degenerate( c.flatten(), G, h, A, bf * (1 - 0.01), opt_sol, dual_opt_sol, abs_tol=abs_tol): # If degenerate, compute affine hull and take preimage E_temp = np.nonzero(np.abs(np.dot(G, opt_sol) - h) < abs_tol)[0] a_temp, b_temp = proj_aff( C_er[E_temp, :], D_er[E_temp, :], b_er[E_temp], expected_dim=1, abs_tol=abs_tol) E_adj = unique_equalityset(C, D, b, a_temp, b_temp, abs_tol=abs_tol) if len(E_adj) == 0: data = {} data["C"] = C data["D"] = D data["b"] = b data["Er"] = E_r + 1 data["ar"] = ar data["br"] = br data["Ef"] = E + 1 data["af"] = af data["bf"] = bf sio.savemat("matlabdata", data) raise Exception( "adjacent: equality set computation returned empty set") else: r = np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol E_adj = np.nonzero(r)[0] C_eadj = C[E_adj, :] D_eadj = D[E_adj, :] b_eadj = b[E_adj] af_adj, bf_adj = proj_aff(C_eadj, D_eadj, b_eadj, abs_tol=abs_tol) return E_adj, af_adj, bf_adj def proj_aff(Ce, De, be, expected_dim=None, abs_tol=1e-7): """Affine 
projection. Compute the set aff = {x | Ce x + De y = be} on the form aff = ({x | a x = b} intersection {Ce x + De y < be}). Input: Polytope parameters Ce, De and be Output: Constants a and b """ # Remove zero columns ind = np.nonzero(np.sum(np.abs(De), axis=0) > abs_tol)[0] D = De[:, ind] if D.shape[1] == 0: a = Ce b = be a_n, b_n = normalize(a, b) if expected_dim is not None: if expected_dim != b_n.size: raise Exception( "proj_aff: wrong dimension calculated in 1") return a_n.flatten(), b_n sh = np.shape(D.T) m = sh[0] n = sh[1] nDe = null_space(D.T) a = np.dot(nDe.T, Ce) b = np.dot(nDe.T, be) a_n, b_n = normalize(a, b) if expected_dim is not None: if expected_dim != b_n.size: raise Exception("proj_aff: wrong dimension calculated in 2") return a_n, b_n def is_dual_degenerate(c, G, h, A, b, x_opt, z_opt, abs_tol=1e-7): """Return `True` if pair of dual problems is dual degenerate. Checks if the pair of dual problems:: (P): min c'x (D): max h'z + b'y s.t Gx <= h s.t G'z + A'y = c Ax = b z <= 0 is dual degenerate, i.e. if (P) has several optimal solutions. Optimal solutions x* and z* are required. Input: `G,h,A,b`: Parameters of (P) `x_opt`: One optimal solution to (P) `z_opt`: The optimal solution to (D) corresponding to _inequality constraints_ in (P) Output: `dual`: Boolean indicating whether (P) has many optimal solutions. 
""" D = - G d = - h.flatten() mu = - z_opt.flatten() # mu >= 0 # Active constraints I = np.nonzero(np.abs(np.dot(D, x_opt).flatten() - d) < abs_tol)[0] # Positive elements in dual opt J = np.nonzero(mu > abs_tol)[0] # i, j i = mu < abs_tol # Zero elements in dual opt i = i.astype(int) j = np.zeros(len(mu), dtype=int) j[I] = 1 # 1 if active # Indices where active constraints have 0 dual opt L = np.nonzero(i + j == 2)[0] # sizes nI = len(I) nJ = len(J) nL = len(L) # constraints DI = D[I, :] # Active constraints DJ = D[J, :] # Constraints with positive lagrange mult DL = D[L, :] # Active constraints with zero dual opt dual = 0 if A is None: test = DI else: test = np.vstack([DI, A]) if rank(test) < np.amin(DI.shape): return True else: if len(L) > 0: if A is None: Ae = DJ else: Ae = np.vstack([DJ, A]) be = np.zeros(Ae.shape[0]) Ai = - DL bi = np.zeros(nL) sol = solvers._solve_lp_using_cvxopt( c= - np.sum(DL, axis=0), G=Ai, h=bi, A=Ae, b=be) if sol['status'] == "dual infeasible": # Dual infeasible -> primal unbounded -> value>epsilon return True if sol['primal objective'] > abs_tol: return True return False def unique_equalityset(C, D, b, af, bf, abs_tol=1e-7, verbose=0): """Return equality set E with the following property: P_E = {x | af x = bf} intersection P where P is the polytope C x + D y < b The inequalities have to be satisfied with equality everywhere on the face defined by af and bf. 
""" if D is not None: A = np.hstack([C, D]) a = np.hstack([af, np.zeros(D.shape[1])]) else: A = C a = af E = [] for i in range(A.shape[0]): A_i = np.array(A[i, :]) b_i = b[i] sol = solvers._solve_lp_using_cvxopt( c=A_i, G=A, h=b, A=a.T, b=bf) if sol['status'] != "optimal": raise Exception( "unique_equalityset: LP returned status " + str(sol['status'])) if np.abs(sol['primal objective'] - b_i) < abs_tol: # Constraint is active everywhere E.append(i) if len(E) == 0: raise Exception("unique_equalityset: empty E") return np.array(E) def unique_equalityset2(C, D, b, opt_sol, abs_tol=1e-7): A = np.hstack([C, D]) E0 = np.nonzero(np.abs(np.dot(A, opt_sol) - b) < abs_tol)[0] af, bf = proj_aff(C[E0, :], D[E0, :], b[E0], expected_dim=1) # stack ineq = np.hstack([af, np.zeros(D.shape[1])]) G = np.vstack([A, np.vstack([ineq, -ineq])]) h = np.hstack([b, np.hstack([bf, -bf])]) # shape m = G.shape[0] n = G.shape[1] # ht e = 1e-3 v = np.vstack([np.zeros([1, n]), np.eye(n)]).T v = v - np.array([np.mean(v, axis=1)]).T v = v * e ht = h + np.amin(-np.dot(G, v), axis=1) # stack H1 = np.hstack([G, -np.eye(m)]) H2 = np.hstack([G, np.zeros([m, m])]) H3 = np.hstack([np.zeros([m, n]), -np.eye(m)]) H = np.vstack([H1, np.vstack([H2, H3])]) h = np.hstack([ht, np.hstack([h, np.zeros(m)])]) c = np.hstack([np.zeros(n), np.ones(m)]) sol = solvers.lpsolve(c, H, h, solver='glpk') if not sol['status'] == "optimal": raise Exception( "unique_equalityset: LP returned status " + str(sol['status'])) opt_sol2 = np.array(sol['x']).flatten() x = opt_sol2[range(n)] s = opt_sol2[range(n, len(opt_sol2))] E = np.nonzero(s > abs_tol)[0] print(E) E = np.sort(E[np.nonzero(E < C.shape[0])]) # Check that they define the same projection at, bt = proj_aff(C[E, :], D[E, :], b[E]) if bt.size != 1 or np.sum(np.abs(at - af)) + np.abs(bt - bf) > abs_tol: raise Exception("unique_equalityset2: affine hulls not the same") return E def cheby_center(C, D, b): """Calculate Chebyshev center for the polytope `C x + D y <= b`. 
Input: `C, D, b`: Polytope parameters Output: `x_0, y_0`: The chebyshev centra `boolean`: True if a point could be found, False otherwise. """ d = C.shape[1] k = D.shape[1] A = np.hstack([C, D]) dim = np.shape(A)[1] c = - np.r_[np.zeros(dim), 1] norm2 = np.sqrt(np.sum(A * A, axis=1)) G = np.c_[A, norm2] sol = solvers.lpsolve(c, G, h=b, solver='glpk') if sol['status'] == "optimal": opt = np.array(sol['x'][0:-1]).flatten() return opt[range(d)], opt[range(d, d + k)], True else: return np.zeros(d), np.zeros(k), False def normalize(AA, bb, abs_tol=1e-7): """Normalize `A x = b` such that `A'A = 1` and `b > 0`. Also, remove duplicate lines. """ if AA.size == 0: return AA, bb dim = AA.size / bb.size A = AA.copy().reshape(bb.size, dim) b = bb.copy().reshape(bb.size, 1) # Remove zero lines keepind = np.nonzero( np.sum(np.abs(np.hstack([A, b])), axis=1) > abs_tol)[0] A = A[keepind, :] b = b[keepind] # Normalize anorm = np.sqrt(np.sum(A * A, axis=1)) for i in range(len(anorm)): A[i, :] = A[i, :] * np.sign(b[i, 0]) / anorm[i] b[i, 0] = np.sign(b[i, 0]) * b[i, 0] / anorm[i] # Remove duplicate rows keep_row = [] for i in range(len(anorm)): unique = True for j in range(i + 1, len(anorm)): test = (np.sum(np.abs(A[i, :] - A[j, :])) + np.abs(b[i, 0] - b[j, 0])) if test < abs_tol: unique = False break if unique: keep_row.append(i) A_n = A[keep_row, :] b_n = b[keep_row, 0] # Return flat A if only one row if A_n.size == dim: A_n = A_n.flatten() return A_n, b_n.flatten() def rank(A, eps=1e-15): u, s, vh = linalg.svd(A) m = A.shape[0] n = A.shape[1] tol = np.amax([m, n]) * np.amax(s) * eps return np.sum(s > tol) def null_space(A, eps=1e-15, nonempty=False): """Returns the null space N_A to matrix A such that A N_A = 0.""" u, s, v = linalg.svd(A, full_matrices=1) m = A.shape[0] n = A.shape[1] tol = np.amax([m, n]) * np.amax(s) * eps rank = np.sum(s > tol) N_space = v[range(rank, n), :].T if nonempty and (len(N_space) == 0): N_space = v[range(np.amax(n - 1, 1), n), :] return N_space
self.E_r = E self.ar = a self.br = b
identifier_body
esp.py
# Copyright (c) 2011-2014 by California Institute of Technology # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the California Institute of Technology nor # the names of its contributors may be used to endorse or promote # products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH # OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF # USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT # OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. r"""Equality Set Projection (ESP). Non-vertex polytope projection method from - https://web.archive.org/web/20150103142532/ https://www-control.eng.cam.ac.uk/~cnj22/research/projection.html - https://infoscience.epfl.ch/record/169768 Very unstable, can not handle complex polytopes. Reference ========= \cite{Jones04} """ # Created by P. 
Nilsson, 8/2/11 import pickle import numpy as np from scipy import io as sio from scipy import linalg from polytope import solvers class Ridge(object): """A ridge. Attributes: - `E_r`: Equality set of a facet - `ar, br`: Affine hull of the facet s.t. P_{E_0} = P intersection {x | ar x = br}. """ def __init__(self, E, a, b): self.E_r = E self.ar = a self.br = b class Ridge_Facet(object): """A ridge facet. Attributes: - `E_r`: Equality set of a ridge - `ar,br`: Affine hull of the ridge s.t. P_{E_f} intersection {x | ar x = br} defines the ridge, where E_f is the equality set of the facet. - `E_0`: Equality set of a facet - `af,bf`: Affine hull of the facet. """ def __init__(self, E_r, ar, br, E_0, af, bf): self.E_r = E_r self.ar = ar self.br = br self.E_0 = E_0 self.af = af self.bf = bf def esp(CC, DD, bb, centered=False, abs_tol=1e-10, verbose=0): """Project polytope [C D] x <= b onto C coordinates. Projects the polytope [C D] x <= b onto the coordinates that correspond to C. The projection of the polytope P = {[C D]x <= b} where C is M x D and D is M x K is defined as proj(P) = {x in R^d | exist y in R^k s.t Cx + Dy < b} """ if 'glpk' not in solvers.installed_solvers: raise Exception( "projection_esp error:" " Equality set projection requires `cvxopt.glpk` to run.") # Remove zero columns and rows nonzerorows = np.nonzero( np.sum(np.abs(np.hstack([CC, DD])), axis=1) > abs_tol)[0] nonzeroxcols = np.nonzero(np.sum(np.abs(CC), axis=0) > abs_tol)[0] nonzeroycols = np.nonzero(np.sum(np.abs(DD), axis=0) > abs_tol)[0] C = CC[nonzerorows, :].copy() D = DD[nonzerorows, :].copy() C = C[:, nonzeroxcols] D = D[:, nonzeroycols] b = bb[nonzerorows].copy() # Make sure origo is inside polytope if not centered: xc0, yc0, trans = cheby_center(C, D, b) if trans: b = b - np.dot(C, xc0).flatten() - np.dot(D, yc0).flatten() else: b = b else: trans = False d = C.shape[1] k = D.shape[1] if verbose > 0: print("Projecting from dim " + str(d + k) + " to " + str(d)) if k == 0: # Not projecting 
return C, bb, [] if d == 1: # Projection to 1D c = np.zeros(d + k) c[0] = 1 G = np.hstack([C, D]) sol = solvers.lpsolve(c, G, b, solver='glpk') if sol['status'] != "optimal": raise Exception( "esp: projection to 1D is not full-dimensional, " "LP returned status " + str(sol['status'])) min_sol = np.array(sol['x']).flatten() min_dual_sol = np.array(sol['z']).flatten() sol = solvers.lpsolve(-c, G, b, solver='glpk') if sol['status'] != "optimal": raise Exception( "esp: projection to 1D is not full-dimensional, " + "LP returned status " + str(sol['status'])) max_sol = np.array(sol['x']).flatten() max_dual_sol = np.array(sol['z']).flatten() # min, max x_min = min_sol[0] x_max = max_sol[0] y_min = min_sol[range(1, k + 1)] y_max = max_sol[range(1, k + 1)] if is_dual_degenerate(c, G, b, None, None, min_sol, min_dual_sol): # Min case, relax constraint a little to avoid infeasibility E_min = unique_equalityset( C, D, b, np.array([1.]), x_min + abs_tol / 3, abs_tol=abs_tol) else: E_min = np.nonzero(np.abs(np.dot(G, min_sol) - b) < abs_tol)[0] if is_dual_degenerate(c, G, b, None, None, max_sol, max_dual_sol): # Max case, relax constraint a little to avoid infeasibility E_max = unique_equalityset( C, D, b, np.array([1.]), x_max - abs_tol / 3, abs_tol=abs_tol) else: E_max = np.nonzero(np.abs(np.dot(G, max_sol) - b) < abs_tol)[0] G = np.array([[1.], [-1.]]) g = np.array([x_max, -x_min]) # Relocate if trans: g = g + np.dot(G, xc0) # Return zero cols/rows E_max = nonzerorows[E_max] E_min = nonzerorows[E_min] if verbose > 0: print( "Returning projection from dim " + str(d + k) + " to dim 1 \n") return G, g, [E_max, E_min] E = [] L = [] E_0, af, bf = shoot(C, D, b, abs_tol=abs_tol) ridge_list = ridge(C, D, b, E_0, af, bf, abs_tol=abs_tol, verbose=verbose) for i in range(len(ridge_list)): r = ridge_list[i] L.append(Ridge_Facet(r.E_r, r.ar, r.br, E_0, af, bf)) G = af.T g = bf if verbose > 0: print("\nStarting eq set " + str(E_0) + "\nStarting ridges ") for rr in L: print(str(rr.E_r)) 
E.append(E_0) while len(L) > 0: rid_fac1 = L[0] if verbose > 0: print("\nLooking for neighbors to " + str(rid_fac1.E_0) + " and " + str(rid_fac1.E_r) + " ..") E_adj, a_adj, b_adj = adjacent(C, D, b, rid_fac1, abs_tol=abs_tol) if verbose > 0: print("found neighbor " + str(E_adj) + ". \n\nLooking for ridges of neighbor..") ridge_list = ridge( C, D, b, E_adj, a_adj, b_adj, abs_tol=abs_tol, verbose=verbose) if verbose > 0: print("found " + str(len(ridge_list)) + " ridges\n") found_org = False for i in range(len(ridge_list)): r = ridge_list[i] E_r = r.E_r ar = r.ar br = r.br found = False for j in range(len(L)): rid_fac2 = L[j] A_r = rid_fac2.E_r if len(A_r) != len(E_r): continue t1 = np.sort(np.array(A_r)) t2 = np.sort(np.array(E_r)) if np.sum(np.abs(t1 - t2)) < abs_tol: found = True break if found: if verbose > 0: print("Ridge " + str(E_r) + " already visited, removing from L..") if rid_fac2 == rid_fac1: found_org = True L.remove(rid_fac2) else: if verbose > 0: print("Adding ridge-facet " + str(E_adj) + " " + str(E_r) + "") L.append(Ridge_Facet(E_r, ar, br, E_adj, a_adj, b_adj)) if not found_org: print("Expected ridge " + str(rid_fac1.E_r)) print("but got ridges ") for rid in ridge_list: print(rid.E_r) raise Exception( "esp: ridge did not return neighboring ridge as expected") G = np.vstack([G, a_adj]) g = np.hstack([g, b_adj]) E.append(E_adj) # Restore center if trans: g = g + np.dot(G, xc0) # Return zero rows for Ef in E: Ef = nonzerorows[Ef] return G, g, E def shoot(C, D, b, maxiter=1000, abs_tol=1e-7): """Return random equality set of P that projects on a projection facet. Returns randomly selected equality set E_0 of P such that the projection of the equality set is a facet of the projection. 
@param C: Matrix defining the polytope Cx+Dy <= b @param D: Matrix defining the polytope Cx+Dy <= b @param b: Vector defining the polytope Cx+Dy <= b @return: `E_0,af,bf`: Equality set and affine hull """ d = C.shape[1] k = D.shape[1] iter = 0 while True: if iter > maxiter: raise Exception( "shoot: could not find starting equality set") gamma = np.random.rand(d) - 0.5 c = np.zeros(k + 1) c[0] = -1 G = np.hstack([np.array([np.dot(C, gamma)]).T, D]) sol = solvers.lpsolve(c, G, b, solver='glpk') opt_sol = np.array(sol['x']).flatten() opt_dual = np.array(sol['z']).flatten() r_opt = opt_sol[0] y_opt = np.array(opt_sol[range(1, len(opt_sol))]).flatten() x_opt = r_opt * gamma E_0 = np.nonzero( np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol)[0] DE0 = D[E_0, :] CE0 = C[E_0, :] b0 = b[E_0] if rank(np.dot(null_space(DE0.T).T, CE0)) == 1: break iter += 1 af, bf = proj_aff(CE0, DE0, b0, abs_tol=abs_tol) if is_dual_degenerate(c, G, b, None, None, opt_sol, opt_dual, abs_tol=abs_tol): E_0 = unique_equalityset(C, D, b, af, bf, abs_tol=abs_tol) af, bf = proj_aff(C[E_0, :], D[E_0, :], b[E_0]) if len(bf) > 1: raise Exception("shoot: wrong dimension of affine hull") return E_0, af.flatten(), bf def ridge(C, D, b, E, af, bf, abs_tol=1e-7, verbose=0): """Compute all ridges of a facet in the projection. 
Input: `C,D,b`: Original polytope data `E,af,bf`: Equality set and affine hull of a facet in the projection Output: `ridge_list`: A list containing all the ridges of the facet as Ridge objects """ d = C.shape[1] k = D.shape[1] Er_list = [] q = C.shape[0] E_c = np.setdiff1d(range(q), E) # E slices C_E = C[E, :] D_E = D[E, :] b_E = b[E, :] # E_c slices C_Ec = C[E_c, :] D_Ec = D[E_c, :] b_Ec = b[E_c] # dots S = C_Ec - np.dot(np.dot(D_Ec, linalg.pinv(D_E)), C_E) L = np.dot(D_Ec, null_space(D_E)) t = b_Ec - np.dot(D_Ec, np.dot(linalg.pinv(D_E), b_E)) if rank(np.hstack([C_E, D_E])) < k + 1: if verbose > 1: print("Doing recursive ESP call") u, s, v = linalg.svd(np.array([af]), full_matrices=1) sigma = s[0] v = v.T * u[0, 0] # Correct sign V_hat = v[:, [0]] V_tilde = v[:, range(1, v.shape[1])] Cnew = np.dot(S, V_tilde) Dnew = L bnew = t - np.dot(S, V_hat).flatten() * bf / sigma Anew = np.hstack([Cnew, Dnew]) xc2, yc2, cen2 = cheby_center(Cnew, Dnew, bnew) bnew = bnew - np.dot(Cnew, xc2).flatten() - np.dot(Dnew, yc2).flatten() Gt, gt, E_t = esp( Cnew, Dnew, bnew, centered=True, abs_tol=abs_tol, verbose=0) if (len(E_t[0]) == 0) or (len(E_t[1]) == 0): raise Exception( "ridge: recursive call did not return any equality sets") for i in range(len(E_t)): E_f = E_t[i] er = np.sort(np.hstack([E, E_c[E_f]])) ar = np.dot(Gt[i, :], V_tilde.T).flatten() br0 = gt[i].flatten() # Make orthogonal to facet ar = ar - af * np.dot(af.flatten(), ar.flatten()) br = br0 - bf * np.dot(af.flatten(), ar.flatten()) # Normalize and make ridge equation point outwards norm = np.sqrt(np.sum(ar * ar)) ar = ar * np.sign(br) / norm br = br * np.sign(br) / norm # Restore center br = br + np.dot(Gt[i, :], xc2) / norm if len(ar) > d: raise Exception("ridge: wrong length of new ridge!") Er_list.append(Ridge(er, ar, br)) else: if verbose > 0: print("Doing direct calculation of ridges") X = np.arange(S.shape[0]) while len(X) > 0: i = X[0] X = np.setdiff1d(X, i) if np.linalg.norm(S[i, :]) < abs_tol: continue Si = 
S[i, :] Si = Si / np.linalg.norm(Si) if np.linalg.norm(af - np.dot(Si, af) * Si) > abs_tol: test1 = null_space( np.vstack([ np.hstack([af, bf]), np.hstack([S[i, :], t[i]])]), nonempty=True) test2 = np.hstack([S, np.array([t]).T]) test = np.dot(test1.T, test2.T) test = np.sum(np.abs(test), 0) Q_i = np.nonzero(test > abs_tol)[0] Q = np.nonzero(test < abs_tol)[0] X = np.setdiff1d(X, Q) # Have Q_i Sq = S[Q_i, :] tq = t[Q_i] c = np.zeros(d + 1) c[0] = 1 Gup = np.hstack([-np.ones([Sq.shape[0], 1]), Sq]) Gdo = np.hstack([-1, np.zeros(Sq.shape[1])]) G = np.vstack([Gup, Gdo]) h = np.hstack([tq, 1]) Al = np.zeros([2, 1]) Ar = np.vstack([af, S[i, :]]) A = np.hstack([Al, Ar]) bb = np.hstack([bf, t[i]]) sol = solvers._solve_lp_using_cvxopt( c, G, h, A=A, b=bb) if sol['status'] == 'optimal': tau = sol['x'][0] if tau < -abs_tol: ar = np.array([S[i, :]]).flatten() br = t[i].flatten() # Make orthogonal to facet ar = ar - af * np.dot(af.flatten(), ar.flatten()) br = br - bf * np.dot(af.flatten(), ar.flatten()) # Normalize and make ridge equation point outwards norm = np.sqrt(np.sum(ar * ar)) ar = ar / norm br = br / norm # accumulate Er_list.append( Ridge(np.sort(np.hstack([E, E_c[Q]])), ar, br)) return Er_list def adjacent(C, D, b, rid_fac, abs_tol=1e-7): """Compute the (unique) adjacent facet. @param rid_fac: A Ridge_Facet object containing the parameters for a facet and one of its ridges. 
@return: (E_adj,a_adj,b_adj): The equality set and parameters for the adjacent facet such that:: P_{E_adj} = P intersection {x | a_adj x = b_adj} """ E = rid_fac.E_0 af = rid_fac.af bf = rid_fac.bf # E_r = rid_fac.E_r ar = rid_fac.ar br = rid_fac.br # shape d = C.shape[1] k = D.shape[1] # E_r slices C_er = C[E_r, :] D_er = D[E_r, :] b_er = b[E_r] # stack c = -np.hstack([ar, np.zeros(k)]) G = np.hstack([C_er, D_er]) h = b_er A = np.hstack([af, np.zeros(k)]) sol = solvers._solve_lp_using_cvxopt( c, G, h, A=A.T, b=bf * (1 - 0.01)) if sol['status'] != "optimal": print(G) print(h) print(af) print(bf) print(ar) print(br) print(np.dot(af, ar)) data = {} data["C"] = C data["D"] = D data["b"] = b sio.savemat("matlabdata", data) with open('polytope.p', 'wb') as f: pickle.dump(data, f) raise Exception( "adjacent: Lp returned status " + str(sol['status'])) opt_sol = np.array(sol['x']).flatten() dual_opt_sol = np.array(sol['z']).flatten() x_opt = opt_sol[range(d)] y_opt = opt_sol[range(d, d + k)] if is_dual_degenerate( c.flatten(), G, h, A, bf * (1 - 0.01), opt_sol, dual_opt_sol, abs_tol=abs_tol): # If degenerate, compute affine hull and take preimage E_temp = np.nonzero(np.abs(np.dot(G, opt_sol) - h) < abs_tol)[0] a_temp, b_temp = proj_aff( C_er[E_temp, :], D_er[E_temp, :], b_er[E_temp], expected_dim=1, abs_tol=abs_tol) E_adj = unique_equalityset(C, D, b, a_temp, b_temp, abs_tol=abs_tol) if len(E_adj) == 0: data = {} data["C"] = C data["D"] = D data["b"] = b data["Er"] = E_r + 1 data["ar"] = ar data["br"] = br data["Ef"] = E + 1 data["af"] = af data["bf"] = bf sio.savemat("matlabdata", data) raise Exception( "adjacent: equality set computation returned empty set") else: r = np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol E_adj = np.nonzero(r)[0] C_eadj = C[E_adj, :] D_eadj = D[E_adj, :] b_eadj = b[E_adj] af_adj, bf_adj = proj_aff(C_eadj, D_eadj, b_eadj, abs_tol=abs_tol) return E_adj, af_adj, bf_adj def proj_aff(Ce, De, be, expected_dim=None, abs_tol=1e-7): """Affine 
projection. Compute the set aff = {x | Ce x + De y = be} on the form aff = ({x | a x = b} intersection {Ce x + De y < be}). Input: Polytope parameters Ce, De and be Output: Constants a and b """ # Remove zero columns ind = np.nonzero(np.sum(np.abs(De), axis=0) > abs_tol)[0] D = De[:, ind] if D.shape[1] == 0: a = Ce b = be a_n, b_n = normalize(a, b) if expected_dim is not None: if expected_dim != b_n.size: raise Exception( "proj_aff: wrong dimension calculated in 1") return a_n.flatten(), b_n sh = np.shape(D.T) m = sh[0] n = sh[1] nDe = null_space(D.T) a = np.dot(nDe.T, Ce) b = np.dot(nDe.T, be) a_n, b_n = normalize(a, b) if expected_dim is not None: if expected_dim != b_n.size: raise Exception("proj_aff: wrong dimension calculated in 2") return a_n, b_n def is_dual_degenerate(c, G, h, A, b, x_opt, z_opt, abs_tol=1e-7): """Return `True` if pair of dual problems is dual degenerate. Checks if the pair of dual problems:: (P): min c'x (D): max h'z + b'y s.t Gx <= h s.t G'z + A'y = c Ax = b z <= 0 is dual degenerate, i.e. if (P) has several optimal solutions. Optimal solutions x* and z* are required. Input: `G,h,A,b`: Parameters of (P) `x_opt`: One optimal solution to (P) `z_opt`: The optimal solution to (D) corresponding to _inequality constraints_ in (P) Output: `dual`: Boolean indicating whether (P) has many optimal solutions. 
""" D = - G d = - h.flatten() mu = - z_opt.flatten() # mu >= 0 # Active constraints I = np.nonzero(np.abs(np.dot(D, x_opt).flatten() - d) < abs_tol)[0] # Positive elements in dual opt J = np.nonzero(mu > abs_tol)[0] # i, j i = mu < abs_tol # Zero elements in dual opt i = i.astype(int) j = np.zeros(len(mu), dtype=int) j[I] = 1 # 1 if active # Indices where active constraints have 0 dual opt L = np.nonzero(i + j == 2)[0] # sizes nI = len(I) nJ = len(J) nL = len(L) # constraints DI = D[I, :] # Active constraints DJ = D[J, :] # Constraints with positive lagrange mult DL = D[L, :] # Active constraints with zero dual opt dual = 0 if A is None: test = DI else: test = np.vstack([DI, A]) if rank(test) < np.amin(DI.shape): return True else: if len(L) > 0: if A is None: Ae = DJ else: Ae = np.vstack([DJ, A]) be = np.zeros(Ae.shape[0]) Ai = - DL bi = np.zeros(nL) sol = solvers._solve_lp_using_cvxopt( c= - np.sum(DL, axis=0), G=Ai, h=bi, A=Ae, b=be) if sol['status'] == "dual infeasible": # Dual infeasible -> primal unbounded -> value>epsilon return True if sol['primal objective'] > abs_tol: return True return False def unique_equalityset(C, D, b, af, bf, abs_tol=1e-7, verbose=0): """Return equality set E with the following property: P_E = {x | af x = bf} intersection P where P is the polytope C x + D y < b The inequalities have to be satisfied with equality everywhere on the face defined by af and bf. 
""" if D is not None: A = np.hstack([C, D]) a = np.hstack([af, np.zeros(D.shape[1])]) else: A = C a = af E = [] for i in range(A.shape[0]): A_i = np.array(A[i, :]) b_i = b[i] sol = solvers._solve_lp_using_cvxopt( c=A_i, G=A, h=b, A=a.T, b=bf) if sol['status'] != "optimal": raise Exception( "unique_equalityset: LP returned status " + str(sol['status'])) if np.abs(sol['primal objective'] - b_i) < abs_tol: # Constraint is active everywhere E.append(i) if len(E) == 0: raise Exception("unique_equalityset: empty E") return np.array(E) def unique_equalityset2(C, D, b, opt_sol, abs_tol=1e-7): A = np.hstack([C, D]) E0 = np.nonzero(np.abs(np.dot(A, opt_sol) - b) < abs_tol)[0] af, bf = proj_aff(C[E0, :], D[E0, :], b[E0], expected_dim=1) # stack ineq = np.hstack([af, np.zeros(D.shape[1])]) G = np.vstack([A, np.vstack([ineq, -ineq])]) h = np.hstack([b, np.hstack([bf, -bf])]) # shape m = G.shape[0] n = G.shape[1] # ht e = 1e-3 v = np.vstack([np.zeros([1, n]), np.eye(n)]).T v = v - np.array([np.mean(v, axis=1)]).T v = v * e ht = h + np.amin(-np.dot(G, v), axis=1) # stack H1 = np.hstack([G, -np.eye(m)]) H2 = np.hstack([G, np.zeros([m, m])]) H3 = np.hstack([np.zeros([m, n]), -np.eye(m)]) H = np.vstack([H1, np.vstack([H2, H3])]) h = np.hstack([ht, np.hstack([h, np.zeros(m)])]) c = np.hstack([np.zeros(n), np.ones(m)]) sol = solvers.lpsolve(c, H, h, solver='glpk') if not sol['status'] == "optimal": raise Exception( "unique_equalityset: LP returned status " + str(sol['status'])) opt_sol2 = np.array(sol['x']).flatten() x = opt_sol2[range(n)] s = opt_sol2[range(n, len(opt_sol2))] E = np.nonzero(s > abs_tol)[0] print(E) E = np.sort(E[np.nonzero(E < C.shape[0])]) # Check that they define the same projection at, bt = proj_aff(C[E, :], D[E, :], b[E]) if bt.size != 1 or np.sum(np.abs(at - af)) + np.abs(bt - bf) > abs_tol: raise Exception("unique_equalityset2: affine hulls not the same") return E def cheby_center(C, D, b): """Calculate Chebyshev center for the polytope `C x + D y <= b`. 
Input: `C, D, b`: Polytope parameters Output: `x_0, y_0`: The chebyshev centra `boolean`: True if a point could be found, False otherwise. """ d = C.shape[1] k = D.shape[1] A = np.hstack([C, D]) dim = np.shape(A)[1] c = - np.r_[np.zeros(dim), 1] norm2 = np.sqrt(np.sum(A * A, axis=1)) G = np.c_[A, norm2] sol = solvers.lpsolve(c, G, h=b, solver='glpk') if sol['status'] == "optimal": opt = np.array(sol['x'][0:-1]).flatten() return opt[range(d)], opt[range(d, d + k)], True else: return np.zeros(d), np.zeros(k), False def
(AA, bb, abs_tol=1e-7): """Normalize `A x = b` such that `A'A = 1` and `b > 0`. Also, remove duplicate lines. """ if AA.size == 0: return AA, bb dim = AA.size / bb.size A = AA.copy().reshape(bb.size, dim) b = bb.copy().reshape(bb.size, 1) # Remove zero lines keepind = np.nonzero( np.sum(np.abs(np.hstack([A, b])), axis=1) > abs_tol)[0] A = A[keepind, :] b = b[keepind] # Normalize anorm = np.sqrt(np.sum(A * A, axis=1)) for i in range(len(anorm)): A[i, :] = A[i, :] * np.sign(b[i, 0]) / anorm[i] b[i, 0] = np.sign(b[i, 0]) * b[i, 0] / anorm[i] # Remove duplicate rows keep_row = [] for i in range(len(anorm)): unique = True for j in range(i + 1, len(anorm)): test = (np.sum(np.abs(A[i, :] - A[j, :])) + np.abs(b[i, 0] - b[j, 0])) if test < abs_tol: unique = False break if unique: keep_row.append(i) A_n = A[keep_row, :] b_n = b[keep_row, 0] # Return flat A if only one row if A_n.size == dim: A_n = A_n.flatten() return A_n, b_n.flatten() def rank(A, eps=1e-15): u, s, vh = linalg.svd(A) m = A.shape[0] n = A.shape[1] tol = np.amax([m, n]) * np.amax(s) * eps return np.sum(s > tol) def null_space(A, eps=1e-15, nonempty=False): """Returns the null space N_A to matrix A such that A N_A = 0.""" u, s, v = linalg.svd(A, full_matrices=1) m = A.shape[0] n = A.shape[1] tol = np.amax([m, n]) * np.amax(s) * eps rank = np.sum(s > tol) N_space = v[range(rank, n), :].T if nonempty and (len(N_space) == 0): N_space = v[range(np.amax(n - 1, 1), n), :] return N_space
normalize
identifier_name
esp.py
# Copyright (c) 2011-2014 by California Institute of Technology # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the California Institute of Technology nor # the names of its contributors may be used to endorse or promote # products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH # OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF # USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT # OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. r"""Equality Set Projection (ESP). Non-vertex polytope projection method from - https://web.archive.org/web/20150103142532/ https://www-control.eng.cam.ac.uk/~cnj22/research/projection.html - https://infoscience.epfl.ch/record/169768 Very unstable, can not handle complex polytopes. Reference ========= \cite{Jones04} """ # Created by P. 
Nilsson, 8/2/11 import pickle import numpy as np from scipy import io as sio from scipy import linalg from polytope import solvers class Ridge(object): """A ridge. Attributes: - `E_r`: Equality set of a facet - `ar, br`: Affine hull of the facet s.t. P_{E_0} = P intersection {x | ar x = br}. """ def __init__(self, E, a, b): self.E_r = E self.ar = a self.br = b class Ridge_Facet(object): """A ridge facet. Attributes: - `E_r`: Equality set of a ridge - `ar,br`: Affine hull of the ridge s.t. P_{E_f} intersection {x | ar x = br} defines the ridge, where E_f is the equality set of the facet. - `E_0`: Equality set of a facet - `af,bf`: Affine hull of the facet. """ def __init__(self, E_r, ar, br, E_0, af, bf): self.E_r = E_r self.ar = ar self.br = br self.E_0 = E_0 self.af = af self.bf = bf def esp(CC, DD, bb, centered=False, abs_tol=1e-10, verbose=0): """Project polytope [C D] x <= b onto C coordinates. Projects the polytope [C D] x <= b onto the coordinates that correspond to C. The projection of the polytope P = {[C D]x <= b} where C is M x D and D is M x K is defined as proj(P) = {x in R^d | exist y in R^k s.t Cx + Dy < b} """ if 'glpk' not in solvers.installed_solvers: raise Exception( "projection_esp error:" " Equality set projection requires `cvxopt.glpk` to run.") # Remove zero columns and rows nonzerorows = np.nonzero( np.sum(np.abs(np.hstack([CC, DD])), axis=1) > abs_tol)[0] nonzeroxcols = np.nonzero(np.sum(np.abs(CC), axis=0) > abs_tol)[0] nonzeroycols = np.nonzero(np.sum(np.abs(DD), axis=0) > abs_tol)[0] C = CC[nonzerorows, :].copy() D = DD[nonzerorows, :].copy() C = C[:, nonzeroxcols] D = D[:, nonzeroycols] b = bb[nonzerorows].copy() # Make sure origo is inside polytope if not centered: xc0, yc0, trans = cheby_center(C, D, b) if trans: b = b - np.dot(C, xc0).flatten() - np.dot(D, yc0).flatten() else: b = b else: trans = False d = C.shape[1] k = D.shape[1] if verbose > 0: print("Projecting from dim " + str(d + k) + " to " + str(d)) if k == 0: # Not projecting 
return C, bb, [] if d == 1: # Projection to 1D c = np.zeros(d + k) c[0] = 1 G = np.hstack([C, D]) sol = solvers.lpsolve(c, G, b, solver='glpk') if sol['status'] != "optimal": raise Exception( "esp: projection to 1D is not full-dimensional, " "LP returned status " + str(sol['status'])) min_sol = np.array(sol['x']).flatten() min_dual_sol = np.array(sol['z']).flatten() sol = solvers.lpsolve(-c, G, b, solver='glpk') if sol['status'] != "optimal": raise Exception( "esp: projection to 1D is not full-dimensional, " + "LP returned status " + str(sol['status'])) max_sol = np.array(sol['x']).flatten() max_dual_sol = np.array(sol['z']).flatten() # min, max x_min = min_sol[0] x_max = max_sol[0] y_min = min_sol[range(1, k + 1)] y_max = max_sol[range(1, k + 1)] if is_dual_degenerate(c, G, b, None, None, min_sol, min_dual_sol): # Min case, relax constraint a little to avoid infeasibility E_min = unique_equalityset( C, D, b, np.array([1.]), x_min + abs_tol / 3, abs_tol=abs_tol) else: E_min = np.nonzero(np.abs(np.dot(G, min_sol) - b) < abs_tol)[0] if is_dual_degenerate(c, G, b, None, None, max_sol, max_dual_sol): # Max case, relax constraint a little to avoid infeasibility E_max = unique_equalityset( C, D, b, np.array([1.]), x_max - abs_tol / 3, abs_tol=abs_tol) else: E_max = np.nonzero(np.abs(np.dot(G, max_sol) - b) < abs_tol)[0] G = np.array([[1.], [-1.]]) g = np.array([x_max, -x_min]) # Relocate if trans: g = g + np.dot(G, xc0) # Return zero cols/rows E_max = nonzerorows[E_max] E_min = nonzerorows[E_min] if verbose > 0: print( "Returning projection from dim " + str(d + k) + " to dim 1 \n") return G, g, [E_max, E_min] E = [] L = [] E_0, af, bf = shoot(C, D, b, abs_tol=abs_tol) ridge_list = ridge(C, D, b, E_0, af, bf, abs_tol=abs_tol, verbose=verbose) for i in range(len(ridge_list)): r = ridge_list[i] L.append(Ridge_Facet(r.E_r, r.ar, r.br, E_0, af, bf)) G = af.T g = bf if verbose > 0: print("\nStarting eq set " + str(E_0) + "\nStarting ridges ") for rr in L: print(str(rr.E_r)) 
E.append(E_0) while len(L) > 0: rid_fac1 = L[0] if verbose > 0: print("\nLooking for neighbors to " + str(rid_fac1.E_0) + " and " + str(rid_fac1.E_r) + " ..") E_adj, a_adj, b_adj = adjacent(C, D, b, rid_fac1, abs_tol=abs_tol) if verbose > 0: print("found neighbor " + str(E_adj) + ". \n\nLooking for ridges of neighbor..") ridge_list = ridge( C, D, b, E_adj, a_adj, b_adj, abs_tol=abs_tol, verbose=verbose) if verbose > 0: print("found " + str(len(ridge_list)) + " ridges\n") found_org = False for i in range(len(ridge_list)): r = ridge_list[i] E_r = r.E_r ar = r.ar br = r.br found = False for j in range(len(L)): rid_fac2 = L[j] A_r = rid_fac2.E_r if len(A_r) != len(E_r): continue t1 = np.sort(np.array(A_r)) t2 = np.sort(np.array(E_r)) if np.sum(np.abs(t1 - t2)) < abs_tol: found = True break if found: if verbose > 0: print("Ridge " + str(E_r) + " already visited, removing from L..") if rid_fac2 == rid_fac1: found_org = True L.remove(rid_fac2) else: if verbose > 0: print("Adding ridge-facet " + str(E_adj) + " " + str(E_r) + "") L.append(Ridge_Facet(E_r, ar, br, E_adj, a_adj, b_adj)) if not found_org: print("Expected ridge " + str(rid_fac1.E_r)) print("but got ridges ") for rid in ridge_list: print(rid.E_r) raise Exception( "esp: ridge did not return neighboring ridge as expected") G = np.vstack([G, a_adj]) g = np.hstack([g, b_adj]) E.append(E_adj) # Restore center if trans: g = g + np.dot(G, xc0) # Return zero rows for Ef in E: Ef = nonzerorows[Ef] return G, g, E def shoot(C, D, b, maxiter=1000, abs_tol=1e-7): """Return random equality set of P that projects on a projection facet. Returns randomly selected equality set E_0 of P such that the projection of the equality set is a facet of the projection. 
@param C: Matrix defining the polytope Cx+Dy <= b @param D: Matrix defining the polytope Cx+Dy <= b @param b: Vector defining the polytope Cx+Dy <= b @return: `E_0,af,bf`: Equality set and affine hull """ d = C.shape[1] k = D.shape[1] iter = 0 while True: if iter > maxiter: raise Exception( "shoot: could not find starting equality set") gamma = np.random.rand(d) - 0.5 c = np.zeros(k + 1) c[0] = -1 G = np.hstack([np.array([np.dot(C, gamma)]).T, D]) sol = solvers.lpsolve(c, G, b, solver='glpk') opt_sol = np.array(sol['x']).flatten() opt_dual = np.array(sol['z']).flatten() r_opt = opt_sol[0] y_opt = np.array(opt_sol[range(1, len(opt_sol))]).flatten() x_opt = r_opt * gamma E_0 = np.nonzero( np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol)[0] DE0 = D[E_0, :] CE0 = C[E_0, :] b0 = b[E_0] if rank(np.dot(null_space(DE0.T).T, CE0)) == 1: break iter += 1 af, bf = proj_aff(CE0, DE0, b0, abs_tol=abs_tol) if is_dual_degenerate(c, G, b, None, None, opt_sol, opt_dual, abs_tol=abs_tol): E_0 = unique_equalityset(C, D, b, af, bf, abs_tol=abs_tol) af, bf = proj_aff(C[E_0, :], D[E_0, :], b[E_0]) if len(bf) > 1: raise Exception("shoot: wrong dimension of affine hull") return E_0, af.flatten(), bf def ridge(C, D, b, E, af, bf, abs_tol=1e-7, verbose=0): """Compute all ridges of a facet in the projection. 
Input: `C,D,b`: Original polytope data `E,af,bf`: Equality set and affine hull of a facet in the projection Output: `ridge_list`: A list containing all the ridges of the facet as Ridge objects """ d = C.shape[1] k = D.shape[1] Er_list = [] q = C.shape[0] E_c = np.setdiff1d(range(q), E) # E slices C_E = C[E, :] D_E = D[E, :] b_E = b[E, :] # E_c slices C_Ec = C[E_c, :] D_Ec = D[E_c, :] b_Ec = b[E_c] # dots S = C_Ec - np.dot(np.dot(D_Ec, linalg.pinv(D_E)), C_E) L = np.dot(D_Ec, null_space(D_E)) t = b_Ec - np.dot(D_Ec, np.dot(linalg.pinv(D_E), b_E)) if rank(np.hstack([C_E, D_E])) < k + 1: if verbose > 1: print("Doing recursive ESP call") u, s, v = linalg.svd(np.array([af]), full_matrices=1) sigma = s[0] v = v.T * u[0, 0] # Correct sign V_hat = v[:, [0]] V_tilde = v[:, range(1, v.shape[1])] Cnew = np.dot(S, V_tilde) Dnew = L bnew = t - np.dot(S, V_hat).flatten() * bf / sigma Anew = np.hstack([Cnew, Dnew]) xc2, yc2, cen2 = cheby_center(Cnew, Dnew, bnew) bnew = bnew - np.dot(Cnew, xc2).flatten() - np.dot(Dnew, yc2).flatten() Gt, gt, E_t = esp( Cnew, Dnew, bnew, centered=True, abs_tol=abs_tol, verbose=0) if (len(E_t[0]) == 0) or (len(E_t[1]) == 0): raise Exception( "ridge: recursive call did not return any equality sets") for i in range(len(E_t)): E_f = E_t[i] er = np.sort(np.hstack([E, E_c[E_f]])) ar = np.dot(Gt[i, :], V_tilde.T).flatten() br0 = gt[i].flatten() # Make orthogonal to facet ar = ar - af * np.dot(af.flatten(), ar.flatten()) br = br0 - bf * np.dot(af.flatten(), ar.flatten()) # Normalize and make ridge equation point outwards norm = np.sqrt(np.sum(ar * ar)) ar = ar * np.sign(br) / norm br = br * np.sign(br) / norm # Restore center br = br + np.dot(Gt[i, :], xc2) / norm if len(ar) > d: raise Exception("ridge: wrong length of new ridge!") Er_list.append(Ridge(er, ar, br)) else: if verbose > 0: print("Doing direct calculation of ridges") X = np.arange(S.shape[0]) while len(X) > 0: i = X[0] X = np.setdiff1d(X, i) if np.linalg.norm(S[i, :]) < abs_tol: continue Si = 
S[i, :] Si = Si / np.linalg.norm(Si) if np.linalg.norm(af - np.dot(Si, af) * Si) > abs_tol: test1 = null_space( np.vstack([ np.hstack([af, bf]), np.hstack([S[i, :], t[i]])]), nonempty=True) test2 = np.hstack([S, np.array([t]).T]) test = np.dot(test1.T, test2.T) test = np.sum(np.abs(test), 0) Q_i = np.nonzero(test > abs_tol)[0]
c = np.zeros(d + 1) c[0] = 1 Gup = np.hstack([-np.ones([Sq.shape[0], 1]), Sq]) Gdo = np.hstack([-1, np.zeros(Sq.shape[1])]) G = np.vstack([Gup, Gdo]) h = np.hstack([tq, 1]) Al = np.zeros([2, 1]) Ar = np.vstack([af, S[i, :]]) A = np.hstack([Al, Ar]) bb = np.hstack([bf, t[i]]) sol = solvers._solve_lp_using_cvxopt( c, G, h, A=A, b=bb) if sol['status'] == 'optimal': tau = sol['x'][0] if tau < -abs_tol: ar = np.array([S[i, :]]).flatten() br = t[i].flatten() # Make orthogonal to facet ar = ar - af * np.dot(af.flatten(), ar.flatten()) br = br - bf * np.dot(af.flatten(), ar.flatten()) # Normalize and make ridge equation point outwards norm = np.sqrt(np.sum(ar * ar)) ar = ar / norm br = br / norm # accumulate Er_list.append( Ridge(np.sort(np.hstack([E, E_c[Q]])), ar, br)) return Er_list def adjacent(C, D, b, rid_fac, abs_tol=1e-7): """Compute the (unique) adjacent facet. @param rid_fac: A Ridge_Facet object containing the parameters for a facet and one of its ridges. @return: (E_adj,a_adj,b_adj): The equality set and parameters for the adjacent facet such that:: P_{E_adj} = P intersection {x | a_adj x = b_adj} """ E = rid_fac.E_0 af = rid_fac.af bf = rid_fac.bf # E_r = rid_fac.E_r ar = rid_fac.ar br = rid_fac.br # shape d = C.shape[1] k = D.shape[1] # E_r slices C_er = C[E_r, :] D_er = D[E_r, :] b_er = b[E_r] # stack c = -np.hstack([ar, np.zeros(k)]) G = np.hstack([C_er, D_er]) h = b_er A = np.hstack([af, np.zeros(k)]) sol = solvers._solve_lp_using_cvxopt( c, G, h, A=A.T, b=bf * (1 - 0.01)) if sol['status'] != "optimal": print(G) print(h) print(af) print(bf) print(ar) print(br) print(np.dot(af, ar)) data = {} data["C"] = C data["D"] = D data["b"] = b sio.savemat("matlabdata", data) with open('polytope.p', 'wb') as f: pickle.dump(data, f) raise Exception( "adjacent: Lp returned status " + str(sol['status'])) opt_sol = np.array(sol['x']).flatten() dual_opt_sol = np.array(sol['z']).flatten() x_opt = opt_sol[range(d)] y_opt = opt_sol[range(d, d + k)] if is_dual_degenerate( 
c.flatten(), G, h, A, bf * (1 - 0.01), opt_sol, dual_opt_sol, abs_tol=abs_tol): # If degenerate, compute affine hull and take preimage E_temp = np.nonzero(np.abs(np.dot(G, opt_sol) - h) < abs_tol)[0] a_temp, b_temp = proj_aff( C_er[E_temp, :], D_er[E_temp, :], b_er[E_temp], expected_dim=1, abs_tol=abs_tol) E_adj = unique_equalityset(C, D, b, a_temp, b_temp, abs_tol=abs_tol) if len(E_adj) == 0: data = {} data["C"] = C data["D"] = D data["b"] = b data["Er"] = E_r + 1 data["ar"] = ar data["br"] = br data["Ef"] = E + 1 data["af"] = af data["bf"] = bf sio.savemat("matlabdata", data) raise Exception( "adjacent: equality set computation returned empty set") else: r = np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol E_adj = np.nonzero(r)[0] C_eadj = C[E_adj, :] D_eadj = D[E_adj, :] b_eadj = b[E_adj] af_adj, bf_adj = proj_aff(C_eadj, D_eadj, b_eadj, abs_tol=abs_tol) return E_adj, af_adj, bf_adj def proj_aff(Ce, De, be, expected_dim=None, abs_tol=1e-7): """Affine projection. Compute the set aff = {x | Ce x + De y = be} on the form aff = ({x | a x = b} intersection {Ce x + De y < be}). Input: Polytope parameters Ce, De and be Output: Constants a and b """ # Remove zero columns ind = np.nonzero(np.sum(np.abs(De), axis=0) > abs_tol)[0] D = De[:, ind] if D.shape[1] == 0: a = Ce b = be a_n, b_n = normalize(a, b) if expected_dim is not None: if expected_dim != b_n.size: raise Exception( "proj_aff: wrong dimension calculated in 1") return a_n.flatten(), b_n sh = np.shape(D.T) m = sh[0] n = sh[1] nDe = null_space(D.T) a = np.dot(nDe.T, Ce) b = np.dot(nDe.T, be) a_n, b_n = normalize(a, b) if expected_dim is not None: if expected_dim != b_n.size: raise Exception("proj_aff: wrong dimension calculated in 2") return a_n, b_n def is_dual_degenerate(c, G, h, A, b, x_opt, z_opt, abs_tol=1e-7): """Return `True` if pair of dual problems is dual degenerate. 
Checks if the pair of dual problems:: (P): min c'x (D): max h'z + b'y s.t Gx <= h s.t G'z + A'y = c Ax = b z <= 0 is dual degenerate, i.e. if (P) has several optimal solutions. Optimal solutions x* and z* are required. Input: `G,h,A,b`: Parameters of (P) `x_opt`: One optimal solution to (P) `z_opt`: The optimal solution to (D) corresponding to _inequality constraints_ in (P) Output: `dual`: Boolean indicating whether (P) has many optimal solutions. """ D = - G d = - h.flatten() mu = - z_opt.flatten() # mu >= 0 # Active constraints I = np.nonzero(np.abs(np.dot(D, x_opt).flatten() - d) < abs_tol)[0] # Positive elements in dual opt J = np.nonzero(mu > abs_tol)[0] # i, j i = mu < abs_tol # Zero elements in dual opt i = i.astype(int) j = np.zeros(len(mu), dtype=int) j[I] = 1 # 1 if active # Indices where active constraints have 0 dual opt L = np.nonzero(i + j == 2)[0] # sizes nI = len(I) nJ = len(J) nL = len(L) # constraints DI = D[I, :] # Active constraints DJ = D[J, :] # Constraints with positive lagrange mult DL = D[L, :] # Active constraints with zero dual opt dual = 0 if A is None: test = DI else: test = np.vstack([DI, A]) if rank(test) < np.amin(DI.shape): return True else: if len(L) > 0: if A is None: Ae = DJ else: Ae = np.vstack([DJ, A]) be = np.zeros(Ae.shape[0]) Ai = - DL bi = np.zeros(nL) sol = solvers._solve_lp_using_cvxopt( c= - np.sum(DL, axis=0), G=Ai, h=bi, A=Ae, b=be) if sol['status'] == "dual infeasible": # Dual infeasible -> primal unbounded -> value>epsilon return True if sol['primal objective'] > abs_tol: return True return False def unique_equalityset(C, D, b, af, bf, abs_tol=1e-7, verbose=0): """Return equality set E with the following property: P_E = {x | af x = bf} intersection P where P is the polytope C x + D y < b The inequalities have to be satisfied with equality everywhere on the face defined by af and bf. 
""" if D is not None: A = np.hstack([C, D]) a = np.hstack([af, np.zeros(D.shape[1])]) else: A = C a = af E = [] for i in range(A.shape[0]): A_i = np.array(A[i, :]) b_i = b[i] sol = solvers._solve_lp_using_cvxopt( c=A_i, G=A, h=b, A=a.T, b=bf) if sol['status'] != "optimal": raise Exception( "unique_equalityset: LP returned status " + str(sol['status'])) if np.abs(sol['primal objective'] - b_i) < abs_tol: # Constraint is active everywhere E.append(i) if len(E) == 0: raise Exception("unique_equalityset: empty E") return np.array(E) def unique_equalityset2(C, D, b, opt_sol, abs_tol=1e-7): A = np.hstack([C, D]) E0 = np.nonzero(np.abs(np.dot(A, opt_sol) - b) < abs_tol)[0] af, bf = proj_aff(C[E0, :], D[E0, :], b[E0], expected_dim=1) # stack ineq = np.hstack([af, np.zeros(D.shape[1])]) G = np.vstack([A, np.vstack([ineq, -ineq])]) h = np.hstack([b, np.hstack([bf, -bf])]) # shape m = G.shape[0] n = G.shape[1] # ht e = 1e-3 v = np.vstack([np.zeros([1, n]), np.eye(n)]).T v = v - np.array([np.mean(v, axis=1)]).T v = v * e ht = h + np.amin(-np.dot(G, v), axis=1) # stack H1 = np.hstack([G, -np.eye(m)]) H2 = np.hstack([G, np.zeros([m, m])]) H3 = np.hstack([np.zeros([m, n]), -np.eye(m)]) H = np.vstack([H1, np.vstack([H2, H3])]) h = np.hstack([ht, np.hstack([h, np.zeros(m)])]) c = np.hstack([np.zeros(n), np.ones(m)]) sol = solvers.lpsolve(c, H, h, solver='glpk') if not sol['status'] == "optimal": raise Exception( "unique_equalityset: LP returned status " + str(sol['status'])) opt_sol2 = np.array(sol['x']).flatten() x = opt_sol2[range(n)] s = opt_sol2[range(n, len(opt_sol2))] E = np.nonzero(s > abs_tol)[0] print(E) E = np.sort(E[np.nonzero(E < C.shape[0])]) # Check that they define the same projection at, bt = proj_aff(C[E, :], D[E, :], b[E]) if bt.size != 1 or np.sum(np.abs(at - af)) + np.abs(bt - bf) > abs_tol: raise Exception("unique_equalityset2: affine hulls not the same") return E def cheby_center(C, D, b): """Calculate Chebyshev center for the polytope `C x + D y <= b`. 
Input: `C, D, b`: Polytope parameters Output: `x_0, y_0`: The chebyshev centra `boolean`: True if a point could be found, False otherwise. """ d = C.shape[1] k = D.shape[1] A = np.hstack([C, D]) dim = np.shape(A)[1] c = - np.r_[np.zeros(dim), 1] norm2 = np.sqrt(np.sum(A * A, axis=1)) G = np.c_[A, norm2] sol = solvers.lpsolve(c, G, h=b, solver='glpk') if sol['status'] == "optimal": opt = np.array(sol['x'][0:-1]).flatten() return opt[range(d)], opt[range(d, d + k)], True else: return np.zeros(d), np.zeros(k), False def normalize(AA, bb, abs_tol=1e-7): """Normalize `A x = b` such that `A'A = 1` and `b > 0`. Also, remove duplicate lines. """ if AA.size == 0: return AA, bb dim = AA.size / bb.size A = AA.copy().reshape(bb.size, dim) b = bb.copy().reshape(bb.size, 1) # Remove zero lines keepind = np.nonzero( np.sum(np.abs(np.hstack([A, b])), axis=1) > abs_tol)[0] A = A[keepind, :] b = b[keepind] # Normalize anorm = np.sqrt(np.sum(A * A, axis=1)) for i in range(len(anorm)): A[i, :] = A[i, :] * np.sign(b[i, 0]) / anorm[i] b[i, 0] = np.sign(b[i, 0]) * b[i, 0] / anorm[i] # Remove duplicate rows keep_row = [] for i in range(len(anorm)): unique = True for j in range(i + 1, len(anorm)): test = (np.sum(np.abs(A[i, :] - A[j, :])) + np.abs(b[i, 0] - b[j, 0])) if test < abs_tol: unique = False break if unique: keep_row.append(i) A_n = A[keep_row, :] b_n = b[keep_row, 0] # Return flat A if only one row if A_n.size == dim: A_n = A_n.flatten() return A_n, b_n.flatten() def rank(A, eps=1e-15): u, s, vh = linalg.svd(A) m = A.shape[0] n = A.shape[1] tol = np.amax([m, n]) * np.amax(s) * eps return np.sum(s > tol) def null_space(A, eps=1e-15, nonempty=False): """Returns the null space N_A to matrix A such that A N_A = 0.""" u, s, v = linalg.svd(A, full_matrices=1) m = A.shape[0] n = A.shape[1] tol = np.amax([m, n]) * np.amax(s) * eps rank = np.sum(s > tol) N_space = v[range(rank, n), :].T if nonempty and (len(N_space) == 0): N_space = v[range(np.amax(n - 1, 1), n), :] return N_space
Q = np.nonzero(test < abs_tol)[0] X = np.setdiff1d(X, Q) # Have Q_i Sq = S[Q_i, :] tq = t[Q_i]
random_line_split
learnPython.py
# 注:python下的变量:不需要预先声明变量的类型,变量的类型和值在赋值的那一刻被初始化(声明和定义的过程一气完成) # 在python中,每一个变量在内存中创建,我们可以通过变量来查看内存中的值 # 类似于c的指针】 # 示例:x = 5 # 存储过程:系统先是找了一块内存,将5存储进去,紧接着x志向当前的这块内存 # id(x) -> 地址:104113976,这个地址当前存储为5 # 字面常量:5、1.23、或如'This is a string'这样的文本 # 数字的类型分为:Integers Floats # 字符串:可以使用 '' 或 “”,用三引号 ''' 或 """ 来指定多行字符串 # 注:内置的type()函数可以用来查询变量所指的对象类型 # 一、format函数 # format()函数:这个方法会把字符串当作一个模板,通过传入的参数进行格式化 # 这个格式化的模板使用大括号{}作为特殊字符,数字是可选项 # eg: '{0} is {1} years old'.format(name,age) # '{} is {} years old'.format(name,age) # 1> 字符串的参数使用{NUM}进行表示, 0表示第一个参数, 1表示第二个参数, 以后顺次递加; # 2> 使用":", 指定代表元素需要的操作, 如":.3"小数点三位, '{0:.3f}',format(1.0/3) ":8"占8个字符空间等 # 3> 使用(^)定义'___hello___'字符串长度为11, _是用来补充字符的 '{0:_^11}'.format('hello') # 二、 # print使用注意: # 1> 使用关键字输出: print('{name} wrote {book}'.format(name='Swaroop',book='A vyty of python')) # 2> print会换行,可以通过end指定其应以空白结尾(不换行) # eg: print('a',end='') print('b',end='') -> ab # print('a',end=' ') print('b',end=' ') -> a b # 注:每当需要提供命令行参数时,点击Run->Edit Configurations 并在Script parameters:部分输入相应参数,并点击ok # 注:在python中认为:同样缩进的代码块是一个模块 # 运算符与表达式 # + - * ** / // % << >> & | ^(按位异或) ~(按位取反) < > <= >= == != not(布尔非) nd or # ** 乘方 // 整除 # 读取键盘输入 :raw_input input # 三、函数 -> 用def定义 # 1> 定义全局变量 :global EG: global x # 2> 函数的默认参数 EG: def say(message, time=1): # print(message*times) # 若调用:say('hello',3) -> 结果:hellohellohello # 3> 关键字参数 EG:def func(a, b=5, c=10) # print('a is',a,'and b is',b,'and c is',c) # 调用:func(24,c=35) -> c=35:就是关键字参数赋值 # 4> 可变参数:定义的函数里面能够有任意数量的变量,也就是参数数量是可变的 # 示例: # def total(a = 5, *numbers,**phonebook): # print('a',a) # for i in numbers: # print('number ',i) # for first,second in phonebook.items(): # print(first,': ',second) # # total(100,1,2,3,John=1234,merry=5678,Tom=6789) # 解释: # *numbers: 称为数组参数 # **phonebook: 称为字典参数 # 在调用total函数式,total中匹配完定义好的参数,剩余的参数以元素的形式存储在args中,字典参数的值存储在phonebook中 # 5> return语句 # 每个函数都在其末尾隐含了一句 return None # 注:Python中的pass语句用于指示一个没有内容的语句块 # EG: def some_function(): # pass # 6> DocStrings 文档字符串 ''' *** ''' # 可以通过函数的 
__doc__属性来获取函数的文档字符串属性 # 示例: # def Mymax(a,b): # ''' Mymax函数用来打印两个数的最大值''' # if a>b: # return a # elif a<b: # return b # # print(Mymax(5,10)) # print(Mymax.__doc__) -> 输出:Mymax函数用来打印两个数的最大值 # 四、模块 # 模块:就是包含代码的文件,不一定是python代码,有四种代码类型的模板: # 1> 使用python写的程序(.py文件) # 2> c或c++扩展(已编译为共享库或DLL文件) # 3> 包 (包含多个模块) # 4> 内建模块 (使用c编写并已链接到python解释器内) # # (1)为什么用模块 # 使用模块可以提高代码的可维护性和重复使用,还可以避免函数名和变量名冲突 # (2)import导入模块 # 模块可以包含可执行的语句和函数的定义,这些语句的目的是初始化模块,它们只在模块名第一次遇到导入import语句时才执行 # 即:python优化手段:第一次导入后就将模块名加载到了内存,后续再import引入该模块,只是对该模块对象增加了一次引用,不会重新执行模块内的语句 # (3)模块名称空间 # 每个模块都有一个独立的名称空间,定义在这个模块中函数,把这个模块的名称空间当做全局名称空间,这样我们在编写自己的模块时,就不担心我们定义在自己模块中的全局变量会在被导入时,与使用者的全局变量冲突 # (4)模块的重命名 # EG: import my_module as new_module # EG: #mysql.py # def sqlparse(): # print('from mysql sqlparse') # #oracle.py # def sqlprase(): # print('from oracle sqlprase') # #test.py # db_type=input('>>:') # 输入mysql或oracle # if db_type == 'mysql' # import mysql as db # elif db_type == 'oracle' # import oracle as db # db.sqlprase() # 注:import os # print(os.getcwd()) #用来查看程序目前所处的目录 # 1> form...import语句 # 示例:from math import sqrt -> 将math.sqrt引入到当前文件 应该避免使用,可能会出现名称冲突 # print('Square root of 16 is',sqrt(16)) # 示例: from my_module import * -> 把my_module中所有的不是一下划线(_)开头的名字都导入到当前位置 应该避免使用 # 注:在my_module.py中新增一行 # __all__=['money','read1'] -> 这样在另外一个文件中用from my_module import * 就只能导入列表中规定的这两个名字 # 2> 模块的 __name__属性 # 可以通过模块的全局变量__name__来查看模块名: # a. 当做脚本运行: __name__ == '__main__' # b. 
当做模块导入: __name__ == 模块名 # 作用:用来控制.py文件在不同的应用场景下执行不同的逻辑 # (5) 编译python文件 # python解释器会在__pycache__目录中下缓存每个模块编译后的版本,格式为: module.version.pyc # 通常会包含python的版本号 EG: my_module.py模块会被缓存成__pycache__/my_module.cpython-33.pyc # dir函数:能够返回由对象多定义的名称列表。 # 若这一对象是一个模块,则该列表会包括函数内所定义函数、类、变量 # dir函数接受参数:若参数是模块名称,函数将会返回这一指定模块的名称列表 # dir无参:函数将返回当前模块的名称列表 # 五、数据结构 # (1) List 列表 -> 可以使用下标访问元素 # 示例: list1 = ['hello','world','Runnable'] # list2 = [1,2,3,4] # list3 = ['hello','world',1,2] # 列表更新: list1[2] = 'china' # 列表删除: del list1[2] # 列表截取: list1[1:] <-> list1[1,3]:下标取值范围 [1,3) # 列表拼接: list2 + [5,6,7] # 1> 函数: len(list) max(list) min(list) list(seq) -> 将元祖转换为列表 # 2> 方法: list.append(obj) list.count(obj) list.extend(seq)->在列表末尾一次性追加另一个序列中的多个值 # list.index(obj) list.insert(insert,obj) list.pop(obj)->移除列表中的一个元素,默认是最后一个元素 # list.remove(obj) list.reverse() list.sort() list.clear() list.copy() # (2) 元组 用小括号()表示 # 注:若元组中只有一个元素,需加上逗号 tmp = (50,) # 元组中的元素之是不允许修改的,但可以对元组进行连接组合 # 元组中的元素值是不允许删除的,但可以使用del语句来删除整个数组 # (3) 字典 # 表现形式:d = {key1:value1, key2:value2 ... ...} # 修改字典:dict = {'Name': 'Runoob', 'Age': 7, 'Class': 'First'} # dict['Age'] = 8; # 更新 Age # dict['School'] = "菜鸟教程" # 添加信息 # 删除字典元素: dict = {'Name': 'Runoob', 'Age': 7, 'Class': 'First'} # del dict['Name'] # 删除键 'Name' # dict.clear() # 清空字典 # del dict # 删除字典 # 注:key只能出现一次;key必须不可变(EG: 可以使用数字、字符串、元组,不可以使用列表) # 1> 内置函数 # str(dict) 以字符串的形式输出字典 # 2> 内置方法 # dict.get(key) dict.item()->以列表返回可以遍历的(key-value)元组数据 # dict.key() dict.values() pop(key)->删除字典给定key对应的value... # (4)集合 # 集合是一个无序不重复元素的集,用{}创建集合 # (5)字符串 # 字符串都是str类下的对象 # 字符串内置函数: ... ... # 注:引用 # eg: list1 = [1,2,3] # s1 = list1 #s1和list1指向同一对象 # eg:s2 = list1[:] #通过生成一份完整的切片制作一份list1的副本 # del s2[0] #这个操作只会对s2产生影响 # 六、迭代器和生成器 # (1)迭代器的两个基本方法: iter() 和 next() # 示例:list=[1,2,3,4] # it=iter(list) # for x in it # print(x,end=' ') # 示例:it1=iter(list) # print(next(it)) #输出迭代器的下一个元素 # (2)生成器 # 在python中,使用了yield的函数被称为生成器 # 在调用生成器运行的过程中,每次遇到yield时函数会暂停并保存当前所有的运行信息,并在下一次执行next()方法时从当前位置继续执行 ? 
# 七、面向对象编程 # 1> 方法 # 类成员方法:必须多加一个参数(self)在参数列表开头,在调用该类方法时不需要给该参数赋值 # Python中的self <--> C++中的this指针 # 类方法(静态方法) :用 @classmethod 声明 # 2> __init__方法 # 会在类对象被实例化时立即执行 (相当于C++中的构造方法) # 3> 类变量与对象变量 # 类变量:可以被属于该类的所有实例访问。该类变量只有一个副本,当任何一个对象对类变量做出改变时,发生的变动将在其它所有实例中都得到体现 以"类名."调用 # 对象变量:有类对象所拥有。一般在 __init__()方法中声明 以"self."开头 # 4> 继承 # 要想使用继承,需要在派生类后面跟一个包含基类名称的元组 EG:class Tearch(People): ... # 注:若在派生类中定义了 __init__方法,需要自己显示的调用基类的__init__方法 -> 此时调用基类的__init__方法需要在参数列表中写入 self # 若在派生类中没有定义__init__方法,python会自动调用基类的构造函数 # 多重继承:继承元组中有超过一个类 # 注:所有类成员都是公开的 # 但若数据成员(变量、方法)使用双下划线作为前缀(eg: __privatervar),Python会使用名称调整来使其有效的成为一个私有变量 或 私有方法 类外不能访问 # 可以使用 对象._classname__** : 调用类(classname)的私有成员** 该私有成员可以是 类变量 对象变量 私有方法 # 八、文件IO # (1) open函数 # -打开一个文件,创建一个file对象 # file object = open(file_name [,access_mode] [,buffering]) # - access_mode: 决定打开文件的模式 # - r: 以只读方式打开文件。文件的指针将会放在文件的开头。这是默认模式。 # - rb: 以二进制格式打开一个文件用于只读。文件指针将会放在文件的开头。这是默认模式。一般用于非文本文件如图片等。 # - r+: 打开一个文件用于读写。文件指针将会放在文件的开头 # - rb+: 以二进制格式打开一个文件用于读写。文件指针将会放在文件的开头。一般用于非文本文件如图片等。 # - w: 打开一个文件只用于写入。如果该文件已存在则将其覆盖。如果该文件不存在,创建新文件。 # - wb: 以二进制格式打开一个文件只用于写入。如果该文件已存在则将其覆盖。如果该文件不存在,创建新文件。一般用于非文本文件如图片等。 # - w+: 打开一个文件用于读写。如果该文件已存在则将其覆盖。如果该文件不存在,创建新文件。 # - wb+: 以二进制格式打开一个文件用于读写。如果该文件已存在则将其覆盖。如果该文件不存在,创建新文件。一般用于非文本文件如图片等。 # - a: 打开一个文件用于追加。如果该文件已存在,文件指针将会放在文件的结尾。也就是说,新的内容将会被写入到已有内容之后。如果该文件不存在,创建新文件进行写入。 # - ab: 以二进制格式打开一个文件用于追加。如果该文件已存在,文件指针将会放在文件的结尾。也就是说,新的内容将会被写入到已有内容之后。如果该文件不存在,创建新文件进行写入。 # - a+: 打开一个文件用于读写。如果该文件已存在,文件指针将会放在文件的结尾。文件打开时会是追加模式。如果该文件不存在,创建新文件用于读写。 # - ab+: 以二进制格式打开一个文件用于追加。如果该文件已存在,文件指针将会放在文件的结尾。如果该文件不存在,创建新文件用于读写。 # # (2) python读取文件函数 read() readline() readlines() -> 取决于文件指针的位置 # 1> read() 一次性读取文本全部的内容,以字符串的形式返回结果 # 2> readline() 只读取文本第一行的内容,以字符串的形式返回结果 # 3> readlines() 读取文本所有内容,并且以数列的格式返回结果,一般配合for in 使用 # 示例:读取文件"file1.txt" # - 使用readline() # while True: # str = file.readline() # if(0 == len(str)): # break # print(str, end='') # - 使用readlines() # for str in file.readlined(): # print(str,end = '') # (3) close()方法 # 
-刷新缓冲区里任何还没写入的信息,并关闭该文件,这之后便不能再进行写入。 # 注:file.flush():刷新文件内部缓冲,直接把缓冲区的数据立刻写入文件,而不是被动的等待输出缓冲区写入 # (4) write()方法 # - 方法可将任何字符串写入一个打开的文件。需要重点注意的是,Python字符串可以是二进制数据,而不是仅仅是文字。 # - 注:write()方法不会在字符串的结尾添加换行符('\n'),若需要换行,需要自己在字符串中添加 \n # - 语法:fileObject.write(string) # 注:在write内容之后,直接read文件输出会为空,是因为指针已经在内容末尾了 # 为了保证无论是否出错都能正确的关闭文件,我们可以使用try...finally来实现 # (5) 文件定位 # tell()方法:告诉文件内的当前位置 # seek(offset [,from]):改变当前文件的位置 # - offset:表示要移动的字节数 # - from:指定开始移动字节的参考位置 # - 0 : 意味着将文件的开头作为移动字节的参考位置 # - 1 : 使用当前位置作为参考位置 # - 2 : 将文件的末尾作为参考位置 # (6) 重命名和删除文件 # - 需导入python的os模块 # os.rename(current_file_name,new_file_name) # os.remove(file_name) # (7) 目录操作 # os.mkdir("newdir"): 使用os模块的mkdir()方法在当前目录下创建新的目录 # os.chdir("newdir"): 改变当前目录路劲 # os.getcwd():显示当前的工作目录 # os.rmdir("dir"):删除目录,在删除这个目录之前,它的所有内容都应该先被清除 # os.chmod(path, mode):更改权限 # os.fstat(fd):返回文件描述符fd的状态,像stat()。 # os.fstatvfs(fd):返回包含文件描述符fd的文件的文件系统的信息,像 statvfs() # os.removedirs(path):递归删除目录。 # os.pipe():创建一个管道. 返回一对文件描述符(r, w) 分别为读和写 # 九、异常处理 # 异常是一个Python对象,表示一个错误 # 捕获异常: # 语法:try...except...else # 1> 可以通过raise语句来引发一次异常 # '''自定义异常:测试异常处理''' # class ShortLengthException(Exception): # def __init__(self,len,minlen): # self.len = len # self.minlen = minlen # # 异常测试 # minlen = 3 # str = input('please input >> ') # try: # if len(str) < minlen: # raise ShortLengthException(len(str), minlen) # except IOError: # print("input error") # except ShortLengthException as ex: # print("ShortLengthException :The input len was {},excepted at least {}".format(ex.len,ex.minlen)) # else: # print("succeed!") # 2> try...finally # 示例: # import time # ''' 文件异常处理 ''' # file = None # try: # file = open("./test.txt","r+") # str = file.readline() # print(str,end="") # print("press Ctrl+C",end='') # time.sleep(5) # except IOError: # print("Can not find file") # except KeyboardInterrupt: #用户终端执行 # print("raised KeyboardInterrupt") # else: # if file: # file.close() # 十、 网络编程 # 1> python使用socket()函数来创建套接字 # s = socket.socket(family,type,proto) # - family:可以选择 
AF_INET(用于 Internet 进程间通信) 或者 AF_UNIX(用于同一台机器进程间通信) # - type:套接字类型,可以是 SOCKET_STREAM(流式套接字,主要用于 TCP 协议)或者SOCKET_DGRAM(数据报套接字,主要用于 UDP 协议) # - protocol: 一般不填默认为0 # 2> socket对象内建方法 # -- 服务器套接字 # s.bind(address) 绑定地址(host,port)到套接字, 在AF_INET下,以元组(host,port)的形式表示地址。 # s.listen(backlog) 开始TCP监听。backlog指定在拒绝连接之前,操作系统可以挂起的最大连接数量。该值至少为1,大部分应用程序设为5就可以了。 # s.accept() 接受TCP连接并返回(conn,address),其中conn是新的套接字对象,可以用来接收和发送数据。address是连接客户端的地址。 # -- 客户端套接字 # s.connect() 主动初始化TCP服务器连接,。一般address的格式为元组(hostname,port),如果连接出错,返回socket.error错误。 # 3> 公共用途的套接字函数 # s.recv() 接收TCP数据,数据以字符串形式返回,bufsize指定要接收的最大数据量。flag提供有关消息的其他信息,通常可以忽略。 # s.send() 发送TCP数据,将string中的数据发送到连接的套接字。返回值是要发送的字节数量,该数量可能小于string的字节大小。 # s.sendall() 完整发送TCP数据,完整发送TCP数据。将string中的数据发送到连接的套接字,但在返回之前会尝试发送所有数据。成功返回None,失败则抛出异常。 # s.recvfrom() 接收UDP数据,与recv()类似,但返回值是(data,address)。其中data是包含接收数据的字符串,address是发送数据的套接字地址。 # s.sendto() 发送UDP数据,将数据发送到套接字,address是形式为(ipaddr,port)的元组,指定远程地址。返回值是发送的字节数。 # s.close() 关闭套接字 # s.fileno() 返回套接字的文件描述符。 # s.setblocking(flag) 如果flag为0,则将套接字设为非阻塞模式,否则将套接字设为阻塞模式(默认值)。非阻塞模式下,如果调用recv()没有发现任何数据,或send()调用无法立即发送数据,那么将引起socket.error异常。 # 注:send、recv发送的是bytes,用户可以看清的是str # encode(): 将str编码为指定的bytes # decode(): 如果我们从网络或磁盘上读取了字节流,那么读到的数据就是bytes -> bytes变为str # 示例: # n = s.send(send_data.encode()) # recv_data = s.recv(1024).decode() # https://www.cnblogs.com/nulige/p/6235531.html?utm_source=itdadao&utm_medium=referral 使用到的模块解析 # 注:socketserver详解 # SocketServer框架式一个基本的socket服务器框架,使用了threading来处理多个客户端的连接,使用seletor模块来处理高并发访问 # SocketServer内部使用IO多路复用以及"多进程"和"多线程",从而实现并发处理客户端请求 # SocketServer提供5个基本服务类: # -请求处理类 # - BaseServer 基类,不直接对外服务 # - TCPServer:派生类,针对TCP套接字流 # - UnixStreamServer针对UNIX域套接字,不常用 # - UDPServer:派生类,针对UDP数据报套接字 # - UnixDatagramServer针对UNIX域套接字,不常用 # 请求处理类有三种方法: # - setup() 也就是在handle()之前被调用,主要的作用就是执行处理请求之前的初始化相关的各种工作。默认不会做任何事 # - handle() 做那些所有与处理请求相关的工作。默认也不会做任何事。他有数个实例参数:self.request self.client_address self.server # - finish() 在handle()方法之后会被调用,他的作用就是执行当处理完请求后的清理工作,默认不会做任何事 # # 
用socketserver创建一个服务的步骤: # 1、创建一个request handler class(请求处理类),合理选择StreamRequestHandler和DatagramRequestHandler之中的一个作为父类(当然,使用BaseRequestHandler作为父类也可),并重写它的handle()方法。 # 2、实例化一个server class对象,并将服务的地址和之前创建的request handler class传递给它。 # 3、调用server class对象的handle_request() 或 serve_forever()方法来开始处理请求。 # # 十一、多线程 # Python使用线程有两种方式:函数 或者 用类来包装线程对象 # (1) thread模块的start_new_thread()函数 # 语法:start_new_thread(function,args[,kwargs]) # - function: 线程函数 # - args: 传递给线程函数的参数,必须是tuple类型(元组类型) # - kwargs: 可选参数 # (2)threading 模块除了包含 _thread 模块中的所有方法外,还提供的其他方法: # threading.currentThread(): 返回当前的线程变量。 # threading.enumerate(): 返回一个包含正在运行的线程的list。正在运行指线程启动后、结束前,不包括启动前和终止后的线程。 # threading.activeCount(): 返回正在运行的线程数量,与len(threading.enumerate())有相同的结果。 # # 除了使用方法外,线程模块同样提供了Thread类来处理线程,Thread类提供了以下方法: # run(): 用以表示线程活动的方法。 # start():启动线程活动。 # join([time]): 等待至线程中止。这阻塞调用线程直至线程的join() 方法被调用中止-正常退出或者抛出未处理的异常-或者是可选的超时发生。 # isAlive(): 返回线程是否活动的。 # getName(): 返回线程名。 # setName(): 设置线程名。 # 十二、Gevent # Gevent是一个基于greenlet的Python的并发框架,以微线程greenlet为核心,使用了epoll事件监听机制以及诸多其他优化而变得高效。 # gevent每次遇到io操作,需要耗时等待时,会自动跳到下一个协程继续执行 # gevent是第三方库,通过greenlet实现协程的基本思想是: # - 当一个greenlet遇到IO操作时,比如访问网络,就自动切换到其他的greenlet,等待IO操作完成,再在适当的时候切换回来继续执行。 # - 由于IO操作非常耗时,经常使程序处于等待状态,有了gevent为我们自动切换协程,就保证总有greenlet在运行,而不是等待IO # - 在gevent里面,上下文切换是通过yielding(退位)来完成 -> 通过调用gevent.sleep(***),让它们yield向对方 # (1) 协程,又称微线程,纤程 # - 协程的特点在于是一个线程执行 # - 最大的优势就是协程极高的执行效率。因为子程序切换不是线程切换,而是由程序自身控制,因此,没有线程切换的开销,和多线程比,线程数量越多,协程的性能优势就越明显。 # - 第二大优势就是不需要多线程的锁机制,因为只有一个线程,也不存在同时写变量冲突,在协程中控制共享资源不加锁,只需要判断状态就好了,所以执行效率比多线程高很多。 # - 因为协程是一个线程执行,那怎么利用多核CPU呢?最简单的方法是多进程+协程,既充分利用多核,又充分发挥协程的高效率,可获得极高的性能 # (2) Greenlets # 在gevent中用到的主要模式是Greenlet,它是以C扩展模块形式接入Python的轻量级协程 # - 创建Greenlets # import gevent # from gevent import Greenlet # # def foo(message, n): # """ # Each thread will be passed the message, and n arguments # in its initialization. 
# """ # gevent.sleep(n) # print(message) # # # thread1 = Greenlet.spawn(foo, "Hello", 1) # thread2 = gevent.spawn(foo, "I live!", 2) # thread3 = gevent.spawn(lambda x: (x+1), 2) # # threads = [thread1, thread2, thread3] # # # Block until all threads complete. # gevent.joinall(threads) # 十三、python调用shell命令 # (1) os模块的system方法 # system方法:会创建子进程执行外部程序 # 示例: os.system("ls") # (2) os模块popen方法 # popen方法:可以的搭配shell命令的返回值 os.popen(cmd)后,需要在调用read()或者readlines()这两个命令,输出结果 # 示例:os.popen("ls").read() # (3) commands模块 # 使用commands模块的getoutput方法,这样的方法同popend的差别在于popen返回的是一个文件句柄,而本方法将外部程序的输出结果当作字符串返回。非常多情况下用起来要更方便些。 # 主要方法: # commands.getstatusoutput(cmd) 返回(status, output) # commands.getoutput(cmd) 仅仅返回输出结果 # commands.getstatus(file) 返回ls -ld file的运行结果字符串,调用了getoutput。不建议使用此方法 # # (4) subprocess模块 # 使用subprocess模块能够创建新的进程。能够与新建进程的输入/输出/错误管道连通。并能够获得新建进程运行的返回状态。使用subprocess模块的目的是替代os.system()、os.popen*()、commands.*等旧的函数或模块。 # subprocess.call(["some_command","some_argument","another_argument_or_path"])
# subprocess.call(command,shell=True) # subprocess.Popen(command,shell=True) # 假设command不是一个可运行文件。shell=True不可省。 # 示例: # from subprocess import call # call(['ls','-l']) # 或 # from subprocess import Popen # Popen(['ls','-l'])
random_line_split
main.rs
/** shorturl is a web server that can host shortened URLs. ## Example usage Creating a link: ``` $ curl -X POST 127.0.0.1:8080/tsauvajon -d "https://linkedin.com/in/tsauvajon" /tsauvajon now redirects to https://linkedin.com/in/tsauvajon ``` Using it redirects us: ``` $ curl 127.0.0.1:8080/tsauvajon -v * Trying 127.0.0.1... * TCP_NODELAY set * Connected to 127.0.0.1 (127.0.0.1) port 8080 (#0) > GET /tsauvajon HTTP/1.1 > Host: 127.0.0.1:8080 > User-Agent: curl/7.64.1 > Accept: * / * > < HTTP/1.1 302 Found < content-length: 51 < location: https://linkedin.com/in/tsauvajon < date: Wed, 19 May 2021 17:36:49 GMT < * Connection #0 to host 127.0.0.1 left intact redirecting to https://linkedin.com/in/tsauvajon...* Closing connection 0 ``` */ use actix_web::{error, get, post, web, App, HttpResponse, HttpServer, Responder}; use futures::StreamExt; use std::collections::HashMap; use std::sync::RwLock; use url::Url; const MAX_SIZE: usize = 1_024; // max payload size is 1k const RANDOM_URL_SIZE: usize = 5; // ramdomly generated URLs are 5 characters long type Db = web::Data<RwLock<HashMap<String, String>>>; #[get("/{id}")] async fn browse(db: web::Data<Db>, web::Path(id): web::Path<String>) -> impl Responder { match db.read() { Ok(db) => match db.get(&id) { None => Err(error::ErrorNotFound("not found")), Some(url) => Ok(HttpResponse::Found() .header("Location", url.clone()) .body(format!("redirecting to {}...", url))), }, Err(err) => { println!("accessing the db: {}", err); Err(error::ErrorInternalServerError(err.to_string())) } } } fn hash(input: &str) -> String { blake3::hash(input.as_bytes()).to_hex()[..RANDOM_URL_SIZE].to_string() } async fn read_target(mut payload: web::Payload) -> Result<String, String> { let mut body = web::BytesMut::new(); while let Some(chunk) = payload.next().await { let chunk = chunk.or_else(|err| Err(err.to_string()))?; // limit max size of in-memory payload if (body.len() + chunk.len()) > MAX_SIZE { return Err("overflow".to_string()); } 
body.extend_from_slice(&chunk); } String::from_utf8(body[..].to_vec()) .or_else(|err| Err(format!("invalid request body: {}", err))) } fn create_short_url( db: web::Data<Db>, target: String, id: Option<String>, ) -> Result<String, String>
#[post("/{id}")] async fn create_with_id( db: web::Data<Db>, payload: web::Payload, web::Path(id): web::Path<String>, ) -> impl Responder { let target = match read_target(payload).await { Ok(target) => target, Err(err) => return Err(error::ErrorBadRequest(err)), }; create_short_url(db, target, Some(id)).or_else(|err| Err(error::ErrorBadRequest(err))) } #[post("/")] async fn create_random(db: web::Data<Db>, payload: web::Payload) -> impl Responder { let target = match read_target(payload).await { Ok(target) => target, Err(err) => return Err(error::ErrorBadRequest(err)), }; create_short_url(db, target, None).or_else(|err| Err(error::ErrorBadRequest(err))) } #[actix_web::main] async fn main() -> std::io::Result<()> { let db: Db = web::Data::new(RwLock::new(HashMap::new())); HttpServer::new(move || { App::new() .data(db.clone()) .service(browse) .service(create_random) .service(create_with_id) }) .bind("127.0.0.1:8080")? .run() .await } #[cfg(test)] mod tests { use super::*; #[test] fn test_hash() { assert_eq!("4cca4", hash("something")); assert_eq!("284a1", hash("something else")); } #[test] fn test_create_short_malformed_url() { let db: Db = web::Data::new(RwLock::new(HashMap::new())); let target = "this is not a valid URL".to_string(); let id = Some("hello".to_string()); assert_eq!( Err("malformed URL: relative URL without a base".to_string()), create_short_url(web::Data::new(db), target, id) ); } #[test] fn test_create_short_url() { let db: Db = web::Data::new(RwLock::new(HashMap::new())); let target = "https://google.com".to_string(); let id = "hello".to_string(); create_short_url(web::Data::new(db.clone()), target.clone(), Some(id.clone())).unwrap(); let db = db.read().unwrap(); let got = db.get(&id).unwrap(); assert_eq!(&target, got); } #[test] fn test_create_short_url_hashed_id() { let db: Db = web::Data::new(RwLock::new(HashMap::new())); let target = "https://google.com"; create_short_url(web::Data::new(db.clone()), target.to_string(), None).unwrap(); let id = 
hash(target); let db = db.read().unwrap(); let got = db.get(&id).unwrap(); assert_eq!(&target, got); } #[test] fn test_create_short_url_already_exists() { let id = "hello".to_string(); let mut db: HashMap<String, String> = HashMap::new(); db.insert(id.clone(), "some existing value".to_string()); let db: Db = web::Data::new(RwLock::new(db)); let target = "https://google.com".to_string(); assert_eq!( Err("already registered".to_string()), create_short_url(web::Data::new(db), target, Some(id)) ); } } #[cfg(test)] mod integration_tests { use super::*; use actix_web::{ body::Body, http::{HeaderValue, StatusCode}, test, }; // create a new custom shorturl #[actix_rt::test] async fn integration_test_create_custom_shortened_url() { let req = test::TestRequest::post() .uri("/hello") .set_payload("https://hello.world") .to_request(); let db: Db = web::Data::new(RwLock::new(HashMap::new())); let mut app = test::init_service(App::new().data(db.clone()).service(create_with_id)).await; let resp = test::call_service(&mut app, req).await; assert_eq!(resp.status(), StatusCode::OK); let db = db.read().unwrap(); assert_eq!(db.get("hello"), Some(&"https://hello.world".to_string())); assert_eq!(db.get("wwerwewrew"), None); } // create a new random shorturl #[actix_rt::test] async fn integration_test_create_random_shortened_url() { let req = test::TestRequest::post() .uri("/") .set_payload("https://hello.world") .to_request(); let db: Db = web::Data::new(RwLock::new(HashMap::new())); let mut app = test::init_service(App::new().data(db.clone()).service(create_random)).await; let resp = test::call_service(&mut app, req).await; assert_eq!(resp.status(), StatusCode::OK); let db = db.read().unwrap(); assert_eq!( db.get(&hash("https://hello.world")), Some(&"https://hello.world".to_string()) ); assert_eq!(db.get("wwerwewrew"), None); } // follow an existing shorturl #[actix_rt::test] async fn integration_test_use_shortened_url() { let req = test::TestRequest::get().uri("/hi").to_request(); let 
mut db: HashMap<String, String> = HashMap::new(); db.insert("hi".into(), "https://linkedin.com/in/tsauvajon".into()); let mut app = test::init_service( App::new() .data(web::Data::new(RwLock::new(db))) .service(browse), ) .await; let mut resp = test::call_service(&mut app, req).await; assert_eq!(resp.status(), StatusCode::FOUND); let body = resp.take_body(); let body = body.as_ref().unwrap(); assert_eq!( &Body::from("redirecting to https://linkedin.com/in/tsauvajon..."), body ); assert_eq!( resp.headers().get("Location"), Some(&HeaderValue::from_str("https://linkedin.com/in/tsauvajon").unwrap()) ) } // try to follow a shortened URL that doesn't exist #[actix_rt::test] async fn integration_test_link_miss() { let req = test::TestRequest::get() .uri("/thislinkdoesntexist") .to_request(); let db: Db = web::Data::new(RwLock::new(HashMap::new())); let mut app = test::init_service(App::new().data(db).service(browse)).await; let mut resp = test::call_service(&mut app, req).await; assert_eq!(resp.status(), StatusCode::NOT_FOUND); let body = resp.take_body(); let body = body.as_ref().unwrap(); assert_eq!(&Body::from("not found"), body); assert_eq!(resp.headers().get("Location"), None) } // try to add a link for an already existing short-url #[actix_rt::test] async fn integration_test_collision() { let req = test::TestRequest::post() .uri("/alreadyexists") .set_payload("https://something.new") .to_request(); let mut db: HashMap<String, String> = HashMap::new(); db.insert( "alreadyexists".into(), "https://github.com/tsauvajon".into(), ); let mut app = test::init_service( App::new() .data(web::Data::new(RwLock::new(db))) .service(create_with_id), ) .await; let mut resp = test::call_service(&mut app, req).await; assert_eq!(resp.status(), StatusCode::BAD_REQUEST); let body = resp.take_body(); let body = body.as_ref().unwrap(); assert_eq!(&Body::from("already registered"), body); } }
{ if let Err(err) = Url::parse(&target) { return Err(format!("malformed URL: {}", err)); }; let id = match id { Some(id) => id, None => hash(&target), }; let mut db = db.write().unwrap(); if db.contains_key(&id) { Err("already registered".to_string()) } else { db.insert(id.clone(), target.clone()); Ok(format!("/{} now redirects to {}", id, target)) } }
identifier_body
main.rs
/** shorturl is a web server that can host shortened URLs. ## Example usage Creating a link: ``` $ curl -X POST 127.0.0.1:8080/tsauvajon -d "https://linkedin.com/in/tsauvajon" /tsauvajon now redirects to https://linkedin.com/in/tsauvajon ``` Using it redirects us: ``` $ curl 127.0.0.1:8080/tsauvajon -v * Trying 127.0.0.1... * TCP_NODELAY set * Connected to 127.0.0.1 (127.0.0.1) port 8080 (#0) > GET /tsauvajon HTTP/1.1 > Host: 127.0.0.1:8080 > User-Agent: curl/7.64.1 > Accept: * / * > < HTTP/1.1 302 Found < content-length: 51 < location: https://linkedin.com/in/tsauvajon < date: Wed, 19 May 2021 17:36:49 GMT < * Connection #0 to host 127.0.0.1 left intact
*/ use actix_web::{error, get, post, web, App, HttpResponse, HttpServer, Responder}; use futures::StreamExt; use std::collections::HashMap; use std::sync::RwLock; use url::Url; const MAX_SIZE: usize = 1_024; // max payload size is 1k const RANDOM_URL_SIZE: usize = 5; // ramdomly generated URLs are 5 characters long type Db = web::Data<RwLock<HashMap<String, String>>>; #[get("/{id}")] async fn browse(db: web::Data<Db>, web::Path(id): web::Path<String>) -> impl Responder { match db.read() { Ok(db) => match db.get(&id) { None => Err(error::ErrorNotFound("not found")), Some(url) => Ok(HttpResponse::Found() .header("Location", url.clone()) .body(format!("redirecting to {}...", url))), }, Err(err) => { println!("accessing the db: {}", err); Err(error::ErrorInternalServerError(err.to_string())) } } } fn hash(input: &str) -> String { blake3::hash(input.as_bytes()).to_hex()[..RANDOM_URL_SIZE].to_string() } async fn read_target(mut payload: web::Payload) -> Result<String, String> { let mut body = web::BytesMut::new(); while let Some(chunk) = payload.next().await { let chunk = chunk.or_else(|err| Err(err.to_string()))?; // limit max size of in-memory payload if (body.len() + chunk.len()) > MAX_SIZE { return Err("overflow".to_string()); } body.extend_from_slice(&chunk); } String::from_utf8(body[..].to_vec()) .or_else(|err| Err(format!("invalid request body: {}", err))) } fn create_short_url( db: web::Data<Db>, target: String, id: Option<String>, ) -> Result<String, String> { if let Err(err) = Url::parse(&target) { return Err(format!("malformed URL: {}", err)); }; let id = match id { Some(id) => id, None => hash(&target), }; let mut db = db.write().unwrap(); if db.contains_key(&id) { Err("already registered".to_string()) } else { db.insert(id.clone(), target.clone()); Ok(format!("/{} now redirects to {}", id, target)) } } #[post("/{id}")] async fn create_with_id( db: web::Data<Db>, payload: web::Payload, web::Path(id): web::Path<String>, ) -> impl Responder { let target = match 
read_target(payload).await { Ok(target) => target, Err(err) => return Err(error::ErrorBadRequest(err)), }; create_short_url(db, target, Some(id)).or_else(|err| Err(error::ErrorBadRequest(err))) } #[post("/")] async fn create_random(db: web::Data<Db>, payload: web::Payload) -> impl Responder { let target = match read_target(payload).await { Ok(target) => target, Err(err) => return Err(error::ErrorBadRequest(err)), }; create_short_url(db, target, None).or_else(|err| Err(error::ErrorBadRequest(err))) } #[actix_web::main] async fn main() -> std::io::Result<()> { let db: Db = web::Data::new(RwLock::new(HashMap::new())); HttpServer::new(move || { App::new() .data(db.clone()) .service(browse) .service(create_random) .service(create_with_id) }) .bind("127.0.0.1:8080")? .run() .await } #[cfg(test)] mod tests { use super::*; #[test] fn test_hash() { assert_eq!("4cca4", hash("something")); assert_eq!("284a1", hash("something else")); } #[test] fn test_create_short_malformed_url() { let db: Db = web::Data::new(RwLock::new(HashMap::new())); let target = "this is not a valid URL".to_string(); let id = Some("hello".to_string()); assert_eq!( Err("malformed URL: relative URL without a base".to_string()), create_short_url(web::Data::new(db), target, id) ); } #[test] fn test_create_short_url() { let db: Db = web::Data::new(RwLock::new(HashMap::new())); let target = "https://google.com".to_string(); let id = "hello".to_string(); create_short_url(web::Data::new(db.clone()), target.clone(), Some(id.clone())).unwrap(); let db = db.read().unwrap(); let got = db.get(&id).unwrap(); assert_eq!(&target, got); } #[test] fn test_create_short_url_hashed_id() { let db: Db = web::Data::new(RwLock::new(HashMap::new())); let target = "https://google.com"; create_short_url(web::Data::new(db.clone()), target.to_string(), None).unwrap(); let id = hash(target); let db = db.read().unwrap(); let got = db.get(&id).unwrap(); assert_eq!(&target, got); } #[test] fn test_create_short_url_already_exists() { let 
id = "hello".to_string(); let mut db: HashMap<String, String> = HashMap::new(); db.insert(id.clone(), "some existing value".to_string()); let db: Db = web::Data::new(RwLock::new(db)); let target = "https://google.com".to_string(); assert_eq!( Err("already registered".to_string()), create_short_url(web::Data::new(db), target, Some(id)) ); } } #[cfg(test)] mod integration_tests { use super::*; use actix_web::{ body::Body, http::{HeaderValue, StatusCode}, test, }; // create a new custom shorturl #[actix_rt::test] async fn integration_test_create_custom_shortened_url() { let req = test::TestRequest::post() .uri("/hello") .set_payload("https://hello.world") .to_request(); let db: Db = web::Data::new(RwLock::new(HashMap::new())); let mut app = test::init_service(App::new().data(db.clone()).service(create_with_id)).await; let resp = test::call_service(&mut app, req).await; assert_eq!(resp.status(), StatusCode::OK); let db = db.read().unwrap(); assert_eq!(db.get("hello"), Some(&"https://hello.world".to_string())); assert_eq!(db.get("wwerwewrew"), None); } // create a new random shorturl #[actix_rt::test] async fn integration_test_create_random_shortened_url() { let req = test::TestRequest::post() .uri("/") .set_payload("https://hello.world") .to_request(); let db: Db = web::Data::new(RwLock::new(HashMap::new())); let mut app = test::init_service(App::new().data(db.clone()).service(create_random)).await; let resp = test::call_service(&mut app, req).await; assert_eq!(resp.status(), StatusCode::OK); let db = db.read().unwrap(); assert_eq!( db.get(&hash("https://hello.world")), Some(&"https://hello.world".to_string()) ); assert_eq!(db.get("wwerwewrew"), None); } // follow an existing shorturl #[actix_rt::test] async fn integration_test_use_shortened_url() { let req = test::TestRequest::get().uri("/hi").to_request(); let mut db: HashMap<String, String> = HashMap::new(); db.insert("hi".into(), "https://linkedin.com/in/tsauvajon".into()); let mut app = test::init_service( 
App::new() .data(web::Data::new(RwLock::new(db))) .service(browse), ) .await; let mut resp = test::call_service(&mut app, req).await; assert_eq!(resp.status(), StatusCode::FOUND); let body = resp.take_body(); let body = body.as_ref().unwrap(); assert_eq!( &Body::from("redirecting to https://linkedin.com/in/tsauvajon..."), body ); assert_eq!( resp.headers().get("Location"), Some(&HeaderValue::from_str("https://linkedin.com/in/tsauvajon").unwrap()) ) } // try to follow a shortened URL that doesn't exist #[actix_rt::test] async fn integration_test_link_miss() { let req = test::TestRequest::get() .uri("/thislinkdoesntexist") .to_request(); let db: Db = web::Data::new(RwLock::new(HashMap::new())); let mut app = test::init_service(App::new().data(db).service(browse)).await; let mut resp = test::call_service(&mut app, req).await; assert_eq!(resp.status(), StatusCode::NOT_FOUND); let body = resp.take_body(); let body = body.as_ref().unwrap(); assert_eq!(&Body::from("not found"), body); assert_eq!(resp.headers().get("Location"), None) } // try to add a link for an already existing short-url #[actix_rt::test] async fn integration_test_collision() { let req = test::TestRequest::post() .uri("/alreadyexists") .set_payload("https://something.new") .to_request(); let mut db: HashMap<String, String> = HashMap::new(); db.insert( "alreadyexists".into(), "https://github.com/tsauvajon".into(), ); let mut app = test::init_service( App::new() .data(web::Data::new(RwLock::new(db))) .service(create_with_id), ) .await; let mut resp = test::call_service(&mut app, req).await; assert_eq!(resp.status(), StatusCode::BAD_REQUEST); let body = resp.take_body(); let body = body.as_ref().unwrap(); assert_eq!(&Body::from("already registered"), body); } }
redirecting to https://linkedin.com/in/tsauvajon...* Closing connection 0 ```
random_line_split
main.rs
/** shorturl is a web server that can host shortened URLs. ## Example usage Creating a link: ``` $ curl -X POST 127.0.0.1:8080/tsauvajon -d "https://linkedin.com/in/tsauvajon" /tsauvajon now redirects to https://linkedin.com/in/tsauvajon ``` Using it redirects us: ``` $ curl 127.0.0.1:8080/tsauvajon -v * Trying 127.0.0.1... * TCP_NODELAY set * Connected to 127.0.0.1 (127.0.0.1) port 8080 (#0) > GET /tsauvajon HTTP/1.1 > Host: 127.0.0.1:8080 > User-Agent: curl/7.64.1 > Accept: * / * > < HTTP/1.1 302 Found < content-length: 51 < location: https://linkedin.com/in/tsauvajon < date: Wed, 19 May 2021 17:36:49 GMT < * Connection #0 to host 127.0.0.1 left intact redirecting to https://linkedin.com/in/tsauvajon...* Closing connection 0 ``` */ use actix_web::{error, get, post, web, App, HttpResponse, HttpServer, Responder}; use futures::StreamExt; use std::collections::HashMap; use std::sync::RwLock; use url::Url; const MAX_SIZE: usize = 1_024; // max payload size is 1k const RANDOM_URL_SIZE: usize = 5; // ramdomly generated URLs are 5 characters long type Db = web::Data<RwLock<HashMap<String, String>>>; #[get("/{id}")] async fn
(db: web::Data<Db>, web::Path(id): web::Path<String>) -> impl Responder { match db.read() { Ok(db) => match db.get(&id) { None => Err(error::ErrorNotFound("not found")), Some(url) => Ok(HttpResponse::Found() .header("Location", url.clone()) .body(format!("redirecting to {}...", url))), }, Err(err) => { println!("accessing the db: {}", err); Err(error::ErrorInternalServerError(err.to_string())) } } } fn hash(input: &str) -> String { blake3::hash(input.as_bytes()).to_hex()[..RANDOM_URL_SIZE].to_string() } async fn read_target(mut payload: web::Payload) -> Result<String, String> { let mut body = web::BytesMut::new(); while let Some(chunk) = payload.next().await { let chunk = chunk.or_else(|err| Err(err.to_string()))?; // limit max size of in-memory payload if (body.len() + chunk.len()) > MAX_SIZE { return Err("overflow".to_string()); } body.extend_from_slice(&chunk); } String::from_utf8(body[..].to_vec()) .or_else(|err| Err(format!("invalid request body: {}", err))) } fn create_short_url( db: web::Data<Db>, target: String, id: Option<String>, ) -> Result<String, String> { if let Err(err) = Url::parse(&target) { return Err(format!("malformed URL: {}", err)); }; let id = match id { Some(id) => id, None => hash(&target), }; let mut db = db.write().unwrap(); if db.contains_key(&id) { Err("already registered".to_string()) } else { db.insert(id.clone(), target.clone()); Ok(format!("/{} now redirects to {}", id, target)) } } #[post("/{id}")] async fn create_with_id( db: web::Data<Db>, payload: web::Payload, web::Path(id): web::Path<String>, ) -> impl Responder { let target = match read_target(payload).await { Ok(target) => target, Err(err) => return Err(error::ErrorBadRequest(err)), }; create_short_url(db, target, Some(id)).or_else(|err| Err(error::ErrorBadRequest(err))) } #[post("/")] async fn create_random(db: web::Data<Db>, payload: web::Payload) -> impl Responder { let target = match read_target(payload).await { Ok(target) => target, Err(err) => return 
Err(error::ErrorBadRequest(err)), }; create_short_url(db, target, None).or_else(|err| Err(error::ErrorBadRequest(err))) } #[actix_web::main] async fn main() -> std::io::Result<()> { let db: Db = web::Data::new(RwLock::new(HashMap::new())); HttpServer::new(move || { App::new() .data(db.clone()) .service(browse) .service(create_random) .service(create_with_id) }) .bind("127.0.0.1:8080")? .run() .await } #[cfg(test)] mod tests { use super::*; #[test] fn test_hash() { assert_eq!("4cca4", hash("something")); assert_eq!("284a1", hash("something else")); } #[test] fn test_create_short_malformed_url() { let db: Db = web::Data::new(RwLock::new(HashMap::new())); let target = "this is not a valid URL".to_string(); let id = Some("hello".to_string()); assert_eq!( Err("malformed URL: relative URL without a base".to_string()), create_short_url(web::Data::new(db), target, id) ); } #[test] fn test_create_short_url() { let db: Db = web::Data::new(RwLock::new(HashMap::new())); let target = "https://google.com".to_string(); let id = "hello".to_string(); create_short_url(web::Data::new(db.clone()), target.clone(), Some(id.clone())).unwrap(); let db = db.read().unwrap(); let got = db.get(&id).unwrap(); assert_eq!(&target, got); } #[test] fn test_create_short_url_hashed_id() { let db: Db = web::Data::new(RwLock::new(HashMap::new())); let target = "https://google.com"; create_short_url(web::Data::new(db.clone()), target.to_string(), None).unwrap(); let id = hash(target); let db = db.read().unwrap(); let got = db.get(&id).unwrap(); assert_eq!(&target, got); } #[test] fn test_create_short_url_already_exists() { let id = "hello".to_string(); let mut db: HashMap<String, String> = HashMap::new(); db.insert(id.clone(), "some existing value".to_string()); let db: Db = web::Data::new(RwLock::new(db)); let target = "https://google.com".to_string(); assert_eq!( Err("already registered".to_string()), create_short_url(web::Data::new(db), target, Some(id)) ); } } #[cfg(test)] mod integration_tests { 
use super::*; use actix_web::{ body::Body, http::{HeaderValue, StatusCode}, test, }; // create a new custom shorturl #[actix_rt::test] async fn integration_test_create_custom_shortened_url() { let req = test::TestRequest::post() .uri("/hello") .set_payload("https://hello.world") .to_request(); let db: Db = web::Data::new(RwLock::new(HashMap::new())); let mut app = test::init_service(App::new().data(db.clone()).service(create_with_id)).await; let resp = test::call_service(&mut app, req).await; assert_eq!(resp.status(), StatusCode::OK); let db = db.read().unwrap(); assert_eq!(db.get("hello"), Some(&"https://hello.world".to_string())); assert_eq!(db.get("wwerwewrew"), None); } // create a new random shorturl #[actix_rt::test] async fn integration_test_create_random_shortened_url() { let req = test::TestRequest::post() .uri("/") .set_payload("https://hello.world") .to_request(); let db: Db = web::Data::new(RwLock::new(HashMap::new())); let mut app = test::init_service(App::new().data(db.clone()).service(create_random)).await; let resp = test::call_service(&mut app, req).await; assert_eq!(resp.status(), StatusCode::OK); let db = db.read().unwrap(); assert_eq!( db.get(&hash("https://hello.world")), Some(&"https://hello.world".to_string()) ); assert_eq!(db.get("wwerwewrew"), None); } // follow an existing shorturl #[actix_rt::test] async fn integration_test_use_shortened_url() { let req = test::TestRequest::get().uri("/hi").to_request(); let mut db: HashMap<String, String> = HashMap::new(); db.insert("hi".into(), "https://linkedin.com/in/tsauvajon".into()); let mut app = test::init_service( App::new() .data(web::Data::new(RwLock::new(db))) .service(browse), ) .await; let mut resp = test::call_service(&mut app, req).await; assert_eq!(resp.status(), StatusCode::FOUND); let body = resp.take_body(); let body = body.as_ref().unwrap(); assert_eq!( &Body::from("redirecting to https://linkedin.com/in/tsauvajon..."), body ); assert_eq!( resp.headers().get("Location"), 
Some(&HeaderValue::from_str("https://linkedin.com/in/tsauvajon").unwrap()) ) } // try to follow a shortened URL that doesn't exist #[actix_rt::test] async fn integration_test_link_miss() { let req = test::TestRequest::get() .uri("/thislinkdoesntexist") .to_request(); let db: Db = web::Data::new(RwLock::new(HashMap::new())); let mut app = test::init_service(App::new().data(db).service(browse)).await; let mut resp = test::call_service(&mut app, req).await; assert_eq!(resp.status(), StatusCode::NOT_FOUND); let body = resp.take_body(); let body = body.as_ref().unwrap(); assert_eq!(&Body::from("not found"), body); assert_eq!(resp.headers().get("Location"), None) } // try to add a link for an already existing short-url #[actix_rt::test] async fn integration_test_collision() { let req = test::TestRequest::post() .uri("/alreadyexists") .set_payload("https://something.new") .to_request(); let mut db: HashMap<String, String> = HashMap::new(); db.insert( "alreadyexists".into(), "https://github.com/tsauvajon".into(), ); let mut app = test::init_service( App::new() .data(web::Data::new(RwLock::new(db))) .service(create_with_id), ) .await; let mut resp = test::call_service(&mut app, req).await; assert_eq!(resp.status(), StatusCode::BAD_REQUEST); let body = resp.take_body(); let body = body.as_ref().unwrap(); assert_eq!(&Body::from("already registered"), body); } }
browse
identifier_name
gateway.go
// Copyright 2018 Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package v1alpha3 import ( "fmt" xdsapi "github.com/envoyproxy/go-control-plane/envoy/api/v2" "github.com/envoyproxy/go-control-plane/envoy/api/v2/auth" "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" "github.com/envoyproxy/go-control-plane/envoy/api/v2/listener" "github.com/envoyproxy/go-control-plane/envoy/api/v2/route" http_conn "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/http_connection_manager/v2" "github.com/gogo/protobuf/types" multierror "github.com/hashicorp/go-multierror" networking "istio.io/api/networking/v1alpha3" "istio.io/istio/pilot/pkg/model" istio_route "istio.io/istio/pilot/pkg/networking/core/v1alpha3/route" "istio.io/istio/pilot/pkg/networking/plugin" "istio.io/istio/pilot/pkg/networking/util" "istio.io/istio/pkg/log" ) var ( // TODO: extract this into istio.io/pkg/proto/{bool.go or types.go or values.go} boolFalse = &types.BoolValue{ Value: false, } ) func (configgen *ConfigGeneratorImpl) buildGatewayListeners(env *model.Environment, node *model.Proxy, push *model.PushContext) ([]*xdsapi.Listener, error) { // collect workload labels workloadInstances, err := env.GetProxyServiceInstances(node) if err != nil { log.Errora("Failed to get gateway instances for router ", node.ID, err) return nil, err } var workloadLabels model.LabelsCollection for _, w := range workloadInstances { workloadLabels = append(workloadLabels, w.Labels) } 
gatewaysForWorkload := env.Gateways(workloadLabels) if len(gatewaysForWorkload) == 0 { log.Debuga("buildGatewayListeners: no gateways for router", node.ID) return []*xdsapi.Listener{}, nil } mergedGateway := model.MergeGateways(gatewaysForWorkload...) log.Debugf("buildGatewayListeners: gateways after merging: %v", mergedGateway) errs := &multierror.Error{} listeners := make([]*xdsapi.Listener, 0, len(mergedGateway.Servers)) for portNumber, servers := range mergedGateway.Servers { protocol := model.ParseProtocol(servers[0].Port.Protocol) if protocol == model.ProtocolHTTPS { // Gateway terminates TLS connection if TLS mode is not Passthrough So, its effectively a H2 listener. // This is complicated. We have multiple servers. One of these servers could have passthrough HTTPS while // others could be a simple/mutual TLS. // The code as it is, is not capable of handling this mixed listener type and set up the proper SNI chains // such that the passthrough ones go through a TCP proxy while others get terminated and go through http connection // manager. Ideally, the merge gateway function should take care of this and intelligently create multiple // groups of servers based on their TLS types as well. For now, we simply assume that if HTTPS, // and the first server in the group is not a passthrough, then this is a HTTP connection manager. 
if servers[0].Tls != nil && servers[0].Tls.Mode != networking.Server_TLSOptions_PASSTHROUGH { protocol = model.ProtocolHTTP2 } } opts := buildListenerOpts{ env: env, proxy: node, ip: WildcardAddress, port: int(portNumber), bindToPort: true, } listenerType := plugin.ModelProtocolToListenerProtocol(protocol) switch listenerType { case plugin.ListenerProtocolHTTP: // virtualService.HTTP applies here for both plain text HTTP and HTTPS termination opts.filterChainOpts = configgen.createGatewayHTTPFilterChainOpts(node, env, push, servers, mergedGateway.Names) case plugin.ListenerProtocolTCP: // virtualService.TLS/virtualService.TCP applies here opts.filterChainOpts = configgen.createGatewayTCPFilterChainOpts(node, env, push, servers, mergedGateway.Names) default: log.Warnf("buildGatewayListeners: unknown listener type %v", listenerType) continue } l := buildListener(opts) mutable := &plugin.MutableObjects{ Listener: l, // Note: buildListener creates filter chains but does not populate the filters in the chain; that's what // this is for. FilterChains: make([]plugin.FilterChain, len(l.FilterChains)), } var si *model.ServiceInstance for _, w := range workloadInstances { if w.Endpoint.Port == int(portNumber) { si = w break } } for _, p := range configgen.Plugins { params := &plugin.InputParams{ ListenerProtocol: listenerType, Env: env, Node: node, ProxyInstances: workloadInstances, Push: push, ServiceInstance: si, Port: &model.Port{ Name: servers[0].Port.Name, Port: int(portNumber), Protocol: protocol, }, } if err = p.OnOutboundListener(params, mutable); err != nil { log.Warna("buildGatewayListeners: failed to build listener for gateway: ", err.Error()) } } // Filters are serialized one time into an opaque struct once we have the complete list. 
if err = marshalFilters(mutable.Listener, opts, mutable.FilterChains); err != nil { errs = multierror.Append(errs, fmt.Errorf("gateway omitting listener %q due to: %v", mutable.Listener.Name, err.Error())) continue } if err = mutable.Listener.Validate(); err != nil { errs = multierror.Append(errs, fmt.Errorf("gateway listener %s validation failed: %v", mutable.Listener.Name, err.Error())) continue } if log.DebugEnabled() { log.Debugf("buildGatewayListeners: constructed listener with %d filter chains:\n%v", len(mutable.Listener.FilterChains), mutable.Listener)
listeners = append(listeners, mutable.Listener) } // We'll try to return any listeners we successfully marshaled; if we have none, we'll emit the error we built up err = errs.ErrorOrNil() if err != nil { // we have some listeners to return, but we also have some errors; log them log.Info(err.Error()) } if len(listeners) == 0 { log.Error("buildGatewayListeners: Have zero listeners") return []*xdsapi.Listener{}, nil } validatedListeners := make([]*xdsapi.Listener, 0, len(mergedGateway.Servers)) for _, l := range listeners { if err := l.Validate(); err != nil { log.Warnf("buildGatewayListeners: error validating listener %s: %v.. Skipping.", l.Name, err) continue } validatedListeners = append(validatedListeners, l) } return validatedListeners, nil } func (configgen *ConfigGeneratorImpl) buildGatewayHTTPRouteConfig(env *model.Environment, node *model.Proxy, push *model.PushContext, proxyInstances []*model.ServiceInstance, services []*model.Service, routeName string) (*xdsapi.RouteConfiguration, error) { // collect workload labels var workloadLabels model.LabelsCollection for _, w := range proxyInstances { workloadLabels = append(workloadLabels, w.Labels) } gateways := env.Gateways(workloadLabels) if len(gateways) == 0 { log.Debuga("buildGatewayRoutes: no gateways for router", node.ID) return nil, nil } merged := model.MergeGateways(gateways...) 
log.Debugf("buildGatewayRoutes: gateways after merging: %v", merged) // make sure that there is some server listening on this port if _, ok := merged.RDSRouteConfigNames[routeName]; !ok { log.Errorf("buildGatewayRoutes: could not find server for routeName %s, have %v", routeName, merged.RDSRouteConfigNames) return nil, fmt.Errorf("buildGatewayRoutes: could not find server for routeName %s, have %v", routeName, merged.RDSRouteConfigNames) } servers := merged.RDSRouteConfigNames[routeName] nameToServiceMap := make(map[model.Hostname]*model.Service, len(services)) for _, svc := range services { nameToServiceMap[svc.Hostname] = svc } gatewayHosts := make(map[model.Hostname]bool) tlsRedirect := make(map[model.Hostname]bool) for _, server := range servers { for _, host := range server.Hosts { gatewayHosts[model.Hostname(host)] = true if server.Tls != nil && server.Tls.HttpsRedirect { tlsRedirect[model.Hostname(host)] = true } } } port := int(servers[0].Port.Number) // NOTE: WE DO NOT SUPPORT two gateways on same workload binding to same virtual service virtualServices := push.VirtualServices(merged.Names) vHostDedupMap := make(map[string]*route.VirtualHost) for _, v := range virtualServices { vs := v.Spec.(*networking.VirtualService) matchingHosts := pickMatchingGatewayHosts(gatewayHosts, vs.Hosts) if len(matchingHosts) == 0 { log.Debugf("%s omitting virtual service %q because its hosts don't match gateways %v server %d", node.ID, v.Name, gateways, port) continue } routes, err := istio_route.BuildHTTPRoutesForVirtualService(node, push, v, nameToServiceMap, port, nil, merged.Names) if err != nil { log.Debugf("%s omitting routes for service %v due to error: %v", node.ID, v, err) continue } for vsvcHost, gatewayHost := range matchingHosts { if currentVhost, exists := vHostDedupMap[vsvcHost]; exists { currentVhost.Routes = istio_route.CombineVHostRoutes(currentVhost.Routes, routes) } else { newVhost := &route.VirtualHost{ Name: fmt.Sprintf("%s:%d", vsvcHost, port), Domains: 
[]string{vsvcHost, fmt.Sprintf("%s:%d", vsvcHost, port)}, Routes: routes, } if tlsRedirect[gatewayHost] { newVhost.RequireTls = route.VirtualHost_ALL } vHostDedupMap[vsvcHost] = newVhost } } } virtualHosts := make([]route.VirtualHost, 0, len(virtualServices)) if len(vHostDedupMap) == 0 { log.Warnf("constructed http route config for port %d with no vhosts; Setting up a default 404 vhost", port) virtualHosts = append(virtualHosts, route.VirtualHost{ Name: fmt.Sprintf("blackhole:%d", port), Domains: []string{"*"}, Routes: []route.Route{ { Match: route.RouteMatch{ PathSpecifier: &route.RouteMatch_Prefix{Prefix: "/"}, }, Action: &route.Route_DirectResponse{ DirectResponse: &route.DirectResponseAction{ Status: 404, }, }, }, }, }) } else { for _, v := range vHostDedupMap { virtualHosts = append(virtualHosts, *v) } } util.SortVirtualHosts(virtualHosts) routeCfg := &xdsapi.RouteConfiguration{ Name: routeName, VirtualHosts: virtualHosts, ValidateClusters: boolFalse, } // call plugins for _, p := range configgen.Plugins { in := &plugin.InputParams{ ListenerProtocol: plugin.ListenerProtocolHTTP, Env: env, Node: node, Push: push, } p.OnOutboundRouteConfiguration(in, routeCfg) } return routeCfg, nil } // to process HTTP and HTTPS servers along with virtualService.HTTP rules func (configgen *ConfigGeneratorImpl) createGatewayHTTPFilterChainOpts( node *model.Proxy, env *model.Environment, push *model.PushContext, servers []*networking.Server, gatewaysForWorkload map[string]bool) []*filterChainOpts { httpListeners := make([]*filterChainOpts, 0, len(servers)) // Are we processing plaintext servers or HTTPS servers? 
// If plain text, we have to combine all servers into a single listener if model.ParseProtocol(servers[0].Port.Protocol).IsHTTP() { rdsName := model.GatewayRDSRouteName(servers[0]) o := &filterChainOpts{ // This works because we validate that only HTTPS servers can have same port but still different port names // and that no two non-HTTPS servers can be on same port or share port names. // Validation is done per gateway and also during merging sniHosts: nil, tlsContext: nil, httpOpts: &httpListenerOpts{ rds: rdsName, useRemoteAddress: true, direction: http_conn.EGRESS, // viewed as from gateway to internal connectionManager: &http_conn.HttpConnectionManager{ // Forward client cert if connection is mTLS ForwardClientCertDetails: http_conn.SANITIZE_SET, SetCurrentClientCertDetails: &http_conn.HttpConnectionManager_SetCurrentClientCertDetails{ Subject: &types.BoolValue{Value: true}, Uri: true, Dns: true, }, }, }, } httpListeners = append(httpListeners, o) } else { // Build a filter chain for each HTTPS server // We know that this is a HTTPS server because this function is called only for ports of type HTTP/HTTPS // where HTTPS server's TLS mode is not passthrough and not nil for _, server := range servers { o := &filterChainOpts{ // This works because we validate that only HTTPS servers can have same port but still different port names // and that no two non-HTTPS servers can be on same port or share port names. 
// Validation is done per gateway and also during merging sniHosts: getSNIHostsForServer(server), tlsContext: buildGatewayListenerTLSContext(server), httpOpts: &httpListenerOpts{ rds: model.GatewayRDSRouteName(server), useRemoteAddress: true, direction: http_conn.EGRESS, // viewed as from gateway to internal connectionManager: &http_conn.HttpConnectionManager{ // Forward client cert if connection is mTLS ForwardClientCertDetails: http_conn.SANITIZE_SET, SetCurrentClientCertDetails: &http_conn.HttpConnectionManager_SetCurrentClientCertDetails{ Subject: &types.BoolValue{Value: true}, Uri: true, Dns: true, }, }, }, } httpListeners = append(httpListeners, o) } } return httpListeners } func buildGatewayListenerTLSContext(server *networking.Server) *auth.DownstreamTlsContext { // Server.TLS cannot be nil or passthrough. But as a safety guard, return nil if server.Tls == nil || server.Tls.Mode == networking.Server_TLSOptions_PASSTHROUGH { return nil // We don't need to setup TLS context for passthrough mode } var certValidationContext *auth.CertificateValidationContext var trustedCa *core.DataSource if len(server.Tls.CaCertificates) != 0 { trustedCa = &core.DataSource{ Specifier: &core.DataSource_Filename{ Filename: server.Tls.CaCertificates, }, } } if trustedCa != nil || len(server.Tls.SubjectAltNames) > 0 { certValidationContext = &auth.CertificateValidationContext{ TrustedCa: trustedCa, VerifySubjectAltName: server.Tls.SubjectAltNames, } } requireClientCert := server.Tls.Mode == networking.Server_TLSOptions_MUTUAL // Set TLS parameters if they are non-default var tlsParams *auth.TlsParameters if len(server.Tls.CipherSuites) > 0 || server.Tls.MinProtocolVersion != networking.Server_TLSOptions_TLS_AUTO || server.Tls.MaxProtocolVersion != networking.Server_TLSOptions_TLS_AUTO { tlsParams = &auth.TlsParameters{ TlsMinimumProtocolVersion: convertTLSProtocol(server.Tls.MinProtocolVersion), TlsMaximumProtocolVersion: convertTLSProtocol(server.Tls.MaxProtocolVersion), 
CipherSuites: server.Tls.CipherSuites, } } return &auth.DownstreamTlsContext{ CommonTlsContext: &auth.CommonTlsContext{ TlsCertificates: []*auth.TlsCertificate{ { CertificateChain: &core.DataSource{ Specifier: &core.DataSource_Filename{ Filename: server.Tls.ServerCertificate, }, }, PrivateKey: &core.DataSource{ Specifier: &core.DataSource_Filename{ Filename: server.Tls.PrivateKey, }, }, }, }, ValidationContextType: &auth.CommonTlsContext_ValidationContext{ ValidationContext: certValidationContext, }, AlpnProtocols: ListenersALPNProtocols, TlsParams: tlsParams, }, RequireClientCertificate: &types.BoolValue{ Value: requireClientCert, }, } } func convertTLSProtocol(in networking.Server_TLSOptions_TLSProtocol) auth.TlsParameters_TlsProtocol { out := auth.TlsParameters_TlsProtocol(in) // There should be a one-to-one enum mapping if out < auth.TlsParameters_TLS_AUTO || out > auth.TlsParameters_TLSv1_3 { log.Warnf("was not able to map TLS protocol to Envoy TLS protocol") return auth.TlsParameters_TLS_AUTO } return out } func (configgen *ConfigGeneratorImpl) createGatewayTCPFilterChainOpts( node *model.Proxy, env *model.Environment, push *model.PushContext, servers []*networking.Server, gatewaysForWorkload map[string]bool) []*filterChainOpts { opts := make([]*filterChainOpts, 0, len(servers)) for _, server := range servers { // We have a TCP/TLS server. This could be TLS termination (user specifies server.TLS with simple/mutual) // or opaque TCP (server.TLS is nil). or it could be a TLS passthrough with SNI based routing. // Handle the TLS termination or opaque TCP first. // This is opaque TCP server. 
Find matching virtual services with TCP blocks and forward if server.Tls == nil { if filters := buildGatewayNetworkFiltersFromTCPRoutes(node, env, push, server, gatewaysForWorkload); len(filters) > 0 { opts = append(opts, &filterChainOpts{ sniHosts: nil, tlsContext: nil, networkFilters: filters, }) } } else if server.Tls.Mode != networking.Server_TLSOptions_PASSTHROUGH { // TCP with TLS termination and forwarding. Setup TLS context to terminate, find matching services with TCP blocks // and forward to backend // Validation ensures that non-passthrough servers will have certs if filters := buildGatewayNetworkFiltersFromTCPRoutes(node, env, push, server, gatewaysForWorkload); len(filters) > 0 { opts = append(opts, &filterChainOpts{ sniHosts: getSNIHostsForServer(server), tlsContext: buildGatewayListenerTLSContext(server), networkFilters: filters, }) } } else { // Passthrough server. opts = append(opts, buildGatewayNetworkFiltersFromTLSRoutes(node, env, push, server, gatewaysForWorkload)...) } } return opts } // buildGatewayNetworkFiltersFromTCPRoutes builds tcp proxy routes for all VirtualServices with TCP blocks. // It first obtains all virtual services bound to the set of Gateways for this workload, filters them by this // server's port and hostnames, and produces network filters for each destination from the filtered services. 
func buildGatewayNetworkFiltersFromTCPRoutes(node *model.Proxy, env *model.Environment, push *model.PushContext, server *networking.Server, gatewaysForWorkload map[string]bool) []listener.Filter { port := &model.Port{ Name: server.Port.Name, Port: int(server.Port.Number), Protocol: model.ParseProtocol(server.Port.Protocol), } gatewayServerHosts := make(map[model.Hostname]bool, len(server.Hosts)) for _, host := range server.Hosts { gatewayServerHosts[model.Hostname(host)] = true } virtualServices := push.VirtualServices(gatewaysForWorkload) for _, spec := range virtualServices { vsvc := spec.Spec.(*networking.VirtualService) matchingHosts := pickMatchingGatewayHosts(gatewayServerHosts, vsvc.Hosts) if len(matchingHosts) == 0 { // the VirtualService's hosts don't include hosts advertised by server continue } // ensure we satisfy the rule's l4 match conditions, if any exist // For the moment, there can be only one match that succeeds // based on the match port/server port and the gateway name for _, tcp := range vsvc.Tcp { if l4MultiMatch(tcp.Match, server, gatewaysForWorkload) { return buildOutboundNetworkFilters(env, node, tcp.Route, push, port, spec.ConfigMeta) } } } return nil } // buildGatewayNetworkFiltersFromTLSRoutes builds tcp proxy routes for all VirtualServices with TLS blocks. 
// It first obtains all virtual services bound to the set of Gateways for this workload, filters them by this // server's port and hostnames, and produces network filters for each destination from the filtered services func buildGatewayNetworkFiltersFromTLSRoutes(node *model.Proxy, env *model.Environment, push *model.PushContext, server *networking.Server, gatewaysForWorkload map[string]bool) []*filterChainOpts { port := &model.Port{ Name: server.Port.Name, Port: int(server.Port.Number), Protocol: model.ParseProtocol(server.Port.Protocol), } gatewayServerHosts := make(map[model.Hostname]bool, len(server.Hosts)) for _, host := range server.Hosts { gatewayServerHosts[model.Hostname(host)] = true } virtualServices := push.VirtualServices(gatewaysForWorkload) filterChains := make([]*filterChainOpts, 0) for _, spec := range virtualServices { vsvc := spec.Spec.(*networking.VirtualService) matchingHosts := pickMatchingGatewayHosts(gatewayServerHosts, vsvc.Hosts) if len(matchingHosts) == 0 { // the VirtualService's hosts don't include hosts advertised by server continue } // For every matching TLS block, generate a filter chain with sni match for _, tls := range vsvc.Tls { for _, match := range tls.Match { if l4SingleMatch(convertTLSMatchToL4Match(match), server, gatewaysForWorkload) { // the sni hosts in the match will become part of a filter chain match filterChains = append(filterChains, &filterChainOpts{ sniHosts: match.SniHosts, tlsContext: nil, // NO TLS context because this is passthrough networkFilters: buildOutboundNetworkFilters(env, node, tls.Route, push, port, spec.ConfigMeta), }) } } } } return filterChains } func pickMatchingGatewayHosts(gatewayServerHosts map[model.Hostname]bool, virtualServiceHosts []string) map[string]model.Hostname { matchingHosts := make(map[string]model.Hostname, 0) for _, vsvcHost := range virtualServiceHosts { for gatewayHost := range gatewayServerHosts { if gatewayHost.Matches(model.Hostname(vsvcHost)) { matchingHosts[vsvcHost] = 
gatewayHost } } } return matchingHosts } func convertTLSMatchToL4Match(tlsMatch *networking.TLSMatchAttributes) *networking.L4MatchAttributes { return &networking.L4MatchAttributes{ DestinationSubnets: tlsMatch.DestinationSubnets, Port: tlsMatch.Port, SourceSubnet: tlsMatch.SourceSubnet, SourceLabels: tlsMatch.SourceLabels, Gateways: tlsMatch.Gateways, } } func l4MultiMatch(predicates []*networking.L4MatchAttributes, server *networking.Server, gatewaysForWorkload map[string]bool) bool { // NB from proto definitions: each set of predicates is OR'd together; inside of a predicate all conditions are AND'd. // This means we can return as soon as we get any match of an entire predicate. for _, match := range predicates { if l4SingleMatch(match, server, gatewaysForWorkload) { return true } } // If we had no predicates we match; otherwise we don't match since we'd have exited at the first match. return len(predicates) == 0 } func l4SingleMatch(match *networking.L4MatchAttributes, server *networking.Server, gatewaysForWorkload map[string]bool) bool { // if there's no gateway predicate, gatewayMatch is true; otherwise we match against the gateways for this workload return isPortMatch(match.Port, server) && isGatewayMatch(gatewaysForWorkload, match.Gateways) } func isPortMatch(port uint32, server *networking.Server) bool { // if there's no port predicate, portMatch is true; otherwise we evaluate the port predicate against the server's port portMatch := port == 0 if port != 0 { portMatch = server.Port.Number == port } return portMatch } func isGatewayMatch(gatewaysForWorkload map[string]bool, gateways []string) bool { // if there's no gateway predicate, gatewayMatch is true; otherwise we match against the gateways for this workload gatewayMatch := len(gateways) == 0 if len(gateways) > 0 { for _, gateway := range gateways { gatewayMatch = gatewayMatch || gatewaysForWorkload[gateway] } } return gatewayMatch } func getSNIHostsForServer(server *networking.Server) []string { if 
server.Tls == nil { return nil } return server.Hosts }
}
random_line_split
gateway.go
// Copyright 2018 Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package v1alpha3 import ( "fmt" xdsapi "github.com/envoyproxy/go-control-plane/envoy/api/v2" "github.com/envoyproxy/go-control-plane/envoy/api/v2/auth" "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" "github.com/envoyproxy/go-control-plane/envoy/api/v2/listener" "github.com/envoyproxy/go-control-plane/envoy/api/v2/route" http_conn "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/http_connection_manager/v2" "github.com/gogo/protobuf/types" multierror "github.com/hashicorp/go-multierror" networking "istio.io/api/networking/v1alpha3" "istio.io/istio/pilot/pkg/model" istio_route "istio.io/istio/pilot/pkg/networking/core/v1alpha3/route" "istio.io/istio/pilot/pkg/networking/plugin" "istio.io/istio/pilot/pkg/networking/util" "istio.io/istio/pkg/log" ) var ( // TODO: extract this into istio.io/pkg/proto/{bool.go or types.go or values.go} boolFalse = &types.BoolValue{ Value: false, } ) func (configgen *ConfigGeneratorImpl) buildGatewayListeners(env *model.Environment, node *model.Proxy, push *model.PushContext) ([]*xdsapi.Listener, error) { // collect workload labels workloadInstances, err := env.GetProxyServiceInstances(node) if err != nil { log.Errora("Failed to get gateway instances for router ", node.ID, err) return nil, err } var workloadLabels model.LabelsCollection for _, w := range workloadInstances { workloadLabels = append(workloadLabels, w.Labels) } 
gatewaysForWorkload := env.Gateways(workloadLabels) if len(gatewaysForWorkload) == 0 { log.Debuga("buildGatewayListeners: no gateways for router", node.ID) return []*xdsapi.Listener{}, nil } mergedGateway := model.MergeGateways(gatewaysForWorkload...) log.Debugf("buildGatewayListeners: gateways after merging: %v", mergedGateway) errs := &multierror.Error{} listeners := make([]*xdsapi.Listener, 0, len(mergedGateway.Servers)) for portNumber, servers := range mergedGateway.Servers { protocol := model.ParseProtocol(servers[0].Port.Protocol) if protocol == model.ProtocolHTTPS { // Gateway terminates TLS connection if TLS mode is not Passthrough So, its effectively a H2 listener. // This is complicated. We have multiple servers. One of these servers could have passthrough HTTPS while // others could be a simple/mutual TLS. // The code as it is, is not capable of handling this mixed listener type and set up the proper SNI chains // such that the passthrough ones go through a TCP proxy while others get terminated and go through http connection // manager. Ideally, the merge gateway function should take care of this and intelligently create multiple // groups of servers based on their TLS types as well. For now, we simply assume that if HTTPS, // and the first server in the group is not a passthrough, then this is a HTTP connection manager. 
if servers[0].Tls != nil && servers[0].Tls.Mode != networking.Server_TLSOptions_PASSTHROUGH { protocol = model.ProtocolHTTP2 } } opts := buildListenerOpts{ env: env, proxy: node, ip: WildcardAddress, port: int(portNumber), bindToPort: true, } listenerType := plugin.ModelProtocolToListenerProtocol(protocol) switch listenerType { case plugin.ListenerProtocolHTTP: // virtualService.HTTP applies here for both plain text HTTP and HTTPS termination opts.filterChainOpts = configgen.createGatewayHTTPFilterChainOpts(node, env, push, servers, mergedGateway.Names) case plugin.ListenerProtocolTCP: // virtualService.TLS/virtualService.TCP applies here opts.filterChainOpts = configgen.createGatewayTCPFilterChainOpts(node, env, push, servers, mergedGateway.Names) default: log.Warnf("buildGatewayListeners: unknown listener type %v", listenerType) continue } l := buildListener(opts) mutable := &plugin.MutableObjects{ Listener: l, // Note: buildListener creates filter chains but does not populate the filters in the chain; that's what // this is for. FilterChains: make([]plugin.FilterChain, len(l.FilterChains)), } var si *model.ServiceInstance for _, w := range workloadInstances { if w.Endpoint.Port == int(portNumber) { si = w break } } for _, p := range configgen.Plugins { params := &plugin.InputParams{ ListenerProtocol: listenerType, Env: env, Node: node, ProxyInstances: workloadInstances, Push: push, ServiceInstance: si, Port: &model.Port{ Name: servers[0].Port.Name, Port: int(portNumber), Protocol: protocol, }, } if err = p.OnOutboundListener(params, mutable); err != nil { log.Warna("buildGatewayListeners: failed to build listener for gateway: ", err.Error()) } } // Filters are serialized one time into an opaque struct once we have the complete list. 
if err = marshalFilters(mutable.Listener, opts, mutable.FilterChains); err != nil { errs = multierror.Append(errs, fmt.Errorf("gateway omitting listener %q due to: %v", mutable.Listener.Name, err.Error())) continue } if err = mutable.Listener.Validate(); err != nil { errs = multierror.Append(errs, fmt.Errorf("gateway listener %s validation failed: %v", mutable.Listener.Name, err.Error())) continue } if log.DebugEnabled() { log.Debugf("buildGatewayListeners: constructed listener with %d filter chains:\n%v", len(mutable.Listener.FilterChains), mutable.Listener) } listeners = append(listeners, mutable.Listener) } // We'll try to return any listeners we successfully marshaled; if we have none, we'll emit the error we built up err = errs.ErrorOrNil() if err != nil { // we have some listeners to return, but we also have some errors; log them log.Info(err.Error()) } if len(listeners) == 0 { log.Error("buildGatewayListeners: Have zero listeners") return []*xdsapi.Listener{}, nil } validatedListeners := make([]*xdsapi.Listener, 0, len(mergedGateway.Servers)) for _, l := range listeners { if err := l.Validate(); err != nil { log.Warnf("buildGatewayListeners: error validating listener %s: %v.. Skipping.", l.Name, err) continue } validatedListeners = append(validatedListeners, l) } return validatedListeners, nil } func (configgen *ConfigGeneratorImpl) buildGatewayHTTPRouteConfig(env *model.Environment, node *model.Proxy, push *model.PushContext, proxyInstances []*model.ServiceInstance, services []*model.Service, routeName string) (*xdsapi.RouteConfiguration, error) { // collect workload labels var workloadLabels model.LabelsCollection for _, w := range proxyInstances { workloadLabels = append(workloadLabels, w.Labels) } gateways := env.Gateways(workloadLabels) if len(gateways) == 0 { log.Debuga("buildGatewayRoutes: no gateways for router", node.ID) return nil, nil } merged := model.MergeGateways(gateways...) 
log.Debugf("buildGatewayRoutes: gateways after merging: %v", merged) // make sure that there is some server listening on this port if _, ok := merged.RDSRouteConfigNames[routeName]; !ok { log.Errorf("buildGatewayRoutes: could not find server for routeName %s, have %v", routeName, merged.RDSRouteConfigNames) return nil, fmt.Errorf("buildGatewayRoutes: could not find server for routeName %s, have %v", routeName, merged.RDSRouteConfigNames) } servers := merged.RDSRouteConfigNames[routeName] nameToServiceMap := make(map[model.Hostname]*model.Service, len(services)) for _, svc := range services { nameToServiceMap[svc.Hostname] = svc } gatewayHosts := make(map[model.Hostname]bool) tlsRedirect := make(map[model.Hostname]bool) for _, server := range servers { for _, host := range server.Hosts { gatewayHosts[model.Hostname(host)] = true if server.Tls != nil && server.Tls.HttpsRedirect { tlsRedirect[model.Hostname(host)] = true } } } port := int(servers[0].Port.Number) // NOTE: WE DO NOT SUPPORT two gateways on same workload binding to same virtual service virtualServices := push.VirtualServices(merged.Names) vHostDedupMap := make(map[string]*route.VirtualHost) for _, v := range virtualServices { vs := v.Spec.(*networking.VirtualService) matchingHosts := pickMatchingGatewayHosts(gatewayHosts, vs.Hosts) if len(matchingHosts) == 0 { log.Debugf("%s omitting virtual service %q because its hosts don't match gateways %v server %d", node.ID, v.Name, gateways, port) continue } routes, err := istio_route.BuildHTTPRoutesForVirtualService(node, push, v, nameToServiceMap, port, nil, merged.Names) if err != nil { log.Debugf("%s omitting routes for service %v due to error: %v", node.ID, v, err) continue } for vsvcHost, gatewayHost := range matchingHosts { if currentVhost, exists := vHostDedupMap[vsvcHost]; exists { currentVhost.Routes = istio_route.CombineVHostRoutes(currentVhost.Routes, routes) } else { newVhost := &route.VirtualHost{ Name: fmt.Sprintf("%s:%d", vsvcHost, port), Domains: 
[]string{vsvcHost, fmt.Sprintf("%s:%d", vsvcHost, port)}, Routes: routes, } if tlsRedirect[gatewayHost] { newVhost.RequireTls = route.VirtualHost_ALL } vHostDedupMap[vsvcHost] = newVhost } } } virtualHosts := make([]route.VirtualHost, 0, len(virtualServices)) if len(vHostDedupMap) == 0 { log.Warnf("constructed http route config for port %d with no vhosts; Setting up a default 404 vhost", port) virtualHosts = append(virtualHosts, route.VirtualHost{ Name: fmt.Sprintf("blackhole:%d", port), Domains: []string{"*"}, Routes: []route.Route{ { Match: route.RouteMatch{ PathSpecifier: &route.RouteMatch_Prefix{Prefix: "/"}, }, Action: &route.Route_DirectResponse{ DirectResponse: &route.DirectResponseAction{ Status: 404, }, }, }, }, }) } else { for _, v := range vHostDedupMap { virtualHosts = append(virtualHosts, *v) } } util.SortVirtualHosts(virtualHosts) routeCfg := &xdsapi.RouteConfiguration{ Name: routeName, VirtualHosts: virtualHosts, ValidateClusters: boolFalse, } // call plugins for _, p := range configgen.Plugins
return routeCfg, nil } // to process HTTP and HTTPS servers along with virtualService.HTTP rules func (configgen *ConfigGeneratorImpl) createGatewayHTTPFilterChainOpts( node *model.Proxy, env *model.Environment, push *model.PushContext, servers []*networking.Server, gatewaysForWorkload map[string]bool) []*filterChainOpts { httpListeners := make([]*filterChainOpts, 0, len(servers)) // Are we processing plaintext servers or HTTPS servers? // If plain text, we have to combine all servers into a single listener if model.ParseProtocol(servers[0].Port.Protocol).IsHTTP() { rdsName := model.GatewayRDSRouteName(servers[0]) o := &filterChainOpts{ // This works because we validate that only HTTPS servers can have same port but still different port names // and that no two non-HTTPS servers can be on same port or share port names. // Validation is done per gateway and also during merging sniHosts: nil, tlsContext: nil, httpOpts: &httpListenerOpts{ rds: rdsName, useRemoteAddress: true, direction: http_conn.EGRESS, // viewed as from gateway to internal connectionManager: &http_conn.HttpConnectionManager{ // Forward client cert if connection is mTLS ForwardClientCertDetails: http_conn.SANITIZE_SET, SetCurrentClientCertDetails: &http_conn.HttpConnectionManager_SetCurrentClientCertDetails{ Subject: &types.BoolValue{Value: true}, Uri: true, Dns: true, }, }, }, } httpListeners = append(httpListeners, o) } else { // Build a filter chain for each HTTPS server // We know that this is a HTTPS server because this function is called only for ports of type HTTP/HTTPS // where HTTPS server's TLS mode is not passthrough and not nil for _, server := range servers { o := &filterChainOpts{ // This works because we validate that only HTTPS servers can have same port but still different port names // and that no two non-HTTPS servers can be on same port or share port names. 
// Validation is done per gateway and also during merging sniHosts: getSNIHostsForServer(server), tlsContext: buildGatewayListenerTLSContext(server), httpOpts: &httpListenerOpts{ rds: model.GatewayRDSRouteName(server), useRemoteAddress: true, direction: http_conn.EGRESS, // viewed as from gateway to internal connectionManager: &http_conn.HttpConnectionManager{ // Forward client cert if connection is mTLS ForwardClientCertDetails: http_conn.SANITIZE_SET, SetCurrentClientCertDetails: &http_conn.HttpConnectionManager_SetCurrentClientCertDetails{ Subject: &types.BoolValue{Value: true}, Uri: true, Dns: true, }, }, }, } httpListeners = append(httpListeners, o) } } return httpListeners } func buildGatewayListenerTLSContext(server *networking.Server) *auth.DownstreamTlsContext { // Server.TLS cannot be nil or passthrough. But as a safety guard, return nil if server.Tls == nil || server.Tls.Mode == networking.Server_TLSOptions_PASSTHROUGH { return nil // We don't need to setup TLS context for passthrough mode } var certValidationContext *auth.CertificateValidationContext var trustedCa *core.DataSource if len(server.Tls.CaCertificates) != 0 { trustedCa = &core.DataSource{ Specifier: &core.DataSource_Filename{ Filename: server.Tls.CaCertificates, }, } } if trustedCa != nil || len(server.Tls.SubjectAltNames) > 0 { certValidationContext = &auth.CertificateValidationContext{ TrustedCa: trustedCa, VerifySubjectAltName: server.Tls.SubjectAltNames, } } requireClientCert := server.Tls.Mode == networking.Server_TLSOptions_MUTUAL // Set TLS parameters if they are non-default var tlsParams *auth.TlsParameters if len(server.Tls.CipherSuites) > 0 || server.Tls.MinProtocolVersion != networking.Server_TLSOptions_TLS_AUTO || server.Tls.MaxProtocolVersion != networking.Server_TLSOptions_TLS_AUTO { tlsParams = &auth.TlsParameters{ TlsMinimumProtocolVersion: convertTLSProtocol(server.Tls.MinProtocolVersion), TlsMaximumProtocolVersion: convertTLSProtocol(server.Tls.MaxProtocolVersion), 
CipherSuites: server.Tls.CipherSuites, } } return &auth.DownstreamTlsContext{ CommonTlsContext: &auth.CommonTlsContext{ TlsCertificates: []*auth.TlsCertificate{ { CertificateChain: &core.DataSource{ Specifier: &core.DataSource_Filename{ Filename: server.Tls.ServerCertificate, }, }, PrivateKey: &core.DataSource{ Specifier: &core.DataSource_Filename{ Filename: server.Tls.PrivateKey, }, }, }, }, ValidationContextType: &auth.CommonTlsContext_ValidationContext{ ValidationContext: certValidationContext, }, AlpnProtocols: ListenersALPNProtocols, TlsParams: tlsParams, }, RequireClientCertificate: &types.BoolValue{ Value: requireClientCert, }, } } func convertTLSProtocol(in networking.Server_TLSOptions_TLSProtocol) auth.TlsParameters_TlsProtocol { out := auth.TlsParameters_TlsProtocol(in) // There should be a one-to-one enum mapping if out < auth.TlsParameters_TLS_AUTO || out > auth.TlsParameters_TLSv1_3 { log.Warnf("was not able to map TLS protocol to Envoy TLS protocol") return auth.TlsParameters_TLS_AUTO } return out } func (configgen *ConfigGeneratorImpl) createGatewayTCPFilterChainOpts( node *model.Proxy, env *model.Environment, push *model.PushContext, servers []*networking.Server, gatewaysForWorkload map[string]bool) []*filterChainOpts { opts := make([]*filterChainOpts, 0, len(servers)) for _, server := range servers { // We have a TCP/TLS server. This could be TLS termination (user specifies server.TLS with simple/mutual) // or opaque TCP (server.TLS is nil). or it could be a TLS passthrough with SNI based routing. // Handle the TLS termination or opaque TCP first. // This is opaque TCP server. 
Find matching virtual services with TCP blocks and forward if server.Tls == nil { if filters := buildGatewayNetworkFiltersFromTCPRoutes(node, env, push, server, gatewaysForWorkload); len(filters) > 0 { opts = append(opts, &filterChainOpts{ sniHosts: nil, tlsContext: nil, networkFilters: filters, }) } } else if server.Tls.Mode != networking.Server_TLSOptions_PASSTHROUGH { // TCP with TLS termination and forwarding. Setup TLS context to terminate, find matching services with TCP blocks // and forward to backend // Validation ensures that non-passthrough servers will have certs if filters := buildGatewayNetworkFiltersFromTCPRoutes(node, env, push, server, gatewaysForWorkload); len(filters) > 0 { opts = append(opts, &filterChainOpts{ sniHosts: getSNIHostsForServer(server), tlsContext: buildGatewayListenerTLSContext(server), networkFilters: filters, }) } } else { // Passthrough server. opts = append(opts, buildGatewayNetworkFiltersFromTLSRoutes(node, env, push, server, gatewaysForWorkload)...) } } return opts } // buildGatewayNetworkFiltersFromTCPRoutes builds tcp proxy routes for all VirtualServices with TCP blocks. // It first obtains all virtual services bound to the set of Gateways for this workload, filters them by this // server's port and hostnames, and produces network filters for each destination from the filtered services. 
func buildGatewayNetworkFiltersFromTCPRoutes(node *model.Proxy, env *model.Environment, push *model.PushContext, server *networking.Server, gatewaysForWorkload map[string]bool) []listener.Filter { port := &model.Port{ Name: server.Port.Name, Port: int(server.Port.Number), Protocol: model.ParseProtocol(server.Port.Protocol), } gatewayServerHosts := make(map[model.Hostname]bool, len(server.Hosts)) for _, host := range server.Hosts { gatewayServerHosts[model.Hostname(host)] = true } virtualServices := push.VirtualServices(gatewaysForWorkload) for _, spec := range virtualServices { vsvc := spec.Spec.(*networking.VirtualService) matchingHosts := pickMatchingGatewayHosts(gatewayServerHosts, vsvc.Hosts) if len(matchingHosts) == 0 { // the VirtualService's hosts don't include hosts advertised by server continue } // ensure we satisfy the rule's l4 match conditions, if any exist // For the moment, there can be only one match that succeeds // based on the match port/server port and the gateway name for _, tcp := range vsvc.Tcp { if l4MultiMatch(tcp.Match, server, gatewaysForWorkload) { return buildOutboundNetworkFilters(env, node, tcp.Route, push, port, spec.ConfigMeta) } } } return nil } // buildGatewayNetworkFiltersFromTLSRoutes builds tcp proxy routes for all VirtualServices with TLS blocks. 
// It first obtains all virtual services bound to the set of Gateways for this workload, filters them by this // server's port and hostnames, and produces network filters for each destination from the filtered services func buildGatewayNetworkFiltersFromTLSRoutes(node *model.Proxy, env *model.Environment, push *model.PushContext, server *networking.Server, gatewaysForWorkload map[string]bool) []*filterChainOpts { port := &model.Port{ Name: server.Port.Name, Port: int(server.Port.Number), Protocol: model.ParseProtocol(server.Port.Protocol), } gatewayServerHosts := make(map[model.Hostname]bool, len(server.Hosts)) for _, host := range server.Hosts { gatewayServerHosts[model.Hostname(host)] = true } virtualServices := push.VirtualServices(gatewaysForWorkload) filterChains := make([]*filterChainOpts, 0) for _, spec := range virtualServices { vsvc := spec.Spec.(*networking.VirtualService) matchingHosts := pickMatchingGatewayHosts(gatewayServerHosts, vsvc.Hosts) if len(matchingHosts) == 0 { // the VirtualService's hosts don't include hosts advertised by server continue } // For every matching TLS block, generate a filter chain with sni match for _, tls := range vsvc.Tls { for _, match := range tls.Match { if l4SingleMatch(convertTLSMatchToL4Match(match), server, gatewaysForWorkload) { // the sni hosts in the match will become part of a filter chain match filterChains = append(filterChains, &filterChainOpts{ sniHosts: match.SniHosts, tlsContext: nil, // NO TLS context because this is passthrough networkFilters: buildOutboundNetworkFilters(env, node, tls.Route, push, port, spec.ConfigMeta), }) } } } } return filterChains } func pickMatchingGatewayHosts(gatewayServerHosts map[model.Hostname]bool, virtualServiceHosts []string) map[string]model.Hostname { matchingHosts := make(map[string]model.Hostname, 0) for _, vsvcHost := range virtualServiceHosts { for gatewayHost := range gatewayServerHosts { if gatewayHost.Matches(model.Hostname(vsvcHost)) { matchingHosts[vsvcHost] = 
gatewayHost } } } return matchingHosts } func convertTLSMatchToL4Match(tlsMatch *networking.TLSMatchAttributes) *networking.L4MatchAttributes { return &networking.L4MatchAttributes{ DestinationSubnets: tlsMatch.DestinationSubnets, Port: tlsMatch.Port, SourceSubnet: tlsMatch.SourceSubnet, SourceLabels: tlsMatch.SourceLabels, Gateways: tlsMatch.Gateways, } } func l4MultiMatch(predicates []*networking.L4MatchAttributes, server *networking.Server, gatewaysForWorkload map[string]bool) bool { // NB from proto definitions: each set of predicates is OR'd together; inside of a predicate all conditions are AND'd. // This means we can return as soon as we get any match of an entire predicate. for _, match := range predicates { if l4SingleMatch(match, server, gatewaysForWorkload) { return true } } // If we had no predicates we match; otherwise we don't match since we'd have exited at the first match. return len(predicates) == 0 } func l4SingleMatch(match *networking.L4MatchAttributes, server *networking.Server, gatewaysForWorkload map[string]bool) bool { // if there's no gateway predicate, gatewayMatch is true; otherwise we match against the gateways for this workload return isPortMatch(match.Port, server) && isGatewayMatch(gatewaysForWorkload, match.Gateways) } func isPortMatch(port uint32, server *networking.Server) bool { // if there's no port predicate, portMatch is true; otherwise we evaluate the port predicate against the server's port portMatch := port == 0 if port != 0 { portMatch = server.Port.Number == port } return portMatch } func isGatewayMatch(gatewaysForWorkload map[string]bool, gateways []string) bool { // if there's no gateway predicate, gatewayMatch is true; otherwise we match against the gateways for this workload gatewayMatch := len(gateways) == 0 if len(gateways) > 0 { for _, gateway := range gateways { gatewayMatch = gatewayMatch || gatewaysForWorkload[gateway] } } return gatewayMatch } func getSNIHostsForServer(server *networking.Server) []string { if 
server.Tls == nil { return nil } return server.Hosts }
{ in := &plugin.InputParams{ ListenerProtocol: plugin.ListenerProtocolHTTP, Env: env, Node: node, Push: push, } p.OnOutboundRouteConfiguration(in, routeCfg) }
conditional_block
gateway.go
// Copyright 2018 Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package v1alpha3 import ( "fmt" xdsapi "github.com/envoyproxy/go-control-plane/envoy/api/v2" "github.com/envoyproxy/go-control-plane/envoy/api/v2/auth" "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" "github.com/envoyproxy/go-control-plane/envoy/api/v2/listener" "github.com/envoyproxy/go-control-plane/envoy/api/v2/route" http_conn "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/http_connection_manager/v2" "github.com/gogo/protobuf/types" multierror "github.com/hashicorp/go-multierror" networking "istio.io/api/networking/v1alpha3" "istio.io/istio/pilot/pkg/model" istio_route "istio.io/istio/pilot/pkg/networking/core/v1alpha3/route" "istio.io/istio/pilot/pkg/networking/plugin" "istio.io/istio/pilot/pkg/networking/util" "istio.io/istio/pkg/log" ) var ( // TODO: extract this into istio.io/pkg/proto/{bool.go or types.go or values.go} boolFalse = &types.BoolValue{ Value: false, } ) func (configgen *ConfigGeneratorImpl) buildGatewayListeners(env *model.Environment, node *model.Proxy, push *model.PushContext) ([]*xdsapi.Listener, error) { // collect workload labels workloadInstances, err := env.GetProxyServiceInstances(node) if err != nil { log.Errora("Failed to get gateway instances for router ", node.ID, err) return nil, err } var workloadLabels model.LabelsCollection for _, w := range workloadInstances { workloadLabels = append(workloadLabels, w.Labels) } 
gatewaysForWorkload := env.Gateways(workloadLabels) if len(gatewaysForWorkload) == 0 { log.Debuga("buildGatewayListeners: no gateways for router", node.ID) return []*xdsapi.Listener{}, nil } mergedGateway := model.MergeGateways(gatewaysForWorkload...) log.Debugf("buildGatewayListeners: gateways after merging: %v", mergedGateway) errs := &multierror.Error{} listeners := make([]*xdsapi.Listener, 0, len(mergedGateway.Servers)) for portNumber, servers := range mergedGateway.Servers { protocol := model.ParseProtocol(servers[0].Port.Protocol) if protocol == model.ProtocolHTTPS { // Gateway terminates TLS connection if TLS mode is not Passthrough So, its effectively a H2 listener. // This is complicated. We have multiple servers. One of these servers could have passthrough HTTPS while // others could be a simple/mutual TLS. // The code as it is, is not capable of handling this mixed listener type and set up the proper SNI chains // such that the passthrough ones go through a TCP proxy while others get terminated and go through http connection // manager. Ideally, the merge gateway function should take care of this and intelligently create multiple // groups of servers based on their TLS types as well. For now, we simply assume that if HTTPS, // and the first server in the group is not a passthrough, then this is a HTTP connection manager. 
if servers[0].Tls != nil && servers[0].Tls.Mode != networking.Server_TLSOptions_PASSTHROUGH { protocol = model.ProtocolHTTP2 } } opts := buildListenerOpts{ env: env, proxy: node, ip: WildcardAddress, port: int(portNumber), bindToPort: true, } listenerType := plugin.ModelProtocolToListenerProtocol(protocol) switch listenerType { case plugin.ListenerProtocolHTTP: // virtualService.HTTP applies here for both plain text HTTP and HTTPS termination opts.filterChainOpts = configgen.createGatewayHTTPFilterChainOpts(node, env, push, servers, mergedGateway.Names) case plugin.ListenerProtocolTCP: // virtualService.TLS/virtualService.TCP applies here opts.filterChainOpts = configgen.createGatewayTCPFilterChainOpts(node, env, push, servers, mergedGateway.Names) default: log.Warnf("buildGatewayListeners: unknown listener type %v", listenerType) continue } l := buildListener(opts) mutable := &plugin.MutableObjects{ Listener: l, // Note: buildListener creates filter chains but does not populate the filters in the chain; that's what // this is for. FilterChains: make([]plugin.FilterChain, len(l.FilterChains)), } var si *model.ServiceInstance for _, w := range workloadInstances { if w.Endpoint.Port == int(portNumber) { si = w break } } for _, p := range configgen.Plugins { params := &plugin.InputParams{ ListenerProtocol: listenerType, Env: env, Node: node, ProxyInstances: workloadInstances, Push: push, ServiceInstance: si, Port: &model.Port{ Name: servers[0].Port.Name, Port: int(portNumber), Protocol: protocol, }, } if err = p.OnOutboundListener(params, mutable); err != nil { log.Warna("buildGatewayListeners: failed to build listener for gateway: ", err.Error()) } } // Filters are serialized one time into an opaque struct once we have the complete list. 
if err = marshalFilters(mutable.Listener, opts, mutable.FilterChains); err != nil { errs = multierror.Append(errs, fmt.Errorf("gateway omitting listener %q due to: %v", mutable.Listener.Name, err.Error())) continue } if err = mutable.Listener.Validate(); err != nil { errs = multierror.Append(errs, fmt.Errorf("gateway listener %s validation failed: %v", mutable.Listener.Name, err.Error())) continue } if log.DebugEnabled() { log.Debugf("buildGatewayListeners: constructed listener with %d filter chains:\n%v", len(mutable.Listener.FilterChains), mutable.Listener) } listeners = append(listeners, mutable.Listener) } // We'll try to return any listeners we successfully marshaled; if we have none, we'll emit the error we built up err = errs.ErrorOrNil() if err != nil { // we have some listeners to return, but we also have some errors; log them log.Info(err.Error()) } if len(listeners) == 0 { log.Error("buildGatewayListeners: Have zero listeners") return []*xdsapi.Listener{}, nil } validatedListeners := make([]*xdsapi.Listener, 0, len(mergedGateway.Servers)) for _, l := range listeners { if err := l.Validate(); err != nil { log.Warnf("buildGatewayListeners: error validating listener %s: %v.. Skipping.", l.Name, err) continue } validatedListeners = append(validatedListeners, l) } return validatedListeners, nil } func (configgen *ConfigGeneratorImpl) buildGatewayHTTPRouteConfig(env *model.Environment, node *model.Proxy, push *model.PushContext, proxyInstances []*model.ServiceInstance, services []*model.Service, routeName string) (*xdsapi.RouteConfiguration, error) { // collect workload labels var workloadLabels model.LabelsCollection for _, w := range proxyInstances { workloadLabels = append(workloadLabels, w.Labels) } gateways := env.Gateways(workloadLabels) if len(gateways) == 0 { log.Debuga("buildGatewayRoutes: no gateways for router", node.ID) return nil, nil } merged := model.MergeGateways(gateways...) 
log.Debugf("buildGatewayRoutes: gateways after merging: %v", merged) // make sure that there is some server listening on this port if _, ok := merged.RDSRouteConfigNames[routeName]; !ok { log.Errorf("buildGatewayRoutes: could not find server for routeName %s, have %v", routeName, merged.RDSRouteConfigNames) return nil, fmt.Errorf("buildGatewayRoutes: could not find server for routeName %s, have %v", routeName, merged.RDSRouteConfigNames) } servers := merged.RDSRouteConfigNames[routeName] nameToServiceMap := make(map[model.Hostname]*model.Service, len(services)) for _, svc := range services { nameToServiceMap[svc.Hostname] = svc } gatewayHosts := make(map[model.Hostname]bool) tlsRedirect := make(map[model.Hostname]bool) for _, server := range servers { for _, host := range server.Hosts { gatewayHosts[model.Hostname(host)] = true if server.Tls != nil && server.Tls.HttpsRedirect { tlsRedirect[model.Hostname(host)] = true } } } port := int(servers[0].Port.Number) // NOTE: WE DO NOT SUPPORT two gateways on same workload binding to same virtual service virtualServices := push.VirtualServices(merged.Names) vHostDedupMap := make(map[string]*route.VirtualHost) for _, v := range virtualServices { vs := v.Spec.(*networking.VirtualService) matchingHosts := pickMatchingGatewayHosts(gatewayHosts, vs.Hosts) if len(matchingHosts) == 0 { log.Debugf("%s omitting virtual service %q because its hosts don't match gateways %v server %d", node.ID, v.Name, gateways, port) continue } routes, err := istio_route.BuildHTTPRoutesForVirtualService(node, push, v, nameToServiceMap, port, nil, merged.Names) if err != nil { log.Debugf("%s omitting routes for service %v due to error: %v", node.ID, v, err) continue } for vsvcHost, gatewayHost := range matchingHosts { if currentVhost, exists := vHostDedupMap[vsvcHost]; exists { currentVhost.Routes = istio_route.CombineVHostRoutes(currentVhost.Routes, routes) } else { newVhost := &route.VirtualHost{ Name: fmt.Sprintf("%s:%d", vsvcHost, port), Domains: 
[]string{vsvcHost, fmt.Sprintf("%s:%d", vsvcHost, port)}, Routes: routes, } if tlsRedirect[gatewayHost] { newVhost.RequireTls = route.VirtualHost_ALL } vHostDedupMap[vsvcHost] = newVhost } } } virtualHosts := make([]route.VirtualHost, 0, len(virtualServices)) if len(vHostDedupMap) == 0 { log.Warnf("constructed http route config for port %d with no vhosts; Setting up a default 404 vhost", port) virtualHosts = append(virtualHosts, route.VirtualHost{ Name: fmt.Sprintf("blackhole:%d", port), Domains: []string{"*"}, Routes: []route.Route{ { Match: route.RouteMatch{ PathSpecifier: &route.RouteMatch_Prefix{Prefix: "/"}, }, Action: &route.Route_DirectResponse{ DirectResponse: &route.DirectResponseAction{ Status: 404, }, }, }, }, }) } else { for _, v := range vHostDedupMap { virtualHosts = append(virtualHosts, *v) } } util.SortVirtualHosts(virtualHosts) routeCfg := &xdsapi.RouteConfiguration{ Name: routeName, VirtualHosts: virtualHosts, ValidateClusters: boolFalse, } // call plugins for _, p := range configgen.Plugins { in := &plugin.InputParams{ ListenerProtocol: plugin.ListenerProtocolHTTP, Env: env, Node: node, Push: push, } p.OnOutboundRouteConfiguration(in, routeCfg) } return routeCfg, nil } // to process HTTP and HTTPS servers along with virtualService.HTTP rules func (configgen *ConfigGeneratorImpl) createGatewayHTTPFilterChainOpts( node *model.Proxy, env *model.Environment, push *model.PushContext, servers []*networking.Server, gatewaysForWorkload map[string]bool) []*filterChainOpts { httpListeners := make([]*filterChainOpts, 0, len(servers)) // Are we processing plaintext servers or HTTPS servers? 
// If plain text, we have to combine all servers into a single listener if model.ParseProtocol(servers[0].Port.Protocol).IsHTTP() { rdsName := model.GatewayRDSRouteName(servers[0]) o := &filterChainOpts{ // This works because we validate that only HTTPS servers can have same port but still different port names // and that no two non-HTTPS servers can be on same port or share port names. // Validation is done per gateway and also during merging sniHosts: nil, tlsContext: nil, httpOpts: &httpListenerOpts{ rds: rdsName, useRemoteAddress: true, direction: http_conn.EGRESS, // viewed as from gateway to internal connectionManager: &http_conn.HttpConnectionManager{ // Forward client cert if connection is mTLS ForwardClientCertDetails: http_conn.SANITIZE_SET, SetCurrentClientCertDetails: &http_conn.HttpConnectionManager_SetCurrentClientCertDetails{ Subject: &types.BoolValue{Value: true}, Uri: true, Dns: true, }, }, }, } httpListeners = append(httpListeners, o) } else { // Build a filter chain for each HTTPS server // We know that this is a HTTPS server because this function is called only for ports of type HTTP/HTTPS // where HTTPS server's TLS mode is not passthrough and not nil for _, server := range servers { o := &filterChainOpts{ // This works because we validate that only HTTPS servers can have same port but still different port names // and that no two non-HTTPS servers can be on same port or share port names. 
// Validation is done per gateway and also during merging sniHosts: getSNIHostsForServer(server), tlsContext: buildGatewayListenerTLSContext(server), httpOpts: &httpListenerOpts{ rds: model.GatewayRDSRouteName(server), useRemoteAddress: true, direction: http_conn.EGRESS, // viewed as from gateway to internal connectionManager: &http_conn.HttpConnectionManager{ // Forward client cert if connection is mTLS ForwardClientCertDetails: http_conn.SANITIZE_SET, SetCurrentClientCertDetails: &http_conn.HttpConnectionManager_SetCurrentClientCertDetails{ Subject: &types.BoolValue{Value: true}, Uri: true, Dns: true, }, }, }, } httpListeners = append(httpListeners, o) } } return httpListeners } func buildGatewayListenerTLSContext(server *networking.Server) *auth.DownstreamTlsContext { // Server.TLS cannot be nil or passthrough. But as a safety guard, return nil if server.Tls == nil || server.Tls.Mode == networking.Server_TLSOptions_PASSTHROUGH { return nil // We don't need to setup TLS context for passthrough mode } var certValidationContext *auth.CertificateValidationContext var trustedCa *core.DataSource if len(server.Tls.CaCertificates) != 0 { trustedCa = &core.DataSource{ Specifier: &core.DataSource_Filename{ Filename: server.Tls.CaCertificates, }, } } if trustedCa != nil || len(server.Tls.SubjectAltNames) > 0 { certValidationContext = &auth.CertificateValidationContext{ TrustedCa: trustedCa, VerifySubjectAltName: server.Tls.SubjectAltNames, } } requireClientCert := server.Tls.Mode == networking.Server_TLSOptions_MUTUAL // Set TLS parameters if they are non-default var tlsParams *auth.TlsParameters if len(server.Tls.CipherSuites) > 0 || server.Tls.MinProtocolVersion != networking.Server_TLSOptions_TLS_AUTO || server.Tls.MaxProtocolVersion != networking.Server_TLSOptions_TLS_AUTO { tlsParams = &auth.TlsParameters{ TlsMinimumProtocolVersion: convertTLSProtocol(server.Tls.MinProtocolVersion), TlsMaximumProtocolVersion: convertTLSProtocol(server.Tls.MaxProtocolVersion), 
CipherSuites: server.Tls.CipherSuites, } } return &auth.DownstreamTlsContext{ CommonTlsContext: &auth.CommonTlsContext{ TlsCertificates: []*auth.TlsCertificate{ { CertificateChain: &core.DataSource{ Specifier: &core.DataSource_Filename{ Filename: server.Tls.ServerCertificate, }, }, PrivateKey: &core.DataSource{ Specifier: &core.DataSource_Filename{ Filename: server.Tls.PrivateKey, }, }, }, }, ValidationContextType: &auth.CommonTlsContext_ValidationContext{ ValidationContext: certValidationContext, }, AlpnProtocols: ListenersALPNProtocols, TlsParams: tlsParams, }, RequireClientCertificate: &types.BoolValue{ Value: requireClientCert, }, } } func convertTLSProtocol(in networking.Server_TLSOptions_TLSProtocol) auth.TlsParameters_TlsProtocol { out := auth.TlsParameters_TlsProtocol(in) // There should be a one-to-one enum mapping if out < auth.TlsParameters_TLS_AUTO || out > auth.TlsParameters_TLSv1_3 { log.Warnf("was not able to map TLS protocol to Envoy TLS protocol") return auth.TlsParameters_TLS_AUTO } return out } func (configgen *ConfigGeneratorImpl) createGatewayTCPFilterChainOpts( node *model.Proxy, env *model.Environment, push *model.PushContext, servers []*networking.Server, gatewaysForWorkload map[string]bool) []*filterChainOpts { opts := make([]*filterChainOpts, 0, len(servers)) for _, server := range servers { // We have a TCP/TLS server. This could be TLS termination (user specifies server.TLS with simple/mutual) // or opaque TCP (server.TLS is nil). or it could be a TLS passthrough with SNI based routing. // Handle the TLS termination or opaque TCP first. // This is opaque TCP server. 
Find matching virtual services with TCP blocks and forward if server.Tls == nil { if filters := buildGatewayNetworkFiltersFromTCPRoutes(node, env, push, server, gatewaysForWorkload); len(filters) > 0 { opts = append(opts, &filterChainOpts{ sniHosts: nil, tlsContext: nil, networkFilters: filters, }) } } else if server.Tls.Mode != networking.Server_TLSOptions_PASSTHROUGH { // TCP with TLS termination and forwarding. Setup TLS context to terminate, find matching services with TCP blocks // and forward to backend // Validation ensures that non-passthrough servers will have certs if filters := buildGatewayNetworkFiltersFromTCPRoutes(node, env, push, server, gatewaysForWorkload); len(filters) > 0 { opts = append(opts, &filterChainOpts{ sniHosts: getSNIHostsForServer(server), tlsContext: buildGatewayListenerTLSContext(server), networkFilters: filters, }) } } else { // Passthrough server. opts = append(opts, buildGatewayNetworkFiltersFromTLSRoutes(node, env, push, server, gatewaysForWorkload)...) } } return opts } // buildGatewayNetworkFiltersFromTCPRoutes builds tcp proxy routes for all VirtualServices with TCP blocks. // It first obtains all virtual services bound to the set of Gateways for this workload, filters them by this // server's port and hostnames, and produces network filters for each destination from the filtered services. 
func buildGatewayNetworkFiltersFromTCPRoutes(node *model.Proxy, env *model.Environment, push *model.PushContext, server *networking.Server, gatewaysForWorkload map[string]bool) []listener.Filter { port := &model.Port{ Name: server.Port.Name, Port: int(server.Port.Number), Protocol: model.ParseProtocol(server.Port.Protocol), } gatewayServerHosts := make(map[model.Hostname]bool, len(server.Hosts)) for _, host := range server.Hosts { gatewayServerHosts[model.Hostname(host)] = true } virtualServices := push.VirtualServices(gatewaysForWorkload) for _, spec := range virtualServices { vsvc := spec.Spec.(*networking.VirtualService) matchingHosts := pickMatchingGatewayHosts(gatewayServerHosts, vsvc.Hosts) if len(matchingHosts) == 0 { // the VirtualService's hosts don't include hosts advertised by server continue } // ensure we satisfy the rule's l4 match conditions, if any exist // For the moment, there can be only one match that succeeds // based on the match port/server port and the gateway name for _, tcp := range vsvc.Tcp { if l4MultiMatch(tcp.Match, server, gatewaysForWorkload) { return buildOutboundNetworkFilters(env, node, tcp.Route, push, port, spec.ConfigMeta) } } } return nil } // buildGatewayNetworkFiltersFromTLSRoutes builds tcp proxy routes for all VirtualServices with TLS blocks. 
// It first obtains all virtual services bound to the set of Gateways for this workload, filters them by this // server's port and hostnames, and produces network filters for each destination from the filtered services func buildGatewayNetworkFiltersFromTLSRoutes(node *model.Proxy, env *model.Environment, push *model.PushContext, server *networking.Server, gatewaysForWorkload map[string]bool) []*filterChainOpts { port := &model.Port{ Name: server.Port.Name, Port: int(server.Port.Number), Protocol: model.ParseProtocol(server.Port.Protocol), } gatewayServerHosts := make(map[model.Hostname]bool, len(server.Hosts)) for _, host := range server.Hosts { gatewayServerHosts[model.Hostname(host)] = true } virtualServices := push.VirtualServices(gatewaysForWorkload) filterChains := make([]*filterChainOpts, 0) for _, spec := range virtualServices { vsvc := spec.Spec.(*networking.VirtualService) matchingHosts := pickMatchingGatewayHosts(gatewayServerHosts, vsvc.Hosts) if len(matchingHosts) == 0 { // the VirtualService's hosts don't include hosts advertised by server continue } // For every matching TLS block, generate a filter chain with sni match for _, tls := range vsvc.Tls { for _, match := range tls.Match { if l4SingleMatch(convertTLSMatchToL4Match(match), server, gatewaysForWorkload) { // the sni hosts in the match will become part of a filter chain match filterChains = append(filterChains, &filterChainOpts{ sniHosts: match.SniHosts, tlsContext: nil, // NO TLS context because this is passthrough networkFilters: buildOutboundNetworkFilters(env, node, tls.Route, push, port, spec.ConfigMeta), }) } } } } return filterChains } func pickMatchingGatewayHosts(gatewayServerHosts map[model.Hostname]bool, virtualServiceHosts []string) map[string]model.Hostname { matchingHosts := make(map[string]model.Hostname, 0) for _, vsvcHost := range virtualServiceHosts { for gatewayHost := range gatewayServerHosts { if gatewayHost.Matches(model.Hostname(vsvcHost)) { matchingHosts[vsvcHost] = 
gatewayHost } } } return matchingHosts } func convertTLSMatchToL4Match(tlsMatch *networking.TLSMatchAttributes) *networking.L4MatchAttributes
func l4MultiMatch(predicates []*networking.L4MatchAttributes, server *networking.Server, gatewaysForWorkload map[string]bool) bool { // NB from proto definitions: each set of predicates is OR'd together; inside of a predicate all conditions are AND'd. // This means we can return as soon as we get any match of an entire predicate. for _, match := range predicates { if l4SingleMatch(match, server, gatewaysForWorkload) { return true } } // If we had no predicates we match; otherwise we don't match since we'd have exited at the first match. return len(predicates) == 0 } func l4SingleMatch(match *networking.L4MatchAttributes, server *networking.Server, gatewaysForWorkload map[string]bool) bool { // if there's no gateway predicate, gatewayMatch is true; otherwise we match against the gateways for this workload return isPortMatch(match.Port, server) && isGatewayMatch(gatewaysForWorkload, match.Gateways) } func isPortMatch(port uint32, server *networking.Server) bool { // if there's no port predicate, portMatch is true; otherwise we evaluate the port predicate against the server's port portMatch := port == 0 if port != 0 { portMatch = server.Port.Number == port } return portMatch } func isGatewayMatch(gatewaysForWorkload map[string]bool, gateways []string) bool { // if there's no gateway predicate, gatewayMatch is true; otherwise we match against the gateways for this workload gatewayMatch := len(gateways) == 0 if len(gateways) > 0 { for _, gateway := range gateways { gatewayMatch = gatewayMatch || gatewaysForWorkload[gateway] } } return gatewayMatch } func getSNIHostsForServer(server *networking.Server) []string { if server.Tls == nil { return nil } return server.Hosts }
{ return &networking.L4MatchAttributes{ DestinationSubnets: tlsMatch.DestinationSubnets, Port: tlsMatch.Port, SourceSubnet: tlsMatch.SourceSubnet, SourceLabels: tlsMatch.SourceLabels, Gateways: tlsMatch.Gateways, } }
identifier_body
gateway.go
// Copyright 2018 Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package v1alpha3 import ( "fmt" xdsapi "github.com/envoyproxy/go-control-plane/envoy/api/v2" "github.com/envoyproxy/go-control-plane/envoy/api/v2/auth" "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" "github.com/envoyproxy/go-control-plane/envoy/api/v2/listener" "github.com/envoyproxy/go-control-plane/envoy/api/v2/route" http_conn "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/http_connection_manager/v2" "github.com/gogo/protobuf/types" multierror "github.com/hashicorp/go-multierror" networking "istio.io/api/networking/v1alpha3" "istio.io/istio/pilot/pkg/model" istio_route "istio.io/istio/pilot/pkg/networking/core/v1alpha3/route" "istio.io/istio/pilot/pkg/networking/plugin" "istio.io/istio/pilot/pkg/networking/util" "istio.io/istio/pkg/log" ) var ( // TODO: extract this into istio.io/pkg/proto/{bool.go or types.go or values.go} boolFalse = &types.BoolValue{ Value: false, } ) func (configgen *ConfigGeneratorImpl) buildGatewayListeners(env *model.Environment, node *model.Proxy, push *model.PushContext) ([]*xdsapi.Listener, error) { // collect workload labels workloadInstances, err := env.GetProxyServiceInstances(node) if err != nil { log.Errora("Failed to get gateway instances for router ", node.ID, err) return nil, err } var workloadLabels model.LabelsCollection for _, w := range workloadInstances { workloadLabels = append(workloadLabels, w.Labels) } 
gatewaysForWorkload := env.Gateways(workloadLabels) if len(gatewaysForWorkload) == 0 { log.Debuga("buildGatewayListeners: no gateways for router", node.ID) return []*xdsapi.Listener{}, nil } mergedGateway := model.MergeGateways(gatewaysForWorkload...) log.Debugf("buildGatewayListeners: gateways after merging: %v", mergedGateway) errs := &multierror.Error{} listeners := make([]*xdsapi.Listener, 0, len(mergedGateway.Servers)) for portNumber, servers := range mergedGateway.Servers { protocol := model.ParseProtocol(servers[0].Port.Protocol) if protocol == model.ProtocolHTTPS { // Gateway terminates TLS connection if TLS mode is not Passthrough So, its effectively a H2 listener. // This is complicated. We have multiple servers. One of these servers could have passthrough HTTPS while // others could be a simple/mutual TLS. // The code as it is, is not capable of handling this mixed listener type and set up the proper SNI chains // such that the passthrough ones go through a TCP proxy while others get terminated and go through http connection // manager. Ideally, the merge gateway function should take care of this and intelligently create multiple // groups of servers based on their TLS types as well. For now, we simply assume that if HTTPS, // and the first server in the group is not a passthrough, then this is a HTTP connection manager. 
if servers[0].Tls != nil && servers[0].Tls.Mode != networking.Server_TLSOptions_PASSTHROUGH { protocol = model.ProtocolHTTP2 } } opts := buildListenerOpts{ env: env, proxy: node, ip: WildcardAddress, port: int(portNumber), bindToPort: true, } listenerType := plugin.ModelProtocolToListenerProtocol(protocol) switch listenerType { case plugin.ListenerProtocolHTTP: // virtualService.HTTP applies here for both plain text HTTP and HTTPS termination opts.filterChainOpts = configgen.createGatewayHTTPFilterChainOpts(node, env, push, servers, mergedGateway.Names) case plugin.ListenerProtocolTCP: // virtualService.TLS/virtualService.TCP applies here opts.filterChainOpts = configgen.createGatewayTCPFilterChainOpts(node, env, push, servers, mergedGateway.Names) default: log.Warnf("buildGatewayListeners: unknown listener type %v", listenerType) continue } l := buildListener(opts) mutable := &plugin.MutableObjects{ Listener: l, // Note: buildListener creates filter chains but does not populate the filters in the chain; that's what // this is for. FilterChains: make([]plugin.FilterChain, len(l.FilterChains)), } var si *model.ServiceInstance for _, w := range workloadInstances { if w.Endpoint.Port == int(portNumber) { si = w break } } for _, p := range configgen.Plugins { params := &plugin.InputParams{ ListenerProtocol: listenerType, Env: env, Node: node, ProxyInstances: workloadInstances, Push: push, ServiceInstance: si, Port: &model.Port{ Name: servers[0].Port.Name, Port: int(portNumber), Protocol: protocol, }, } if err = p.OnOutboundListener(params, mutable); err != nil { log.Warna("buildGatewayListeners: failed to build listener for gateway: ", err.Error()) } } // Filters are serialized one time into an opaque struct once we have the complete list. 
if err = marshalFilters(mutable.Listener, opts, mutable.FilterChains); err != nil { errs = multierror.Append(errs, fmt.Errorf("gateway omitting listener %q due to: %v", mutable.Listener.Name, err.Error())) continue } if err = mutable.Listener.Validate(); err != nil { errs = multierror.Append(errs, fmt.Errorf("gateway listener %s validation failed: %v", mutable.Listener.Name, err.Error())) continue } if log.DebugEnabled() { log.Debugf("buildGatewayListeners: constructed listener with %d filter chains:\n%v", len(mutable.Listener.FilterChains), mutable.Listener) } listeners = append(listeners, mutable.Listener) } // We'll try to return any listeners we successfully marshaled; if we have none, we'll emit the error we built up err = errs.ErrorOrNil() if err != nil { // we have some listeners to return, but we also have some errors; log them log.Info(err.Error()) } if len(listeners) == 0 { log.Error("buildGatewayListeners: Have zero listeners") return []*xdsapi.Listener{}, nil } validatedListeners := make([]*xdsapi.Listener, 0, len(mergedGateway.Servers)) for _, l := range listeners { if err := l.Validate(); err != nil { log.Warnf("buildGatewayListeners: error validating listener %s: %v.. Skipping.", l.Name, err) continue } validatedListeners = append(validatedListeners, l) } return validatedListeners, nil } func (configgen *ConfigGeneratorImpl) buildGatewayHTTPRouteConfig(env *model.Environment, node *model.Proxy, push *model.PushContext, proxyInstances []*model.ServiceInstance, services []*model.Service, routeName string) (*xdsapi.RouteConfiguration, error) { // collect workload labels var workloadLabels model.LabelsCollection for _, w := range proxyInstances { workloadLabels = append(workloadLabels, w.Labels) } gateways := env.Gateways(workloadLabels) if len(gateways) == 0 { log.Debuga("buildGatewayRoutes: no gateways for router", node.ID) return nil, nil } merged := model.MergeGateways(gateways...) 
log.Debugf("buildGatewayRoutes: gateways after merging: %v", merged) // make sure that there is some server listening on this port if _, ok := merged.RDSRouteConfigNames[routeName]; !ok { log.Errorf("buildGatewayRoutes: could not find server for routeName %s, have %v", routeName, merged.RDSRouteConfigNames) return nil, fmt.Errorf("buildGatewayRoutes: could not find server for routeName %s, have %v", routeName, merged.RDSRouteConfigNames) } servers := merged.RDSRouteConfigNames[routeName] nameToServiceMap := make(map[model.Hostname]*model.Service, len(services)) for _, svc := range services { nameToServiceMap[svc.Hostname] = svc } gatewayHosts := make(map[model.Hostname]bool) tlsRedirect := make(map[model.Hostname]bool) for _, server := range servers { for _, host := range server.Hosts { gatewayHosts[model.Hostname(host)] = true if server.Tls != nil && server.Tls.HttpsRedirect { tlsRedirect[model.Hostname(host)] = true } } } port := int(servers[0].Port.Number) // NOTE: WE DO NOT SUPPORT two gateways on same workload binding to same virtual service virtualServices := push.VirtualServices(merged.Names) vHostDedupMap := make(map[string]*route.VirtualHost) for _, v := range virtualServices { vs := v.Spec.(*networking.VirtualService) matchingHosts := pickMatchingGatewayHosts(gatewayHosts, vs.Hosts) if len(matchingHosts) == 0 { log.Debugf("%s omitting virtual service %q because its hosts don't match gateways %v server %d", node.ID, v.Name, gateways, port) continue } routes, err := istio_route.BuildHTTPRoutesForVirtualService(node, push, v, nameToServiceMap, port, nil, merged.Names) if err != nil { log.Debugf("%s omitting routes for service %v due to error: %v", node.ID, v, err) continue } for vsvcHost, gatewayHost := range matchingHosts { if currentVhost, exists := vHostDedupMap[vsvcHost]; exists { currentVhost.Routes = istio_route.CombineVHostRoutes(currentVhost.Routes, routes) } else { newVhost := &route.VirtualHost{ Name: fmt.Sprintf("%s:%d", vsvcHost, port), Domains: 
[]string{vsvcHost, fmt.Sprintf("%s:%d", vsvcHost, port)}, Routes: routes, } if tlsRedirect[gatewayHost] { newVhost.RequireTls = route.VirtualHost_ALL } vHostDedupMap[vsvcHost] = newVhost } } } virtualHosts := make([]route.VirtualHost, 0, len(virtualServices)) if len(vHostDedupMap) == 0 { log.Warnf("constructed http route config for port %d with no vhosts; Setting up a default 404 vhost", port) virtualHosts = append(virtualHosts, route.VirtualHost{ Name: fmt.Sprintf("blackhole:%d", port), Domains: []string{"*"}, Routes: []route.Route{ { Match: route.RouteMatch{ PathSpecifier: &route.RouteMatch_Prefix{Prefix: "/"}, }, Action: &route.Route_DirectResponse{ DirectResponse: &route.DirectResponseAction{ Status: 404, }, }, }, }, }) } else { for _, v := range vHostDedupMap { virtualHosts = append(virtualHosts, *v) } } util.SortVirtualHosts(virtualHosts) routeCfg := &xdsapi.RouteConfiguration{ Name: routeName, VirtualHosts: virtualHosts, ValidateClusters: boolFalse, } // call plugins for _, p := range configgen.Plugins { in := &plugin.InputParams{ ListenerProtocol: plugin.ListenerProtocolHTTP, Env: env, Node: node, Push: push, } p.OnOutboundRouteConfiguration(in, routeCfg) } return routeCfg, nil } // to process HTTP and HTTPS servers along with virtualService.HTTP rules func (configgen *ConfigGeneratorImpl) createGatewayHTTPFilterChainOpts( node *model.Proxy, env *model.Environment, push *model.PushContext, servers []*networking.Server, gatewaysForWorkload map[string]bool) []*filterChainOpts { httpListeners := make([]*filterChainOpts, 0, len(servers)) // Are we processing plaintext servers or HTTPS servers? 
// If plain text, we have to combine all servers into a single listener if model.ParseProtocol(servers[0].Port.Protocol).IsHTTP() { rdsName := model.GatewayRDSRouteName(servers[0]) o := &filterChainOpts{ // This works because we validate that only HTTPS servers can have same port but still different port names // and that no two non-HTTPS servers can be on same port or share port names. // Validation is done per gateway and also during merging sniHosts: nil, tlsContext: nil, httpOpts: &httpListenerOpts{ rds: rdsName, useRemoteAddress: true, direction: http_conn.EGRESS, // viewed as from gateway to internal connectionManager: &http_conn.HttpConnectionManager{ // Forward client cert if connection is mTLS ForwardClientCertDetails: http_conn.SANITIZE_SET, SetCurrentClientCertDetails: &http_conn.HttpConnectionManager_SetCurrentClientCertDetails{ Subject: &types.BoolValue{Value: true}, Uri: true, Dns: true, }, }, }, } httpListeners = append(httpListeners, o) } else { // Build a filter chain for each HTTPS server // We know that this is a HTTPS server because this function is called only for ports of type HTTP/HTTPS // where HTTPS server's TLS mode is not passthrough and not nil for _, server := range servers { o := &filterChainOpts{ // This works because we validate that only HTTPS servers can have same port but still different port names // and that no two non-HTTPS servers can be on same port or share port names. 
// Validation is done per gateway and also during merging sniHosts: getSNIHostsForServer(server), tlsContext: buildGatewayListenerTLSContext(server), httpOpts: &httpListenerOpts{ rds: model.GatewayRDSRouteName(server), useRemoteAddress: true, direction: http_conn.EGRESS, // viewed as from gateway to internal connectionManager: &http_conn.HttpConnectionManager{ // Forward client cert if connection is mTLS ForwardClientCertDetails: http_conn.SANITIZE_SET, SetCurrentClientCertDetails: &http_conn.HttpConnectionManager_SetCurrentClientCertDetails{ Subject: &types.BoolValue{Value: true}, Uri: true, Dns: true, }, }, }, } httpListeners = append(httpListeners, o) } } return httpListeners } func buildGatewayListenerTLSContext(server *networking.Server) *auth.DownstreamTlsContext { // Server.TLS cannot be nil or passthrough. But as a safety guard, return nil if server.Tls == nil || server.Tls.Mode == networking.Server_TLSOptions_PASSTHROUGH { return nil // We don't need to setup TLS context for passthrough mode } var certValidationContext *auth.CertificateValidationContext var trustedCa *core.DataSource if len(server.Tls.CaCertificates) != 0 { trustedCa = &core.DataSource{ Specifier: &core.DataSource_Filename{ Filename: server.Tls.CaCertificates, }, } } if trustedCa != nil || len(server.Tls.SubjectAltNames) > 0 { certValidationContext = &auth.CertificateValidationContext{ TrustedCa: trustedCa, VerifySubjectAltName: server.Tls.SubjectAltNames, } } requireClientCert := server.Tls.Mode == networking.Server_TLSOptions_MUTUAL // Set TLS parameters if they are non-default var tlsParams *auth.TlsParameters if len(server.Tls.CipherSuites) > 0 || server.Tls.MinProtocolVersion != networking.Server_TLSOptions_TLS_AUTO || server.Tls.MaxProtocolVersion != networking.Server_TLSOptions_TLS_AUTO { tlsParams = &auth.TlsParameters{ TlsMinimumProtocolVersion: convertTLSProtocol(server.Tls.MinProtocolVersion), TlsMaximumProtocolVersion: convertTLSProtocol(server.Tls.MaxProtocolVersion), 
CipherSuites: server.Tls.CipherSuites, } } return &auth.DownstreamTlsContext{ CommonTlsContext: &auth.CommonTlsContext{ TlsCertificates: []*auth.TlsCertificate{ { CertificateChain: &core.DataSource{ Specifier: &core.DataSource_Filename{ Filename: server.Tls.ServerCertificate, }, }, PrivateKey: &core.DataSource{ Specifier: &core.DataSource_Filename{ Filename: server.Tls.PrivateKey, }, }, }, }, ValidationContextType: &auth.CommonTlsContext_ValidationContext{ ValidationContext: certValidationContext, }, AlpnProtocols: ListenersALPNProtocols, TlsParams: tlsParams, }, RequireClientCertificate: &types.BoolValue{ Value: requireClientCert, }, } } func
(in networking.Server_TLSOptions_TLSProtocol) auth.TlsParameters_TlsProtocol { out := auth.TlsParameters_TlsProtocol(in) // There should be a one-to-one enum mapping if out < auth.TlsParameters_TLS_AUTO || out > auth.TlsParameters_TLSv1_3 { log.Warnf("was not able to map TLS protocol to Envoy TLS protocol") return auth.TlsParameters_TLS_AUTO } return out } func (configgen *ConfigGeneratorImpl) createGatewayTCPFilterChainOpts( node *model.Proxy, env *model.Environment, push *model.PushContext, servers []*networking.Server, gatewaysForWorkload map[string]bool) []*filterChainOpts { opts := make([]*filterChainOpts, 0, len(servers)) for _, server := range servers { // We have a TCP/TLS server. This could be TLS termination (user specifies server.TLS with simple/mutual) // or opaque TCP (server.TLS is nil). or it could be a TLS passthrough with SNI based routing. // Handle the TLS termination or opaque TCP first. // This is opaque TCP server. Find matching virtual services with TCP blocks and forward if server.Tls == nil { if filters := buildGatewayNetworkFiltersFromTCPRoutes(node, env, push, server, gatewaysForWorkload); len(filters) > 0 { opts = append(opts, &filterChainOpts{ sniHosts: nil, tlsContext: nil, networkFilters: filters, }) } } else if server.Tls.Mode != networking.Server_TLSOptions_PASSTHROUGH { // TCP with TLS termination and forwarding. Setup TLS context to terminate, find matching services with TCP blocks // and forward to backend // Validation ensures that non-passthrough servers will have certs if filters := buildGatewayNetworkFiltersFromTCPRoutes(node, env, push, server, gatewaysForWorkload); len(filters) > 0 { opts = append(opts, &filterChainOpts{ sniHosts: getSNIHostsForServer(server), tlsContext: buildGatewayListenerTLSContext(server), networkFilters: filters, }) } } else { // Passthrough server. opts = append(opts, buildGatewayNetworkFiltersFromTLSRoutes(node, env, push, server, gatewaysForWorkload)...) 
} } return opts } // buildGatewayNetworkFiltersFromTCPRoutes builds tcp proxy routes for all VirtualServices with TCP blocks. // It first obtains all virtual services bound to the set of Gateways for this workload, filters them by this // server's port and hostnames, and produces network filters for each destination from the filtered services. func buildGatewayNetworkFiltersFromTCPRoutes(node *model.Proxy, env *model.Environment, push *model.PushContext, server *networking.Server, gatewaysForWorkload map[string]bool) []listener.Filter { port := &model.Port{ Name: server.Port.Name, Port: int(server.Port.Number), Protocol: model.ParseProtocol(server.Port.Protocol), } gatewayServerHosts := make(map[model.Hostname]bool, len(server.Hosts)) for _, host := range server.Hosts { gatewayServerHosts[model.Hostname(host)] = true } virtualServices := push.VirtualServices(gatewaysForWorkload) for _, spec := range virtualServices { vsvc := spec.Spec.(*networking.VirtualService) matchingHosts := pickMatchingGatewayHosts(gatewayServerHosts, vsvc.Hosts) if len(matchingHosts) == 0 { // the VirtualService's hosts don't include hosts advertised by server continue } // ensure we satisfy the rule's l4 match conditions, if any exist // For the moment, there can be only one match that succeeds // based on the match port/server port and the gateway name for _, tcp := range vsvc.Tcp { if l4MultiMatch(tcp.Match, server, gatewaysForWorkload) { return buildOutboundNetworkFilters(env, node, tcp.Route, push, port, spec.ConfigMeta) } } } return nil } // buildGatewayNetworkFiltersFromTLSRoutes builds tcp proxy routes for all VirtualServices with TLS blocks. 
// It first obtains all virtual services bound to the set of Gateways for this workload, filters them by this // server's port and hostnames, and produces network filters for each destination from the filtered services func buildGatewayNetworkFiltersFromTLSRoutes(node *model.Proxy, env *model.Environment, push *model.PushContext, server *networking.Server, gatewaysForWorkload map[string]bool) []*filterChainOpts { port := &model.Port{ Name: server.Port.Name, Port: int(server.Port.Number), Protocol: model.ParseProtocol(server.Port.Protocol), } gatewayServerHosts := make(map[model.Hostname]bool, len(server.Hosts)) for _, host := range server.Hosts { gatewayServerHosts[model.Hostname(host)] = true } virtualServices := push.VirtualServices(gatewaysForWorkload) filterChains := make([]*filterChainOpts, 0) for _, spec := range virtualServices { vsvc := spec.Spec.(*networking.VirtualService) matchingHosts := pickMatchingGatewayHosts(gatewayServerHosts, vsvc.Hosts) if len(matchingHosts) == 0 { // the VirtualService's hosts don't include hosts advertised by server continue } // For every matching TLS block, generate a filter chain with sni match for _, tls := range vsvc.Tls { for _, match := range tls.Match { if l4SingleMatch(convertTLSMatchToL4Match(match), server, gatewaysForWorkload) { // the sni hosts in the match will become part of a filter chain match filterChains = append(filterChains, &filterChainOpts{ sniHosts: match.SniHosts, tlsContext: nil, // NO TLS context because this is passthrough networkFilters: buildOutboundNetworkFilters(env, node, tls.Route, push, port, spec.ConfigMeta), }) } } } } return filterChains } func pickMatchingGatewayHosts(gatewayServerHosts map[model.Hostname]bool, virtualServiceHosts []string) map[string]model.Hostname { matchingHosts := make(map[string]model.Hostname, 0) for _, vsvcHost := range virtualServiceHosts { for gatewayHost := range gatewayServerHosts { if gatewayHost.Matches(model.Hostname(vsvcHost)) { matchingHosts[vsvcHost] = 
gatewayHost } } } return matchingHosts } func convertTLSMatchToL4Match(tlsMatch *networking.TLSMatchAttributes) *networking.L4MatchAttributes { return &networking.L4MatchAttributes{ DestinationSubnets: tlsMatch.DestinationSubnets, Port: tlsMatch.Port, SourceSubnet: tlsMatch.SourceSubnet, SourceLabels: tlsMatch.SourceLabels, Gateways: tlsMatch.Gateways, } } func l4MultiMatch(predicates []*networking.L4MatchAttributes, server *networking.Server, gatewaysForWorkload map[string]bool) bool { // NB from proto definitions: each set of predicates is OR'd together; inside of a predicate all conditions are AND'd. // This means we can return as soon as we get any match of an entire predicate. for _, match := range predicates { if l4SingleMatch(match, server, gatewaysForWorkload) { return true } } // If we had no predicates we match; otherwise we don't match since we'd have exited at the first match. return len(predicates) == 0 } func l4SingleMatch(match *networking.L4MatchAttributes, server *networking.Server, gatewaysForWorkload map[string]bool) bool { // if there's no gateway predicate, gatewayMatch is true; otherwise we match against the gateways for this workload return isPortMatch(match.Port, server) && isGatewayMatch(gatewaysForWorkload, match.Gateways) } func isPortMatch(port uint32, server *networking.Server) bool { // if there's no port predicate, portMatch is true; otherwise we evaluate the port predicate against the server's port portMatch := port == 0 if port != 0 { portMatch = server.Port.Number == port } return portMatch } func isGatewayMatch(gatewaysForWorkload map[string]bool, gateways []string) bool { // if there's no gateway predicate, gatewayMatch is true; otherwise we match against the gateways for this workload gatewayMatch := len(gateways) == 0 if len(gateways) > 0 { for _, gateway := range gateways { gatewayMatch = gatewayMatch || gatewaysForWorkload[gateway] } } return gatewayMatch } func getSNIHostsForServer(server *networking.Server) []string { if 
server.Tls == nil { return nil } return server.Hosts }
convertTLSProtocol
identifier_name
mod.rs
// Copyright 2020. The Tari Project // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the // following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following // disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the // following disclaimer in the documentation and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote // products derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#![allow(dead_code, unused)] use std::{fs, path::PathBuf, str::FromStr, sync::Arc}; use log::*; use minotari_app_utilities::identity_management::setup_node_identity; use minotari_wallet::{ error::{WalletError, WalletStorageError}, output_manager_service::storage::database::OutputManagerDatabase, storage::{ database::{WalletBackend, WalletDatabase}, sqlite_utilities::initialize_sqlite_database_backends, }, wallet::{derive_comms_secret_key, read_or_create_master_seed}, Wallet, WalletConfig, WalletSqlite, }; use rpassword::prompt_password_stdout; use rustyline::Editor; use tari_common::{ configuration::{ bootstrap::{grpc_default_port, prompt, ApplicationType}, MultiaddrList, Network, }, exit_codes::{ExitCode, ExitError}, }; use tari_comms::{ multiaddr::Multiaddr, peer_manager::{Peer, PeerFeatures}, types::CommsPublicKey, NodeIdentity, }; use tari_core::{consensus::ConsensusManager, transactions::CryptoFactories}; use tari_crypto::keys::PublicKey; use tari_key_manager::{cipher_seed::CipherSeed, mnemonic::MnemonicLanguage}; use tari_p2p::{peer_seeds::SeedPeer, TransportType}; use tari_shutdown::ShutdownSignal; use tari_utilities::{hex::Hex, ByteArray, SafePassword}; use zxcvbn::zxcvbn; use crate::{ cli::Cli, utils::db::{get_custom_base_node_peer_from_db, set_custom_base_node_peer_in_db}, wallet_modes::{PeerConfig, WalletMode}, ApplicationConfig, }; pub const LOG_TARGET: &str = "wallet::console_wallet::init"; const TARI_WALLET_PASSWORD: &str = "MINOTARI_WALLET_PASSWORD"; // Maxmimum number of times we prompt for confirmation of a new passphrase, to avoid driving the user insane with an // infinite loop const PASSPHRASE_SANITY_LIMIT: u8 = 3; #[derive(Clone, Copy)] pub enum WalletBoot { New, Existing, Recovery, } /// Get and confirm a passphrase from the user, with feedback /// This is intended to be used for new or changed passphrases /// /// You must provide the initial and confirmation prompts to pass to the user /// /// We do several things: /// - Prompt the user for a 
passphrase /// - Have the user confirm the passphrase /// - Score the passphrase /// - If the passphrase is weak (or empty), give feedback and ask the user what to do: /// - Proceed with the weak (or empty) passphrase /// - Choose a better passphrase /// - Cancel the operation /// /// If the passphrase and confirmation don't match, or if the user cancels, returns an error /// Otherwise, returns the passphrase as a `SafePassword` fn get_new_passphrase(prompt: &str, confirm: &str) -> Result<SafePassword, ExitError> { // We may need to prompt for a passphrase multiple times loop { // Prompt the user for a passphrase and confirm it, up to the defined limit // This ensures an unlucky user doesn't get stuck let mut tries = 0; let mut passphrase = SafePassword::from(""); // initial value for scope loop { passphrase = prompt_password(prompt)?; let confirmed = prompt_password(confirm)?; // If they match, continue the process if passphrase.reveal() == confirmed.reveal() { break; } // If they don't match, keep prompting until we hit the sanity limit tries += 1; if tries == PASSPHRASE_SANITY_LIMIT { return Err(ExitError::new(ExitCode::InputError, "Passphrases don't match!")); } println!("Passphrases don't match! 
Try again."); } // Score the passphrase and provide feedback let weak = display_password_feedback(&passphrase); // If the passphrase is weak, see if the user wishes to change it if weak { println!("Would you like to choose a different passphrase?"); println!(" y/Y: Yes, choose a different passphrase"); println!(" n/N: No, use this passphrase"); println!(" Enter anything else if you changed your mind and want to cancel"); let mut input = "".to_string(); std::io::stdin().read_line(&mut input); match input.trim().to_lowercase().as_str() { // Choose a different passphrase "y" => { continue; }, // Use this passphrase "n" => { return Ok(passphrase); }, // By default, we cancel to be safe _ => { return Err(ExitError::new( ExitCode::InputError, "Canceling with unchanged passphrase!", )); }, } } else { // The passphrase is fine, so return it return Ok(passphrase); } } } /// Get feedback, if available, for a weak passphrase fn get_password_feedback(passphrase: &SafePassword) -> Option<Vec<String>> { std::str::from_utf8(passphrase.reveal()) .ok() .and_then(|passphrase| zxcvbn(passphrase, &[]).ok()) .and_then(|scored| scored.feedback().to_owned()) .map(|feedback| feedback.suggestions().to_owned()) .map(|suggestion| suggestion.into_iter().map(|item| item.to_string()).collect()) } /// Display passphrase feedback to the user /// /// Returns `true` if and only if the passphrase is weak fn display_password_feedback(passphrase: &SafePassword) -> bool { if passphrase.reveal().is_empty() { // The passphrase is empty, which the scoring library doesn't handle println!(); println!("An empty password puts your wallet at risk against an attacker with access to this device."); println!("Use this only if you are sure that your device is safe from prying eyes!"); println!(); true } else if let Some(feedback) = get_password_feedback(passphrase) { // The scoring library provided feedback println!(); println!( "The password you chose is weak; a determined attacker with access to your device may 
be able to guess it." ); println!("You may want to consider changing it to a stronger one."); println!("Here are some suggestions:"); for suggestion in feedback { println!("- {}", suggestion); } println!(); true } else { // The Force is strong with this one false } } /// Gets the password provided by command line argument or environment variable if available. /// Otherwise prompts for the password to be typed in. pub fn get_or_prompt_password( arg_password: Option<SafePassword>, config_password: Option<SafePassword>, ) -> Result<SafePassword, ExitError> { if let Some(passphrase) = arg_password { return Ok(passphrase); } let env = std::env::var_os(TARI_WALLET_PASSWORD); if let Some(p) = env { let env_password = p .into_string() .map_err(|_| ExitError::new(ExitCode::IOError, "Failed to convert OsString into String"))?; return Ok(env_password.into()); } if let Some(passphrase) = config_password { return Ok(passphrase); } let password = prompt_password("Wallet password: ")?; Ok(password) } fn prompt_password(prompt: &str) -> Result<SafePassword, ExitError> { let password = prompt_password_stdout(prompt).map_err(|e| ExitError::new(ExitCode::IOError, e))?; Ok(SafePassword::from(password)) } /// Allows the user to change the password of the wallet. 
pub async fn change_password( config: &ApplicationConfig, existing: SafePassword, shutdown_signal: ShutdownSignal, non_interactive_mode: bool, ) -> Result<(), ExitError> { let mut wallet = init_wallet( config, existing.clone(), None, None, shutdown_signal, non_interactive_mode, ) .await?; // Get a new passphrase let new = get_new_passphrase("New wallet passphrase: ", "Confirm new passphrase: ")?; // Use the existing and new passphrases to attempt to change the wallet passphrase wallet.db.change_passphrase(&existing, &new).map_err(|e| match e { WalletStorageError::InvalidPassphrase => { ExitError::new(ExitCode::IncorrectOrEmptyPassword, "Your password was not changed.") }, _ => ExitError::new(ExitCode::DatabaseError, "Your password was not changed."), }) } /// Populates the PeerConfig struct from: /// 1. The custom peer in the wallet config if it exists /// 2. The custom peer in the wallet db if it exists /// 3. The detected local base node if any /// 4. The service peers defined in config they exist /// 5. 
The peer seeds defined in config pub async fn get_base_node_peer_config( config: &ApplicationConfig, wallet: &mut WalletSqlite, non_interactive_mode: bool, ) -> Result<PeerConfig, ExitError> { let mut use_custom_base_node_peer = false; let mut selected_base_node = match config.wallet.custom_base_node { Some(ref custom) => SeedPeer::from_str(custom) .map(|node| Some(Peer::from(node))) .map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed custom base node: {}", err)))?, None => { if let Some(custom_base_node_peer) = get_custom_base_node_peer_from_db(wallet) { use_custom_base_node_peer = true; Some(custom_base_node_peer) } else { None } }, }; // If the user has not explicitly set a base node in the config, we try detect one if !non_interactive_mode && config.wallet.custom_base_node.is_none() && !use_custom_base_node_peer { if let Some(detected_node) = detect_local_base_node(config.wallet.network).await { match selected_base_node { Some(ref base_node) if base_node.public_key == detected_node.public_key => { // Skip asking because it's already set }, Some(_) | None => { println!( "Local Base Node detected with public key {} and address {}", detected_node.public_key, detected_node .addresses .iter() .map(ToString::to_string) .collect::<Vec<_>>() .join(", ") ); if prompt( "Would you like to use this base node? 
IF YOU DID NOT START THIS BASE NODE YOU SHOULD SELECT \ NO (Y/n)", ) { let address = detected_node.addresses.first().ok_or_else(|| { ExitError::new(ExitCode::ConfigError, "No address found for detected base node") })?; set_custom_base_node_peer_in_db(wallet, &detected_node.public_key, address)?; selected_base_node = Some(detected_node.into()); } }, } } } // config let base_node_peers = config .wallet .base_node_service_peers .iter() .map(|s| SeedPeer::from_str(s)) .map(|r| r.map(Peer::from)) .collect::<Result<Vec<_>, _>>() .map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed base node peer: {}", err)))?; // peer seeds let peer_seeds = config .peer_seeds .peer_seeds .iter() .map(|s| SeedPeer::from_str(s)) .map(|r| r.map(Peer::from)) .collect::<Result<Vec<_>, _>>() .map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed seed peer: {}", err)))?; let peer_config = PeerConfig::new(selected_base_node, base_node_peers, peer_seeds); debug!(target: LOG_TARGET, "base node peer config: {:?}", peer_config); Ok(peer_config) } /// Determines which mode the wallet should run in. 
pub(crate) fn wallet_mode(cli: &Cli, boot_mode: WalletBoot) -> WalletMode { // Recovery mode if matches!(boot_mode, WalletBoot::Recovery) { if cli.non_interactive_mode { return WalletMode::RecoveryDaemon; } else { return WalletMode::RecoveryTui; } } match (cli.non_interactive_mode, cli.input_file.clone(), cli.command2.clone()) { // TUI mode (false, None, None) => WalletMode::Tui, // GRPC mode (true, None, None) => WalletMode::Grpc, // Script mode (_, Some(path), None) => WalletMode::Script(path), // Command mode (_, None, Some(command)) => WalletMode::Command(Box::new(command)), // WalletMode::Command(command), // Invalid combinations _ => WalletMode::Invalid, } } /// Set up the app environment and state for use by the UI #[allow(clippy::too_many_lines)] pub async fn init_wallet( config: &ApplicationConfig, arg_password: SafePassword, seed_words_file_name: Option<PathBuf>, recovery_seed: Option<CipherSeed>, shutdown_signal: ShutdownSignal, non_interactive_mode: bool, ) -> Result<WalletSqlite, ExitError> { fs::create_dir_all( config .wallet .db_file .parent() .expect("console_wallet_db_file cannot be set to a root directory"), ) .map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error creating Wallet folder. {}", e)))?; fs::create_dir_all(&config.wallet.p2p.datastore_path) .map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error creating peer db folder. 
{}", e)))?; debug!(target: LOG_TARGET, "Running Wallet database migrations"); let db_path = &config.wallet.db_file; // wallet should be encrypted from the beginning, so we must require a password to be provided by the user let (wallet_backend, transaction_backend, output_manager_backend, contacts_backend, key_manager_backend) = initialize_sqlite_database_backends(db_path, arg_password, config.wallet.db_connection_pool_size)?; let wallet_db = WalletDatabase::new(wallet_backend); let output_db = OutputManagerDatabase::new(output_manager_backend.clone()); debug!(target: LOG_TARGET, "Databases Initialized. Wallet is encrypted.",); let node_addresses = if config.wallet.p2p.public_addresses.is_empty() { match wallet_db.get_node_address()? { Some(addr) => MultiaddrList::from(vec![addr]), None => MultiaddrList::default(), } } else { config.wallet.p2p.public_addresses.clone() };
let master_seed = read_or_create_master_seed(recovery_seed.clone(), &wallet_db)?; let node_identity = match config.wallet.identity_file.as_ref() { Some(identity_file) => { warn!( target: LOG_TARGET, "Node identity overridden by file {}", identity_file.to_string_lossy() ); setup_node_identity( identity_file, node_addresses.to_vec(), true, PeerFeatures::COMMUNICATION_CLIENT, )? }, None => setup_identity_from_db(&wallet_db, &master_seed, node_addresses.to_vec())?, }; let mut wallet_config = config.wallet.clone(); if let TransportType::Tor = config.wallet.p2p.transport.transport_type { wallet_config.p2p.transport.tor.identity = wallet_db.get_tor_id()?; } let consensus_manager = ConsensusManager::builder(config.wallet.network) .build() .map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error consensus manager. {}", e)))?; let factories = CryptoFactories::default(); let mut wallet = Wallet::start( wallet_config, config.peer_seeds.clone(), config.auto_update.clone(), node_identity, consensus_manager, factories, wallet_db, output_db, transaction_backend, output_manager_backend, contacts_backend, key_manager_backend, shutdown_signal, master_seed, ) .await .map_err(|e| match e { WalletError::CommsInitializationError(cie) => cie.to_exit_error(), e => ExitError::new(ExitCode::WalletError, format!("Error creating Wallet Container: {}", e)), })?; if let Some(hs) = wallet.comms.hidden_service() { wallet .db .set_tor_identity(hs.tor_identity().clone()) .map_err(|e| ExitError::new(ExitCode::WalletError, format!("Problem writing tor identity. 
{}", e)))?; } if let Some(file_name) = seed_words_file_name { let seed_words = wallet.get_seed_words(&MnemonicLanguage::English)?.join(" "); let _result = fs::write(file_name, seed_words.reveal()).map_err(|e| { ExitError::new( ExitCode::WalletError, format!("Problem writing seed words to file: {}", e), ) }); }; Ok(wallet) } async fn detect_local_base_node(network: Network) -> Option<SeedPeer> { use minotari_app_grpc::tari_rpc::{base_node_client::BaseNodeClient, Empty}; let addr = format!( "http://127.0.0.1:{}", grpc_default_port(ApplicationType::BaseNode, network) ); debug!(target: LOG_TARGET, "Checking for local base node at {}", addr); let mut node_conn = match BaseNodeClient::connect(addr).await.ok() { Some(conn) => conn, None => { debug!(target: LOG_TARGET, "No local base node detected"); return None; }, }; let resp = node_conn.identify(Empty {}).await.ok()?; let identity = resp.get_ref(); let public_key = CommsPublicKey::from_bytes(&identity.public_key).ok()?; let addresses = identity .public_addresses .iter() .filter_map(|s| Multiaddr::from_str(s).ok()) .collect::<Vec<_>>(); debug!( target: LOG_TARGET, "Local base node found with pk={} and addresses={}", public_key.to_hex(), addresses.iter().map(|a| a.to_string()).collect::<Vec<_>>().join(",") ); Some(SeedPeer::new(public_key, addresses)) } fn setup_identity_from_db<D: WalletBackend + 'static>( wallet_db: &WalletDatabase<D>, master_seed: &CipherSeed, node_addresses: Vec<Multiaddr>, ) -> Result<Arc<NodeIdentity>, ExitError> { let node_features = wallet_db .get_node_features()? 
.unwrap_or(PeerFeatures::COMMUNICATION_CLIENT); let identity_sig = wallet_db.get_comms_identity_signature()?; let comms_secret_key = derive_comms_secret_key(master_seed)?; // This checks if anything has changed by validating the previous signature and if invalid, setting identity_sig // to None let identity_sig = identity_sig.filter(|sig| { let comms_public_key = CommsPublicKey::from_secret_key(&comms_secret_key); sig.is_valid(&comms_public_key, node_features, &node_addresses) }); // SAFETY: we are manually checking the validity of this signature before adding Some(..) let node_identity = Arc::new(NodeIdentity::with_signature_unchecked( comms_secret_key, node_addresses, node_features, identity_sig, )); if !node_identity.is_signed() { node_identity.sign(); // unreachable panic: signed above let sig = node_identity .identity_signature_read() .as_ref() .expect("unreachable panic") .clone(); wallet_db.set_comms_identity_signature(sig)?; } Ok(node_identity) } /// Starts the wallet by setting the base node peer, and restarting the transaction and broadcast protocols. pub async fn start_wallet( wallet: &mut WalletSqlite, base_node: &Peer, wallet_mode: &WalletMode, ) -> Result<(), ExitError> { debug!(target: LOG_TARGET, "Setting base node peer"); let net_address = base_node .addresses .best() .ok_or_else(|| ExitError::new(ExitCode::ConfigError, "Configured base node has no address!"))?; wallet .set_base_node_peer(base_node.public_key.clone(), net_address.address().clone()) .await .map_err(|e| { ExitError::new( ExitCode::WalletError, format!("Error setting wallet base node peer. 
{}", e), ) })?; // Restart transaction protocols if not running in script or command modes if !matches!(wallet_mode, WalletMode::Command(_)) && !matches!(wallet_mode, WalletMode::Script(_)) { // NOTE: https://github.com/tari-project/tari/issues/5227 debug!("revalidating all transactions"); if let Err(e) = wallet.transaction_service.revalidate_all_transactions().await { error!(target: LOG_TARGET, "Failed to revalidate all transactions: {}", e); } debug!("restarting transaction protocols"); if let Err(e) = wallet.transaction_service.restart_transaction_protocols().await { error!(target: LOG_TARGET, "Problem restarting transaction protocols: {}", e); } debug!("validating transactions"); if let Err(e) = wallet.transaction_service.validate_transactions().await { error!( target: LOG_TARGET, "Problem validating and restarting transaction protocols: {}", e ); } // validate transaction outputs validate_txos(wallet).await?; } Ok(()) } async fn validate_txos(wallet: &mut WalletSqlite) -> Result<(), ExitError> { debug!(target: LOG_TARGET, "Starting TXO validations."); wallet.output_manager_service.validate_txos().await.map_err(|e| { error!(target: LOG_TARGET, "Error validating Unspent TXOs: {}", e); ExitError::new(ExitCode::WalletError, e) })?; debug!(target: LOG_TARGET, "TXO validations started."); Ok(()) } pub(crate) fn confirm_seed_words(wallet: &mut WalletSqlite) -> Result<(), ExitError> { let seed_words = wallet.get_seed_words(&MnemonicLanguage::English)?; println!(); println!("========================="); println!(" IMPORTANT! "); println!("========================="); println!("These are your wallet seed words."); println!("They can be used to recover your wallet and funds."); println!("WRITE THEM DOWN OR COPY THEM NOW. THIS IS YOUR ONLY CHANCE TO DO SO."); println!(); println!("========================="); println!("{}", seed_words.join(" ").reveal()); println!("========================="); println!("\x07"); // beep! 
let mut rl = Editor::<()>::new(); loop { println!("I confirm that I will never see these seed words again."); println!(r#"Type the word "confirm" to continue."#); let readline = rl.readline(">> "); match readline { Ok(line) => match line.to_lowercase().as_ref() { "confirm" => return Ok(()), _ => continue, }, Err(e) => { return Err(ExitError::new(ExitCode::IOError, e)); }, } } } /// Clear the terminal and print the Tari splash pub fn tari_splash_screen(heading: &str) { // clear the terminal print!("{esc}[2J{esc}[1;1H", esc = 27 as char); println!("⠀⠀⠀⠀⠀⣠⣶⣿⣿⣿⣿⣶⣦⣀ "); println!("⠀⢀⣤⣾⣿⡿⠋⠀⠀⠀⠀⠉⠛⠿⣿⣿⣶⣤⣀⠀⠀⠀⠀⠀⠀⢰⣿⣾⣾⣾⣾⣾⣾⣾⣾⣾⣿⠀⠀⠀⣾⣾⣾⡀⠀⠀⠀⠀⢰⣾⣾⣾⣾⣿⣶⣶⡀⠀⠀⠀⢸⣾⣿⠀"); println!("⠀⣿⣿⣿⣿⣿⣶⣶⣤⣄⡀⠀⠀⠀⠀⠀⠉⠛⣿⣿⠀⠀⠀⠀⠀⠈⠉⠉⠉⠉⣿⣿⡏⠉⠉⠉⠉⠀⠀⣰⣿⣿⣿⣿⠀⠀⠀⠀⢸⣿⣿⠉⠉⠉⠛⣿⣿⡆⠀⠀⢸⣿⣿⠀"); println!("⠀⣿⣿⠀⠀⠀⠈⠙⣿⡿⠿⣿⣿⣿⣶⣶⣤⣤⣿⣿⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⡇⠀⠀⠀⠀⠀⢠⣿⣿⠃⣿⣿⣷⠀⠀⠀⢸⣿⣿⣀⣀⣀⣴⣿⣿⠃⠀⠀⢸⣿⣿⠀"); println!("⠀⣿⣿⣤⠀⠀⠀⢸⣿⡟⠀⠀⠀⠀⠀⠉⣽⣿⣿⠟⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⡇⠀⠀⠀⠀⠀⣿⣿⣿⣤⣬⣿⣿⣆⠀⠀⢸⣿⣿⣿⣿⣿⡿⠟⠉⠀⠀⠀⢸⣿⣿⠀"); println!("⠀⠀⠙⣿⣿⣤⠀⢸⣿⡟⠀⠀⠀⣠⣾⣿⡿⠋⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⡇⠀⠀⠀⠀⣾⣿⣿⠿⠿⠿⢿⣿⣿⡀⠀⢸⣿⣿⠙⣿⣿⣿⣄⠀⠀⠀⠀⢸⣿⣿⠀"); println!("⠀⠀⠀⠀⠙⣿⣿⣼⣿⡟⣀⣶⣿⡿⠋⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⡇⠀⠀⠀⣰⣿⣿⠃⠀⠀⠀⠀⣿⣿⣿⠀⢸⣿⣿⠀⠀⠙⣿⣿⣷⣄⠀⠀⢸⣿⣿⠀"); println!("⠀⠀⠀⠀⠀⠀⠙⣿⣿⣿⣿⠛⠀ "); println!("⠀⠀⠀⠀⠀⠀⠀⠀⠙⠁⠀ "); println!("{}", heading); println!(); } /// Prompts the user for a new wallet or to recover an existing wallet. /// Returns the wallet bootmode indicating if it's a new or existing wallet, or if recovery is required. fn boot(cli: &Cli, wallet_config: &WalletConfig) -> Result<WalletBoot, ExitError> { let wallet_exists = wallet_config.db_file.exists(); // forced recovery if cli.recovery { if wallet_exists { return Err(ExitError::new( ExitCode::RecoveryError, format!( "Wallet already exists at {:#?}. 
Remove it if you really want to run recovery in this directory!", wallet_config.db_file ), )); } return Ok(WalletBoot::Recovery); } if cli.seed_words.is_some() && !wallet_exists { return Ok(WalletBoot::Recovery); } if wallet_exists { // normal startup of existing wallet Ok(WalletBoot::Existing) } else { // automation/wallet created with --password if cli.password.is_some() || wallet_config.password.is_some() { return Ok(WalletBoot::New); } // In non-interactive mode, we never prompt. Otherwise, it's not very non-interactive, now is it? if cli.non_interactive_mode { let msg = "Wallet does not exist and no password was given to create one. Since we're in non-interactive \ mode, we need to quit here. Try setting the MINOTARI_WALLET__PASSWORD envar, or setting \ --password on the command line"; return Err(ExitError::new(ExitCode::WalletError, msg)); } // prompt for new or recovery let mut rl = Editor::<()>::new(); loop { println!("1. Create a new wallet."); println!("2. Recover wallet from seed words."); let readline = rl.readline(">> "); match readline { Ok(line) => { match line.as_ref() { "1" | "c" | "n" | "create" => { // new wallet return Ok(WalletBoot::New); }, "2" | "r" | "s" | "recover" => { // recover wallet return Ok(WalletBoot::Recovery); }, _ => continue, } }, Err(e) => { return Err(ExitError::new(ExitCode::IOError, e)); }, } } } } pub(crate) fn boot_with_password( cli: &Cli, wallet_config: &WalletConfig, ) -> Result<(WalletBoot, SafePassword), ExitError> { let boot_mode = boot(cli, wallet_config)?; if cli.password.is_some() { return Ok((boot_mode, cli.password.clone().unwrap())); } if wallet_config.password.is_some() { return Ok((boot_mode, wallet_config.password.clone().unwrap())); } let password = match boot_mode { WalletBoot::New => { // Get a new passphrase debug!(target: LOG_TARGET, "Prompting for passphrase."); get_new_passphrase("Create wallet passphrase: ", "Confirm wallet passphrase: ")? 
}, WalletBoot::Existing | WalletBoot::Recovery => { debug!(target: LOG_TARGET, "Prompting for passphrase."); prompt_password("Enter wallet passphrase: ")? }, }; Ok((boot_mode, password)) } #[cfg(test)] mod test { use tari_utilities::SafePassword; use super::get_password_feedback; #[test] fn weak_password() { let weak_password = SafePassword::from("weak"); assert!(get_password_feedback(&weak_password).is_some()); } #[test] fn strong_password() { let strong_password = SafePassword::from("This is a reasonably strong password!"); assert!(get_password_feedback(&strong_password).is_none()); } }
random_line_split
mod.rs
// Copyright 2020. The Tari Project // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the // following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following // disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the // following disclaimer in the documentation and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote // products derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#![allow(dead_code, unused)] use std::{fs, path::PathBuf, str::FromStr, sync::Arc}; use log::*; use minotari_app_utilities::identity_management::setup_node_identity; use minotari_wallet::{ error::{WalletError, WalletStorageError}, output_manager_service::storage::database::OutputManagerDatabase, storage::{ database::{WalletBackend, WalletDatabase}, sqlite_utilities::initialize_sqlite_database_backends, }, wallet::{derive_comms_secret_key, read_or_create_master_seed}, Wallet, WalletConfig, WalletSqlite, }; use rpassword::prompt_password_stdout; use rustyline::Editor; use tari_common::{ configuration::{ bootstrap::{grpc_default_port, prompt, ApplicationType}, MultiaddrList, Network, }, exit_codes::{ExitCode, ExitError}, }; use tari_comms::{ multiaddr::Multiaddr, peer_manager::{Peer, PeerFeatures}, types::CommsPublicKey, NodeIdentity, }; use tari_core::{consensus::ConsensusManager, transactions::CryptoFactories}; use tari_crypto::keys::PublicKey; use tari_key_manager::{cipher_seed::CipherSeed, mnemonic::MnemonicLanguage}; use tari_p2p::{peer_seeds::SeedPeer, TransportType}; use tari_shutdown::ShutdownSignal; use tari_utilities::{hex::Hex, ByteArray, SafePassword}; use zxcvbn::zxcvbn; use crate::{ cli::Cli, utils::db::{get_custom_base_node_peer_from_db, set_custom_base_node_peer_in_db}, wallet_modes::{PeerConfig, WalletMode}, ApplicationConfig, }; pub const LOG_TARGET: &str = "wallet::console_wallet::init"; const TARI_WALLET_PASSWORD: &str = "MINOTARI_WALLET_PASSWORD"; // Maxmimum number of times we prompt for confirmation of a new passphrase, to avoid driving the user insane with an // infinite loop const PASSPHRASE_SANITY_LIMIT: u8 = 3; #[derive(Clone, Copy)] pub enum WalletBoot { New, Existing, Recovery, } /// Get and confirm a passphrase from the user, with feedback /// This is intended to be used for new or changed passphrases /// /// You must provide the initial and confirmation prompts to pass to the user /// /// We do several things: /// - Prompt the user for a 
passphrase /// - Have the user confirm the passphrase /// - Score the passphrase /// - If the passphrase is weak (or empty), give feedback and ask the user what to do: /// - Proceed with the weak (or empty) passphrase /// - Choose a better passphrase /// - Cancel the operation /// /// If the passphrase and confirmation don't match, or if the user cancels, returns an error /// Otherwise, returns the passphrase as a `SafePassword` fn get_new_passphrase(prompt: &str, confirm: &str) -> Result<SafePassword, ExitError> { // We may need to prompt for a passphrase multiple times loop { // Prompt the user for a passphrase and confirm it, up to the defined limit // This ensures an unlucky user doesn't get stuck let mut tries = 0; let mut passphrase = SafePassword::from(""); // initial value for scope loop { passphrase = prompt_password(prompt)?; let confirmed = prompt_password(confirm)?; // If they match, continue the process if passphrase.reveal() == confirmed.reveal() { break; } // If they don't match, keep prompting until we hit the sanity limit tries += 1; if tries == PASSPHRASE_SANITY_LIMIT { return Err(ExitError::new(ExitCode::InputError, "Passphrases don't match!")); } println!("Passphrases don't match! 
Try again."); } // Score the passphrase and provide feedback let weak = display_password_feedback(&passphrase); // If the passphrase is weak, see if the user wishes to change it if weak { println!("Would you like to choose a different passphrase?"); println!(" y/Y: Yes, choose a different passphrase"); println!(" n/N: No, use this passphrase"); println!(" Enter anything else if you changed your mind and want to cancel"); let mut input = "".to_string(); std::io::stdin().read_line(&mut input); match input.trim().to_lowercase().as_str() { // Choose a different passphrase "y" => { continue; }, // Use this passphrase "n" => { return Ok(passphrase); }, // By default, we cancel to be safe _ => { return Err(ExitError::new( ExitCode::InputError, "Canceling with unchanged passphrase!", )); }, } } else { // The passphrase is fine, so return it return Ok(passphrase); } } } /// Get feedback, if available, for a weak passphrase fn get_password_feedback(passphrase: &SafePassword) -> Option<Vec<String>> { std::str::from_utf8(passphrase.reveal()) .ok() .and_then(|passphrase| zxcvbn(passphrase, &[]).ok()) .and_then(|scored| scored.feedback().to_owned()) .map(|feedback| feedback.suggestions().to_owned()) .map(|suggestion| suggestion.into_iter().map(|item| item.to_string()).collect()) } /// Display passphrase feedback to the user /// /// Returns `true` if and only if the passphrase is weak fn display_password_feedback(passphrase: &SafePassword) -> bool { if passphrase.reveal().is_empty() { // The passphrase is empty, which the scoring library doesn't handle println!(); println!("An empty password puts your wallet at risk against an attacker with access to this device."); println!("Use this only if you are sure that your device is safe from prying eyes!"); println!(); true } else if let Some(feedback) = get_password_feedback(passphrase) { // The scoring library provided feedback println!(); println!( "The password you chose is weak; a determined attacker with access to your device may 
be able to guess it." ); println!("You may want to consider changing it to a stronger one."); println!("Here are some suggestions:"); for suggestion in feedback { println!("- {}", suggestion); } println!(); true } else { // The Force is strong with this one false } } /// Gets the password provided by command line argument or environment variable if available. /// Otherwise prompts for the password to be typed in. pub fn get_or_prompt_password( arg_password: Option<SafePassword>, config_password: Option<SafePassword>, ) -> Result<SafePassword, ExitError> { if let Some(passphrase) = arg_password { return Ok(passphrase); } let env = std::env::var_os(TARI_WALLET_PASSWORD); if let Some(p) = env { let env_password = p .into_string() .map_err(|_| ExitError::new(ExitCode::IOError, "Failed to convert OsString into String"))?; return Ok(env_password.into()); } if let Some(passphrase) = config_password { return Ok(passphrase); } let password = prompt_password("Wallet password: ")?; Ok(password) } fn prompt_password(prompt: &str) -> Result<SafePassword, ExitError> { let password = prompt_password_stdout(prompt).map_err(|e| ExitError::new(ExitCode::IOError, e))?; Ok(SafePassword::from(password)) } /// Allows the user to change the password of the wallet. 
pub async fn change_password( config: &ApplicationConfig, existing: SafePassword, shutdown_signal: ShutdownSignal, non_interactive_mode: bool, ) -> Result<(), ExitError> { let mut wallet = init_wallet( config, existing.clone(), None, None, shutdown_signal, non_interactive_mode, ) .await?; // Get a new passphrase let new = get_new_passphrase("New wallet passphrase: ", "Confirm new passphrase: ")?; // Use the existing and new passphrases to attempt to change the wallet passphrase wallet.db.change_passphrase(&existing, &new).map_err(|e| match e { WalletStorageError::InvalidPassphrase => { ExitError::new(ExitCode::IncorrectOrEmptyPassword, "Your password was not changed.") }, _ => ExitError::new(ExitCode::DatabaseError, "Your password was not changed."), }) } /// Populates the PeerConfig struct from: /// 1. The custom peer in the wallet config if it exists /// 2. The custom peer in the wallet db if it exists /// 3. The detected local base node if any /// 4. The service peers defined in config they exist /// 5. 
The peer seeds defined in config pub async fn get_base_node_peer_config( config: &ApplicationConfig, wallet: &mut WalletSqlite, non_interactive_mode: bool, ) -> Result<PeerConfig, ExitError> { let mut use_custom_base_node_peer = false; let mut selected_base_node = match config.wallet.custom_base_node { Some(ref custom) => SeedPeer::from_str(custom) .map(|node| Some(Peer::from(node))) .map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed custom base node: {}", err)))?, None => { if let Some(custom_base_node_peer) = get_custom_base_node_peer_from_db(wallet) { use_custom_base_node_peer = true; Some(custom_base_node_peer) } else { None } }, }; // If the user has not explicitly set a base node in the config, we try detect one if !non_interactive_mode && config.wallet.custom_base_node.is_none() && !use_custom_base_node_peer { if let Some(detected_node) = detect_local_base_node(config.wallet.network).await { match selected_base_node { Some(ref base_node) if base_node.public_key == detected_node.public_key => { // Skip asking because it's already set }, Some(_) | None => { println!( "Local Base Node detected with public key {} and address {}", detected_node.public_key, detected_node .addresses .iter() .map(ToString::to_string) .collect::<Vec<_>>() .join(", ") ); if prompt( "Would you like to use this base node? 
IF YOU DID NOT START THIS BASE NODE YOU SHOULD SELECT \ NO (Y/n)", ) { let address = detected_node.addresses.first().ok_or_else(|| { ExitError::new(ExitCode::ConfigError, "No address found for detected base node") })?; set_custom_base_node_peer_in_db(wallet, &detected_node.public_key, address)?; selected_base_node = Some(detected_node.into()); } }, } } } // config let base_node_peers = config .wallet .base_node_service_peers .iter() .map(|s| SeedPeer::from_str(s)) .map(|r| r.map(Peer::from)) .collect::<Result<Vec<_>, _>>() .map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed base node peer: {}", err)))?; // peer seeds let peer_seeds = config .peer_seeds .peer_seeds .iter() .map(|s| SeedPeer::from_str(s)) .map(|r| r.map(Peer::from)) .collect::<Result<Vec<_>, _>>() .map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed seed peer: {}", err)))?; let peer_config = PeerConfig::new(selected_base_node, base_node_peers, peer_seeds); debug!(target: LOG_TARGET, "base node peer config: {:?}", peer_config); Ok(peer_config) } /// Determines which mode the wallet should run in. 
pub(crate) fn wallet_mode(cli: &Cli, boot_mode: WalletBoot) -> WalletMode { // Recovery mode if matches!(boot_mode, WalletBoot::Recovery) { if cli.non_interactive_mode { return WalletMode::RecoveryDaemon; } else { return WalletMode::RecoveryTui; } } match (cli.non_interactive_mode, cli.input_file.clone(), cli.command2.clone()) { // TUI mode (false, None, None) => WalletMode::Tui, // GRPC mode (true, None, None) => WalletMode::Grpc, // Script mode (_, Some(path), None) => WalletMode::Script(path), // Command mode (_, None, Some(command)) => WalletMode::Command(Box::new(command)), // WalletMode::Command(command), // Invalid combinations _ => WalletMode::Invalid, } } /// Set up the app environment and state for use by the UI #[allow(clippy::too_many_lines)] pub async fn init_wallet( config: &ApplicationConfig, arg_password: SafePassword, seed_words_file_name: Option<PathBuf>, recovery_seed: Option<CipherSeed>, shutdown_signal: ShutdownSignal, non_interactive_mode: bool, ) -> Result<WalletSqlite, ExitError> { fs::create_dir_all( config .wallet .db_file .parent() .expect("console_wallet_db_file cannot be set to a root directory"), ) .map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error creating Wallet folder. {}", e)))?; fs::create_dir_all(&config.wallet.p2p.datastore_path) .map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error creating peer db folder. 
{}", e)))?; debug!(target: LOG_TARGET, "Running Wallet database migrations"); let db_path = &config.wallet.db_file; // wallet should be encrypted from the beginning, so we must require a password to be provided by the user let (wallet_backend, transaction_backend, output_manager_backend, contacts_backend, key_manager_backend) = initialize_sqlite_database_backends(db_path, arg_password, config.wallet.db_connection_pool_size)?; let wallet_db = WalletDatabase::new(wallet_backend); let output_db = OutputManagerDatabase::new(output_manager_backend.clone()); debug!(target: LOG_TARGET, "Databases Initialized. Wallet is encrypted.",); let node_addresses = if config.wallet.p2p.public_addresses.is_empty() { match wallet_db.get_node_address()? { Some(addr) => MultiaddrList::from(vec![addr]), None => MultiaddrList::default(), } } else { config.wallet.p2p.public_addresses.clone() }; let master_seed = read_or_create_master_seed(recovery_seed.clone(), &wallet_db)?; let node_identity = match config.wallet.identity_file.as_ref() { Some(identity_file) => { warn!( target: LOG_TARGET, "Node identity overridden by file {}", identity_file.to_string_lossy() ); setup_node_identity( identity_file, node_addresses.to_vec(), true, PeerFeatures::COMMUNICATION_CLIENT, )? }, None => setup_identity_from_db(&wallet_db, &master_seed, node_addresses.to_vec())?, }; let mut wallet_config = config.wallet.clone(); if let TransportType::Tor = config.wallet.p2p.transport.transport_type { wallet_config.p2p.transport.tor.identity = wallet_db.get_tor_id()?; } let consensus_manager = ConsensusManager::builder(config.wallet.network) .build() .map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error consensus manager. 
{}", e)))?; let factories = CryptoFactories::default(); let mut wallet = Wallet::start( wallet_config, config.peer_seeds.clone(), config.auto_update.clone(), node_identity, consensus_manager, factories, wallet_db, output_db, transaction_backend, output_manager_backend, contacts_backend, key_manager_backend, shutdown_signal, master_seed, ) .await .map_err(|e| match e { WalletError::CommsInitializationError(cie) => cie.to_exit_error(), e => ExitError::new(ExitCode::WalletError, format!("Error creating Wallet Container: {}", e)), })?; if let Some(hs) = wallet.comms.hidden_service() { wallet .db .set_tor_identity(hs.tor_identity().clone()) .map_err(|e| ExitError::new(ExitCode::WalletError, format!("Problem writing tor identity. {}", e)))?; } if let Some(file_name) = seed_words_file_name { let seed_words = wallet.get_seed_words(&MnemonicLanguage::English)?.join(" "); let _result = fs::write(file_name, seed_words.reveal()).map_err(|e| { ExitError::new( ExitCode::WalletError, format!("Problem writing seed words to file: {}", e), ) }); }; Ok(wallet) } async fn detect_local_base_node(network: Network) -> Option<SeedPeer> { use minotari_app_grpc::tari_rpc::{base_node_client::BaseNodeClient, Empty}; let addr = format!( "http://127.0.0.1:{}", grpc_default_port(ApplicationType::BaseNode, network) ); debug!(target: LOG_TARGET, "Checking for local base node at {}", addr); let mut node_conn = match BaseNodeClient::connect(addr).await.ok() { Some(conn) => conn, None => { debug!(target: LOG_TARGET, "No local base node detected"); return None; }, }; let resp = node_conn.identify(Empty {}).await.ok()?; let identity = resp.get_ref(); let public_key = CommsPublicKey::from_bytes(&identity.public_key).ok()?; let addresses = identity .public_addresses .iter() .filter_map(|s| Multiaddr::from_str(s).ok()) .collect::<Vec<_>>(); debug!( target: LOG_TARGET, "Local base node found with pk={} and addresses={}", public_key.to_hex(), addresses.iter().map(|a| 
a.to_string()).collect::<Vec<_>>().join(",") ); Some(SeedPeer::new(public_key, addresses)) } fn setup_identity_from_db<D: WalletBackend + 'static>( wallet_db: &WalletDatabase<D>, master_seed: &CipherSeed, node_addresses: Vec<Multiaddr>, ) -> Result<Arc<NodeIdentity>, ExitError> { let node_features = wallet_db .get_node_features()? .unwrap_or(PeerFeatures::COMMUNICATION_CLIENT); let identity_sig = wallet_db.get_comms_identity_signature()?; let comms_secret_key = derive_comms_secret_key(master_seed)?; // This checks if anything has changed by validating the previous signature and if invalid, setting identity_sig // to None let identity_sig = identity_sig.filter(|sig| { let comms_public_key = CommsPublicKey::from_secret_key(&comms_secret_key); sig.is_valid(&comms_public_key, node_features, &node_addresses) }); // SAFETY: we are manually checking the validity of this signature before adding Some(..) let node_identity = Arc::new(NodeIdentity::with_signature_unchecked( comms_secret_key, node_addresses, node_features, identity_sig, )); if !node_identity.is_signed() { node_identity.sign(); // unreachable panic: signed above let sig = node_identity .identity_signature_read() .as_ref() .expect("unreachable panic") .clone(); wallet_db.set_comms_identity_signature(sig)?; } Ok(node_identity) } /// Starts the wallet by setting the base node peer, and restarting the transaction and broadcast protocols. pub async fn start_wallet( wallet: &mut WalletSqlite, base_node: &Peer, wallet_mode: &WalletMode, ) -> Result<(), ExitError> { debug!(target: LOG_TARGET, "Setting base node peer"); let net_address = base_node .addresses .best() .ok_or_else(|| ExitError::new(ExitCode::ConfigError, "Configured base node has no address!"))?; wallet .set_base_node_peer(base_node.public_key.clone(), net_address.address().clone()) .await .map_err(|e| { ExitError::new( ExitCode::WalletError, format!("Error setting wallet base node peer. 
{}", e), ) })?; // Restart transaction protocols if not running in script or command modes if !matches!(wallet_mode, WalletMode::Command(_)) && !matches!(wallet_mode, WalletMode::Script(_)) { // NOTE: https://github.com/tari-project/tari/issues/5227 debug!("revalidating all transactions"); if let Err(e) = wallet.transaction_service.revalidate_all_transactions().await { error!(target: LOG_TARGET, "Failed to revalidate all transactions: {}", e); } debug!("restarting transaction protocols"); if let Err(e) = wallet.transaction_service.restart_transaction_protocols().await { error!(target: LOG_TARGET, "Problem restarting transaction protocols: {}", e); } debug!("validating transactions"); if let Err(e) = wallet.transaction_service.validate_transactions().await { error!( target: LOG_TARGET, "Problem validating and restarting transaction protocols: {}", e ); } // validate transaction outputs validate_txos(wallet).await?; } Ok(()) } async fn validate_txos(wallet: &mut WalletSqlite) -> Result<(), ExitError> { debug!(target: LOG_TARGET, "Starting TXO validations."); wallet.output_manager_service.validate_txos().await.map_err(|e| { error!(target: LOG_TARGET, "Error validating Unspent TXOs: {}", e); ExitError::new(ExitCode::WalletError, e) })?; debug!(target: LOG_TARGET, "TXO validations started."); Ok(()) } pub(crate) fn confirm_seed_words(wallet: &mut WalletSqlite) -> Result<(), ExitError> { let seed_words = wallet.get_seed_words(&MnemonicLanguage::English)?; println!(); println!("========================="); println!(" IMPORTANT! "); println!("========================="); println!("These are your wallet seed words."); println!("They can be used to recover your wallet and funds."); println!("WRITE THEM DOWN OR COPY THEM NOW. THIS IS YOUR ONLY CHANCE TO DO SO."); println!(); println!("========================="); println!("{}", seed_words.join(" ").reveal()); println!("========================="); println!("\x07"); // beep! 
let mut rl = Editor::<()>::new(); loop { println!("I confirm that I will never see these seed words again."); println!(r#"Type the word "confirm" to continue."#); let readline = rl.readline(">> "); match readline { Ok(line) => match line.to_lowercase().as_ref() { "confirm" => return Ok(()), _ => continue, }, Err(e) => { return Err(ExitError::new(ExitCode::IOError, e)); }, } } } /// Clear the terminal and print the Tari splash pub fn
(heading: &str) { // clear the terminal print!("{esc}[2J{esc}[1;1H", esc = 27 as char); println!("⠀⠀⠀⠀⠀⣠⣶⣿⣿⣿⣿⣶⣦⣀ "); println!("⠀⢀⣤⣾⣿⡿⠋⠀⠀⠀⠀⠉⠛⠿⣿⣿⣶⣤⣀⠀⠀⠀⠀⠀⠀⢰⣿⣾⣾⣾⣾⣾⣾⣾⣾⣾⣿⠀⠀⠀⣾⣾⣾⡀⠀⠀⠀⠀⢰⣾⣾⣾⣾⣿⣶⣶⡀⠀⠀⠀⢸⣾⣿⠀"); println!("⠀⣿⣿⣿⣿⣿⣶⣶⣤⣄⡀⠀⠀⠀⠀⠀⠉⠛⣿⣿⠀⠀⠀⠀⠀⠈⠉⠉⠉⠉⣿⣿⡏⠉⠉⠉⠉⠀⠀⣰⣿⣿⣿⣿⠀⠀⠀⠀⢸⣿⣿⠉⠉⠉⠛⣿⣿⡆⠀⠀⢸⣿⣿⠀"); println!("⠀⣿⣿⠀⠀⠀⠈⠙⣿⡿⠿⣿⣿⣿⣶⣶⣤⣤⣿⣿⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⡇⠀⠀⠀⠀⠀⢠⣿⣿⠃⣿⣿⣷⠀⠀⠀⢸⣿⣿⣀⣀⣀⣴⣿⣿⠃⠀⠀⢸⣿⣿⠀"); println!("⠀⣿⣿⣤⠀⠀⠀⢸⣿⡟⠀⠀⠀⠀⠀⠉⣽⣿⣿⠟⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⡇⠀⠀⠀⠀⠀⣿⣿⣿⣤⣬⣿⣿⣆⠀⠀⢸⣿⣿⣿⣿⣿⡿⠟⠉⠀⠀⠀⢸⣿⣿⠀"); println!("⠀⠀⠙⣿⣿⣤⠀⢸⣿⡟⠀⠀⠀⣠⣾⣿⡿⠋⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⡇⠀⠀⠀⠀⣾⣿⣿⠿⠿⠿⢿⣿⣿⡀⠀⢸⣿⣿⠙⣿⣿⣿⣄⠀⠀⠀⠀⢸⣿⣿⠀"); println!("⠀⠀⠀⠀⠙⣿⣿⣼⣿⡟⣀⣶⣿⡿⠋⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⡇⠀⠀⠀⣰⣿⣿⠃⠀⠀⠀⠀⣿⣿⣿⠀⢸⣿⣿⠀⠀⠙⣿⣿⣷⣄⠀⠀⢸⣿⣿⠀"); println!("⠀⠀⠀⠀⠀⠀⠙⣿⣿⣿⣿⠛⠀ "); println!("⠀⠀⠀⠀⠀⠀⠀⠀⠙⠁⠀ "); println!("{}", heading); println!(); } /// Prompts the user for a new wallet or to recover an existing wallet. /// Returns the wallet bootmode indicating if it's a new or existing wallet, or if recovery is required. fn boot(cli: &Cli, wallet_config: &WalletConfig) -> Result<WalletBoot, ExitError> { let wallet_exists = wallet_config.db_file.exists(); // forced recovery if cli.recovery { if wallet_exists { return Err(ExitError::new( ExitCode::RecoveryError, format!( "Wallet already exists at {:#?}. Remove it if you really want to run recovery in this directory!", wallet_config.db_file ), )); } return Ok(WalletBoot::Recovery); } if cli.seed_words.is_some() && !wallet_exists { return Ok(WalletBoot::Recovery); } if wallet_exists { // normal startup of existing wallet Ok(WalletBoot::Existing) } else { // automation/wallet created with --password if cli.password.is_some() || wallet_config.password.is_some() { return Ok(WalletBoot::New); } // In non-interactive mode, we never prompt. Otherwise, it's not very non-interactive, now is it? if cli.non_interactive_mode { let msg = "Wallet does not exist and no password was given to create one. Since we're in non-interactive \ mode, we need to quit here. 
Try setting the MINOTARI_WALLET__PASSWORD envar, or setting \ --password on the command line"; return Err(ExitError::new(ExitCode::WalletError, msg)); } // prompt for new or recovery let mut rl = Editor::<()>::new(); loop { println!("1. Create a new wallet."); println!("2. Recover wallet from seed words."); let readline = rl.readline(">> "); match readline { Ok(line) => { match line.as_ref() { "1" | "c" | "n" | "create" => { // new wallet return Ok(WalletBoot::New); }, "2" | "r" | "s" | "recover" => { // recover wallet return Ok(WalletBoot::Recovery); }, _ => continue, } }, Err(e) => { return Err(ExitError::new(ExitCode::IOError, e)); }, } } } } pub(crate) fn boot_with_password( cli: &Cli, wallet_config: &WalletConfig, ) -> Result<(WalletBoot, SafePassword), ExitError> { let boot_mode = boot(cli, wallet_config)?; if cli.password.is_some() { return Ok((boot_mode, cli.password.clone().unwrap())); } if wallet_config.password.is_some() { return Ok((boot_mode, wallet_config.password.clone().unwrap())); } let password = match boot_mode { WalletBoot::New => { // Get a new passphrase debug!(target: LOG_TARGET, "Prompting for passphrase."); get_new_passphrase("Create wallet passphrase: ", "Confirm wallet passphrase: ")? }, WalletBoot::Existing | WalletBoot::Recovery => { debug!(target: LOG_TARGET, "Prompting for passphrase."); prompt_password("Enter wallet passphrase: ")? }, }; Ok((boot_mode, password)) } #[cfg(test)] mod test { use tari_utilities::SafePassword; use super::get_password_feedback; #[test] fn weak_password() { let weak_password = SafePassword::from("weak"); assert!(get_password_feedback(&weak_password).is_some()); } #[test] fn strong_password() { let strong_password = SafePassword::from("This is a reasonably strong password!"); assert!(get_password_feedback(&strong_password).is_none()); } }
tari_splash_screen
identifier_name
mod.rs
// Copyright 2020. The Tari Project // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the // following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following // disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the // following disclaimer in the documentation and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote // products derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#![allow(dead_code, unused)] use std::{fs, path::PathBuf, str::FromStr, sync::Arc}; use log::*; use minotari_app_utilities::identity_management::setup_node_identity; use minotari_wallet::{ error::{WalletError, WalletStorageError}, output_manager_service::storage::database::OutputManagerDatabase, storage::{ database::{WalletBackend, WalletDatabase}, sqlite_utilities::initialize_sqlite_database_backends, }, wallet::{derive_comms_secret_key, read_or_create_master_seed}, Wallet, WalletConfig, WalletSqlite, }; use rpassword::prompt_password_stdout; use rustyline::Editor; use tari_common::{ configuration::{ bootstrap::{grpc_default_port, prompt, ApplicationType}, MultiaddrList, Network, }, exit_codes::{ExitCode, ExitError}, }; use tari_comms::{ multiaddr::Multiaddr, peer_manager::{Peer, PeerFeatures}, types::CommsPublicKey, NodeIdentity, }; use tari_core::{consensus::ConsensusManager, transactions::CryptoFactories}; use tari_crypto::keys::PublicKey; use tari_key_manager::{cipher_seed::CipherSeed, mnemonic::MnemonicLanguage}; use tari_p2p::{peer_seeds::SeedPeer, TransportType}; use tari_shutdown::ShutdownSignal; use tari_utilities::{hex::Hex, ByteArray, SafePassword}; use zxcvbn::zxcvbn; use crate::{ cli::Cli, utils::db::{get_custom_base_node_peer_from_db, set_custom_base_node_peer_in_db}, wallet_modes::{PeerConfig, WalletMode}, ApplicationConfig, }; pub const LOG_TARGET: &str = "wallet::console_wallet::init"; const TARI_WALLET_PASSWORD: &str = "MINOTARI_WALLET_PASSWORD"; // Maxmimum number of times we prompt for confirmation of a new passphrase, to avoid driving the user insane with an // infinite loop const PASSPHRASE_SANITY_LIMIT: u8 = 3; #[derive(Clone, Copy)] pub enum WalletBoot { New, Existing, Recovery, } /// Get and confirm a passphrase from the user, with feedback /// This is intended to be used for new or changed passphrases /// /// You must provide the initial and confirmation prompts to pass to the user /// /// We do several things: /// - Prompt the user for a 
passphrase /// - Have the user confirm the passphrase /// - Score the passphrase /// - If the passphrase is weak (or empty), give feedback and ask the user what to do: /// - Proceed with the weak (or empty) passphrase /// - Choose a better passphrase /// - Cancel the operation /// /// If the passphrase and confirmation don't match, or if the user cancels, returns an error /// Otherwise, returns the passphrase as a `SafePassword` fn get_new_passphrase(prompt: &str, confirm: &str) -> Result<SafePassword, ExitError> { // We may need to prompt for a passphrase multiple times loop { // Prompt the user for a passphrase and confirm it, up to the defined limit // This ensures an unlucky user doesn't get stuck let mut tries = 0; let mut passphrase = SafePassword::from(""); // initial value for scope loop { passphrase = prompt_password(prompt)?; let confirmed = prompt_password(confirm)?; // If they match, continue the process if passphrase.reveal() == confirmed.reveal() { break; } // If they don't match, keep prompting until we hit the sanity limit tries += 1; if tries == PASSPHRASE_SANITY_LIMIT { return Err(ExitError::new(ExitCode::InputError, "Passphrases don't match!")); } println!("Passphrases don't match! 
Try again."); } // Score the passphrase and provide feedback let weak = display_password_feedback(&passphrase); // If the passphrase is weak, see if the user wishes to change it if weak { println!("Would you like to choose a different passphrase?"); println!(" y/Y: Yes, choose a different passphrase"); println!(" n/N: No, use this passphrase"); println!(" Enter anything else if you changed your mind and want to cancel"); let mut input = "".to_string(); std::io::stdin().read_line(&mut input); match input.trim().to_lowercase().as_str() { // Choose a different passphrase "y" => { continue; }, // Use this passphrase "n" => { return Ok(passphrase); }, // By default, we cancel to be safe _ => { return Err(ExitError::new( ExitCode::InputError, "Canceling with unchanged passphrase!", )); }, } } else { // The passphrase is fine, so return it return Ok(passphrase); } } } /// Get feedback, if available, for a weak passphrase fn get_password_feedback(passphrase: &SafePassword) -> Option<Vec<String>> { std::str::from_utf8(passphrase.reveal()) .ok() .and_then(|passphrase| zxcvbn(passphrase, &[]).ok()) .and_then(|scored| scored.feedback().to_owned()) .map(|feedback| feedback.suggestions().to_owned()) .map(|suggestion| suggestion.into_iter().map(|item| item.to_string()).collect()) } /// Display passphrase feedback to the user /// /// Returns `true` if and only if the passphrase is weak fn display_password_feedback(passphrase: &SafePassword) -> bool { if passphrase.reveal().is_empty() { // The passphrase is empty, which the scoring library doesn't handle println!(); println!("An empty password puts your wallet at risk against an attacker with access to this device."); println!("Use this only if you are sure that your device is safe from prying eyes!"); println!(); true } else if let Some(feedback) = get_password_feedback(passphrase) { // The scoring library provided feedback println!(); println!( "The password you chose is weak; a determined attacker with access to your device may 
be able to guess it." ); println!("You may want to consider changing it to a stronger one."); println!("Here are some suggestions:"); for suggestion in feedback { println!("- {}", suggestion); } println!(); true } else { // The Force is strong with this one false } } /// Gets the password provided by command line argument or environment variable if available. /// Otherwise prompts for the password to be typed in. pub fn get_or_prompt_password( arg_password: Option<SafePassword>, config_password: Option<SafePassword>, ) -> Result<SafePassword, ExitError> { if let Some(passphrase) = arg_password { return Ok(passphrase); } let env = std::env::var_os(TARI_WALLET_PASSWORD); if let Some(p) = env { let env_password = p .into_string() .map_err(|_| ExitError::new(ExitCode::IOError, "Failed to convert OsString into String"))?; return Ok(env_password.into()); } if let Some(passphrase) = config_password { return Ok(passphrase); } let password = prompt_password("Wallet password: ")?; Ok(password) } fn prompt_password(prompt: &str) -> Result<SafePassword, ExitError> { let password = prompt_password_stdout(prompt).map_err(|e| ExitError::new(ExitCode::IOError, e))?; Ok(SafePassword::from(password)) } /// Allows the user to change the password of the wallet. 
pub async fn change_password( config: &ApplicationConfig, existing: SafePassword, shutdown_signal: ShutdownSignal, non_interactive_mode: bool, ) -> Result<(), ExitError> { let mut wallet = init_wallet( config, existing.clone(), None, None, shutdown_signal, non_interactive_mode, ) .await?; // Get a new passphrase let new = get_new_passphrase("New wallet passphrase: ", "Confirm new passphrase: ")?; // Use the existing and new passphrases to attempt to change the wallet passphrase wallet.db.change_passphrase(&existing, &new).map_err(|e| match e { WalletStorageError::InvalidPassphrase => { ExitError::new(ExitCode::IncorrectOrEmptyPassword, "Your password was not changed.") }, _ => ExitError::new(ExitCode::DatabaseError, "Your password was not changed."), }) } /// Populates the PeerConfig struct from: /// 1. The custom peer in the wallet config if it exists /// 2. The custom peer in the wallet db if it exists /// 3. The detected local base node if any /// 4. The service peers defined in config they exist /// 5. 
The peer seeds defined in config pub async fn get_base_node_peer_config( config: &ApplicationConfig, wallet: &mut WalletSqlite, non_interactive_mode: bool, ) -> Result<PeerConfig, ExitError> { let mut use_custom_base_node_peer = false; let mut selected_base_node = match config.wallet.custom_base_node { Some(ref custom) => SeedPeer::from_str(custom) .map(|node| Some(Peer::from(node))) .map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed custom base node: {}", err)))?, None => { if let Some(custom_base_node_peer) = get_custom_base_node_peer_from_db(wallet) { use_custom_base_node_peer = true; Some(custom_base_node_peer) } else { None } }, }; // If the user has not explicitly set a base node in the config, we try detect one if !non_interactive_mode && config.wallet.custom_base_node.is_none() && !use_custom_base_node_peer { if let Some(detected_node) = detect_local_base_node(config.wallet.network).await { match selected_base_node { Some(ref base_node) if base_node.public_key == detected_node.public_key => { // Skip asking because it's already set }, Some(_) | None => { println!( "Local Base Node detected with public key {} and address {}", detected_node.public_key, detected_node .addresses .iter() .map(ToString::to_string) .collect::<Vec<_>>() .join(", ") ); if prompt( "Would you like to use this base node? 
IF YOU DID NOT START THIS BASE NODE YOU SHOULD SELECT \ NO (Y/n)", ) { let address = detected_node.addresses.first().ok_or_else(|| { ExitError::new(ExitCode::ConfigError, "No address found for detected base node") })?; set_custom_base_node_peer_in_db(wallet, &detected_node.public_key, address)?; selected_base_node = Some(detected_node.into()); } }, } } } // config let base_node_peers = config .wallet .base_node_service_peers .iter() .map(|s| SeedPeer::from_str(s)) .map(|r| r.map(Peer::from)) .collect::<Result<Vec<_>, _>>() .map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed base node peer: {}", err)))?; // peer seeds let peer_seeds = config .peer_seeds .peer_seeds .iter() .map(|s| SeedPeer::from_str(s)) .map(|r| r.map(Peer::from)) .collect::<Result<Vec<_>, _>>() .map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed seed peer: {}", err)))?; let peer_config = PeerConfig::new(selected_base_node, base_node_peers, peer_seeds); debug!(target: LOG_TARGET, "base node peer config: {:?}", peer_config); Ok(peer_config) } /// Determines which mode the wallet should run in. 
pub(crate) fn wallet_mode(cli: &Cli, boot_mode: WalletBoot) -> WalletMode { // Recovery mode if matches!(boot_mode, WalletBoot::Recovery) { if cli.non_interactive_mode { return WalletMode::RecoveryDaemon; } else { return WalletMode::RecoveryTui; } } match (cli.non_interactive_mode, cli.input_file.clone(), cli.command2.clone()) { // TUI mode (false, None, None) => WalletMode::Tui, // GRPC mode (true, None, None) => WalletMode::Grpc, // Script mode (_, Some(path), None) => WalletMode::Script(path), // Command mode (_, None, Some(command)) => WalletMode::Command(Box::new(command)), // WalletMode::Command(command), // Invalid combinations _ => WalletMode::Invalid, } } /// Set up the app environment and state for use by the UI #[allow(clippy::too_many_lines)] pub async fn init_wallet( config: &ApplicationConfig, arg_password: SafePassword, seed_words_file_name: Option<PathBuf>, recovery_seed: Option<CipherSeed>, shutdown_signal: ShutdownSignal, non_interactive_mode: bool, ) -> Result<WalletSqlite, ExitError> { fs::create_dir_all( config .wallet .db_file .parent() .expect("console_wallet_db_file cannot be set to a root directory"), ) .map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error creating Wallet folder. {}", e)))?; fs::create_dir_all(&config.wallet.p2p.datastore_path) .map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error creating peer db folder. 
{}", e)))?; debug!(target: LOG_TARGET, "Running Wallet database migrations"); let db_path = &config.wallet.db_file; // wallet should be encrypted from the beginning, so we must require a password to be provided by the user let (wallet_backend, transaction_backend, output_manager_backend, contacts_backend, key_manager_backend) = initialize_sqlite_database_backends(db_path, arg_password, config.wallet.db_connection_pool_size)?; let wallet_db = WalletDatabase::new(wallet_backend); let output_db = OutputManagerDatabase::new(output_manager_backend.clone()); debug!(target: LOG_TARGET, "Databases Initialized. Wallet is encrypted.",); let node_addresses = if config.wallet.p2p.public_addresses.is_empty() { match wallet_db.get_node_address()? { Some(addr) => MultiaddrList::from(vec![addr]), None => MultiaddrList::default(), } } else { config.wallet.p2p.public_addresses.clone() }; let master_seed = read_or_create_master_seed(recovery_seed.clone(), &wallet_db)?; let node_identity = match config.wallet.identity_file.as_ref() { Some(identity_file) => { warn!( target: LOG_TARGET, "Node identity overridden by file {}", identity_file.to_string_lossy() ); setup_node_identity( identity_file, node_addresses.to_vec(), true, PeerFeatures::COMMUNICATION_CLIENT, )? }, None => setup_identity_from_db(&wallet_db, &master_seed, node_addresses.to_vec())?, }; let mut wallet_config = config.wallet.clone(); if let TransportType::Tor = config.wallet.p2p.transport.transport_type { wallet_config.p2p.transport.tor.identity = wallet_db.get_tor_id()?; } let consensus_manager = ConsensusManager::builder(config.wallet.network) .build() .map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error consensus manager. 
{}", e)))?; let factories = CryptoFactories::default(); let mut wallet = Wallet::start( wallet_config, config.peer_seeds.clone(), config.auto_update.clone(), node_identity, consensus_manager, factories, wallet_db, output_db, transaction_backend, output_manager_backend, contacts_backend, key_manager_backend, shutdown_signal, master_seed, ) .await .map_err(|e| match e { WalletError::CommsInitializationError(cie) => cie.to_exit_error(), e => ExitError::new(ExitCode::WalletError, format!("Error creating Wallet Container: {}", e)), })?; if let Some(hs) = wallet.comms.hidden_service() { wallet .db .set_tor_identity(hs.tor_identity().clone()) .map_err(|e| ExitError::new(ExitCode::WalletError, format!("Problem writing tor identity. {}", e)))?; } if let Some(file_name) = seed_words_file_name { let seed_words = wallet.get_seed_words(&MnemonicLanguage::English)?.join(" "); let _result = fs::write(file_name, seed_words.reveal()).map_err(|e| { ExitError::new( ExitCode::WalletError, format!("Problem writing seed words to file: {}", e), ) }); }; Ok(wallet) } async fn detect_local_base_node(network: Network) -> Option<SeedPeer> { use minotari_app_grpc::tari_rpc::{base_node_client::BaseNodeClient, Empty}; let addr = format!( "http://127.0.0.1:{}", grpc_default_port(ApplicationType::BaseNode, network) ); debug!(target: LOG_TARGET, "Checking for local base node at {}", addr); let mut node_conn = match BaseNodeClient::connect(addr).await.ok() { Some(conn) => conn, None => { debug!(target: LOG_TARGET, "No local base node detected"); return None; }, }; let resp = node_conn.identify(Empty {}).await.ok()?; let identity = resp.get_ref(); let public_key = CommsPublicKey::from_bytes(&identity.public_key).ok()?; let addresses = identity .public_addresses .iter() .filter_map(|s| Multiaddr::from_str(s).ok()) .collect::<Vec<_>>(); debug!( target: LOG_TARGET, "Local base node found with pk={} and addresses={}", public_key.to_hex(), addresses.iter().map(|a| 
a.to_string()).collect::<Vec<_>>().join(",") ); Some(SeedPeer::new(public_key, addresses)) } fn setup_identity_from_db<D: WalletBackend + 'static>( wallet_db: &WalletDatabase<D>, master_seed: &CipherSeed, node_addresses: Vec<Multiaddr>, ) -> Result<Arc<NodeIdentity>, ExitError> { let node_features = wallet_db .get_node_features()? .unwrap_or(PeerFeatures::COMMUNICATION_CLIENT); let identity_sig = wallet_db.get_comms_identity_signature()?; let comms_secret_key = derive_comms_secret_key(master_seed)?; // This checks if anything has changed by validating the previous signature and if invalid, setting identity_sig // to None let identity_sig = identity_sig.filter(|sig| { let comms_public_key = CommsPublicKey::from_secret_key(&comms_secret_key); sig.is_valid(&comms_public_key, node_features, &node_addresses) }); // SAFETY: we are manually checking the validity of this signature before adding Some(..) let node_identity = Arc::new(NodeIdentity::with_signature_unchecked( comms_secret_key, node_addresses, node_features, identity_sig, )); if !node_identity.is_signed() { node_identity.sign(); // unreachable panic: signed above let sig = node_identity .identity_signature_read() .as_ref() .expect("unreachable panic") .clone(); wallet_db.set_comms_identity_signature(sig)?; } Ok(node_identity) } /// Starts the wallet by setting the base node peer, and restarting the transaction and broadcast protocols. pub async fn start_wallet( wallet: &mut WalletSqlite, base_node: &Peer, wallet_mode: &WalletMode, ) -> Result<(), ExitError>
async fn validate_txos(wallet: &mut WalletSqlite) -> Result<(), ExitError> { debug!(target: LOG_TARGET, "Starting TXO validations."); wallet.output_manager_service.validate_txos().await.map_err(|e| { error!(target: LOG_TARGET, "Error validating Unspent TXOs: {}", e); ExitError::new(ExitCode::WalletError, e) })?; debug!(target: LOG_TARGET, "TXO validations started."); Ok(()) } pub(crate) fn confirm_seed_words(wallet: &mut WalletSqlite) -> Result<(), ExitError> { let seed_words = wallet.get_seed_words(&MnemonicLanguage::English)?; println!(); println!("========================="); println!(" IMPORTANT! "); println!("========================="); println!("These are your wallet seed words."); println!("They can be used to recover your wallet and funds."); println!("WRITE THEM DOWN OR COPY THEM NOW. THIS IS YOUR ONLY CHANCE TO DO SO."); println!(); println!("========================="); println!("{}", seed_words.join(" ").reveal()); println!("========================="); println!("\x07"); // beep! 
let mut rl = Editor::<()>::new(); loop { println!("I confirm that I will never see these seed words again."); println!(r#"Type the word "confirm" to continue."#); let readline = rl.readline(">> "); match readline { Ok(line) => match line.to_lowercase().as_ref() { "confirm" => return Ok(()), _ => continue, }, Err(e) => { return Err(ExitError::new(ExitCode::IOError, e)); }, } } } /// Clear the terminal and print the Tari splash pub fn tari_splash_screen(heading: &str) { // clear the terminal print!("{esc}[2J{esc}[1;1H", esc = 27 as char); println!("⠀⠀⠀⠀⠀⣠⣶⣿⣿⣿⣿⣶⣦⣀ "); println!("⠀⢀⣤⣾⣿⡿⠋⠀⠀⠀⠀⠉⠛⠿⣿⣿⣶⣤⣀⠀⠀⠀⠀⠀⠀⢰⣿⣾⣾⣾⣾⣾⣾⣾⣾⣾⣿⠀⠀⠀⣾⣾⣾⡀⠀⠀⠀⠀⢰⣾⣾⣾⣾⣿⣶⣶⡀⠀⠀⠀⢸⣾⣿⠀"); println!("⠀⣿⣿⣿⣿⣿⣶⣶⣤⣄⡀⠀⠀⠀⠀⠀⠉⠛⣿⣿⠀⠀⠀⠀⠀⠈⠉⠉⠉⠉⣿⣿⡏⠉⠉⠉⠉⠀⠀⣰⣿⣿⣿⣿⠀⠀⠀⠀⢸⣿⣿⠉⠉⠉⠛⣿⣿⡆⠀⠀⢸⣿⣿⠀"); println!("⠀⣿⣿⠀⠀⠀⠈⠙⣿⡿⠿⣿⣿⣿⣶⣶⣤⣤⣿⣿⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⡇⠀⠀⠀⠀⠀⢠⣿⣿⠃⣿⣿⣷⠀⠀⠀⢸⣿⣿⣀⣀⣀⣴⣿⣿⠃⠀⠀⢸⣿⣿⠀"); println!("⠀⣿⣿⣤⠀⠀⠀⢸⣿⡟⠀⠀⠀⠀⠀⠉⣽⣿⣿⠟⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⡇⠀⠀⠀⠀⠀⣿⣿⣿⣤⣬⣿⣿⣆⠀⠀⢸⣿⣿⣿⣿⣿⡿⠟⠉⠀⠀⠀⢸⣿⣿⠀"); println!("⠀⠀⠙⣿⣿⣤⠀⢸⣿⡟⠀⠀⠀⣠⣾⣿⡿⠋⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⡇⠀⠀⠀⠀⣾⣿⣿⠿⠿⠿⢿⣿⣿⡀⠀⢸⣿⣿⠙⣿⣿⣿⣄⠀⠀⠀⠀⢸⣿⣿⠀"); println!("⠀⠀⠀⠀⠙⣿⣿⣼⣿⡟⣀⣶⣿⡿⠋⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⡇⠀⠀⠀⣰⣿⣿⠃⠀⠀⠀⠀⣿⣿⣿⠀⢸⣿⣿⠀⠀⠙⣿⣿⣷⣄⠀⠀⢸⣿⣿⠀"); println!("⠀⠀⠀⠀⠀⠀⠙⣿⣿⣿⣿⠛⠀ "); println!("⠀⠀⠀⠀⠀⠀⠀⠀⠙⠁⠀ "); println!("{}", heading); println!(); } /// Prompts the user for a new wallet or to recover an existing wallet. /// Returns the wallet bootmode indicating if it's a new or existing wallet, or if recovery is required. fn boot(cli: &Cli, wallet_config: &WalletConfig) -> Result<WalletBoot, ExitError> { let wallet_exists = wallet_config.db_file.exists(); // forced recovery if cli.recovery { if wallet_exists { return Err(ExitError::new( ExitCode::RecoveryError, format!( "Wallet already exists at {:#?}. 
Remove it if you really want to run recovery in this directory!", wallet_config.db_file ), )); } return Ok(WalletBoot::Recovery); } if cli.seed_words.is_some() && !wallet_exists { return Ok(WalletBoot::Recovery); } if wallet_exists { // normal startup of existing wallet Ok(WalletBoot::Existing) } else { // automation/wallet created with --password if cli.password.is_some() || wallet_config.password.is_some() { return Ok(WalletBoot::New); } // In non-interactive mode, we never prompt. Otherwise, it's not very non-interactive, now is it? if cli.non_interactive_mode { let msg = "Wallet does not exist and no password was given to create one. Since we're in non-interactive \ mode, we need to quit here. Try setting the MINOTARI_WALLET__PASSWORD envar, or setting \ --password on the command line"; return Err(ExitError::new(ExitCode::WalletError, msg)); } // prompt for new or recovery let mut rl = Editor::<()>::new(); loop { println!("1. Create a new wallet."); println!("2. Recover wallet from seed words."); let readline = rl.readline(">> "); match readline { Ok(line) => { match line.as_ref() { "1" | "c" | "n" | "create" => { // new wallet return Ok(WalletBoot::New); }, "2" | "r" | "s" | "recover" => { // recover wallet return Ok(WalletBoot::Recovery); }, _ => continue, } }, Err(e) => { return Err(ExitError::new(ExitCode::IOError, e)); }, } } } } pub(crate) fn boot_with_password( cli: &Cli, wallet_config: &WalletConfig, ) -> Result<(WalletBoot, SafePassword), ExitError> { let boot_mode = boot(cli, wallet_config)?; if cli.password.is_some() { return Ok((boot_mode, cli.password.clone().unwrap())); } if wallet_config.password.is_some() { return Ok((boot_mode, wallet_config.password.clone().unwrap())); } let password = match boot_mode { WalletBoot::New => { // Get a new passphrase debug!(target: LOG_TARGET, "Prompting for passphrase."); get_new_passphrase("Create wallet passphrase: ", "Confirm wallet passphrase: ")? 
}, WalletBoot::Existing | WalletBoot::Recovery => { debug!(target: LOG_TARGET, "Prompting for passphrase."); prompt_password("Enter wallet passphrase: ")? }, }; Ok((boot_mode, password)) } #[cfg(test)] mod test { use tari_utilities::SafePassword; use super::get_password_feedback; #[test] fn weak_password() { let weak_password = SafePassword::from("weak"); assert!(get_password_feedback(&weak_password).is_some()); } #[test] fn strong_password() { let strong_password = SafePassword::from("This is a reasonably strong password!"); assert!(get_password_feedback(&strong_password).is_none()); } }
{ debug!(target: LOG_TARGET, "Setting base node peer"); let net_address = base_node .addresses .best() .ok_or_else(|| ExitError::new(ExitCode::ConfigError, "Configured base node has no address!"))?; wallet .set_base_node_peer(base_node.public_key.clone(), net_address.address().clone()) .await .map_err(|e| { ExitError::new( ExitCode::WalletError, format!("Error setting wallet base node peer. {}", e), ) })?; // Restart transaction protocols if not running in script or command modes if !matches!(wallet_mode, WalletMode::Command(_)) && !matches!(wallet_mode, WalletMode::Script(_)) { // NOTE: https://github.com/tari-project/tari/issues/5227 debug!("revalidating all transactions"); if let Err(e) = wallet.transaction_service.revalidate_all_transactions().await { error!(target: LOG_TARGET, "Failed to revalidate all transactions: {}", e); } debug!("restarting transaction protocols"); if let Err(e) = wallet.transaction_service.restart_transaction_protocols().await { error!(target: LOG_TARGET, "Problem restarting transaction protocols: {}", e); } debug!("validating transactions"); if let Err(e) = wallet.transaction_service.validate_transactions().await { error!( target: LOG_TARGET, "Problem validating and restarting transaction protocols: {}", e ); } // validate transaction outputs validate_txos(wallet).await?; } Ok(()) }
identifier_body
mod.rs
// Copyright 2020. The Tari Project // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the // following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following // disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the // following disclaimer in the documentation and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote // products derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#![allow(dead_code, unused)] use std::{fs, path::PathBuf, str::FromStr, sync::Arc}; use log::*; use minotari_app_utilities::identity_management::setup_node_identity; use minotari_wallet::{ error::{WalletError, WalletStorageError}, output_manager_service::storage::database::OutputManagerDatabase, storage::{ database::{WalletBackend, WalletDatabase}, sqlite_utilities::initialize_sqlite_database_backends, }, wallet::{derive_comms_secret_key, read_or_create_master_seed}, Wallet, WalletConfig, WalletSqlite, }; use rpassword::prompt_password_stdout; use rustyline::Editor; use tari_common::{ configuration::{ bootstrap::{grpc_default_port, prompt, ApplicationType}, MultiaddrList, Network, }, exit_codes::{ExitCode, ExitError}, }; use tari_comms::{ multiaddr::Multiaddr, peer_manager::{Peer, PeerFeatures}, types::CommsPublicKey, NodeIdentity, }; use tari_core::{consensus::ConsensusManager, transactions::CryptoFactories}; use tari_crypto::keys::PublicKey; use tari_key_manager::{cipher_seed::CipherSeed, mnemonic::MnemonicLanguage}; use tari_p2p::{peer_seeds::SeedPeer, TransportType}; use tari_shutdown::ShutdownSignal; use tari_utilities::{hex::Hex, ByteArray, SafePassword}; use zxcvbn::zxcvbn; use crate::{ cli::Cli, utils::db::{get_custom_base_node_peer_from_db, set_custom_base_node_peer_in_db}, wallet_modes::{PeerConfig, WalletMode}, ApplicationConfig, }; pub const LOG_TARGET: &str = "wallet::console_wallet::init"; const TARI_WALLET_PASSWORD: &str = "MINOTARI_WALLET_PASSWORD"; // Maxmimum number of times we prompt for confirmation of a new passphrase, to avoid driving the user insane with an // infinite loop const PASSPHRASE_SANITY_LIMIT: u8 = 3; #[derive(Clone, Copy)] pub enum WalletBoot { New, Existing, Recovery, } /// Get and confirm a passphrase from the user, with feedback /// This is intended to be used for new or changed passphrases /// /// You must provide the initial and confirmation prompts to pass to the user /// /// We do several things: /// - Prompt the user for a 
passphrase /// - Have the user confirm the passphrase /// - Score the passphrase /// - If the passphrase is weak (or empty), give feedback and ask the user what to do: /// - Proceed with the weak (or empty) passphrase /// - Choose a better passphrase /// - Cancel the operation /// /// If the passphrase and confirmation don't match, or if the user cancels, returns an error /// Otherwise, returns the passphrase as a `SafePassword` fn get_new_passphrase(prompt: &str, confirm: &str) -> Result<SafePassword, ExitError> { // We may need to prompt for a passphrase multiple times loop { // Prompt the user for a passphrase and confirm it, up to the defined limit // This ensures an unlucky user doesn't get stuck let mut tries = 0; let mut passphrase = SafePassword::from(""); // initial value for scope loop { passphrase = prompt_password(prompt)?; let confirmed = prompt_password(confirm)?; // If they match, continue the process if passphrase.reveal() == confirmed.reveal() { break; } // If they don't match, keep prompting until we hit the sanity limit tries += 1; if tries == PASSPHRASE_SANITY_LIMIT { return Err(ExitError::new(ExitCode::InputError, "Passphrases don't match!")); } println!("Passphrases don't match! 
Try again."); } // Score the passphrase and provide feedback let weak = display_password_feedback(&passphrase); // If the passphrase is weak, see if the user wishes to change it if weak { println!("Would you like to choose a different passphrase?"); println!(" y/Y: Yes, choose a different passphrase"); println!(" n/N: No, use this passphrase"); println!(" Enter anything else if you changed your mind and want to cancel"); let mut input = "".to_string(); std::io::stdin().read_line(&mut input); match input.trim().to_lowercase().as_str() { // Choose a different passphrase "y" => { continue; }, // Use this passphrase "n" => { return Ok(passphrase); }, // By default, we cancel to be safe _ => { return Err(ExitError::new( ExitCode::InputError, "Canceling with unchanged passphrase!", )); }, } } else { // The passphrase is fine, so return it return Ok(passphrase); } } } /// Get feedback, if available, for a weak passphrase fn get_password_feedback(passphrase: &SafePassword) -> Option<Vec<String>> { std::str::from_utf8(passphrase.reveal()) .ok() .and_then(|passphrase| zxcvbn(passphrase, &[]).ok()) .and_then(|scored| scored.feedback().to_owned()) .map(|feedback| feedback.suggestions().to_owned()) .map(|suggestion| suggestion.into_iter().map(|item| item.to_string()).collect()) } /// Display passphrase feedback to the user /// /// Returns `true` if and only if the passphrase is weak fn display_password_feedback(passphrase: &SafePassword) -> bool { if passphrase.reveal().is_empty() { // The passphrase is empty, which the scoring library doesn't handle println!(); println!("An empty password puts your wallet at risk against an attacker with access to this device."); println!("Use this only if you are sure that your device is safe from prying eyes!"); println!(); true } else if let Some(feedback) = get_password_feedback(passphrase) { // The scoring library provided feedback println!(); println!( "The password you chose is weak; a determined attacker with access to your device may 
be able to guess it." ); println!("You may want to consider changing it to a stronger one."); println!("Here are some suggestions:"); for suggestion in feedback { println!("- {}", suggestion); } println!(); true } else { // The Force is strong with this one false } } /// Gets the password provided by command line argument or environment variable if available. /// Otherwise prompts for the password to be typed in. pub fn get_or_prompt_password( arg_password: Option<SafePassword>, config_password: Option<SafePassword>, ) -> Result<SafePassword, ExitError> { if let Some(passphrase) = arg_password { return Ok(passphrase); } let env = std::env::var_os(TARI_WALLET_PASSWORD); if let Some(p) = env { let env_password = p .into_string() .map_err(|_| ExitError::new(ExitCode::IOError, "Failed to convert OsString into String"))?; return Ok(env_password.into()); } if let Some(passphrase) = config_password { return Ok(passphrase); } let password = prompt_password("Wallet password: ")?; Ok(password) } fn prompt_password(prompt: &str) -> Result<SafePassword, ExitError> { let password = prompt_password_stdout(prompt).map_err(|e| ExitError::new(ExitCode::IOError, e))?; Ok(SafePassword::from(password)) } /// Allows the user to change the password of the wallet. pub async fn change_password( config: &ApplicationConfig, existing: SafePassword, shutdown_signal: ShutdownSignal, non_interactive_mode: bool, ) -> Result<(), ExitError> { let mut wallet = init_wallet( config, existing.clone(), None, None, shutdown_signal, non_interactive_mode, ) .await?; // Get a new passphrase let new = get_new_passphrase("New wallet passphrase: ", "Confirm new passphrase: ")?; // Use the existing and new passphrases to attempt to change the wallet passphrase wallet.db.change_passphrase(&existing, &new).map_err(|e| match e { WalletStorageError::InvalidPassphrase =>
, _ => ExitError::new(ExitCode::DatabaseError, "Your password was not changed."), }) } /// Populates the PeerConfig struct from: /// 1. The custom peer in the wallet config if it exists /// 2. The custom peer in the wallet db if it exists /// 3. The detected local base node if any /// 4. The service peers defined in config they exist /// 5. The peer seeds defined in config pub async fn get_base_node_peer_config( config: &ApplicationConfig, wallet: &mut WalletSqlite, non_interactive_mode: bool, ) -> Result<PeerConfig, ExitError> { let mut use_custom_base_node_peer = false; let mut selected_base_node = match config.wallet.custom_base_node { Some(ref custom) => SeedPeer::from_str(custom) .map(|node| Some(Peer::from(node))) .map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed custom base node: {}", err)))?, None => { if let Some(custom_base_node_peer) = get_custom_base_node_peer_from_db(wallet) { use_custom_base_node_peer = true; Some(custom_base_node_peer) } else { None } }, }; // If the user has not explicitly set a base node in the config, we try detect one if !non_interactive_mode && config.wallet.custom_base_node.is_none() && !use_custom_base_node_peer { if let Some(detected_node) = detect_local_base_node(config.wallet.network).await { match selected_base_node { Some(ref base_node) if base_node.public_key == detected_node.public_key => { // Skip asking because it's already set }, Some(_) | None => { println!( "Local Base Node detected with public key {} and address {}", detected_node.public_key, detected_node .addresses .iter() .map(ToString::to_string) .collect::<Vec<_>>() .join(", ") ); if prompt( "Would you like to use this base node? 
IF YOU DID NOT START THIS BASE NODE YOU SHOULD SELECT \ NO (Y/n)", ) { let address = detected_node.addresses.first().ok_or_else(|| { ExitError::new(ExitCode::ConfigError, "No address found for detected base node") })?; set_custom_base_node_peer_in_db(wallet, &detected_node.public_key, address)?; selected_base_node = Some(detected_node.into()); } }, } } } // config let base_node_peers = config .wallet .base_node_service_peers .iter() .map(|s| SeedPeer::from_str(s)) .map(|r| r.map(Peer::from)) .collect::<Result<Vec<_>, _>>() .map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed base node peer: {}", err)))?; // peer seeds let peer_seeds = config .peer_seeds .peer_seeds .iter() .map(|s| SeedPeer::from_str(s)) .map(|r| r.map(Peer::from)) .collect::<Result<Vec<_>, _>>() .map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed seed peer: {}", err)))?; let peer_config = PeerConfig::new(selected_base_node, base_node_peers, peer_seeds); debug!(target: LOG_TARGET, "base node peer config: {:?}", peer_config); Ok(peer_config) } /// Determines which mode the wallet should run in. 
pub(crate) fn wallet_mode(cli: &Cli, boot_mode: WalletBoot) -> WalletMode { // Recovery mode if matches!(boot_mode, WalletBoot::Recovery) { if cli.non_interactive_mode { return WalletMode::RecoveryDaemon; } else { return WalletMode::RecoveryTui; } } match (cli.non_interactive_mode, cli.input_file.clone(), cli.command2.clone()) { // TUI mode (false, None, None) => WalletMode::Tui, // GRPC mode (true, None, None) => WalletMode::Grpc, // Script mode (_, Some(path), None) => WalletMode::Script(path), // Command mode (_, None, Some(command)) => WalletMode::Command(Box::new(command)), // WalletMode::Command(command), // Invalid combinations _ => WalletMode::Invalid, } } /// Set up the app environment and state for use by the UI #[allow(clippy::too_many_lines)] pub async fn init_wallet( config: &ApplicationConfig, arg_password: SafePassword, seed_words_file_name: Option<PathBuf>, recovery_seed: Option<CipherSeed>, shutdown_signal: ShutdownSignal, non_interactive_mode: bool, ) -> Result<WalletSqlite, ExitError> { fs::create_dir_all( config .wallet .db_file .parent() .expect("console_wallet_db_file cannot be set to a root directory"), ) .map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error creating Wallet folder. {}", e)))?; fs::create_dir_all(&config.wallet.p2p.datastore_path) .map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error creating peer db folder. 
{}", e)))?; debug!(target: LOG_TARGET, "Running Wallet database migrations"); let db_path = &config.wallet.db_file; // wallet should be encrypted from the beginning, so we must require a password to be provided by the user let (wallet_backend, transaction_backend, output_manager_backend, contacts_backend, key_manager_backend) = initialize_sqlite_database_backends(db_path, arg_password, config.wallet.db_connection_pool_size)?; let wallet_db = WalletDatabase::new(wallet_backend); let output_db = OutputManagerDatabase::new(output_manager_backend.clone()); debug!(target: LOG_TARGET, "Databases Initialized. Wallet is encrypted.",); let node_addresses = if config.wallet.p2p.public_addresses.is_empty() { match wallet_db.get_node_address()? { Some(addr) => MultiaddrList::from(vec![addr]), None => MultiaddrList::default(), } } else { config.wallet.p2p.public_addresses.clone() }; let master_seed = read_or_create_master_seed(recovery_seed.clone(), &wallet_db)?; let node_identity = match config.wallet.identity_file.as_ref() { Some(identity_file) => { warn!( target: LOG_TARGET, "Node identity overridden by file {}", identity_file.to_string_lossy() ); setup_node_identity( identity_file, node_addresses.to_vec(), true, PeerFeatures::COMMUNICATION_CLIENT, )? }, None => setup_identity_from_db(&wallet_db, &master_seed, node_addresses.to_vec())?, }; let mut wallet_config = config.wallet.clone(); if let TransportType::Tor = config.wallet.p2p.transport.transport_type { wallet_config.p2p.transport.tor.identity = wallet_db.get_tor_id()?; } let consensus_manager = ConsensusManager::builder(config.wallet.network) .build() .map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error consensus manager. 
{}", e)))?; let factories = CryptoFactories::default(); let mut wallet = Wallet::start( wallet_config, config.peer_seeds.clone(), config.auto_update.clone(), node_identity, consensus_manager, factories, wallet_db, output_db, transaction_backend, output_manager_backend, contacts_backend, key_manager_backend, shutdown_signal, master_seed, ) .await .map_err(|e| match e { WalletError::CommsInitializationError(cie) => cie.to_exit_error(), e => ExitError::new(ExitCode::WalletError, format!("Error creating Wallet Container: {}", e)), })?; if let Some(hs) = wallet.comms.hidden_service() { wallet .db .set_tor_identity(hs.tor_identity().clone()) .map_err(|e| ExitError::new(ExitCode::WalletError, format!("Problem writing tor identity. {}", e)))?; } if let Some(file_name) = seed_words_file_name { let seed_words = wallet.get_seed_words(&MnemonicLanguage::English)?.join(" "); let _result = fs::write(file_name, seed_words.reveal()).map_err(|e| { ExitError::new( ExitCode::WalletError, format!("Problem writing seed words to file: {}", e), ) }); }; Ok(wallet) } async fn detect_local_base_node(network: Network) -> Option<SeedPeer> { use minotari_app_grpc::tari_rpc::{base_node_client::BaseNodeClient, Empty}; let addr = format!( "http://127.0.0.1:{}", grpc_default_port(ApplicationType::BaseNode, network) ); debug!(target: LOG_TARGET, "Checking for local base node at {}", addr); let mut node_conn = match BaseNodeClient::connect(addr).await.ok() { Some(conn) => conn, None => { debug!(target: LOG_TARGET, "No local base node detected"); return None; }, }; let resp = node_conn.identify(Empty {}).await.ok()?; let identity = resp.get_ref(); let public_key = CommsPublicKey::from_bytes(&identity.public_key).ok()?; let addresses = identity .public_addresses .iter() .filter_map(|s| Multiaddr::from_str(s).ok()) .collect::<Vec<_>>(); debug!( target: LOG_TARGET, "Local base node found with pk={} and addresses={}", public_key.to_hex(), addresses.iter().map(|a| 
a.to_string()).collect::<Vec<_>>().join(",") ); Some(SeedPeer::new(public_key, addresses)) } fn setup_identity_from_db<D: WalletBackend + 'static>( wallet_db: &WalletDatabase<D>, master_seed: &CipherSeed, node_addresses: Vec<Multiaddr>, ) -> Result<Arc<NodeIdentity>, ExitError> { let node_features = wallet_db .get_node_features()? .unwrap_or(PeerFeatures::COMMUNICATION_CLIENT); let identity_sig = wallet_db.get_comms_identity_signature()?; let comms_secret_key = derive_comms_secret_key(master_seed)?; // This checks if anything has changed by validating the previous signature and if invalid, setting identity_sig // to None let identity_sig = identity_sig.filter(|sig| { let comms_public_key = CommsPublicKey::from_secret_key(&comms_secret_key); sig.is_valid(&comms_public_key, node_features, &node_addresses) }); // SAFETY: we are manually checking the validity of this signature before adding Some(..) let node_identity = Arc::new(NodeIdentity::with_signature_unchecked( comms_secret_key, node_addresses, node_features, identity_sig, )); if !node_identity.is_signed() { node_identity.sign(); // unreachable panic: signed above let sig = node_identity .identity_signature_read() .as_ref() .expect("unreachable panic") .clone(); wallet_db.set_comms_identity_signature(sig)?; } Ok(node_identity) } /// Starts the wallet by setting the base node peer, and restarting the transaction and broadcast protocols. pub async fn start_wallet( wallet: &mut WalletSqlite, base_node: &Peer, wallet_mode: &WalletMode, ) -> Result<(), ExitError> { debug!(target: LOG_TARGET, "Setting base node peer"); let net_address = base_node .addresses .best() .ok_or_else(|| ExitError::new(ExitCode::ConfigError, "Configured base node has no address!"))?; wallet .set_base_node_peer(base_node.public_key.clone(), net_address.address().clone()) .await .map_err(|e| { ExitError::new( ExitCode::WalletError, format!("Error setting wallet base node peer. 
{}", e), ) })?; // Restart transaction protocols if not running in script or command modes if !matches!(wallet_mode, WalletMode::Command(_)) && !matches!(wallet_mode, WalletMode::Script(_)) { // NOTE: https://github.com/tari-project/tari/issues/5227 debug!("revalidating all transactions"); if let Err(e) = wallet.transaction_service.revalidate_all_transactions().await { error!(target: LOG_TARGET, "Failed to revalidate all transactions: {}", e); } debug!("restarting transaction protocols"); if let Err(e) = wallet.transaction_service.restart_transaction_protocols().await { error!(target: LOG_TARGET, "Problem restarting transaction protocols: {}", e); } debug!("validating transactions"); if let Err(e) = wallet.transaction_service.validate_transactions().await { error!( target: LOG_TARGET, "Problem validating and restarting transaction protocols: {}", e ); } // validate transaction outputs validate_txos(wallet).await?; } Ok(()) } async fn validate_txos(wallet: &mut WalletSqlite) -> Result<(), ExitError> { debug!(target: LOG_TARGET, "Starting TXO validations."); wallet.output_manager_service.validate_txos().await.map_err(|e| { error!(target: LOG_TARGET, "Error validating Unspent TXOs: {}", e); ExitError::new(ExitCode::WalletError, e) })?; debug!(target: LOG_TARGET, "TXO validations started."); Ok(()) } pub(crate) fn confirm_seed_words(wallet: &mut WalletSqlite) -> Result<(), ExitError> { let seed_words = wallet.get_seed_words(&MnemonicLanguage::English)?; println!(); println!("========================="); println!(" IMPORTANT! "); println!("========================="); println!("These are your wallet seed words."); println!("They can be used to recover your wallet and funds."); println!("WRITE THEM DOWN OR COPY THEM NOW. THIS IS YOUR ONLY CHANCE TO DO SO."); println!(); println!("========================="); println!("{}", seed_words.join(" ").reveal()); println!("========================="); println!("\x07"); // beep! 
let mut rl = Editor::<()>::new(); loop { println!("I confirm that I will never see these seed words again."); println!(r#"Type the word "confirm" to continue."#); let readline = rl.readline(">> "); match readline { Ok(line) => match line.to_lowercase().as_ref() { "confirm" => return Ok(()), _ => continue, }, Err(e) => { return Err(ExitError::new(ExitCode::IOError, e)); }, } } } /// Clear the terminal and print the Tari splash pub fn tari_splash_screen(heading: &str) { // clear the terminal print!("{esc}[2J{esc}[1;1H", esc = 27 as char); println!("⠀⠀⠀⠀⠀⣠⣶⣿⣿⣿⣿⣶⣦⣀ "); println!("⠀⢀⣤⣾⣿⡿⠋⠀⠀⠀⠀⠉⠛⠿⣿⣿⣶⣤⣀⠀⠀⠀⠀⠀⠀⢰⣿⣾⣾⣾⣾⣾⣾⣾⣾⣾⣿⠀⠀⠀⣾⣾⣾⡀⠀⠀⠀⠀⢰⣾⣾⣾⣾⣿⣶⣶⡀⠀⠀⠀⢸⣾⣿⠀"); println!("⠀⣿⣿⣿⣿⣿⣶⣶⣤⣄⡀⠀⠀⠀⠀⠀⠉⠛⣿⣿⠀⠀⠀⠀⠀⠈⠉⠉⠉⠉⣿⣿⡏⠉⠉⠉⠉⠀⠀⣰⣿⣿⣿⣿⠀⠀⠀⠀⢸⣿⣿⠉⠉⠉⠛⣿⣿⡆⠀⠀⢸⣿⣿⠀"); println!("⠀⣿⣿⠀⠀⠀⠈⠙⣿⡿⠿⣿⣿⣿⣶⣶⣤⣤⣿⣿⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⡇⠀⠀⠀⠀⠀⢠⣿⣿⠃⣿⣿⣷⠀⠀⠀⢸⣿⣿⣀⣀⣀⣴⣿⣿⠃⠀⠀⢸⣿⣿⠀"); println!("⠀⣿⣿⣤⠀⠀⠀⢸⣿⡟⠀⠀⠀⠀⠀⠉⣽⣿⣿⠟⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⡇⠀⠀⠀⠀⠀⣿⣿⣿⣤⣬⣿⣿⣆⠀⠀⢸⣿⣿⣿⣿⣿⡿⠟⠉⠀⠀⠀⢸⣿⣿⠀"); println!("⠀⠀⠙⣿⣿⣤⠀⢸⣿⡟⠀⠀⠀⣠⣾⣿⡿⠋⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⡇⠀⠀⠀⠀⣾⣿⣿⠿⠿⠿⢿⣿⣿⡀⠀⢸⣿⣿⠙⣿⣿⣿⣄⠀⠀⠀⠀⢸⣿⣿⠀"); println!("⠀⠀⠀⠀⠙⣿⣿⣼⣿⡟⣀⣶⣿⡿⠋⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⡇⠀⠀⠀⣰⣿⣿⠃⠀⠀⠀⠀⣿⣿⣿⠀⢸⣿⣿⠀⠀⠙⣿⣿⣷⣄⠀⠀⢸⣿⣿⠀"); println!("⠀⠀⠀⠀⠀⠀⠙⣿⣿⣿⣿⠛⠀ "); println!("⠀⠀⠀⠀⠀⠀⠀⠀⠙⠁⠀ "); println!("{}", heading); println!(); } /// Prompts the user for a new wallet or to recover an existing wallet. /// Returns the wallet bootmode indicating if it's a new or existing wallet, or if recovery is required. fn boot(cli: &Cli, wallet_config: &WalletConfig) -> Result<WalletBoot, ExitError> { let wallet_exists = wallet_config.db_file.exists(); // forced recovery if cli.recovery { if wallet_exists { return Err(ExitError::new( ExitCode::RecoveryError, format!( "Wallet already exists at {:#?}. 
Remove it if you really want to run recovery in this directory!", wallet_config.db_file ), )); } return Ok(WalletBoot::Recovery); } if cli.seed_words.is_some() && !wallet_exists { return Ok(WalletBoot::Recovery); } if wallet_exists { // normal startup of existing wallet Ok(WalletBoot::Existing) } else { // automation/wallet created with --password if cli.password.is_some() || wallet_config.password.is_some() { return Ok(WalletBoot::New); } // In non-interactive mode, we never prompt. Otherwise, it's not very non-interactive, now is it? if cli.non_interactive_mode { let msg = "Wallet does not exist and no password was given to create one. Since we're in non-interactive \ mode, we need to quit here. Try setting the MINOTARI_WALLET__PASSWORD envar, or setting \ --password on the command line"; return Err(ExitError::new(ExitCode::WalletError, msg)); } // prompt for new or recovery let mut rl = Editor::<()>::new(); loop { println!("1. Create a new wallet."); println!("2. Recover wallet from seed words."); let readline = rl.readline(">> "); match readline { Ok(line) => { match line.as_ref() { "1" | "c" | "n" | "create" => { // new wallet return Ok(WalletBoot::New); }, "2" | "r" | "s" | "recover" => { // recover wallet return Ok(WalletBoot::Recovery); }, _ => continue, } }, Err(e) => { return Err(ExitError::new(ExitCode::IOError, e)); }, } } } } pub(crate) fn boot_with_password( cli: &Cli, wallet_config: &WalletConfig, ) -> Result<(WalletBoot, SafePassword), ExitError> { let boot_mode = boot(cli, wallet_config)?; if cli.password.is_some() { return Ok((boot_mode, cli.password.clone().unwrap())); } if wallet_config.password.is_some() { return Ok((boot_mode, wallet_config.password.clone().unwrap())); } let password = match boot_mode { WalletBoot::New => { // Get a new passphrase debug!(target: LOG_TARGET, "Prompting for passphrase."); get_new_passphrase("Create wallet passphrase: ", "Confirm wallet passphrase: ")? 
}, WalletBoot::Existing | WalletBoot::Recovery => { debug!(target: LOG_TARGET, "Prompting for passphrase."); prompt_password("Enter wallet passphrase: ")? }, }; Ok((boot_mode, password)) } #[cfg(test)] mod test { use tari_utilities::SafePassword; use super::get_password_feedback; #[test] fn weak_password() { let weak_password = SafePassword::from("weak"); assert!(get_password_feedback(&weak_password).is_some()); } #[test] fn strong_password() { let strong_password = SafePassword::from("This is a reasonably strong password!"); assert!(get_password_feedback(&strong_password).is_none()); } }
{ ExitError::new(ExitCode::IncorrectOrEmptyPassword, "Your password was not changed.") }
conditional_block
ChatEventCtrl.ts
import BagUtils from '../../../common/utils/BagUtils'; import ConfigManager from '../../../common/managers/ConfigManager'; import FootHoldModel from '../../guild/ctrl/footHold/FootHoldModel'; import GlobalUtil from '../../../common/utils/GlobalUtil'; import HeroDetailViewCtrl from '../../lottery/ctrl/HeroDetailViewCtrl'; import HeroModel from '../../../common/models/HeroModel'; import JumpUtils from '../../../common/utils/JumpUtils'; import LookHeroViewCtrl from '../../role/ctrl2/lookHero/LookHeroViewCtrl'; import ModelManager from '../../../common/managers/ModelManager'; import NetManager from '../../../common/managers/NetManager'; import PanelId from '../../../configs/ids/PanelId'; import Reader from '../../../../boot/common/core/reader'; import RelicModel from '../../relic/model/RelicModel'; import RoleModel from '../../../common/models/RoleModel'; import { AskInfoType } from '../../../common/widgets/AskPanel'; import { BagItem, BagType } from '../../../common/models/BagModel'; import { HeroCfg, SystemCfg, UniqueCfg } from '../../../a/config'; /** * @Description: 富文本点击控制器 * @Author: weiliang.huang * @Date: 2019-03-22 13:32:05 * @Last Modified by: jiangping * @Last Modified time: 2021-10-13 16:43:36 */ const { ccclass, property } = cc._decorator; @ccclass export default class ChatEventCtrl extends cc.Component { testFunc(event, param) { // console.log("testFunc", param) } equipClick(event, param) { let info: icmsg.EquipInfo = JSON.parse(param); let item: BagItem = { series: info.equipId, itemId: info.equipId, type: BagType.EQUIP, itemNum: 1, extInfo: info }; gdk.panel.setArgs(PanelId.EquipTips, { itemInfo: item, noBtn: true, isOther: true }); gdk.panel.open(PanelId.EquipTips); } itemClick(event, param) { // 找出param参数中的道具id let id = param.replace(/{(.*)}/, "$1") id = parseInt(id) let type = BagUtils.getItemTypeById(id) let item: BagItem = { series: id, itemId: id, type: type, itemNum: 1, extInfo: null } GlobalUtil.openItemTips(item, true) } heroClick(event, param) 
{ let id = param.replace(/{(.*)}/, "$1") id = parseInt(id) let heroCfg = ConfigManager.getItemById(HeroCfg, id); gdk.panel.open(PanelId.HeroDetail, (node: cc.Node) => { let comp = node.getComponent(HeroDetailViewCtrl) comp.initHeroInfo(heroCfg) }) } heroImageClick(event, param) { let data: icmsg.RoleHeroImageRsp = new icmsg.RoleHeroImageRsp() data.hero = JSON.parse(param) data.type = 1 gdk.panel.setArgs(PanelId.MainSetHeroInfoTip, data) gdk.panel.open(PanelId.MainSetHeroInfoTip); } playerClick(event, param) { // let str: String = param.replace(/{(.*)}/, "$1"); // let arr = str.split(","); // let newArr: number[] = [] // arr.forEach(e => { // newArr.push(parseInt(e)); // }) // let id: Uint8Array = new Uint8Array(newArr); // let msg = new RoleImageReq() // msg.playerId = parseInt(param); // NetManager.send(msg) gdk.panel.setArgs(PanelId.MainSet, parseInt(param)) gdk.panel.open(PanelId.MainSet) } joinGuildClick(event, param) { let roleModel = ModelManager.get(RoleModel) let joinLv = ConfigManager.getItemById(SystemCfg, 2400).openLv if (roleModel.level < joinLv) { gdk.gui.showMessage(`指挥官${joinLv}级才可加入`) return } let guildId = parseInt(param) let msg = new icmsg.GuildJoinReq() msg.guildId = guildId NetManager.send(msg, (data: icmsg.GuildJoinRsp) => { // //正常加入 // if (data.error == -1) { // gdk.gui.showMessage("申请成功,等待会长审核") // } else if (data.error == 0) { // if (data.guildId && data.camp) { // gdk.panel.hide(PanelId.Friend) // gdk.panel.hide(PanelId.Chat) // gdk.gui.showMessage(`成功加入${data.camp.guild.name}公会`) // roleModel.guildId = data.guildId // roleModel.guildName = data.camp.guild.name // gdk.panel.open(PanelId.GuildMain) // } // } else { // gdk.gui.showMessage(ErrorManager.get(data.error, [data.minLv])) // } }, this) } /**打开赏金板 */ bountyClick(event, param) { gdk.panel.hide(PanelId.Chat) gdk.panel.open(PanelId.BountyList) } /**月卡点击 */ monthCardClick(event, param) { gdk.panel.hide(PanelId.Chat) let index = parseInt(param) gdk.panel.setArgs(PanelId.MonthCard, 
index) gdk.panel.open(PanelId.MonthCard) } vipClick(event, param) { gdk.panel.hide(PanelId.Chat) JumpUtils.openRechargeView([2]) } tqClick(event, param) { gdk.panel.hide(PanelId.Chat) JumpUtils.openRechargeView([0]) } //打开爬塔副本 towerClick() { if (!JumpUtils.ifSysOpen(705)) { return; } gdk.panel.hide(PanelId.Chat) gdk.panel.open(PanelId.TowerPanel) } dailyRechargeClick() { if (!JumpUtils.ifSysOpen(2834)) { return; } gdk.panel.hide(PanelId.Chat); gdk.panel.open(PanelId.DailyFirstRecharge)
gdk.panel.hide(PanelId.HelpTipsPanel); JumpUtils.openRechargeView([0]) } scoreSysClick() { gdk.panel.open(PanelId.ScoreSytemView); } adventureClick() { JumpUtils.openActivityMain([9]) } shareHeroClick(event, param) { let msg = new icmsg.ShareInfoReq() msg.shareId = param NetManager.send(msg, (data: icmsg.ShareInfoRsp) => { gdk.panel.open(PanelId.LookHeroView, (node: cc.Node) => { let model = ModelManager.get(HeroModel) model.heroImage = data.info let comp = node.getComponent(LookHeroViewCtrl) comp.updateHeroInfo() }) }) } shareHeroCommentClick(event, param) { let ids = (param as string).split("@") gdk.panel.setArgs(PanelId.SubHeroCommentPanel, ids[0], ids[1], ids[2]) gdk.panel.open(PanelId.SubHeroCommentPanel) } relicCallGuildATK(event, param) { if (!JumpUtils.ifSysOpen(2861, true)) { return; } gdk.panel.hide(PanelId.Chat); let m = ModelManager.get(RelicModel); m.jumpArgs = param; gdk.panel.open(PanelId.RelicMainView); } relicGoToATK(event, param) { if (!JumpUtils.ifSysOpen(2861, true)) { return; } gdk.panel.hide(PanelId.Chat); gdk.panel.hide(PanelId.RelicUnderAtkNoticeView); let m = ModelManager.get(RelicModel); m.jumpArgs = param; gdk.panel.open(PanelId.RelicMainView); } joinCooperation(event, param) { let guildId = parseInt(param) let footHoldModel = ModelManager.get(FootHoldModel) let roleModel = ModelManager.get(RoleModel) if (roleModel.guildId == 0) { let info: AskInfoType = { sureCb: () => { gdk.panel.hide(PanelId.FHCooperationMain) gdk.panel.setArgs(PanelId.GuildJoin, guildId, false) gdk.panel.open(PanelId.GuildJoin) }, closeCb: () => { gdk.panel.hide(PanelId.FHCooperationMain) gdk.panel.open(PanelId.GuildList) }, sureText: "加入该公会", closeText: "公会列表", descText: `加入公会后才可参与据点争夺战,推荐先加入公会`, thisArg: this, } GlobalUtil.openAskPanel(info) return } let msg = new icmsg.FootholdCoopApplyAskReq() msg.guildId = guildId NetManager.send(msg, (data: icmsg.FootholdCoopApplyAskRsp) => { if (data.autoJoin) { footHoldModel.coopGuildId = data.guildId 
gdk.gui.showMessage("成功加入协战,请前往据点争夺战战场") } else { gdk.gui.showMessage("申请成功,请敬候佳音") } }, this) } replayBounty(event, param) { let msg1 = new icmsg.BountyQueryReq() msg1.missionId = parseInt(param) NetManager.send(msg1, (data1: icmsg.BountyQueryRsp) => { let msg2 = new icmsg.BountyFightReplyReq() msg2.missionId = parseInt(param) NetManager.send(msg2, (data2: icmsg.BountyFightReplyRsp) => { gdk.panel.setArgs(PanelId.BountyItemReplay, data1.mission, data2) gdk.panel.open(PanelId.BountyItemReplay) }) }) } goToguildPower(event) { JumpUtils.openGuildPowerView() } goToCustumeCustom(event) { if (JumpUtils.ifSysOpen(2938, true)) { gdk.panel.open(PanelId.CostumeCustomMain); } } custumeCustomClick(event, param) { if (!param) return; let info = this.parseProtoParam(param); if (info && info instanceof icmsg.CostumeCustomRsp) { let item: BagItem = { series: null, itemId: info.costume.typeId, itemNum: 1, type: BagType.COSTUME, extInfo: info.costume } GlobalUtil.openItemTips(item); } } parseProtoParam(str: string): icmsg.Message { //str2ab // let temp = str.split(','); // let arryBuff = new ArrayBuffer(temp.length); // 2 bytes for each char // var bufView = new Uint8Array(arryBuff); // for (var i = 0; i < temp.length; i++) { // // bufView[i] = temp[i].charCodeAt(i); // bufView[i] = parseInt(temp[i]); // } let arryBuff = gdk.Buffer.from(str, 'binary'); //parse Message let reader: Reader = new Reader(); let msgType: number; let msg: icmsg.Message; // 网络数据 reader.WriteBuff(arryBuff); while (reader.HasMessage) { msgType = reader.BeginMessage(); let clazz = icmsg.MessageClass[msgType]; if (clazz) { try { msg = new clazz(); msg.decode(reader); } catch (err) { cc.error("网络错误:", err); } } else { cc.error(`找不到${msgType}对应的Message类定义,请检查协议代码,或重新生成协议代码`); } reader.FinishMessage(); } reader.Clear(); return msg; } uniqueEquipClick(event, param) { let id = param.replace(/{(.*)}/, "$1") id = parseInt(id) let c = ConfigManager.getItemById(UniqueCfg, id); let extInfo = new icmsg.UniqueEquip(); 
extInfo.id = -1 extInfo.itemId = c.id extInfo.star = c.star_max let item: BagItem = { series: null, itemId: c.id, itemNum: 1, type: BagUtils.getItemTypeById(c.id), extInfo: extInfo } GlobalUtil.openItemTips(item); } }
; } goToTQStore() {
conditional_block
ChatEventCtrl.ts
import BagUtils from '../../../common/utils/BagUtils'; import ConfigManager from '../../../common/managers/ConfigManager'; import FootHoldModel from '../../guild/ctrl/footHold/FootHoldModel'; import GlobalUtil from '../../../common/utils/GlobalUtil'; import HeroDetailViewCtrl from '../../lottery/ctrl/HeroDetailViewCtrl'; import HeroModel from '../../../common/models/HeroModel'; import JumpUtils from '../../../common/utils/JumpUtils'; import LookHeroViewCtrl from '../../role/ctrl2/lookHero/LookHeroViewCtrl'; import ModelManager from '../../../common/managers/ModelManager'; import NetManager from '../../../common/managers/NetManager'; import PanelId from '../../../configs/ids/PanelId'; import Reader from '../../../../boot/common/core/reader'; import RelicModel from '../../relic/model/RelicModel'; import RoleModel from '../../../common/models/RoleModel'; import { AskInfoType } from '../../../common/widgets/AskPanel'; import { BagItem, BagType } from '../../../common/models/BagModel'; import { HeroCfg, SystemCfg, UniqueCfg } from '../../../a/config'; /** * @Description: 富文本点击控制器 * @Author: weiliang.huang * @Date: 2019-03-22 13:32:05 * @Last Modified by: jiangping * @Last Modified time: 2021-10-13 16:43:36 */ const { ccclass, property } = cc._decorator; @ccclass export default class ChatEventCtrl extends cc.Component { testFunc(event, param) { // console.log("testFunc", param) } equipClick(event, param) { let info: icmsg.EquipInfo = JSON.parse(param); let item: BagItem = { series: info.equipId, itemId: info.equipId, type: BagType.EQUIP, itemNum: 1, extInfo: info }; gdk.panel.setArgs(PanelId.EquipTips, { itemInfo: item, noBtn: true, isOther: true }); gdk.panel.open(PanelId.EquipTips); } itemClick(event, param) { // 找出param参数中的道具id let id = param.replace(/{(.*)}/, "$1") id = parseInt(id) let type = BagUtils.getItemTypeById(id) let item: BagItem = { series: id, itemId: id, type: type, itemNum: 1, extInfo: null } GlobalUtil.openItemTips(item, true) } heroClick(event, param) 
{ let id = param.replace(/{(.*)}/, "$1") id = parseInt(id) let heroCfg = ConfigManager.getItemById(HeroCfg, id); gdk.panel.open(PanelId.HeroDetail, (node: cc.Node) => { let comp = node.getComponent(HeroDetailViewCtrl) comp.initHeroInfo(heroCfg) }) } heroImageClick(event, param) { let data: icmsg.RoleHeroImageRsp = new icmsg.RoleHeroImageRsp() data.hero = JSON.parse(param) data.type = 1 gdk.panel.setArgs(PanelId.MainSetHeroInfoTip, data) gdk.panel.open(PanelId.MainSetHeroInfoTip); } playerClick(event, param) { // let str: String = param.replace(/{(.*)}/, "$1"); // let arr = str.split(","); // let newArr: number[] = [] // arr.forEach(e => { // newArr.push(parseInt(e)); // }) // let id: Uint8Array = new Uint8Array(newArr); // let msg = new RoleImageReq() // msg.playerId = parseInt(param); // NetManager.send(msg) gdk.panel.setArgs(PanelId.MainSet, parseInt(param)) gdk.panel.open(PanelId.MainSet) } joinGuildClick(event, param) { let roleModel = ModelManager.get(RoleModel) let joinLv = ConfigManager.getItemById(SystemCfg, 2400).openLv if (roleModel.level < joinLv) { gdk.gui.showMessage(`指挥官${joinLv}级才可加入`) return } let guildId = parseInt(param) let msg = new icmsg.GuildJoinReq() msg.guildId = guildId NetManager.send(msg, (data: icmsg.GuildJoinRsp) => { // //正常加入 // if (data.error == -1) { // gdk.gui.showMessage("申请成功,等待会长审核") // } else if (data.error == 0) { // if (data.guildId && data.camp) {
// roleModel.guildId = data.guildId // roleModel.guildName = data.camp.guild.name // gdk.panel.open(PanelId.GuildMain) // } // } else { // gdk.gui.showMessage(ErrorManager.get(data.error, [data.minLv])) // } }, this) } /**打开赏金板 */ bountyClick(event, param) { gdk.panel.hide(PanelId.Chat) gdk.panel.open(PanelId.BountyList) } /**月卡点击 */ monthCardClick(event, param) { gdk.panel.hide(PanelId.Chat) let index = parseInt(param) gdk.panel.setArgs(PanelId.MonthCard, index) gdk.panel.open(PanelId.MonthCard) } vipClick(event, param) { gdk.panel.hide(PanelId.Chat) JumpUtils.openRechargeView([2]) } tqClick(event, param) { gdk.panel.hide(PanelId.Chat) JumpUtils.openRechargeView([0]) } //打开爬塔副本 towerClick() { if (!JumpUtils.ifSysOpen(705)) { return; } gdk.panel.hide(PanelId.Chat) gdk.panel.open(PanelId.TowerPanel) } dailyRechargeClick() { if (!JumpUtils.ifSysOpen(2834)) { return; } gdk.panel.hide(PanelId.Chat); gdk.panel.open(PanelId.DailyFirstRecharge); } goToTQStore() { gdk.panel.hide(PanelId.HelpTipsPanel); JumpUtils.openRechargeView([0]) } scoreSysClick() { gdk.panel.open(PanelId.ScoreSytemView); } adventureClick() { JumpUtils.openActivityMain([9]) } shareHeroClick(event, param) { let msg = new icmsg.ShareInfoReq() msg.shareId = param NetManager.send(msg, (data: icmsg.ShareInfoRsp) => { gdk.panel.open(PanelId.LookHeroView, (node: cc.Node) => { let model = ModelManager.get(HeroModel) model.heroImage = data.info let comp = node.getComponent(LookHeroViewCtrl) comp.updateHeroInfo() }) }) } shareHeroCommentClick(event, param) { let ids = (param as string).split("@") gdk.panel.setArgs(PanelId.SubHeroCommentPanel, ids[0], ids[1], ids[2]) gdk.panel.open(PanelId.SubHeroCommentPanel) } relicCallGuildATK(event, param) { if (!JumpUtils.ifSysOpen(2861, true)) { return; } gdk.panel.hide(PanelId.Chat); let m = ModelManager.get(RelicModel); m.jumpArgs = param; gdk.panel.open(PanelId.RelicMainView); } relicGoToATK(event, param) { if (!JumpUtils.ifSysOpen(2861, true)) { return; } 
gdk.panel.hide(PanelId.Chat); gdk.panel.hide(PanelId.RelicUnderAtkNoticeView); let m = ModelManager.get(RelicModel); m.jumpArgs = param; gdk.panel.open(PanelId.RelicMainView); } joinCooperation(event, param) { let guildId = parseInt(param) let footHoldModel = ModelManager.get(FootHoldModel) let roleModel = ModelManager.get(RoleModel) if (roleModel.guildId == 0) { let info: AskInfoType = { sureCb: () => { gdk.panel.hide(PanelId.FHCooperationMain) gdk.panel.setArgs(PanelId.GuildJoin, guildId, false) gdk.panel.open(PanelId.GuildJoin) }, closeCb: () => { gdk.panel.hide(PanelId.FHCooperationMain) gdk.panel.open(PanelId.GuildList) }, sureText: "加入该公会", closeText: "公会列表", descText: `加入公会后才可参与据点争夺战,推荐先加入公会`, thisArg: this, } GlobalUtil.openAskPanel(info) return } let msg = new icmsg.FootholdCoopApplyAskReq() msg.guildId = guildId NetManager.send(msg, (data: icmsg.FootholdCoopApplyAskRsp) => { if (data.autoJoin) { footHoldModel.coopGuildId = data.guildId gdk.gui.showMessage("成功加入协战,请前往据点争夺战战场") } else { gdk.gui.showMessage("申请成功,请敬候佳音") } }, this) } replayBounty(event, param) { let msg1 = new icmsg.BountyQueryReq() msg1.missionId = parseInt(param) NetManager.send(msg1, (data1: icmsg.BountyQueryRsp) => { let msg2 = new icmsg.BountyFightReplyReq() msg2.missionId = parseInt(param) NetManager.send(msg2, (data2: icmsg.BountyFightReplyRsp) => { gdk.panel.setArgs(PanelId.BountyItemReplay, data1.mission, data2) gdk.panel.open(PanelId.BountyItemReplay) }) }) } goToguildPower(event) { JumpUtils.openGuildPowerView() } goToCustumeCustom(event) { if (JumpUtils.ifSysOpen(2938, true)) { gdk.panel.open(PanelId.CostumeCustomMain); } } custumeCustomClick(event, param) { if (!param) return; let info = this.parseProtoParam(param); if (info && info instanceof icmsg.CostumeCustomRsp) { let item: BagItem = { series: null, itemId: info.costume.typeId, itemNum: 1, type: BagType.COSTUME, extInfo: info.costume } GlobalUtil.openItemTips(item); } } parseProtoParam(str: string): icmsg.Message { //str2ab 
// let temp = str.split(','); // let arryBuff = new ArrayBuffer(temp.length); // 2 bytes for each char // var bufView = new Uint8Array(arryBuff); // for (var i = 0; i < temp.length; i++) { // // bufView[i] = temp[i].charCodeAt(i); // bufView[i] = parseInt(temp[i]); // } let arryBuff = gdk.Buffer.from(str, 'binary'); //parse Message let reader: Reader = new Reader(); let msgType: number; let msg: icmsg.Message; // 网络数据 reader.WriteBuff(arryBuff); while (reader.HasMessage) { msgType = reader.BeginMessage(); let clazz = icmsg.MessageClass[msgType]; if (clazz) { try { msg = new clazz(); msg.decode(reader); } catch (err) { cc.error("网络错误:", err); } } else { cc.error(`找不到${msgType}对应的Message类定义,请检查协议代码,或重新生成协议代码`); } reader.FinishMessage(); } reader.Clear(); return msg; } uniqueEquipClick(event, param) { let id = param.replace(/{(.*)}/, "$1") id = parseInt(id) let c = ConfigManager.getItemById(UniqueCfg, id); let extInfo = new icmsg.UniqueEquip(); extInfo.id = -1 extInfo.itemId = c.id extInfo.star = c.star_max let item: BagItem = { series: null, itemId: c.id, itemNum: 1, type: BagUtils.getItemTypeById(c.id), extInfo: extInfo } GlobalUtil.openItemTips(item); } }
// gdk.panel.hide(PanelId.Friend) // gdk.panel.hide(PanelId.Chat) // gdk.gui.showMessage(`成功加入${data.camp.guild.name}公会`)
random_line_split
ChatEventCtrl.ts
import BagUtils from '../../../common/utils/BagUtils'; import ConfigManager from '../../../common/managers/ConfigManager'; import FootHoldModel from '../../guild/ctrl/footHold/FootHoldModel'; import GlobalUtil from '../../../common/utils/GlobalUtil'; import HeroDetailViewCtrl from '../../lottery/ctrl/HeroDetailViewCtrl'; import HeroModel from '../../../common/models/HeroModel'; import JumpUtils from '../../../common/utils/JumpUtils'; import LookHeroViewCtrl from '../../role/ctrl2/lookHero/LookHeroViewCtrl'; import ModelManager from '../../../common/managers/ModelManager'; import NetManager from '../../../common/managers/NetManager'; import PanelId from '../../../configs/ids/PanelId'; import Reader from '../../../../boot/common/core/reader'; import RelicModel from '../../relic/model/RelicModel'; import RoleModel from '../../../common/models/RoleModel'; import { AskInfoType } from '../../../common/widgets/AskPanel'; import { BagItem, BagType } from '../../../common/models/BagModel'; import { HeroCfg, SystemCfg, UniqueCfg } from '../../../a/config'; /** * @Description: 富文本点击控制器 * @Author: weiliang.huang * @Date: 2019-03-22 13:32:05 * @Last Modified by: jiangping * @Last Modified time: 2021-10-13 16:43:36 */ const { ccclass, property } = cc._decorator; @ccclass export default class ChatEventCtrl extends cc.Component { testFunc(event, param) { // console.log("testFunc", param) } equipClick(event, param) { let info: icmsg.EquipInfo = JSON.parse(param); let item: BagItem = { series: info.equipId, itemId: info.equipId, type: BagType.EQUIP, itemNum: 1, extInfo: info }; gdk.panel.setArgs(PanelId.EquipTips, { itemInfo: item, noBtn: true, isOther: true }); gdk.panel.open(PanelId.EquipTips); } itemClick(event, param) { // 找出param参数中的道具id let id = param.replace(/{(.*)}/, "$1") id = parseInt(id) let type = BagUtils.getItemTypeById(id) let item: BagItem = { series: id, itemId: id, type: type, itemNum: 1, extInfo: null } GlobalUtil.openItemTips(item, true) } heroClick(event, param) 
{ let id = param.replace(/{(.*)}/, "$1") id = parseInt(id) let heroCfg = ConfigManager.getItemById(HeroCfg, id); gdk.panel.open(PanelId.HeroDetail, (node: cc.Node) => { let comp = node.getComponent(HeroDetailViewCtrl) comp.initHeroInfo(heroCfg) }) } heroImageClick(event, param) { let data: icmsg.RoleHeroImageRsp = new icmsg.RoleHeroImageRsp() data.hero = JSON.parse(param) data.type = 1 gdk.panel.setArgs(PanelId.MainSetHeroInfoTip, data) gdk.panel.open(PanelId.MainSetHeroInfoTip); } playerClick(event, param) { // let str: String = param.replace(/{(.*)}/, "$1"); // let arr = str.split(","); // let newArr: number[] = [] // arr.forEach(e => { // newArr.push(parseInt(e)); // }) // let id: Uint8Array = new Uint8Array(newArr); // let msg = new RoleImageReq() // msg.playerId = parseInt(param); // NetManager.send(msg) gdk.panel.setArgs(PanelId.MainSet, parseInt(param)) gdk.panel.open(PanelId.MainSet) } joinGuildClick(event, param) { let roleModel = ModelM
gdk.panel.open(PanelId.BountyList) } /**月卡点击 */ monthCardClick(event, param) { gdk.panel.hide(PanelId.Chat) let index = parseInt(param) gdk.panel.setArgs(PanelId.MonthCard, index) gdk.panel.open(PanelId.MonthCard) } vipClick(event, param) { gdk.panel.hide(PanelId.Chat) JumpUtils.openRechargeView([2]) } tqClick(event, param) { gdk.panel.hide(PanelId.Chat) JumpUtils.openRechargeView([0]) } //打开爬塔副本 towerClick() { if (!JumpUtils.ifSysOpen(705)) { return; } gdk.panel.hide(PanelId.Chat) gdk.panel.open(PanelId.TowerPanel) } dailyRechargeClick() { if (!JumpUtils.ifSysOpen(2834)) { return; } gdk.panel.hide(PanelId.Chat); gdk.panel.open(PanelId.DailyFirstRecharge); } goToTQStore() { gdk.panel.hide(PanelId.HelpTipsPanel); JumpUtils.openRechargeView([0]) } scoreSysClick() { gdk.panel.open(PanelId.ScoreSytemView); } adventureClick() { JumpUtils.openActivityMain([9]) } shareHeroClick(event, param) { let msg = new icmsg.ShareInfoReq() msg.shareId = param NetManager.send(msg, (data: icmsg.ShareInfoRsp) => { gdk.panel.open(PanelId.LookHeroView, (node: cc.Node) => { let model = ModelManager.get(HeroModel) model.heroImage = data.info let comp = node.getComponent(LookHeroViewCtrl) comp.updateHeroInfo() }) }) } shareHeroCommentClick(event, param) { let ids = (param as string).split("@") gdk.panel.setArgs(PanelId.SubHeroCommentPanel, ids[0], ids[1], ids[2]) gdk.panel.open(PanelId.SubHeroCommentPanel) } relicCallGuildATK(event, param) { if (!JumpUtils.ifSysOpen(2861, true)) { return; } gdk.panel.hide(PanelId.Chat); let m = ModelManager.get(RelicModel); m.jumpArgs = param; gdk.panel.open(PanelId.RelicMainView); } relicGoToATK(event, param) { if (!JumpUtils.ifSysOpen(2861, true)) { return; } gdk.panel.hide(PanelId.Chat); gdk.panel.hide(PanelId.RelicUnderAtkNoticeView); let m = ModelManager.get(RelicModel); m.jumpArgs = param; gdk.panel.open(PanelId.RelicMainView); } joinCooperation(event, param) { let guildId = parseInt(param) let footHoldModel = ModelManager.get(FootHoldModel) let 
roleModel = ModelManager.get(RoleModel) if (roleModel.guildId == 0) { let info: AskInfoType = { sureCb: () => { gdk.panel.hide(PanelId.FHCooperationMain) gdk.panel.setArgs(PanelId.GuildJoin, guildId, false) gdk.panel.open(PanelId.GuildJoin) }, closeCb: () => { gdk.panel.hide(PanelId.FHCooperationMain) gdk.panel.open(PanelId.GuildList) }, sureText: "加入该公会", closeText: "公会列表", descText: `加入公会后才可参与据点争夺战,推荐先加入公会`, thisArg: this, } GlobalUtil.openAskPanel(info) return } let msg = new icmsg.FootholdCoopApplyAskReq() msg.guildId = guildId NetManager.send(msg, (data: icmsg.FootholdCoopApplyAskRsp) => { if (data.autoJoin) { footHoldModel.coopGuildId = data.guildId gdk.gui.showMessage("成功加入协战,请前往据点争夺战战场") } else { gdk.gui.showMessage("申请成功,请敬候佳音") } }, this) } replayBounty(event, param) { let msg1 = new icmsg.BountyQueryReq() msg1.missionId = parseInt(param) NetManager.send(msg1, (data1: icmsg.BountyQueryRsp) => { let msg2 = new icmsg.BountyFightReplyReq() msg2.missionId = parseInt(param) NetManager.send(msg2, (data2: icmsg.BountyFightReplyRsp) => { gdk.panel.setArgs(PanelId.BountyItemReplay, data1.mission, data2) gdk.panel.open(PanelId.BountyItemReplay) }) }) } goToguildPower(event) { JumpUtils.openGuildPowerView() } goToCustumeCustom(event) { if (JumpUtils.ifSysOpen(2938, true)) { gdk.panel.open(PanelId.CostumeCustomMain); } } custumeCustomClick(event, param) { if (!param) return; let info = this.parseProtoParam(param); if (info && info instanceof icmsg.CostumeCustomRsp) { let item: BagItem = { series: null, itemId: info.costume.typeId, itemNum: 1, type: BagType.COSTUME, extInfo: info.costume } GlobalUtil.openItemTips(item); } } parseProtoParam(str: string): icmsg.Message { //str2ab // let temp = str.split(','); // let arryBuff = new ArrayBuffer(temp.length); // 2 bytes for each char // var bufView = new Uint8Array(arryBuff); // for (var i = 0; i < temp.length; i++) { // // bufView[i] = temp[i].charCodeAt(i); // bufView[i] = parseInt(temp[i]); // } let arryBuff = 
gdk.Buffer.from(str, 'binary'); //parse Message let reader: Reader = new Reader(); let msgType: number; let msg: icmsg.Message; // 网络数据 reader.WriteBuff(arryBuff); while (reader.HasMessage) { msgType = reader.BeginMessage(); let clazz = icmsg.MessageClass[msgType]; if (clazz) { try { msg = new clazz(); msg.decode(reader); } catch (err) { cc.error("网络错误:", err); } } else { cc.error(`找不到${msgType}对应的Message类定义,请检查协议代码,或重新生成协议代码`); } reader.FinishMessage(); } reader.Clear(); return msg; } uniqueEquipClick(event, param) { let id = param.replace(/{(.*)}/, "$1") id = parseInt(id) let c = ConfigManager.getItemById(UniqueCfg, id); let extInfo = new icmsg.UniqueEquip(); extInfo.id = -1 extInfo.itemId = c.id extInfo.star = c.star_max let item: BagItem = { series: null, itemId: c.id, itemNum: 1, type: BagUtils.getItemTypeById(c.id), extInfo: extInfo } GlobalUtil.openItemTips(item); } }
anager.get(RoleModel) let joinLv = ConfigManager.getItemById(SystemCfg, 2400).openLv if (roleModel.level < joinLv) { gdk.gui.showMessage(`指挥官${joinLv}级才可加入`) return } let guildId = parseInt(param) let msg = new icmsg.GuildJoinReq() msg.guildId = guildId NetManager.send(msg, (data: icmsg.GuildJoinRsp) => { // //正常加入 // if (data.error == -1) { // gdk.gui.showMessage("申请成功,等待会长审核") // } else if (data.error == 0) { // if (data.guildId && data.camp) { // gdk.panel.hide(PanelId.Friend) // gdk.panel.hide(PanelId.Chat) // gdk.gui.showMessage(`成功加入${data.camp.guild.name}公会`) // roleModel.guildId = data.guildId // roleModel.guildName = data.camp.guild.name // gdk.panel.open(PanelId.GuildMain) // } // } else { // gdk.gui.showMessage(ErrorManager.get(data.error, [data.minLv])) // } }, this) } /**打开赏金板 */ bountyClick(event, param) { gdk.panel.hide(PanelId.Chat)
identifier_body
ChatEventCtrl.ts
import BagUtils from '../../../common/utils/BagUtils'; import ConfigManager from '../../../common/managers/ConfigManager'; import FootHoldModel from '../../guild/ctrl/footHold/FootHoldModel'; import GlobalUtil from '../../../common/utils/GlobalUtil'; import HeroDetailViewCtrl from '../../lottery/ctrl/HeroDetailViewCtrl'; import HeroModel from '../../../common/models/HeroModel'; import JumpUtils from '../../../common/utils/JumpUtils'; import LookHeroViewCtrl from '../../role/ctrl2/lookHero/LookHeroViewCtrl'; import ModelManager from '../../../common/managers/ModelManager'; import NetManager from '../../../common/managers/NetManager'; import PanelId from '../../../configs/ids/PanelId'; import Reader from '../../../../boot/common/core/reader'; import RelicModel from '../../relic/model/RelicModel'; import RoleModel from '../../../common/models/RoleModel'; import { AskInfoType } from '../../../common/widgets/AskPanel'; import { BagItem, BagType } from '../../../common/models/BagModel'; import { HeroCfg, SystemCfg, UniqueCfg } from '../../../a/config'; /** * @Description: 富文本点击控制器 * @Author: weiliang.huang * @Date: 2019-03-22 13:32:05 * @Last Modified by: jiangping * @Last Modified time: 2021-10-13 16:43:36 */ const { ccclass, property } = cc._decorator; @ccclass export default class ChatEventCtrl extends cc.Component { testFunc(event, param) { // console.log("testFunc", param) } equipClick(event, param) { let info: icmsg.EquipInfo = JSON.parse(param); let item: BagItem = { series: info.equipId, itemId: info.equipId, type: BagType.EQUIP, itemNum: 1, extInfo: info }; gdk.panel.setArgs(PanelId.EquipTips, { itemInfo: item, noBtn: true, isOther: true }); gdk.panel.open(PanelId.EquipTips); } itemClick(event, param) { // 找出param参数中的道具id let id = param.replace(/{(.*)}/, "$1") id = parseInt(id) let type = BagUtils.getItemTypeById(id) let item: BagItem = { series: id, itemId: id, type: type, itemNum: 1, extInfo: null } GlobalUtil.openItemTips(item, true) } heroClick(event, param) 
{ let id = param.replace(/{(.*)}/, "$1") id = parseInt(id) let heroCfg = ConfigManager.getItemById(HeroCfg, id); gdk.panel.open(PanelId.HeroDetail, (node: cc.Node) => { let comp = node.getComponent(HeroDetailViewCtrl) comp.initHeroInfo(heroCfg) }) } heroImageClick(event, param) { let data: icmsg.RoleHeroImageRsp = new icmsg.RoleHeroImageRsp() data.hero = JSON.parse(param) data.type = 1 gdk.panel.setArgs(PanelId.MainSetHeroInfoTip, data) gdk.panel.open(PanelId.MainSetHeroInfoTip); } playerClick(event, param) { // let str: String = param.replace(/{(.*)}/, "$1"); // let arr = str.split(","); // let newArr: number[] = [] // arr.forEach(e => { // newArr.push(parseInt(e)); // }) // let id: Uint8Array = new Uint8Array(newArr); // let msg = new RoleImageReq() // msg.playerId = parseInt(param); // NetManager.send(msg) gdk.panel.setArgs(PanelId.MainSet, parseInt(param)) gdk.panel.open(PanelId.MainSet) } joinGuildClick(event, param) { let roleModel = ModelManager.get(RoleModel) let joinLv = ConfigManager.getItemById(SystemCfg, 2400).openLv if (roleModel.level < joinLv) { gdk.gui.showMessage(`指挥官${joinLv}级才可加入`) return } let guildId = parseInt(param) let msg = new icmsg.GuildJoinReq() msg.guildId = guildId NetManager.send(msg, (data: icmsg.GuildJoinRsp) => { // //正常加入 // if (data.error == -1) { // gdk.gui.showMessage("申请成功,等待会长审核") // } else if (data.error == 0) { // if (data.guildId && data.camp) { // gdk.panel.hide(PanelId.Friend) // gdk.panel.hide(PanelId.Chat) // gdk.gui.showMessage(`成功加入${data.camp.guild.name}公会`) // roleModel.guildId = data.guildId // roleModel.guildName = data.camp.guild.name // gdk.panel.open(PanelId.GuildMain) // } // } else { // gdk.gui.showMessage(ErrorManager.get(data.error, [data.minLv])) // } }, this) } /**打开赏金板 */ bountyClick(event, param) { gdk.panel.hide(PanelId.Chat) gdk.panel.open(PanelId.BountyList) } /**月卡点击 */ monthCardClick(event, param) { gdk.panel.hide(PanelId.Chat) let index = parseInt(param) gdk.panel.setArgs(PanelId.MonthCard, 
index) gdk.panel.open(PanelId.MonthCard) } vipClick(event, param) { gdk.panel.hide(PanelId.Chat) JumpUtils.openRechargeView([2]) } tqClick(event, param) { gdk.panel.hide(PanelId.Chat) JumpUtils.openRechargeView([0]) } //打开爬塔副本 towerClick() { if (!JumpUtils.ifSysOpen(705)) { return; } gdk.panel.hide(PanelId.Chat) gdk.panel.open(PanelId.TowerPanel) } dailyRechargeClick() { if (!JumpUtils.ifSysOpen(2834)) { return; } gdk.panel.hide(PanelId.Chat); gdk.panel.open(PanelId.DailyFirstRecharge); } goToTQStore() { gdk.panel.hide(PanelId.HelpTipsPanel); JumpUtils.openRechargeView([0]) } scoreSysClick() { gdk.panel.open(PanelId.ScoreSytemView); } adventureClick() { JumpUtils.openActivityMain([9]) } shareHeroClick(event, param) { let msg =
eInfoReq() msg.shareId = param NetManager.send(msg, (data: icmsg.ShareInfoRsp) => { gdk.panel.open(PanelId.LookHeroView, (node: cc.Node) => { let model = ModelManager.get(HeroModel) model.heroImage = data.info let comp = node.getComponent(LookHeroViewCtrl) comp.updateHeroInfo() }) }) } shareHeroCommentClick(event, param) { let ids = (param as string).split("@") gdk.panel.setArgs(PanelId.SubHeroCommentPanel, ids[0], ids[1], ids[2]) gdk.panel.open(PanelId.SubHeroCommentPanel) } relicCallGuildATK(event, param) { if (!JumpUtils.ifSysOpen(2861, true)) { return; } gdk.panel.hide(PanelId.Chat); let m = ModelManager.get(RelicModel); m.jumpArgs = param; gdk.panel.open(PanelId.RelicMainView); } relicGoToATK(event, param) { if (!JumpUtils.ifSysOpen(2861, true)) { return; } gdk.panel.hide(PanelId.Chat); gdk.panel.hide(PanelId.RelicUnderAtkNoticeView); let m = ModelManager.get(RelicModel); m.jumpArgs = param; gdk.panel.open(PanelId.RelicMainView); } joinCooperation(event, param) { let guildId = parseInt(param) let footHoldModel = ModelManager.get(FootHoldModel) let roleModel = ModelManager.get(RoleModel) if (roleModel.guildId == 0) { let info: AskInfoType = { sureCb: () => { gdk.panel.hide(PanelId.FHCooperationMain) gdk.panel.setArgs(PanelId.GuildJoin, guildId, false) gdk.panel.open(PanelId.GuildJoin) }, closeCb: () => { gdk.panel.hide(PanelId.FHCooperationMain) gdk.panel.open(PanelId.GuildList) }, sureText: "加入该公会", closeText: "公会列表", descText: `加入公会后才可参与据点争夺战,推荐先加入公会`, thisArg: this, } GlobalUtil.openAskPanel(info) return } let msg = new icmsg.FootholdCoopApplyAskReq() msg.guildId = guildId NetManager.send(msg, (data: icmsg.FootholdCoopApplyAskRsp) => { if (data.autoJoin) { footHoldModel.coopGuildId = data.guildId gdk.gui.showMessage("成功加入协战,请前往据点争夺战战场") } else { gdk.gui.showMessage("申请成功,请敬候佳音") } }, this) } replayBounty(event, param) { let msg1 = new icmsg.BountyQueryReq() msg1.missionId = parseInt(param) NetManager.send(msg1, (data1: icmsg.BountyQueryRsp) => { let msg2 = 
new icmsg.BountyFightReplyReq() msg2.missionId = parseInt(param) NetManager.send(msg2, (data2: icmsg.BountyFightReplyRsp) => { gdk.panel.setArgs(PanelId.BountyItemReplay, data1.mission, data2) gdk.panel.open(PanelId.BountyItemReplay) }) }) } goToguildPower(event) { JumpUtils.openGuildPowerView() } goToCustumeCustom(event) { if (JumpUtils.ifSysOpen(2938, true)) { gdk.panel.open(PanelId.CostumeCustomMain); } } custumeCustomClick(event, param) { if (!param) return; let info = this.parseProtoParam(param); if (info && info instanceof icmsg.CostumeCustomRsp) { let item: BagItem = { series: null, itemId: info.costume.typeId, itemNum: 1, type: BagType.COSTUME, extInfo: info.costume } GlobalUtil.openItemTips(item); } } parseProtoParam(str: string): icmsg.Message { //str2ab // let temp = str.split(','); // let arryBuff = new ArrayBuffer(temp.length); // 2 bytes for each char // var bufView = new Uint8Array(arryBuff); // for (var i = 0; i < temp.length; i++) { // // bufView[i] = temp[i].charCodeAt(i); // bufView[i] = parseInt(temp[i]); // } let arryBuff = gdk.Buffer.from(str, 'binary'); //parse Message let reader: Reader = new Reader(); let msgType: number; let msg: icmsg.Message; // 网络数据 reader.WriteBuff(arryBuff); while (reader.HasMessage) { msgType = reader.BeginMessage(); let clazz = icmsg.MessageClass[msgType]; if (clazz) { try { msg = new clazz(); msg.decode(reader); } catch (err) { cc.error("网络错误:", err); } } else { cc.error(`找不到${msgType}对应的Message类定义,请检查协议代码,或重新生成协议代码`); } reader.FinishMessage(); } reader.Clear(); return msg; } uniqueEquipClick(event, param) { let id = param.replace(/{(.*)}/, "$1") id = parseInt(id) let c = ConfigManager.getItemById(UniqueCfg, id); let extInfo = new icmsg.UniqueEquip(); extInfo.id = -1 extInfo.itemId = c.id extInfo.star = c.star_max let item: BagItem = { series: null, itemId: c.id, itemNum: 1, type: BagUtils.getItemTypeById(c.id), extInfo: extInfo } GlobalUtil.openItemTips(item); } }
new icmsg.Shar
identifier_name
goldenFile.go
package testhelper import ( "bytes" "errors" "flag" "fmt" "os" "path" "path/filepath" "strings" "testing" ) const ( pBits = 0o644 dirPBits = 0o755 ) // AddUpdateFlag adds a new flag to the standard flag package. The flag is // used to control whether or not to update the Golden files with the new // values rather than reporting differences as test errors. If there is // already a Golden file present then this will be preserved in a file with // the same name as the Golden file but with ".orig" as a suffix. This can // then be set on the command line when testing and looked up by the // GoldenFileCfg.Check method. The Check method will report the flag name to // use if any is available. func (gfc *GoldenFileCfg) AddUpdateFlag() { if gfc.updFlagAdded { return } gfGlob := gfc.PathName("*") if gfc.UpdFlagName == "" { panic(errors.New( "AddUpdateFlag has been called for files in " + gfGlob + " but the GoldenFileCfg has no flag name set")) } flag.BoolVar(&gfc.updFlag, gfc.UpdFlagName, false, "set this flag to update the golden files in "+gfGlob) gfc.updFlagAdded = true } // AddKeepBadResultsFlag adds a new flag to the standard flag package. The // flag is used to control whether or not to keep the bad results in a // file. The name of the file will be the name of the Golden file with // ".badResults" as a suffix. These files can then be compared with the // Golden files do see what changes have been made . This can then be set on // the command line when testing and looked up by the GoldenFileCfg.Check // method. The Check method will report the flag name to use if any is // available. 
func (gfc *GoldenFileCfg) AddKeepBadResultsFlag() { if gfc.keepBadResultsFlagAdded { return } gfGlob := gfc.PathName("*") if gfc.KeepBadResultsFlagName == "" { panic(errors.New( "AddKeepBadResultsFlag has been called for files in " + gfGlob + " but the GoldenFileCfg has no flag name set")) } flag.BoolVar(&gfc.keepBadResultsFlag, gfc.KeepBadResultsFlagName, false, "set this flag to keep bad results in"+gfGlob) gfc.keepBadResultsFlagAdded = true } // GoldenFileCfg holds common configuration details for a collection of // golden files. It helps with consistent naming of golden files without // having to repeat common parts throughout the code. // // A golden file is a file that holds expected output (typically lengthy) // that can be compared as part of a test. It avoids the need to have a long // string in the body of a test. // // DirNames is a slice of strings holding the parts of the directory path // to the file // // Pfx is an optional prefix - leave it as an empty string to exclude it // // Sfx is an optional suffix - as for the prefix // // UpdFlagName is the name of a flag that will set a bool used to decide // whether or not to update the golden file. If it is not set then it is // ignored. If you have set this then you should also call the AddUpdateFlag // method (typically in an init() function) and then use the Check method // to compare with the file // // KeepBadResultsFlagName is the name of a flag that will set a bool used // to decide whether or not to keep bad results. If it is not set then it // is ignored. 
If you have set this then you should also call the // AddKeepBadResultsFlag method (typically in an init() function) and then // use the Check method to compare with the file type GoldenFileCfg struct { DirNames []string Pfx string Sfx string UpdFlagName string updFlag bool updFlagAdded bool KeepBadResultsFlagName string keepBadResultsFlag bool keepBadResultsFlagAdded bool } // Check confirms that the value given matches the contents of the golden // file and returns true if it does, false otherwise. It will report any // errors it finds including any problems reading from or writing to the // golden file itself. // // If UpdFlagName is not empty and the AddUpdateFlag method // has been called (typically in an init() function) then the corresponding // flag value will be looked up and if the flag is set to true the golden // file will be updated with the supplied value. You can set this value // through a command-line parameter to the test and then pass that to this // function as follows: // // gfc := testhelper.GoldenFileCfg{ // DirNames: []string{"testdata"}, // Pfx: "values", // Sfx: "txt", // UpdFlagName: "upd-gf", // } // // func init() { // gfc.AddUpdateFlag() // } // ... // gfc.Check(t, "my value test", t.Name(), val) // // Then to update the golden files you would invoke the test command as follows: // // go test -upd-gf // // Similarly with the KeepBadResultsFlag. // // Give the -v argument to go test to see what is being updated. // // An advantage of using this method (over using the // testhelper.CheckAgainstGoldenFile function) is that this will show the // name of the flag to use in order to update the files. You save the hassle // of scanning the code to find out what you called the flag. 
func (gfc GoldenFileCfg) Check(t *testing.T, id, gfName string, val []byte) bool { t.Helper() if gfc.UpdFlagName != "" && !gfc.updFlagAdded { panic(fmt.Errorf( "the name of the flag to update the golden files has been"+ " given (%q) but the flag has not been added."+ " You should call the AddUpdateFlag() method"+ " (typically in an init() function)", gfc.UpdFlagName)) } if gfc.KeepBadResultsFlagName != "" && !gfc.keepBadResultsFlagAdded { panic(fmt.Errorf( "the name of the flag to keep bad results has been"+ " given (%q) but the flag has not been added."+ " You should call the AddKeepBadResultsFlag() method"+ " (typically in an init() function)", gfc.KeepBadResultsFlagName)) } return gfc.checkFile(t, id, gfc.PathName(gfName), val) } // PathName will return the name of a golden file. It applies the directory // names and any prefix or suffix to the supplied string to give a well-formed // name using the appropriate filepath separators for the operating system. A // suggested name to pass to this method might be the name of the current // test as given by the Name() method on testing.T. // // Note that any supplied name is "cleaned" by removing any part prior to an // embedded filepath.Separator. func (gfc GoldenFileCfg) PathName(name string) string { fNameParts := make([]string, 0, 3) if gfc.Pfx != "" { fNameParts = append(fNameParts, gfc.Pfx) } fNameParts = append(fNameParts, filepath.Base(name)) if gfc.Sfx != "" { fNameParts = append(fNameParts, gfc.Sfx) } fName := strings.Join(fNameParts, ".") pathParts := make([]string, 0, len(gfc.DirNames)+1) pathParts = append(pathParts, gfc.DirNames...) pathParts = append(pathParts, fName) return filepath.Join(pathParts...) } // CheckAgainstGoldenFile confirms that the value given matches the contents // of the golden file and returns true if it does, false otherwise. It will // report any errors it finds including any problems reading from or writing // to the golden file itself. 
If the updGF flag is set to true then the // golden file will be updated with the supplied value. You can set this // value through a command-line parameter to the test and then pass that to // this function as follows // // var upd = flag.Bool("upd-gf", false, "update the golden files") // gfc := testhelper.GoldenFileCfg{ // DirNames: []string{"testdata"}, // Pfx: "values", // Sfx: "txt", // } // ... // testhelper.CheckAgainstGoldenFile(t, // "my value test", // val, // gfc.PathName(t.Name()), // *upd) // // Then to update the golden files you would invoke the test command as follows // // go test -upd-gf // // Give the -v argument to go test to see what is being updated. // // Deprecated: use the Check method on the GoldenFileCfg func CheckAgainstGoldenFile(t *testing.T, testID string, val []byte, gfName string, updGF bool) bool { t.Helper() return checkFile(t, testID, gfName, val, updGF) } // getExpVal reads the contents of the golden file. If the updGF flag is set // then if will write the contents of the file before reading it. It returns // the contents and true if all went well, nil and false otherwise. It will // report any errors it finds including any problems reading from or writing // to the golden file itself. func getExpVal(t *testing.T, id, gfName string, val []byte, updGF bool) ([]byte, bool) { t.Helper() if updGF { if !updateGoldenFile(t, gfName, val) { return nil, false } } expVal, err := os.ReadFile(gfName) // nolint: gosec if err != nil { t.Log(id) t.Logf("\t: Problem with the golden file: %q", gfName) t.Errorf("\t: Couldn't read the expected value. Error: %s", err) return nil, false } return expVal, true } // checkFile confirms that the value given matches the contents of the golden // file and returns true if it does, false otherwise. It will report any // errors it finds including any problems reading from or writing to the // golden file itself. If the updGF flag is set to true then the golden file // will be updated with the supplied value. 
func checkFile(t *testing.T, id, gfName string, val []byte, updGF bool) bool { t.Helper() expVal, ok := getExpVal(t, id, gfName, val, updGF) if !ok { t.Errorf("\t: Actual\n" + string(val)) return false } return actEqualsExp(t, id, gfName, val, expVal) } // checkFile confirms that the value given matches the contents of the golden // file and returns true if it does, false otherwise. It will report any // errors it finds including any problems reading from or writing to the // golden file itself. If the updGF flag is set to true then the golden file // will be updated with the supplied value. func (gfc GoldenFileCfg) checkFile(t *testing.T, id, gfName string, val []byte) bool { t.Helper() expVal, ok := getExpVal(t, id, gfName, val, gfc.updFlag) if !ok { if gfc.UpdFlagName != "" { t.Errorf("\t: To update the golden file with the new value"+ " pass %q to the go test command", "-"+gfc.UpdFlagName) } t.Errorf("\t: Actual\n" + string(val)) return false } if actEqualsExp(t, id, gfName, val, expVal) { return true } if gfc.UpdFlagName != "" { t.Errorf("\t: To update the golden file with the new value"+ " pass %q to the go test command", "-"+gfc.UpdFlagName) } if gfc.keepBadResultsFlag { keepBadResults(t, gfName, val) } else if gfc.KeepBadResultsFlagName != "" { t.Errorf("\t: To keep the (bad) Actual results for later"+ " investigation pass %q to the go test command", "-"+gfc.KeepBadResultsFlagName) } return false } // actEqualsExp compares the expected value against the actual and reports any // difference. 
It will return true if they are equal and false otherwise func actEqualsExp(t *testing.T, id, gfName string, actVal, expVal []byte) bool { t.Helper() if bytes.Equal(actVal, expVal) { return true } t.Log(id) t.Log("\t: Expected\n" + string(expVal)) t.Log("\t: Actual\n" + string(actVal)) t.Errorf("\t: The value given differs from the golden file value: %q", gfName) return false } // updateGoldenFile will attempt to update the golden file with the new // content and return true if it succeeds or false otherwise. If there is an // existing golden file it will try to preverve the contents so that they can // be compared with the new file. It reports its progress; if the file hasn't // changed it does nothing. func updateGoldenFile(t *testing.T, gfName string, val []byte) bool
// keepBadResults will attempt to write the bad results to a new file. func keepBadResults(t *testing.T, gfName string, val []byte) { t.Helper() fName := gfName + ".badResults" writeFile(t, fName, "bad results", val) } // writeFile will write the values into the file. If the parent directories // do not exist then it will create them and try again. func writeFile(t *testing.T, fName, desc string, val []byte) (rval bool) { t.Helper() rval = true var err error defer func() { if err != nil { t.Logf("\t: Couldn't write to the %s file", desc) t.Error("\t: ", err) rval = false } }() t.Logf("Updating/Creating the %s file: %q", desc, fName) err = os.WriteFile(fName, val, pBits) if os.IsNotExist(err) { dir := path.Dir(fName) if dir == "." { return } err = os.MkdirAll(dir, dirPBits) if err != nil { return } err = os.WriteFile(fName, val, pBits) } return }
{ t.Helper() origVal, err := os.ReadFile(gfName) // nolint: gosec if err == nil { if bytes.Equal(val, origVal) { return true } origFileName := gfName + ".orig" writeFile(t, origFileName, "original contents", origVal) } else if !os.IsNotExist(err) { t.Log("Couldn't preserve the original contents") t.Logf("\t: Couldn't read the golden file: %q", gfName) t.Error("\t: ", err) } if !writeFile(t, gfName, "golden", val) { return false } return true }
identifier_body
goldenFile.go
package testhelper import ( "bytes" "errors" "flag" "fmt" "os" "path" "path/filepath" "strings" "testing" ) const ( pBits = 0o644 dirPBits = 0o755 ) // AddUpdateFlag adds a new flag to the standard flag package. The flag is // used to control whether or not to update the Golden files with the new // values rather than reporting differences as test errors. If there is // already a Golden file present then this will be preserved in a file with // the same name as the Golden file but with ".orig" as a suffix. This can // then be set on the command line when testing and looked up by the // GoldenFileCfg.Check method. The Check method will report the flag name to // use if any is available. func (gfc *GoldenFileCfg) AddUpdateFlag() { if gfc.updFlagAdded { return } gfGlob := gfc.PathName("*") if gfc.UpdFlagName == "" { panic(errors.New( "AddUpdateFlag has been called for files in " + gfGlob + " but the GoldenFileCfg has no flag name set")) } flag.BoolVar(&gfc.updFlag, gfc.UpdFlagName, false, "set this flag to update the golden files in "+gfGlob) gfc.updFlagAdded = true } // AddKeepBadResultsFlag adds a new flag to the standard flag package. The // flag is used to control whether or not to keep the bad results in a // file. The name of the file will be the name of the Golden file with // ".badResults" as a suffix. These files can then be compared with the // Golden files do see what changes have been made . This can then be set on // the command line when testing and looked up by the GoldenFileCfg.Check // method. The Check method will report the flag name to use if any is // available. 
func (gfc *GoldenFileCfg) AddKeepBadResultsFlag() { if gfc.keepBadResultsFlagAdded { return } gfGlob := gfc.PathName("*") if gfc.KeepBadResultsFlagName == "" { panic(errors.New( "AddKeepBadResultsFlag has been called for files in " + gfGlob + " but the GoldenFileCfg has no flag name set")) } flag.BoolVar(&gfc.keepBadResultsFlag, gfc.KeepBadResultsFlagName, false, "set this flag to keep bad results in"+gfGlob) gfc.keepBadResultsFlagAdded = true } // GoldenFileCfg holds common configuration details for a collection of // golden files. It helps with consistent naming of golden files without // having to repeat common parts throughout the code. // // A golden file is a file that holds expected output (typically lengthy) // that can be compared as part of a test. It avoids the need to have a long // string in the body of a test. // // DirNames is a slice of strings holding the parts of the directory path // to the file // // Pfx is an optional prefix - leave it as an empty string to exclude it // // Sfx is an optional suffix - as for the prefix // // UpdFlagName is the name of a flag that will set a bool used to decide // whether or not to update the golden file. If it is not set then it is // ignored. If you have set this then you should also call the AddUpdateFlag // method (typically in an init() function) and then use the Check method // to compare with the file // // KeepBadResultsFlagName is the name of a flag that will set a bool used // to decide whether or not to keep bad results. If it is not set then it // is ignored. 
If you have set this then you should also call the // AddKeepBadResultsFlag method (typically in an init() function) and then // use the Check method to compare with the file type GoldenFileCfg struct { DirNames []string Pfx string Sfx string UpdFlagName string updFlag bool updFlagAdded bool KeepBadResultsFlagName string keepBadResultsFlag bool keepBadResultsFlagAdded bool } // Check confirms that the value given matches the contents of the golden // file and returns true if it does, false otherwise. It will report any // errors it finds including any problems reading from or writing to the // golden file itself. // // If UpdFlagName is not empty and the AddUpdateFlag method // has been called (typically in an init() function) then the corresponding // flag value will be looked up and if the flag is set to true the golden // file will be updated with the supplied value. You can set this value // through a command-line parameter to the test and then pass that to this // function as follows: // // gfc := testhelper.GoldenFileCfg{ // DirNames: []string{"testdata"}, // Pfx: "values", // Sfx: "txt", // UpdFlagName: "upd-gf", // } // // func init() { // gfc.AddUpdateFlag() // } // ... // gfc.Check(t, "my value test", t.Name(), val) // // Then to update the golden files you would invoke the test command as follows: // // go test -upd-gf // // Similarly with the KeepBadResultsFlag. // // Give the -v argument to go test to see what is being updated. // // An advantage of using this method (over using the // testhelper.CheckAgainstGoldenFile function) is that this will show the // name of the flag to use in order to update the files. You save the hassle // of scanning the code to find out what you called the flag. 
func (gfc GoldenFileCfg) Check(t *testing.T, id, gfName string, val []byte) bool { t.Helper() if gfc.UpdFlagName != "" && !gfc.updFlagAdded { panic(fmt.Errorf( "the name of the flag to update the golden files has been"+ " given (%q) but the flag has not been added."+ " You should call the AddUpdateFlag() method"+ " (typically in an init() function)", gfc.UpdFlagName)) } if gfc.KeepBadResultsFlagName != "" && !gfc.keepBadResultsFlagAdded { panic(fmt.Errorf( "the name of the flag to keep bad results has been"+ " given (%q) but the flag has not been added."+ " You should call the AddKeepBadResultsFlag() method"+ " (typically in an init() function)", gfc.KeepBadResultsFlagName)) } return gfc.checkFile(t, id, gfc.PathName(gfName), val) } // PathName will return the name of a golden file. It applies the directory // names and any prefix or suffix to the supplied string to give a well-formed // name using the appropriate filepath separators for the operating system. A // suggested name to pass to this method might be the name of the current // test as given by the Name() method on testing.T. // // Note that any supplied name is "cleaned" by removing any part prior to an // embedded filepath.Separator. func (gfc GoldenFileCfg) PathName(name string) string { fNameParts := make([]string, 0, 3) if gfc.Pfx != "" { fNameParts = append(fNameParts, gfc.Pfx) } fNameParts = append(fNameParts, filepath.Base(name)) if gfc.Sfx != "" { fNameParts = append(fNameParts, gfc.Sfx) } fName := strings.Join(fNameParts, ".") pathParts := make([]string, 0, len(gfc.DirNames)+1) pathParts = append(pathParts, gfc.DirNames...) pathParts = append(pathParts, fName) return filepath.Join(pathParts...) } // CheckAgainstGoldenFile confirms that the value given matches the contents // of the golden file and returns true if it does, false otherwise. It will // report any errors it finds including any problems reading from or writing // to the golden file itself. 
If the updGF flag is set to true then the // golden file will be updated with the supplied value. You can set this // value through a command-line parameter to the test and then pass that to // this function as follows // // var upd = flag.Bool("upd-gf", false, "update the golden files") // gfc := testhelper.GoldenFileCfg{ // DirNames: []string{"testdata"}, // Pfx: "values", // Sfx: "txt", // } // ... // testhelper.CheckAgainstGoldenFile(t, // "my value test", // val, // gfc.PathName(t.Name()), // *upd) // // Then to update the golden files you would invoke the test command as follows // // go test -upd-gf // // Give the -v argument to go test to see what is being updated. // // Deprecated: use the Check method on the GoldenFileCfg func CheckAgainstGoldenFile(t *testing.T, testID string, val []byte, gfName string, updGF bool) bool { t.Helper() return checkFile(t, testID, gfName, val, updGF) } // getExpVal reads the contents of the golden file. If the updGF flag is set // then if will write the contents of the file before reading it. It returns // the contents and true if all went well, nil and false otherwise. It will // report any errors it finds including any problems reading from or writing // to the golden file itself. func getExpVal(t *testing.T, id, gfName string, val []byte, updGF bool) ([]byte, bool) { t.Helper() if updGF { if !updateGoldenFile(t, gfName, val) { return nil, false } } expVal, err := os.ReadFile(gfName) // nolint: gosec if err != nil { t.Log(id) t.Logf("\t: Problem with the golden file: %q", gfName) t.Errorf("\t: Couldn't read the expected value. Error: %s", err) return nil, false } return expVal, true } // checkFile confirms that the value given matches the contents of the golden // file and returns true if it does, false otherwise. It will report any // errors it finds including any problems reading from or writing to the // golden file itself. If the updGF flag is set to true then the golden file // will be updated with the supplied value. 
func checkFile(t *testing.T, id, gfName string, val []byte, updGF bool) bool { t.Helper() expVal, ok := getExpVal(t, id, gfName, val, updGF) if !ok { t.Errorf("\t: Actual\n" + string(val)) return false } return actEqualsExp(t, id, gfName, val, expVal) } // checkFile confirms that the value given matches the contents of the golden // file and returns true if it does, false otherwise. It will report any // errors it finds including any problems reading from or writing to the // golden file itself. If the updGF flag is set to true then the golden file // will be updated with the supplied value. func (gfc GoldenFileCfg) checkFile(t *testing.T, id, gfName string, val []byte) bool { t.Helper() expVal, ok := getExpVal(t, id, gfName, val, gfc.updFlag) if !ok { if gfc.UpdFlagName != "" { t.Errorf("\t: To update the golden file with the new value"+ " pass %q to the go test command", "-"+gfc.UpdFlagName) } t.Errorf("\t: Actual\n" + string(val)) return false }
} if gfc.UpdFlagName != "" { t.Errorf("\t: To update the golden file with the new value"+ " pass %q to the go test command", "-"+gfc.UpdFlagName) } if gfc.keepBadResultsFlag { keepBadResults(t, gfName, val) } else if gfc.KeepBadResultsFlagName != "" { t.Errorf("\t: To keep the (bad) Actual results for later"+ " investigation pass %q to the go test command", "-"+gfc.KeepBadResultsFlagName) } return false } // actEqualsExp compares the expected value against the actual and reports any // difference. It will return true if they are equal and false otherwise func actEqualsExp(t *testing.T, id, gfName string, actVal, expVal []byte) bool { t.Helper() if bytes.Equal(actVal, expVal) { return true } t.Log(id) t.Log("\t: Expected\n" + string(expVal)) t.Log("\t: Actual\n" + string(actVal)) t.Errorf("\t: The value given differs from the golden file value: %q", gfName) return false } // updateGoldenFile will attempt to update the golden file with the new // content and return true if it succeeds or false otherwise. If there is an // existing golden file it will try to preverve the contents so that they can // be compared with the new file. It reports its progress; if the file hasn't // changed it does nothing. func updateGoldenFile(t *testing.T, gfName string, val []byte) bool { t.Helper() origVal, err := os.ReadFile(gfName) // nolint: gosec if err == nil { if bytes.Equal(val, origVal) { return true } origFileName := gfName + ".orig" writeFile(t, origFileName, "original contents", origVal) } else if !os.IsNotExist(err) { t.Log("Couldn't preserve the original contents") t.Logf("\t: Couldn't read the golden file: %q", gfName) t.Error("\t: ", err) } if !writeFile(t, gfName, "golden", val) { return false } return true } // keepBadResults will attempt to write the bad results to a new file. 
func keepBadResults(t *testing.T, gfName string, val []byte) { t.Helper() fName := gfName + ".badResults" writeFile(t, fName, "bad results", val) } // writeFile will write the values into the file. If the parent directories // do not exist then it will create them and try again. func writeFile(t *testing.T, fName, desc string, val []byte) (rval bool) { t.Helper() rval = true var err error defer func() { if err != nil { t.Logf("\t: Couldn't write to the %s file", desc) t.Error("\t: ", err) rval = false } }() t.Logf("Updating/Creating the %s file: %q", desc, fName) err = os.WriteFile(fName, val, pBits) if os.IsNotExist(err) { dir := path.Dir(fName) if dir == "." { return } err = os.MkdirAll(dir, dirPBits) if err != nil { return } err = os.WriteFile(fName, val, pBits) } return }
if actEqualsExp(t, id, gfName, val, expVal) { return true
random_line_split
goldenFile.go
package testhelper import ( "bytes" "errors" "flag" "fmt" "os" "path" "path/filepath" "strings" "testing" ) const ( pBits = 0o644 dirPBits = 0o755 ) // AddUpdateFlag adds a new flag to the standard flag package. The flag is // used to control whether or not to update the Golden files with the new // values rather than reporting differences as test errors. If there is // already a Golden file present then this will be preserved in a file with // the same name as the Golden file but with ".orig" as a suffix. This can // then be set on the command line when testing and looked up by the // GoldenFileCfg.Check method. The Check method will report the flag name to // use if any is available. func (gfc *GoldenFileCfg) AddUpdateFlag() { if gfc.updFlagAdded { return } gfGlob := gfc.PathName("*") if gfc.UpdFlagName == "" { panic(errors.New( "AddUpdateFlag has been called for files in " + gfGlob + " but the GoldenFileCfg has no flag name set")) } flag.BoolVar(&gfc.updFlag, gfc.UpdFlagName, false, "set this flag to update the golden files in "+gfGlob) gfc.updFlagAdded = true } // AddKeepBadResultsFlag adds a new flag to the standard flag package. The // flag is used to control whether or not to keep the bad results in a // file. The name of the file will be the name of the Golden file with // ".badResults" as a suffix. These files can then be compared with the // Golden files do see what changes have been made . This can then be set on // the command line when testing and looked up by the GoldenFileCfg.Check // method. The Check method will report the flag name to use if any is // available. 
func (gfc *GoldenFileCfg) AddKeepBadResultsFlag() { if gfc.keepBadResultsFlagAdded { return } gfGlob := gfc.PathName("*") if gfc.KeepBadResultsFlagName == "" { panic(errors.New( "AddKeepBadResultsFlag has been called for files in " + gfGlob + " but the GoldenFileCfg has no flag name set")) } flag.BoolVar(&gfc.keepBadResultsFlag, gfc.KeepBadResultsFlagName, false, "set this flag to keep bad results in"+gfGlob) gfc.keepBadResultsFlagAdded = true } // GoldenFileCfg holds common configuration details for a collection of // golden files. It helps with consistent naming of golden files without // having to repeat common parts throughout the code. // // A golden file is a file that holds expected output (typically lengthy) // that can be compared as part of a test. It avoids the need to have a long // string in the body of a test. // // DirNames is a slice of strings holding the parts of the directory path // to the file // // Pfx is an optional prefix - leave it as an empty string to exclude it // // Sfx is an optional suffix - as for the prefix // // UpdFlagName is the name of a flag that will set a bool used to decide // whether or not to update the golden file. If it is not set then it is // ignored. If you have set this then you should also call the AddUpdateFlag // method (typically in an init() function) and then use the Check method // to compare with the file // // KeepBadResultsFlagName is the name of a flag that will set a bool used // to decide whether or not to keep bad results. If it is not set then it // is ignored. 
If you have set this then you should also call the // AddKeepBadResultsFlag method (typically in an init() function) and then // use the Check method to compare with the file type GoldenFileCfg struct { DirNames []string Pfx string Sfx string UpdFlagName string updFlag bool updFlagAdded bool KeepBadResultsFlagName string keepBadResultsFlag bool keepBadResultsFlagAdded bool } // Check confirms that the value given matches the contents of the golden // file and returns true if it does, false otherwise. It will report any // errors it finds including any problems reading from or writing to the // golden file itself. // // If UpdFlagName is not empty and the AddUpdateFlag method // has been called (typically in an init() function) then the corresponding // flag value will be looked up and if the flag is set to true the golden // file will be updated with the supplied value. You can set this value // through a command-line parameter to the test and then pass that to this // function as follows: // // gfc := testhelper.GoldenFileCfg{ // DirNames: []string{"testdata"}, // Pfx: "values", // Sfx: "txt", // UpdFlagName: "upd-gf", // } // // func init() { // gfc.AddUpdateFlag() // } // ... // gfc.Check(t, "my value test", t.Name(), val) // // Then to update the golden files you would invoke the test command as follows: // // go test -upd-gf // // Similarly with the KeepBadResultsFlag. // // Give the -v argument to go test to see what is being updated. // // An advantage of using this method (over using the // testhelper.CheckAgainstGoldenFile function) is that this will show the // name of the flag to use in order to update the files. You save the hassle // of scanning the code to find out what you called the flag. 
func (gfc GoldenFileCfg) Check(t *testing.T, id, gfName string, val []byte) bool { t.Helper() if gfc.UpdFlagName != "" && !gfc.updFlagAdded { panic(fmt.Errorf( "the name of the flag to update the golden files has been"+ " given (%q) but the flag has not been added."+ " You should call the AddUpdateFlag() method"+ " (typically in an init() function)", gfc.UpdFlagName)) } if gfc.KeepBadResultsFlagName != "" && !gfc.keepBadResultsFlagAdded { panic(fmt.Errorf( "the name of the flag to keep bad results has been"+ " given (%q) but the flag has not been added."+ " You should call the AddKeepBadResultsFlag() method"+ " (typically in an init() function)", gfc.KeepBadResultsFlagName)) } return gfc.checkFile(t, id, gfc.PathName(gfName), val) } // PathName will return the name of a golden file. It applies the directory // names and any prefix or suffix to the supplied string to give a well-formed // name using the appropriate filepath separators for the operating system. A // suggested name to pass to this method might be the name of the current // test as given by the Name() method on testing.T. // // Note that any supplied name is "cleaned" by removing any part prior to an // embedded filepath.Separator. func (gfc GoldenFileCfg) PathName(name string) string { fNameParts := make([]string, 0, 3) if gfc.Pfx != "" { fNameParts = append(fNameParts, gfc.Pfx) } fNameParts = append(fNameParts, filepath.Base(name)) if gfc.Sfx != "" { fNameParts = append(fNameParts, gfc.Sfx) } fName := strings.Join(fNameParts, ".") pathParts := make([]string, 0, len(gfc.DirNames)+1) pathParts = append(pathParts, gfc.DirNames...) pathParts = append(pathParts, fName) return filepath.Join(pathParts...) } // CheckAgainstGoldenFile confirms that the value given matches the contents // of the golden file and returns true if it does, false otherwise. It will // report any errors it finds including any problems reading from or writing // to the golden file itself. 
If the updGF flag is set to true then the // golden file will be updated with the supplied value. You can set this // value through a command-line parameter to the test and then pass that to // this function as follows // // var upd = flag.Bool("upd-gf", false, "update the golden files") // gfc := testhelper.GoldenFileCfg{ // DirNames: []string{"testdata"}, // Pfx: "values", // Sfx: "txt", // } // ... // testhelper.CheckAgainstGoldenFile(t, // "my value test", // val, // gfc.PathName(t.Name()), // *upd) // // Then to update the golden files you would invoke the test command as follows // // go test -upd-gf // // Give the -v argument to go test to see what is being updated. // // Deprecated: use the Check method on the GoldenFileCfg func
(t *testing.T, testID string, val []byte, gfName string, updGF bool) bool { t.Helper() return checkFile(t, testID, gfName, val, updGF) } // getExpVal reads the contents of the golden file. If the updGF flag is set // then if will write the contents of the file before reading it. It returns // the contents and true if all went well, nil and false otherwise. It will // report any errors it finds including any problems reading from or writing // to the golden file itself. func getExpVal(t *testing.T, id, gfName string, val []byte, updGF bool) ([]byte, bool) { t.Helper() if updGF { if !updateGoldenFile(t, gfName, val) { return nil, false } } expVal, err := os.ReadFile(gfName) // nolint: gosec if err != nil { t.Log(id) t.Logf("\t: Problem with the golden file: %q", gfName) t.Errorf("\t: Couldn't read the expected value. Error: %s", err) return nil, false } return expVal, true } // checkFile confirms that the value given matches the contents of the golden // file and returns true if it does, false otherwise. It will report any // errors it finds including any problems reading from or writing to the // golden file itself. If the updGF flag is set to true then the golden file // will be updated with the supplied value. func checkFile(t *testing.T, id, gfName string, val []byte, updGF bool) bool { t.Helper() expVal, ok := getExpVal(t, id, gfName, val, updGF) if !ok { t.Errorf("\t: Actual\n" + string(val)) return false } return actEqualsExp(t, id, gfName, val, expVal) } // checkFile confirms that the value given matches the contents of the golden // file and returns true if it does, false otherwise. It will report any // errors it finds including any problems reading from or writing to the // golden file itself. If the updGF flag is set to true then the golden file // will be updated with the supplied value. 
func (gfc GoldenFileCfg) checkFile(t *testing.T, id, gfName string, val []byte) bool { t.Helper() expVal, ok := getExpVal(t, id, gfName, val, gfc.updFlag) if !ok { if gfc.UpdFlagName != "" { t.Errorf("\t: To update the golden file with the new value"+ " pass %q to the go test command", "-"+gfc.UpdFlagName) } t.Errorf("\t: Actual\n" + string(val)) return false } if actEqualsExp(t, id, gfName, val, expVal) { return true } if gfc.UpdFlagName != "" { t.Errorf("\t: To update the golden file with the new value"+ " pass %q to the go test command", "-"+gfc.UpdFlagName) } if gfc.keepBadResultsFlag { keepBadResults(t, gfName, val) } else if gfc.KeepBadResultsFlagName != "" { t.Errorf("\t: To keep the (bad) Actual results for later"+ " investigation pass %q to the go test command", "-"+gfc.KeepBadResultsFlagName) } return false } // actEqualsExp compares the expected value against the actual and reports any // difference. It will return true if they are equal and false otherwise func actEqualsExp(t *testing.T, id, gfName string, actVal, expVal []byte) bool { t.Helper() if bytes.Equal(actVal, expVal) { return true } t.Log(id) t.Log("\t: Expected\n" + string(expVal)) t.Log("\t: Actual\n" + string(actVal)) t.Errorf("\t: The value given differs from the golden file value: %q", gfName) return false } // updateGoldenFile will attempt to update the golden file with the new // content and return true if it succeeds or false otherwise. If there is an // existing golden file it will try to preverve the contents so that they can // be compared with the new file. It reports its progress; if the file hasn't // changed it does nothing. 
func updateGoldenFile(t *testing.T, gfName string, val []byte) bool { t.Helper() origVal, err := os.ReadFile(gfName) // nolint: gosec if err == nil { if bytes.Equal(val, origVal) { return true } origFileName := gfName + ".orig" writeFile(t, origFileName, "original contents", origVal) } else if !os.IsNotExist(err) { t.Log("Couldn't preserve the original contents") t.Logf("\t: Couldn't read the golden file: %q", gfName) t.Error("\t: ", err) } if !writeFile(t, gfName, "golden", val) { return false } return true } // keepBadResults will attempt to write the bad results to a new file. func keepBadResults(t *testing.T, gfName string, val []byte) { t.Helper() fName := gfName + ".badResults" writeFile(t, fName, "bad results", val) } // writeFile will write the values into the file. If the parent directories // do not exist then it will create them and try again. func writeFile(t *testing.T, fName, desc string, val []byte) (rval bool) { t.Helper() rval = true var err error defer func() { if err != nil { t.Logf("\t: Couldn't write to the %s file", desc) t.Error("\t: ", err) rval = false } }() t.Logf("Updating/Creating the %s file: %q", desc, fName) err = os.WriteFile(fName, val, pBits) if os.IsNotExist(err) { dir := path.Dir(fName) if dir == "." { return } err = os.MkdirAll(dir, dirPBits) if err != nil { return } err = os.WriteFile(fName, val, pBits) } return }
CheckAgainstGoldenFile
identifier_name
goldenFile.go
package testhelper import ( "bytes" "errors" "flag" "fmt" "os" "path" "path/filepath" "strings" "testing" ) const ( pBits = 0o644 dirPBits = 0o755 ) // AddUpdateFlag adds a new flag to the standard flag package. The flag is // used to control whether or not to update the Golden files with the new // values rather than reporting differences as test errors. If there is // already a Golden file present then this will be preserved in a file with // the same name as the Golden file but with ".orig" as a suffix. This can // then be set on the command line when testing and looked up by the // GoldenFileCfg.Check method. The Check method will report the flag name to // use if any is available. func (gfc *GoldenFileCfg) AddUpdateFlag() { if gfc.updFlagAdded { return } gfGlob := gfc.PathName("*") if gfc.UpdFlagName == "" { panic(errors.New( "AddUpdateFlag has been called for files in " + gfGlob + " but the GoldenFileCfg has no flag name set")) } flag.BoolVar(&gfc.updFlag, gfc.UpdFlagName, false, "set this flag to update the golden files in "+gfGlob) gfc.updFlagAdded = true } // AddKeepBadResultsFlag adds a new flag to the standard flag package. The // flag is used to control whether or not to keep the bad results in a // file. The name of the file will be the name of the Golden file with // ".badResults" as a suffix. These files can then be compared with the // Golden files do see what changes have been made . This can then be set on // the command line when testing and looked up by the GoldenFileCfg.Check // method. The Check method will report the flag name to use if any is // available. 
func (gfc *GoldenFileCfg) AddKeepBadResultsFlag() { if gfc.keepBadResultsFlagAdded { return } gfGlob := gfc.PathName("*") if gfc.KeepBadResultsFlagName == "" { panic(errors.New( "AddKeepBadResultsFlag has been called for files in " + gfGlob + " but the GoldenFileCfg has no flag name set")) } flag.BoolVar(&gfc.keepBadResultsFlag, gfc.KeepBadResultsFlagName, false, "set this flag to keep bad results in"+gfGlob) gfc.keepBadResultsFlagAdded = true } // GoldenFileCfg holds common configuration details for a collection of // golden files. It helps with consistent naming of golden files without // having to repeat common parts throughout the code. // // A golden file is a file that holds expected output (typically lengthy) // that can be compared as part of a test. It avoids the need to have a long // string in the body of a test. // // DirNames is a slice of strings holding the parts of the directory path // to the file // // Pfx is an optional prefix - leave it as an empty string to exclude it // // Sfx is an optional suffix - as for the prefix // // UpdFlagName is the name of a flag that will set a bool used to decide // whether or not to update the golden file. If it is not set then it is // ignored. If you have set this then you should also call the AddUpdateFlag // method (typically in an init() function) and then use the Check method // to compare with the file // // KeepBadResultsFlagName is the name of a flag that will set a bool used // to decide whether or not to keep bad results. If it is not set then it // is ignored. 
If you have set this then you should also call the // AddKeepBadResultsFlag method (typically in an init() function) and then // use the Check method to compare with the file type GoldenFileCfg struct { DirNames []string Pfx string Sfx string UpdFlagName string updFlag bool updFlagAdded bool KeepBadResultsFlagName string keepBadResultsFlag bool keepBadResultsFlagAdded bool } // Check confirms that the value given matches the contents of the golden // file and returns true if it does, false otherwise. It will report any // errors it finds including any problems reading from or writing to the // golden file itself. // // If UpdFlagName is not empty and the AddUpdateFlag method // has been called (typically in an init() function) then the corresponding // flag value will be looked up and if the flag is set to true the golden // file will be updated with the supplied value. You can set this value // through a command-line parameter to the test and then pass that to this // function as follows: // // gfc := testhelper.GoldenFileCfg{ // DirNames: []string{"testdata"}, // Pfx: "values", // Sfx: "txt", // UpdFlagName: "upd-gf", // } // // func init() { // gfc.AddUpdateFlag() // } // ... // gfc.Check(t, "my value test", t.Name(), val) // // Then to update the golden files you would invoke the test command as follows: // // go test -upd-gf // // Similarly with the KeepBadResultsFlag. // // Give the -v argument to go test to see what is being updated. // // An advantage of using this method (over using the // testhelper.CheckAgainstGoldenFile function) is that this will show the // name of the flag to use in order to update the files. You save the hassle // of scanning the code to find out what you called the flag. 
func (gfc GoldenFileCfg) Check(t *testing.T, id, gfName string, val []byte) bool { t.Helper() if gfc.UpdFlagName != "" && !gfc.updFlagAdded { panic(fmt.Errorf( "the name of the flag to update the golden files has been"+ " given (%q) but the flag has not been added."+ " You should call the AddUpdateFlag() method"+ " (typically in an init() function)", gfc.UpdFlagName)) } if gfc.KeepBadResultsFlagName != "" && !gfc.keepBadResultsFlagAdded { panic(fmt.Errorf( "the name of the flag to keep bad results has been"+ " given (%q) but the flag has not been added."+ " You should call the AddKeepBadResultsFlag() method"+ " (typically in an init() function)", gfc.KeepBadResultsFlagName)) } return gfc.checkFile(t, id, gfc.PathName(gfName), val) } // PathName will return the name of a golden file. It applies the directory // names and any prefix or suffix to the supplied string to give a well-formed // name using the appropriate filepath separators for the operating system. A // suggested name to pass to this method might be the name of the current // test as given by the Name() method on testing.T. // // Note that any supplied name is "cleaned" by removing any part prior to an // embedded filepath.Separator. func (gfc GoldenFileCfg) PathName(name string) string { fNameParts := make([]string, 0, 3) if gfc.Pfx != "" { fNameParts = append(fNameParts, gfc.Pfx) } fNameParts = append(fNameParts, filepath.Base(name)) if gfc.Sfx != "" { fNameParts = append(fNameParts, gfc.Sfx) } fName := strings.Join(fNameParts, ".") pathParts := make([]string, 0, len(gfc.DirNames)+1) pathParts = append(pathParts, gfc.DirNames...) pathParts = append(pathParts, fName) return filepath.Join(pathParts...) } // CheckAgainstGoldenFile confirms that the value given matches the contents // of the golden file and returns true if it does, false otherwise. It will // report any errors it finds including any problems reading from or writing // to the golden file itself. 
If the updGF flag is set to true then the // golden file will be updated with the supplied value. You can set this // value through a command-line parameter to the test and then pass that to // this function as follows // // var upd = flag.Bool("upd-gf", false, "update the golden files") // gfc := testhelper.GoldenFileCfg{ // DirNames: []string{"testdata"}, // Pfx: "values", // Sfx: "txt", // } // ... // testhelper.CheckAgainstGoldenFile(t, // "my value test", // val, // gfc.PathName(t.Name()), // *upd) // // Then to update the golden files you would invoke the test command as follows // // go test -upd-gf // // Give the -v argument to go test to see what is being updated. // // Deprecated: use the Check method on the GoldenFileCfg func CheckAgainstGoldenFile(t *testing.T, testID string, val []byte, gfName string, updGF bool) bool { t.Helper() return checkFile(t, testID, gfName, val, updGF) } // getExpVal reads the contents of the golden file. If the updGF flag is set // then if will write the contents of the file before reading it. It returns // the contents and true if all went well, nil and false otherwise. It will // report any errors it finds including any problems reading from or writing // to the golden file itself. func getExpVal(t *testing.T, id, gfName string, val []byte, updGF bool) ([]byte, bool) { t.Helper() if updGF { if !updateGoldenFile(t, gfName, val) { return nil, false } } expVal, err := os.ReadFile(gfName) // nolint: gosec if err != nil
return expVal, true } // checkFile confirms that the value given matches the contents of the golden // file and returns true if it does, false otherwise. It will report any // errors it finds including any problems reading from or writing to the // golden file itself. If the updGF flag is set to true then the golden file // will be updated with the supplied value. func checkFile(t *testing.T, id, gfName string, val []byte, updGF bool) bool { t.Helper() expVal, ok := getExpVal(t, id, gfName, val, updGF) if !ok { t.Errorf("\t: Actual\n" + string(val)) return false } return actEqualsExp(t, id, gfName, val, expVal) } // checkFile confirms that the value given matches the contents of the golden // file and returns true if it does, false otherwise. It will report any // errors it finds including any problems reading from or writing to the // golden file itself. If the updGF flag is set to true then the golden file // will be updated with the supplied value. func (gfc GoldenFileCfg) checkFile(t *testing.T, id, gfName string, val []byte) bool { t.Helper() expVal, ok := getExpVal(t, id, gfName, val, gfc.updFlag) if !ok { if gfc.UpdFlagName != "" { t.Errorf("\t: To update the golden file with the new value"+ " pass %q to the go test command", "-"+gfc.UpdFlagName) } t.Errorf("\t: Actual\n" + string(val)) return false } if actEqualsExp(t, id, gfName, val, expVal) { return true } if gfc.UpdFlagName != "" { t.Errorf("\t: To update the golden file with the new value"+ " pass %q to the go test command", "-"+gfc.UpdFlagName) } if gfc.keepBadResultsFlag { keepBadResults(t, gfName, val) } else if gfc.KeepBadResultsFlagName != "" { t.Errorf("\t: To keep the (bad) Actual results for later"+ " investigation pass %q to the go test command", "-"+gfc.KeepBadResultsFlagName) } return false } // actEqualsExp compares the expected value against the actual and reports any // difference. 
It will return true if they are equal and false otherwise func actEqualsExp(t *testing.T, id, gfName string, actVal, expVal []byte) bool { t.Helper() if bytes.Equal(actVal, expVal) { return true } t.Log(id) t.Log("\t: Expected\n" + string(expVal)) t.Log("\t: Actual\n" + string(actVal)) t.Errorf("\t: The value given differs from the golden file value: %q", gfName) return false } // updateGoldenFile will attempt to update the golden file with the new // content and return true if it succeeds or false otherwise. If there is an // existing golden file it will try to preverve the contents so that they can // be compared with the new file. It reports its progress; if the file hasn't // changed it does nothing. func updateGoldenFile(t *testing.T, gfName string, val []byte) bool { t.Helper() origVal, err := os.ReadFile(gfName) // nolint: gosec if err == nil { if bytes.Equal(val, origVal) { return true } origFileName := gfName + ".orig" writeFile(t, origFileName, "original contents", origVal) } else if !os.IsNotExist(err) { t.Log("Couldn't preserve the original contents") t.Logf("\t: Couldn't read the golden file: %q", gfName) t.Error("\t: ", err) } if !writeFile(t, gfName, "golden", val) { return false } return true } // keepBadResults will attempt to write the bad results to a new file. func keepBadResults(t *testing.T, gfName string, val []byte) { t.Helper() fName := gfName + ".badResults" writeFile(t, fName, "bad results", val) } // writeFile will write the values into the file. If the parent directories // do not exist then it will create them and try again. func writeFile(t *testing.T, fName, desc string, val []byte) (rval bool) { t.Helper() rval = true var err error defer func() { if err != nil { t.Logf("\t: Couldn't write to the %s file", desc) t.Error("\t: ", err) rval = false } }() t.Logf("Updating/Creating the %s file: %q", desc, fName) err = os.WriteFile(fName, val, pBits) if os.IsNotExist(err) { dir := path.Dir(fName) if dir == "." 
{ return } err = os.MkdirAll(dir, dirPBits) if err != nil { return } err = os.WriteFile(fName, val, pBits) } return }
{ t.Log(id) t.Logf("\t: Problem with the golden file: %q", gfName) t.Errorf("\t: Couldn't read the expected value. Error: %s", err) return nil, false }
conditional_block
record_io.py
import numpy as np import os import matplotlib import matplotlib.pyplot as plt import tensorflow as tf from object_detection.utils import label_map_util from object_detection.utils import dataset_util tf.compat.v1.enable_eager_execution() from PIL import Image import io from matplotlib.patches import Rectangle # --------------------------------------------------------------------------------------------------------------------- # Creating Record Files # --------------------------------------------------------------------------------------------------------------------- def _create_record_file_path(path): # clear the otuput path flag and set flags = tf.app.flags FLAGS = flags.FLAGS try: FLAGS.__delattr__('output_path') FLAGS.__delattr__('f') except: pass tf.app.flags.DEFINE_string('f', '', 'kernel') flags.DEFINE_string('output_path', path, '') FLAGS = flags.FLAGS print("New record file : {}".format(flags.FLAGS.output_path)) return tf.app.flags.FLAGS.output_path def create_record_file(image_path, output_path, examples_dict, class_to_index): output_path = _create_record_file_path(output_path) writer = tf.python_io.TFRecordWriter(output_path) for key, val in examples_dict.items(): example = val example["filename"] = key tf_example = create_tf_example(example, image_path, class_to_index) writer.write(tf_example.SerializeToString()) writer.close() print("Wrote {} examples".format(len(examples_dict))) def create_tf_example(example, path, class_mapping): """ Create a single Tensorflow Example object to be used in creating record Parameters ---------- example : dict A single object; the dictionary should contains the keys "filename" referring to the jpg containing the object, and "box_coords" which gives the location of the object, and "class" the name of the object path : str The path to the image files. 
Returns ------- The tf Example object """ path = (path + os.sep).encode('ascii') filename = example['filename'].encode('ascii') image_format = b'jpg' image = plt.imread(path +filename, "jpg") height, width = image.shape[:2] # Encode the jpg to byte form with tf.gfile.GFile(path+filename, 'rb') as fid: encoded_jpg = bytes(fid.read()) # normalize the box coordinates xmins = [box[0]/width for box in example['box_coords']] ymins = [box[1]/height for box in example['box_coords']] xmaxs = [box[2]/width for box in example['box_coords']] ymaxs = [box[3]/height for box in example['box_coords']] classes_text = [cls.encode('ascii') for cls in example["class"]] classes = [class_mapping[cls] for cls in example["class"]] # create the example tf_example = tf.train.Example(features=tf.train.Features(feature={ 'image/height' : dataset_util.int64_feature(height), 'image/width' : dataset_util.int64_feature(width), 'image/filename' : dataset_util.bytes_feature(filename), 'image/source_id' : dataset_util.bytes_feature(filename), 'image/encoded' : dataset_util.bytes_feature(encoded_jpg), 'image/format' : dataset_util.bytes_feature(image_format), 'image/object/bbox/xmin' : dataset_util.float_list_feature(xmins), 'image/object/bbox/xmax' : dataset_util.float_list_feature(xmaxs), 'image/object/bbox/ymin' : dataset_util.float_list_feature(ymins), 'image/object/bbox/ymax' : dataset_util.float_list_feature(ymaxs), 'image/object/class/text' : dataset_util.bytes_list_feature(classes_text), 'image/object/class/label' : dataset_util.int64_list_feature(classes), })) return tf_example # --------------------------------------------------------------------------------------------------------------------- # Diagnostics # --------------------------------------------------------------------------------------------------------------------- def peek_in_record(path, plot=True):
# --------------------------------------------------------------------------------------------------------------------- # Reading Record Files # --------------------------------------------------------------------------------------------------------------------- def load_tf_record_file(path): for example in tf.python_io.tf_record_iterator(path): yield tf.train.SequenceExample.FromString(example) def read_record_file(path, index_to_class, return_dict=True, plot=True, **plot_kwargs): record = dict() for result in load_tf_record_file(path): fname = str(result.context.feature['image/filename'].bytes_list.value[0], "utf-8") width = result.context.feature['image/width'].int64_list.value[0] height = result.context.feature['image/height'].int64_list.value[0] data = result.context.feature['image/encoded'].bytes_list.value[0] img = Image.open(io.BytesIO(data), mode="r") img = np.asarray(img) xmins = np.array(result.context.feature['image/object/bbox/xmin'].float_list.value) ymins = np.array(result.context.feature['image/object/bbox/ymin'].float_list.value) xmaxs = np.array(result.context.feature['image/object/bbox/xmax'].float_list.value) ymaxs = np.array(result.context.feature['image/object/bbox/ymax'].float_list.value) xmins *= width xmaxs *= width ymins *= height ymaxs *= height labels = np.array(result.context.feature['image/object/class/label'].int64_list.value, dtype=int) if return_dict: record[fname] = dict() record[fname]["width"] = width record[fname]["height"] = height record[fname]["image"] = img record[fname]["xmins"] = xmins record[fname]["xmaxs"] = xmaxs record[fname]["ymins"] = ymins record[fname]["ymaxs"] = ymaxs if plot: fig, ax = plt.subplots(1,1,figsize=plot_kwargs.get("figsize",(8,8))) ax.imshow(np.asarray(img)) coords = [] for xmin, ymin, xmax, ymax in zip(xmins, ymins, xmaxs, ymaxs): coord = [xmin, ymin, xmax, ymax] coords +=[coord] ax.add_patch(Rectangle(xy=(coord[0], coord[1]), width=coord[2]-coord[0], height=coord[3]-coord[1], fill=None, color="r")) 
num = len(labels) x_coords = [1.1]*num y_coords = [i/num for i in range(num)] for (x,y,s, coord) in zip(x_coords, y_coords, labels, coords): text = index_to_class[s] ax.text(x*width,y*height,text, fontsize=16) ax.plot([coord[2],x*width*0.99],[coord[1],y*height], linestyle=":", color="r") ax.set_title(fname.replace("_", "-"), fontsize=18) ax.axis('off') plt.show() return record
objects = dict() obj_per_img = [] obj_shapes = [] img_shapes = [] total_images = 0 for result in load_tf_record_file(path): total_images += 1 names = result.context.feature['image/object/class/text'].bytes_list.value for name in names: if name not in objects: objects[name] = 1 else: objects[name] += 1 obj_per_img += [len(names)] width = result.context.feature['image/width'].int64_list.value[0] height = result.context.feature['image/height'].int64_list.value[0] xmins = np.array(result.context.feature['image/object/bbox/xmin'].float_list.value) ymins = np.array(result.context.feature['image/object/bbox/ymin'].float_list.value) xmaxs = np.array(result.context.feature['image/object/bbox/xmax'].float_list.value) ymaxs = np.array(result.context.feature['image/object/bbox/ymax'].float_list.value) xmins *= width xmaxs *= width ymins *= height ymaxs *= height img_shapes += [[height, width]] for xmin, ymin, xmax, ymax, name in zip(xmins, ymins, xmaxs, ymaxs, names): obj_shapes += [[ymax-ymin, xmax-xmin]] if (ymin < 0) or (ymax > height) or (xmin < 0) or (xmax > width): print("WARNING : Object {} outisde of image region".format(name)) total_objects = sum(objects.values()) obj_shapes = np.array(obj_shapes) img_shapes = np.array(img_shapes) print("="*100) print("Total Images : {0}".format(total_images)) print("Total Objects : {0}".format(total_objects)) print("Ave. 
Objects per Image : {0}".format(total_objects/total_images)) print("Classes : {0}".format(len(objects))) print("="*100) if plot: fig, axes = plt.subplots(2,2,figsize=(12,12)) ax = axes[1,0] ax.scatter(obj_shapes[:,1], obj_shapes[:,0]) ax.set_ylim([0.9*obj_shapes[:,0].min(), 1.1*obj_shapes[:,0].max()]) ax.set_xlim([0.9*obj_shapes[:,1].min(), 1.1*obj_shapes[:,1].max()]) ax.set_xlabel("Width", fontsize=18) ax.set_ylabel("Height", fontsize=18) ax.set_title("Object Shapes", fontsize=18) ax = axes[1,1] ax.scatter(img_shapes[:,1], img_shapes[:,0]) ax.set_ylim([0.9*img_shapes[:,0].min(), 1.1*img_shapes[:,0].max()]) ax.set_xlim([0.9*img_shapes[:,1].min(), 1.1*img_shapes[:,1].max()]) ax.set_xlabel("Width", fontsize=18) ax.set_ylabel("Height", fontsize=18) ax.set_title("Image Shapes", fontsize=18) ax = axes[0,1] ax.hist(obj_per_img, bins = np.arange(0.5,max(obj_per_img)+1.5,1),density=1.0) ax.set_xlabel("Objects per image", fontsize=18) ax = axes[0,0] labels = [x.decode() for x in objects.keys()] sizes = list(objects.values()) ax.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=False, startangle=90) ax.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle. plt.show()
identifier_body
record_io.py
import numpy as np import os import matplotlib import matplotlib.pyplot as plt import tensorflow as tf from object_detection.utils import label_map_util from object_detection.utils import dataset_util tf.compat.v1.enable_eager_execution() from PIL import Image import io from matplotlib.patches import Rectangle # --------------------------------------------------------------------------------------------------------------------- # Creating Record Files # --------------------------------------------------------------------------------------------------------------------- def _create_record_file_path(path): # clear the otuput path flag and set flags = tf.app.flags FLAGS = flags.FLAGS try: FLAGS.__delattr__('output_path') FLAGS.__delattr__('f') except: pass tf.app.flags.DEFINE_string('f', '', 'kernel') flags.DEFINE_string('output_path', path, '') FLAGS = flags.FLAGS print("New record file : {}".format(flags.FLAGS.output_path)) return tf.app.flags.FLAGS.output_path def create_record_file(image_path, output_path, examples_dict, class_to_index): output_path = _create_record_file_path(output_path) writer = tf.python_io.TFRecordWriter(output_path) for key, val in examples_dict.items(): example = val example["filename"] = key tf_example = create_tf_example(example, image_path, class_to_index) writer.write(tf_example.SerializeToString()) writer.close() print("Wrote {} examples".format(len(examples_dict))) def create_tf_example(example, path, class_mapping): """ Create a single Tensorflow Example object to be used in creating record Parameters ---------- example : dict A single object; the dictionary should contains the keys "filename" referring to the jpg containing the object, and "box_coords" which gives the location of the object, and "class" the name of the object path : str The path to the image files. 
Returns ------- The tf Example object """ path = (path + os.sep).encode('ascii') filename = example['filename'].encode('ascii') image_format = b'jpg' image = plt.imread(path +filename, "jpg") height, width = image.shape[:2] # Encode the jpg to byte form with tf.gfile.GFile(path+filename, 'rb') as fid: encoded_jpg = bytes(fid.read()) # normalize the box coordinates xmins = [box[0]/width for box in example['box_coords']] ymins = [box[1]/height for box in example['box_coords']] xmaxs = [box[2]/width for box in example['box_coords']] ymaxs = [box[3]/height for box in example['box_coords']] classes_text = [cls.encode('ascii') for cls in example["class"]] classes = [class_mapping[cls] for cls in example["class"]] # create the example tf_example = tf.train.Example(features=tf.train.Features(feature={ 'image/height' : dataset_util.int64_feature(height), 'image/width' : dataset_util.int64_feature(width), 'image/filename' : dataset_util.bytes_feature(filename), 'image/source_id' : dataset_util.bytes_feature(filename), 'image/encoded' : dataset_util.bytes_feature(encoded_jpg), 'image/format' : dataset_util.bytes_feature(image_format), 'image/object/bbox/xmin' : dataset_util.float_list_feature(xmins), 'image/object/bbox/xmax' : dataset_util.float_list_feature(xmaxs), 'image/object/bbox/ymin' : dataset_util.float_list_feature(ymins), 'image/object/bbox/ymax' : dataset_util.float_list_feature(ymaxs), 'image/object/class/text' : dataset_util.bytes_list_feature(classes_text), 'image/object/class/label' : dataset_util.int64_list_feature(classes), })) return tf_example # --------------------------------------------------------------------------------------------------------------------- # Diagnostics # --------------------------------------------------------------------------------------------------------------------- def peek_in_record(path, plot=True): objects = dict() obj_per_img = [] obj_shapes = [] img_shapes = [] total_images = 0 for result in load_tf_record_file(path): 
total_images += 1 names = result.context.feature['image/object/class/text'].bytes_list.value for name in names: if name not in objects: objects[name] = 1 else: objects[name] += 1 obj_per_img += [len(names)] width = result.context.feature['image/width'].int64_list.value[0] height = result.context.feature['image/height'].int64_list.value[0] xmins = np.array(result.context.feature['image/object/bbox/xmin'].float_list.value) ymins = np.array(result.context.feature['image/object/bbox/ymin'].float_list.value) xmaxs = np.array(result.context.feature['image/object/bbox/xmax'].float_list.value) ymaxs = np.array(result.context.feature['image/object/bbox/ymax'].float_list.value) xmins *= width xmaxs *= width ymins *= height ymaxs *= height img_shapes += [[height, width]] for xmin, ymin, xmax, ymax, name in zip(xmins, ymins, xmaxs, ymaxs, names): obj_shapes += [[ymax-ymin, xmax-xmin]] if (ymin < 0) or (ymax > height) or (xmin < 0) or (xmax > width): print("WARNING : Object {} outisde of image region".format(name)) total_objects = sum(objects.values()) obj_shapes = np.array(obj_shapes) img_shapes = np.array(img_shapes) print("="*100) print("Total Images : {0}".format(total_images)) print("Total Objects : {0}".format(total_objects)) print("Ave. 
Objects per Image : {0}".format(total_objects/total_images)) print("Classes : {0}".format(len(objects))) print("="*100) if plot: fig, axes = plt.subplots(2,2,figsize=(12,12)) ax = axes[1,0] ax.scatter(obj_shapes[:,1], obj_shapes[:,0]) ax.set_ylim([0.9*obj_shapes[:,0].min(), 1.1*obj_shapes[:,0].max()]) ax.set_xlim([0.9*obj_shapes[:,1].min(), 1.1*obj_shapes[:,1].max()]) ax.set_xlabel("Width", fontsize=18) ax.set_ylabel("Height", fontsize=18) ax.set_title("Object Shapes", fontsize=18) ax = axes[1,1] ax.scatter(img_shapes[:,1], img_shapes[:,0]) ax.set_ylim([0.9*img_shapes[:,0].min(), 1.1*img_shapes[:,0].max()]) ax.set_xlim([0.9*img_shapes[:,1].min(), 1.1*img_shapes[:,1].max()]) ax.set_xlabel("Width", fontsize=18) ax.set_ylabel("Height", fontsize=18) ax.set_title("Image Shapes", fontsize=18) ax = axes[0,1] ax.hist(obj_per_img, bins = np.arange(0.5,max(obj_per_img)+1.5,1),density=1.0) ax.set_xlabel("Objects per image", fontsize=18) ax = axes[0,0] labels = [x.decode() for x in objects.keys()] sizes = list(objects.values()) ax.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=False, startangle=90) ax.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle. plt.show() # --------------------------------------------------------------------------------------------------------------------- # Reading Record Files # --------------------------------------------------------------------------------------------------------------------- def load_tf_record_file(path): for example in tf.python_io.tf_record_iterator(path): yield tf.train.SequenceExample.FromString(example) def
(path, index_to_class, return_dict=True, plot=True, **plot_kwargs): record = dict() for result in load_tf_record_file(path): fname = str(result.context.feature['image/filename'].bytes_list.value[0], "utf-8") width = result.context.feature['image/width'].int64_list.value[0] height = result.context.feature['image/height'].int64_list.value[0] data = result.context.feature['image/encoded'].bytes_list.value[0] img = Image.open(io.BytesIO(data), mode="r") img = np.asarray(img) xmins = np.array(result.context.feature['image/object/bbox/xmin'].float_list.value) ymins = np.array(result.context.feature['image/object/bbox/ymin'].float_list.value) xmaxs = np.array(result.context.feature['image/object/bbox/xmax'].float_list.value) ymaxs = np.array(result.context.feature['image/object/bbox/ymax'].float_list.value) xmins *= width xmaxs *= width ymins *= height ymaxs *= height labels = np.array(result.context.feature['image/object/class/label'].int64_list.value, dtype=int) if return_dict: record[fname] = dict() record[fname]["width"] = width record[fname]["height"] = height record[fname]["image"] = img record[fname]["xmins"] = xmins record[fname]["xmaxs"] = xmaxs record[fname]["ymins"] = ymins record[fname]["ymaxs"] = ymaxs if plot: fig, ax = plt.subplots(1,1,figsize=plot_kwargs.get("figsize",(8,8))) ax.imshow(np.asarray(img)) coords = [] for xmin, ymin, xmax, ymax in zip(xmins, ymins, xmaxs, ymaxs): coord = [xmin, ymin, xmax, ymax] coords +=[coord] ax.add_patch(Rectangle(xy=(coord[0], coord[1]), width=coord[2]-coord[0], height=coord[3]-coord[1], fill=None, color="r")) num = len(labels) x_coords = [1.1]*num y_coords = [i/num for i in range(num)] for (x,y,s, coord) in zip(x_coords, y_coords, labels, coords): text = index_to_class[s] ax.text(x*width,y*height,text, fontsize=16) ax.plot([coord[2],x*width*0.99],[coord[1],y*height], linestyle=":", color="r") ax.set_title(fname.replace("_", "-"), fontsize=18) ax.axis('off') plt.show() return record
read_record_file
identifier_name
record_io.py
import numpy as np import os import matplotlib import matplotlib.pyplot as plt import tensorflow as tf from object_detection.utils import label_map_util from object_detection.utils import dataset_util tf.compat.v1.enable_eager_execution() from PIL import Image import io from matplotlib.patches import Rectangle # --------------------------------------------------------------------------------------------------------------------- # Creating Record Files # --------------------------------------------------------------------------------------------------------------------- def _create_record_file_path(path): # clear the otuput path flag and set flags = tf.app.flags FLAGS = flags.FLAGS try: FLAGS.__delattr__('output_path') FLAGS.__delattr__('f') except: pass tf.app.flags.DEFINE_string('f', '', 'kernel') flags.DEFINE_string('output_path', path, '') FLAGS = flags.FLAGS print("New record file : {}".format(flags.FLAGS.output_path)) return tf.app.flags.FLAGS.output_path def create_record_file(image_path, output_path, examples_dict, class_to_index): output_path = _create_record_file_path(output_path) writer = tf.python_io.TFRecordWriter(output_path) for key, val in examples_dict.items(): example = val example["filename"] = key tf_example = create_tf_example(example, image_path, class_to_index) writer.write(tf_example.SerializeToString()) writer.close() print("Wrote {} examples".format(len(examples_dict))) def create_tf_example(example, path, class_mapping): """ Create a single Tensorflow Example object to be used in creating record Parameters ---------- example : dict A single object; the dictionary should contains the keys "filename" referring to the jpg containing the object, and "box_coords" which gives the location of the object, and "class" the name of the object path : str The path to the image files. 
Returns ------- The tf Example object """ path = (path + os.sep).encode('ascii') filename = example['filename'].encode('ascii') image_format = b'jpg' image = plt.imread(path +filename, "jpg") height, width = image.shape[:2] # Encode the jpg to byte form with tf.gfile.GFile(path+filename, 'rb') as fid: encoded_jpg = bytes(fid.read()) # normalize the box coordinates xmins = [box[0]/width for box in example['box_coords']] ymins = [box[1]/height for box in example['box_coords']] xmaxs = [box[2]/width for box in example['box_coords']] ymaxs = [box[3]/height for box in example['box_coords']] classes_text = [cls.encode('ascii') for cls in example["class"]] classes = [class_mapping[cls] for cls in example["class"]] # create the example tf_example = tf.train.Example(features=tf.train.Features(feature={ 'image/height' : dataset_util.int64_feature(height), 'image/width' : dataset_util.int64_feature(width), 'image/filename' : dataset_util.bytes_feature(filename), 'image/source_id' : dataset_util.bytes_feature(filename), 'image/encoded' : dataset_util.bytes_feature(encoded_jpg), 'image/format' : dataset_util.bytes_feature(image_format), 'image/object/bbox/xmin' : dataset_util.float_list_feature(xmins), 'image/object/bbox/xmax' : dataset_util.float_list_feature(xmaxs), 'image/object/bbox/ymin' : dataset_util.float_list_feature(ymins), 'image/object/bbox/ymax' : dataset_util.float_list_feature(ymaxs), 'image/object/class/text' : dataset_util.bytes_list_feature(classes_text), 'image/object/class/label' : dataset_util.int64_list_feature(classes), })) return tf_example # --------------------------------------------------------------------------------------------------------------------- # Diagnostics # --------------------------------------------------------------------------------------------------------------------- def peek_in_record(path, plot=True): objects = dict() obj_per_img = [] obj_shapes = [] img_shapes = [] total_images = 0 for result in load_tf_record_file(path): 
total_images += 1 names = result.context.feature['image/object/class/text'].bytes_list.value for name in names: if name not in objects: objects[name] = 1 else: objects[name] += 1 obj_per_img += [len(names)] width = result.context.feature['image/width'].int64_list.value[0] height = result.context.feature['image/height'].int64_list.value[0] xmins = np.array(result.context.feature['image/object/bbox/xmin'].float_list.value) ymins = np.array(result.context.feature['image/object/bbox/ymin'].float_list.value) xmaxs = np.array(result.context.feature['image/object/bbox/xmax'].float_list.value) ymaxs = np.array(result.context.feature['image/object/bbox/ymax'].float_list.value) xmins *= width xmaxs *= width ymins *= height ymaxs *= height img_shapes += [[height, width]] for xmin, ymin, xmax, ymax, name in zip(xmins, ymins, xmaxs, ymaxs, names): obj_shapes += [[ymax-ymin, xmax-xmin]] if (ymin < 0) or (ymax > height) or (xmin < 0) or (xmax > width): print("WARNING : Object {} outisde of image region".format(name)) total_objects = sum(objects.values()) obj_shapes = np.array(obj_shapes) img_shapes = np.array(img_shapes) print("="*100) print("Total Images : {0}".format(total_images)) print("Total Objects : {0}".format(total_objects)) print("Ave. 
Objects per Image : {0}".format(total_objects/total_images)) print("Classes : {0}".format(len(objects))) print("="*100) if plot: fig, axes = plt.subplots(2,2,figsize=(12,12)) ax = axes[1,0] ax.scatter(obj_shapes[:,1], obj_shapes[:,0]) ax.set_ylim([0.9*obj_shapes[:,0].min(), 1.1*obj_shapes[:,0].max()]) ax.set_xlim([0.9*obj_shapes[:,1].min(), 1.1*obj_shapes[:,1].max()]) ax.set_xlabel("Width", fontsize=18) ax.set_ylabel("Height", fontsize=18) ax.set_title("Object Shapes", fontsize=18) ax = axes[1,1] ax.scatter(img_shapes[:,1], img_shapes[:,0]) ax.set_ylim([0.9*img_shapes[:,0].min(), 1.1*img_shapes[:,0].max()]) ax.set_xlim([0.9*img_shapes[:,1].min(), 1.1*img_shapes[:,1].max()]) ax.set_xlabel("Width", fontsize=18) ax.set_ylabel("Height", fontsize=18) ax.set_title("Image Shapes", fontsize=18) ax = axes[0,1] ax.hist(obj_per_img, bins = np.arange(0.5,max(obj_per_img)+1.5,1),density=1.0) ax.set_xlabel("Objects per image", fontsize=18) ax = axes[0,0] labels = [x.decode() for x in objects.keys()] sizes = list(objects.values()) ax.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=False, startangle=90) ax.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle. 
plt.show() # --------------------------------------------------------------------------------------------------------------------- # Reading Record Files # --------------------------------------------------------------------------------------------------------------------- def load_tf_record_file(path): for example in tf.python_io.tf_record_iterator(path): yield tf.train.SequenceExample.FromString(example) def read_record_file(path, index_to_class, return_dict=True, plot=True, **plot_kwargs): record = dict() for result in load_tf_record_file(path): fname = str(result.context.feature['image/filename'].bytes_list.value[0], "utf-8") width = result.context.feature['image/width'].int64_list.value[0] height = result.context.feature['image/height'].int64_list.value[0] data = result.context.feature['image/encoded'].bytes_list.value[0] img = Image.open(io.BytesIO(data), mode="r") img = np.asarray(img) xmins = np.array(result.context.feature['image/object/bbox/xmin'].float_list.value) ymins = np.array(result.context.feature['image/object/bbox/ymin'].float_list.value) xmaxs = np.array(result.context.feature['image/object/bbox/xmax'].float_list.value) ymaxs = np.array(result.context.feature['image/object/bbox/ymax'].float_list.value) xmins *= width xmaxs *= width ymins *= height ymaxs *= height labels = np.array(result.context.feature['image/object/class/label'].int64_list.value, dtype=int) if return_dict:
if plot: fig, ax = plt.subplots(1,1,figsize=plot_kwargs.get("figsize",(8,8))) ax.imshow(np.asarray(img)) coords = [] for xmin, ymin, xmax, ymax in zip(xmins, ymins, xmaxs, ymaxs): coord = [xmin, ymin, xmax, ymax] coords +=[coord] ax.add_patch(Rectangle(xy=(coord[0], coord[1]), width=coord[2]-coord[0], height=coord[3]-coord[1], fill=None, color="r")) num = len(labels) x_coords = [1.1]*num y_coords = [i/num for i in range(num)] for (x,y,s, coord) in zip(x_coords, y_coords, labels, coords): text = index_to_class[s] ax.text(x*width,y*height,text, fontsize=16) ax.plot([coord[2],x*width*0.99],[coord[1],y*height], linestyle=":", color="r") ax.set_title(fname.replace("_", "-"), fontsize=18) ax.axis('off') plt.show() return record
record[fname] = dict() record[fname]["width"] = width record[fname]["height"] = height record[fname]["image"] = img record[fname]["xmins"] = xmins record[fname]["xmaxs"] = xmaxs record[fname]["ymins"] = ymins record[fname]["ymaxs"] = ymaxs
conditional_block
record_io.py
import numpy as np import os import matplotlib import matplotlib.pyplot as plt import tensorflow as tf from object_detection.utils import label_map_util from object_detection.utils import dataset_util tf.compat.v1.enable_eager_execution() from PIL import Image import io from matplotlib.patches import Rectangle # --------------------------------------------------------------------------------------------------------------------- # Creating Record Files # --------------------------------------------------------------------------------------------------------------------- def _create_record_file_path(path): # clear the otuput path flag and set flags = tf.app.flags FLAGS = flags.FLAGS try: FLAGS.__delattr__('output_path') FLAGS.__delattr__('f') except: pass tf.app.flags.DEFINE_string('f', '', 'kernel') flags.DEFINE_string('output_path', path, '') FLAGS = flags.FLAGS print("New record file : {}".format(flags.FLAGS.output_path)) return tf.app.flags.FLAGS.output_path def create_record_file(image_path, output_path, examples_dict, class_to_index): output_path = _create_record_file_path(output_path) writer = tf.python_io.TFRecordWriter(output_path) for key, val in examples_dict.items(): example = val example["filename"] = key tf_example = create_tf_example(example, image_path, class_to_index) writer.write(tf_example.SerializeToString()) writer.close() print("Wrote {} examples".format(len(examples_dict))) def create_tf_example(example, path, class_mapping): """ Create a single Tensorflow Example object to be used in creating record Parameters ---------- example : dict A single object; the dictionary should contains the keys "filename" referring to the jpg containing the object, and "box_coords" which gives the location of the object, and "class" the name of the object path : str The path to the image files. 
Returns ------- The tf Example object """ path = (path + os.sep).encode('ascii') filename = example['filename'].encode('ascii') image_format = b'jpg' image = plt.imread(path +filename, "jpg") height, width = image.shape[:2] # Encode the jpg to byte form with tf.gfile.GFile(path+filename, 'rb') as fid: encoded_jpg = bytes(fid.read()) # normalize the box coordinates xmins = [box[0]/width for box in example['box_coords']] ymins = [box[1]/height for box in example['box_coords']] xmaxs = [box[2]/width for box in example['box_coords']] ymaxs = [box[3]/height for box in example['box_coords']] classes_text = [cls.encode('ascii') for cls in example["class"]] classes = [class_mapping[cls] for cls in example["class"]] # create the example tf_example = tf.train.Example(features=tf.train.Features(feature={ 'image/height' : dataset_util.int64_feature(height), 'image/width' : dataset_util.int64_feature(width), 'image/filename' : dataset_util.bytes_feature(filename), 'image/source_id' : dataset_util.bytes_feature(filename), 'image/encoded' : dataset_util.bytes_feature(encoded_jpg), 'image/format' : dataset_util.bytes_feature(image_format), 'image/object/bbox/xmin' : dataset_util.float_list_feature(xmins), 'image/object/bbox/xmax' : dataset_util.float_list_feature(xmaxs), 'image/object/bbox/ymin' : dataset_util.float_list_feature(ymins), 'image/object/bbox/ymax' : dataset_util.float_list_feature(ymaxs), 'image/object/class/text' : dataset_util.bytes_list_feature(classes_text), 'image/object/class/label' : dataset_util.int64_list_feature(classes), })) return tf_example # --------------------------------------------------------------------------------------------------------------------- # Diagnostics # ---------------------------------------------------------------------------------------------------------------------
def peek_in_record(path, plot=True): objects = dict() obj_per_img = [] obj_shapes = [] img_shapes = [] total_images = 0 for result in load_tf_record_file(path): total_images += 1 names = result.context.feature['image/object/class/text'].bytes_list.value for name in names: if name not in objects: objects[name] = 1 else: objects[name] += 1 obj_per_img += [len(names)] width = result.context.feature['image/width'].int64_list.value[0] height = result.context.feature['image/height'].int64_list.value[0] xmins = np.array(result.context.feature['image/object/bbox/xmin'].float_list.value) ymins = np.array(result.context.feature['image/object/bbox/ymin'].float_list.value) xmaxs = np.array(result.context.feature['image/object/bbox/xmax'].float_list.value) ymaxs = np.array(result.context.feature['image/object/bbox/ymax'].float_list.value) xmins *= width xmaxs *= width ymins *= height ymaxs *= height img_shapes += [[height, width]] for xmin, ymin, xmax, ymax, name in zip(xmins, ymins, xmaxs, ymaxs, names): obj_shapes += [[ymax-ymin, xmax-xmin]] if (ymin < 0) or (ymax > height) or (xmin < 0) or (xmax > width): print("WARNING : Object {} outisde of image region".format(name)) total_objects = sum(objects.values()) obj_shapes = np.array(obj_shapes) img_shapes = np.array(img_shapes) print("="*100) print("Total Images : {0}".format(total_images)) print("Total Objects : {0}".format(total_objects)) print("Ave. 
Objects per Image : {0}".format(total_objects/total_images)) print("Classes : {0}".format(len(objects))) print("="*100) if plot: fig, axes = plt.subplots(2,2,figsize=(12,12)) ax = axes[1,0] ax.scatter(obj_shapes[:,1], obj_shapes[:,0]) ax.set_ylim([0.9*obj_shapes[:,0].min(), 1.1*obj_shapes[:,0].max()]) ax.set_xlim([0.9*obj_shapes[:,1].min(), 1.1*obj_shapes[:,1].max()]) ax.set_xlabel("Width", fontsize=18) ax.set_ylabel("Height", fontsize=18) ax.set_title("Object Shapes", fontsize=18) ax = axes[1,1] ax.scatter(img_shapes[:,1], img_shapes[:,0]) ax.set_ylim([0.9*img_shapes[:,0].min(), 1.1*img_shapes[:,0].max()]) ax.set_xlim([0.9*img_shapes[:,1].min(), 1.1*img_shapes[:,1].max()]) ax.set_xlabel("Width", fontsize=18) ax.set_ylabel("Height", fontsize=18) ax.set_title("Image Shapes", fontsize=18) ax = axes[0,1] ax.hist(obj_per_img, bins = np.arange(0.5,max(obj_per_img)+1.5,1),density=1.0) ax.set_xlabel("Objects per image", fontsize=18) ax = axes[0,0] labels = [x.decode() for x in objects.keys()] sizes = list(objects.values()) ax.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=False, startangle=90) ax.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle. 
plt.show() # --------------------------------------------------------------------------------------------------------------------- # Reading Record Files # --------------------------------------------------------------------------------------------------------------------- def load_tf_record_file(path): for example in tf.python_io.tf_record_iterator(path): yield tf.train.SequenceExample.FromString(example) def read_record_file(path, index_to_class, return_dict=True, plot=True, **plot_kwargs): record = dict() for result in load_tf_record_file(path): fname = str(result.context.feature['image/filename'].bytes_list.value[0], "utf-8") width = result.context.feature['image/width'].int64_list.value[0] height = result.context.feature['image/height'].int64_list.value[0] data = result.context.feature['image/encoded'].bytes_list.value[0] img = Image.open(io.BytesIO(data), mode="r") img = np.asarray(img) xmins = np.array(result.context.feature['image/object/bbox/xmin'].float_list.value) ymins = np.array(result.context.feature['image/object/bbox/ymin'].float_list.value) xmaxs = np.array(result.context.feature['image/object/bbox/xmax'].float_list.value) ymaxs = np.array(result.context.feature['image/object/bbox/ymax'].float_list.value) xmins *= width xmaxs *= width ymins *= height ymaxs *= height labels = np.array(result.context.feature['image/object/class/label'].int64_list.value, dtype=int) if return_dict: record[fname] = dict() record[fname]["width"] = width record[fname]["height"] = height record[fname]["image"] = img record[fname]["xmins"] = xmins record[fname]["xmaxs"] = xmaxs record[fname]["ymins"] = ymins record[fname]["ymaxs"] = ymaxs if plot: fig, ax = plt.subplots(1,1,figsize=plot_kwargs.get("figsize",(8,8))) ax.imshow(np.asarray(img)) coords = [] for xmin, ymin, xmax, ymax in zip(xmins, ymins, xmaxs, ymaxs): coord = [xmin, ymin, xmax, ymax] coords +=[coord] ax.add_patch(Rectangle(xy=(coord[0], coord[1]), width=coord[2]-coord[0], height=coord[3]-coord[1], fill=None, 
color="r")) num = len(labels) x_coords = [1.1]*num y_coords = [i/num for i in range(num)] for (x,y,s, coord) in zip(x_coords, y_coords, labels, coords): text = index_to_class[s] ax.text(x*width,y*height,text, fontsize=16) ax.plot([coord[2],x*width*0.99],[coord[1],y*height], linestyle=":", color="r") ax.set_title(fname.replace("_", "-"), fontsize=18) ax.axis('off') plt.show() return record
random_line_split
data_tools.py
import numpy as np def get_data(model_type, TRAIN, words, EMB, enforce_gen, n_side_pixl): import numpy as np EMBEDDINGS, OBJ_ctr_sd_enf_gen = {}, [] # 0. Get dictionary of ALL our embedding words EMB_dict = build_emb_dict(words, EMB) # 1. Get the RELEVANT training instances (filtering for 'predicates' and 'complete_only' variables) OBJ_ctr_sd, rel_ids, TRAIN_relevant = get_TRAIN_relevant(TRAIN, words) # 2. get dictionaries WORDLISTS (INDICES for the embedding layer!) EMBEDDINGS['obj_list'] = list(set(TRAIN_relevant['obj'])) EMBEDDINGS['subj_list'] = list(set(TRAIN_relevant['subj'])) EMBEDDINGS['pred_list'] = list(set(TRAIN_relevant['rel'])) allwords = np.concatenate((EMBEDDINGS['subj_list'], EMBEDDINGS['pred_list'], EMBEDDINGS['obj_list']), axis=0) EMBEDDINGS['allwords_list'] = list( set(allwords)) # IMPORTANT: The order of this list is what prevails later on as index for embeddings # 3. Get INITIALIZATION embeddings EMBEDDINGS['subj_EMB'] = wordlist2emb_matrix(EMBEDDINGS['subj_list'], EMB_dict) EMBEDDINGS['pred_EMB'] = wordlist2emb_matrix(EMBEDDINGS['pred_list'], EMB_dict) EMBEDDINGS['obj_EMB'] = wordlist2emb_matrix(EMBEDDINGS['obj_list'], EMB_dict) EMBEDDINGS['allwords_EMB'] = wordlist2emb_matrix(EMBEDDINGS['allwords_list'],EMB_dict) # 3.1. Get RANDOM embeddings (of the size of allwords_EMB) EMBEDDINGS['allwords_EMB_rnd'] = get_random_EMB(EMBEDDINGS['allwords_EMB']) EMBEDDINGS['subj_EMB_rnd'] = get_random_EMB(EMBEDDINGS['subj_EMB']) EMBEDDINGS['pred_EMB_rnd'] = get_random_EMB(EMBEDDINGS['pred_EMB']) EMBEDDINGS['obj_EMB_rnd'] = get_random_EMB(EMBEDDINGS['obj_EMB']) # 3.2. get ONE-HOT embeddings: EMBEDDINGS['subj_EMB_onehot'] = np.identity(len(EMBEDDINGS['subj_list'])) EMBEDDINGS['pred_EMB_onehot'] = np.identity(len(EMBEDDINGS['pred_list'])) EMBEDDINGS['obj_EMB_onehot'] = np.identity(len(EMBEDDINGS['obj_list'])) EMBEDDINGS['allwords_EMB_onehot'] = np.identity(len(EMBEDDINGS['allwords_list'])) # 4. 
Get X data (i.e., get the SEQUENCES of INDICES for the embedding layer) X, X_extra, y, y_pixl, X_extra_enf_gen, X_enf_gen, y_enf_gen, y_enf_gen_pixl, \ idx_IN_X_and_y, idx_enf_gen = relevant_instances2X_and_y(model_type, TRAIN_relevant, EMBEDDINGS, enforce_gen, n_side_pixl) # 5. Get the OBJ_ctr_sd_enf_gen that we need for some performance measures! if enforce_gen['eval'] is not None: OBJ_ctr_sd_enf_gen = OBJ_ctr_sd[idx_enf_gen] # 6. Finally, if we have REDUCED the X and y data by ENFORCING generalization (excluding instances) we have to reduce OBJ_ctr_sd and TRAIN_relevant accordingly if enforce_gen['eval'] is not None: for key in TRAIN_relevant: TRAIN_relevant[key] = np.array(TRAIN_relevant[key]) TRAIN_relevant[key] = TRAIN_relevant[key][idx_IN_X_and_y] OBJ_ctr_sd = OBJ_ctr_sd[idx_IN_X_and_y] rel_ids = np.array(rel_ids) rel_ids = rel_ids[idx_IN_X_and_y] return X, X_extra, y, y_pixl, X_extra_enf_gen, X_enf_gen, y_enf_gen, y_enf_gen_pixl, rel_ids, OBJ_ctr_sd, OBJ_ctr_sd_enf_gen, EMBEDDINGS, TRAIN_relevant def relevant_instances2X_and_y(model_type, TRAIN_relevant, EMBEDDINGS, enforce_gen, n_side_pixl): # OUTPUT: the X and y data, gotten by converting each word into its corresponding index print('Getting X and y data') X_vars = ['subj_ctr_x', 'subj_ctr_y', 'subj_sd_x', 'subj_sd_y'] y_vars = ['obj_sd_x', 'obj_sd_y', 'obj_ctr_x', 'obj_ctr_y'] subj_list, pred_list, obj_list, allwords_list = EMBEDDINGS['subj_list'], EMBEDDINGS['pred_list'], EMBEDDINGS[ 'obj_list'], EMBEDDINGS['allwords_list'] # get X: X, X_enf_gen = {}, {} X['subj'], X['pred'], X['obj'] = [], [], [] X_enf_gen['subj'], X_enf_gen['pred'], X_enf_gen['obj'] = [], [], [] for i in range(len(TRAIN_relevant['subj'])): triplet = (TRAIN_relevant['subj'][i], TRAIN_relevant['rel'][i], TRAIN_relevant['obj'][i]) # append to the GENERALIZED set if (enforce_gen['eval'] == 'triplets') and (triplet in enforce_gen['triplets']): X_enf_gen['subj'].append(subj_list.index(TRAIN_relevant['subj'][i])) 
X_enf_gen['pred'].append(pred_list.index(TRAIN_relevant['rel'][i])) X_enf_gen['obj'].append(obj_list.index(TRAIN_relevant['obj'][i])) elif (enforce_gen['eval'] == 'words') and any(word in enforce_gen['words'] for word in triplet): X_enf_gen['subj'].append(subj_list.index(TRAIN_relevant['subj'][i])) X_enf_gen['pred'].append(pred_list.index(TRAIN_relevant['rel'][i])) X_enf_gen['obj'].append(obj_list.index(TRAIN_relevant['obj'][i])) else: # if either the triplet/word is not generalized or we aren't enforcing generalization X['subj'].append(subj_list.index(TRAIN_relevant['subj'][i])) X['pred'].append(pred_list.index(TRAIN_relevant['rel'][i])) X['obj'].append(obj_list.index(TRAIN_relevant['obj'][i])) # Reshape X['subj'] = np.array(X['subj']).reshape((-1, 1)) X['pred'] = np.array(X['pred']).reshape((-1, 1)) X['obj'] = np.array(X['obj']).reshape((-1, 1)) # FORMAT: if we have gotten some zero shot instances if X_enf_gen['subj'] != []: X_enf_gen['subj'] = np.array(X_enf_gen['subj']).reshape( (-1, 1)) # get them in the right FORMAT for the merged (SEP) model! X_enf_gen['pred'] = np.array(X_enf_gen['pred']).reshape((-1, 1)) X_enf_gen['obj'] = np.array(X_enf_gen['obj']).reshape((-1, 1)) else: X_enf_gen['subj'], X_enf_gen['pred'], X_enf_gen['obj'] = None, None, None # Get Y (if model_type = PIX we output the regular y besides y_pixl!) 
y, y_pixl, y_enf_gen, idx_IN_X_and_y, idx_enf_gen, y_enf_gen_pixl = [], [], [], [], [], [] for i in range(len(TRAIN_relevant['subj'])): y_new_row = [] for k in range(len(y_vars)): y_new_row.extend([float(TRAIN_relevant[y_vars[k]][i])]) # IMPORTANT: We assume that the variables are NUMERIC if model_type == 'PIX': obj_sd_x, obj_sd_y = float(TRAIN_relevant['obj_sd_x'][i]), float(TRAIN_relevant['obj_sd_y'][i]) obj_ctr_x, obj_ctr_y = float(TRAIN_relevant['obj_ctr_x'][i]), float(TRAIN_relevant['obj_ctr_y'][i]) y_pixl_new_row = coord2pixel_indiv(obj_sd_x, obj_sd_y, obj_ctr_x, obj_ctr_y, n_side_pixl) # get stuff for the generalzed setting: triplet = (TRAIN_relevant['subj'][i], TRAIN_relevant['rel'][i], TRAIN_relevant['obj'][i]) if (enforce_gen['eval'] == 'triplets') and (triplet in enforce_gen['triplets']): y_enf_gen.append(y_new_row) if model_type == 'PIX': y_enf_gen_pixl.append(y_pixl_new_row) idx_enf_gen.append(i) elif (enforce_gen['eval'] == 'words') and any(word in enforce_gen['words'] for word in triplet): y_enf_gen.append(y_new_row) if model_type == 'PIX': y_enf_gen_pixl.append(y_pixl_new_row) idx_enf_gen.append(i) else: # NON GENERALIZED y.append(y_new_row) if model_type == 'PIX': y_pixl.append(y_pixl_new_row) idx_IN_X_and_y.append(i) y = np.array(y) y_enf_gen = np.array(y_enf_gen) if y_enf_gen != [] else None if model_type == 'PIX': y_pixl = np.array(y_pixl) y_enf_gen_pixl = np.array(y_enf_gen_pixl) if y_enf_gen_pixl != [] else None else: y_pixl = [[[]]] # necessary because we get the index 0 of y_pixl (if model_type != 'PIX') to save memory in learn_and_evaluate() print('We have gotten ' + str(len(idx_IN_X_and_y)) + ' instances (for both, train & test)') # Get X_extra X_extra, X_extra_enf_gen = [], [] if X_vars != []: for i in range(len(TRAIN_relevant['subj'])): X_extra_new_row = [] for k in range(len(X_vars)): # we already ASSUME that we have at least one y-variable X_extra_new_row.extend( [float(TRAIN_relevant[X_vars[k]][i])]) # IMPORTANT: We assume that the 
variables are NUMERIC # get stuff for the generalized: triplet = (TRAIN_relevant['subj'][i], TRAIN_relevant['rel'][i], TRAIN_relevant['obj'][i]) if (enforce_gen['eval'] == 'triplets') and (triplet in enforce_gen['triplets']): X_extra_enf_gen.append(X_extra_new_row) elif (enforce_gen['eval'] == 'words') and any(word in enforce_gen['words'] for word in triplet): X_extra_enf_gen.append(X_extra_new_row) else: X_extra.append(X_extra_new_row) X_extra = np.array(X_extra) if X_extra != [] else None # IMPORTANT: we only make it a numpy array if we have something, because we use == [] as condition in models_learn X_extra_enf_gen = np.array(X_extra_enf_gen) if X_extra_enf_gen != [] else None return X, X_extra, y, y_pixl, X_extra_enf_gen, X_enf_gen, y_enf_gen, y_enf_gen_pixl, idx_IN_X_and_y, idx_enf_gen def get_TRAIN_relevant(TRAIN, words): # IMPORTANT: we preserve the ORDER of TRAIN (so that we can recover information afterwards) TRAIN_relevant, rel_ids, OBJ_ctr_sd = {}, [], [] print('Getting *relevant* instances, from a total of: ' + str(len(TRAIN['subj']))) var_names = [key for key in TRAIN] # INITIALIZE TRAIN_relavant for varname in var_names: TRAIN_relevant[varname] = [] for i in range(len( TRAIN['subj'] )): # Samples loop we_have_it = True if ((TRAIN['subj'][i] in words) and (TRAIN['rel'][i] in words) and (TRAIN['obj'][i] in words)) else False # if we have the complete triplet if we_have_it == True: for varname in var_names: TRAIN_relevant[varname].append(TRAIN[varname][i]) rel_ids.append(TRAIN['rel_id'][i]) OBJ_ctr_sd.append([TRAIN['img_idx'][i], TRAIN['rel_id'][i], TRAIN['subj'][i], TRAIN['rel'][i], TRAIN['obj'][i], TRAIN['subj_sd_x'][i], TRAIN['subj_sd_y'][i], TRAIN['subj_ctr_x'][i], TRAIN['subj_ctr_y'][i], TRAIN['obj_sd_x'][i], TRAIN['obj_sd_y'][i], TRAIN['obj_ctr_x'][i], TRAIN['obj_ctr_y'][i]]) OBJ_ctr_sd = np.array(OBJ_ctr_sd) print('We have gotten ' + str(len(TRAIN_relevant['subj'])) + ' RELEVANT instances') return OBJ_ctr_sd, rel_ids, TRAIN_relevant def 
get_random_EMB(actual_EMB): # Returns embedding matrix of the original shape with random normal vectors (dimension-wise) mu, sigma, vec_size = np.mean(actual_EMB), np.mean(np.std(actual_EMB, axis=0)), len(actual_EMB[0, :]) rand_EMB = [] for i in range(actual_EMB.shape[0]): # build a dictionary of random vectors rand_EMB.append(np.random.normal(mu, sigma, vec_size)) rand_EMB = np.array(rand_EMB) return rand_EMB def coord2pixel_indiv(obj_sd_x, obj_sd_y, obj_ctr_x, obj_ctr_y, n_side_pixl): ''' This function works with an individual example (extending it to many examples, where e.g., obj_sd_x is a vector, is easy) :param obj_sd_x (and the rest): real number (not vectors!) :param n_side_pixl: number of pixels as output (hyperparameter) :return y_pixl: matrix of pixels, i.e., a 2D tensor (n_side_pixl, n_side_pixl) ''' # continuous bounding box corners (prevent problems of predictions outside [0,1]) A_left_x, A_right_x = max((obj_ctr_x - obj_sd_x), 0), min((obj_ctr_x + obj_sd_x), 1) A_low_y, A_top_y = min((obj_ctr_y + obj_sd_y), 1), max((obj_ctr_y - obj_sd_y), 0) # translate continuous bounding box corners into indices in a n_side_pixl x n_side_pixl matrix i_left, i_right = np.rint( (n_side_pixl - 1)*A_left_x).astype(np.int), np.rint((n_side_pixl - 1)*A_right_x).astype(np.int) j_low, j_top = np.rint((n_side_pixl - 1)*A_low_y).astype(np.int), np.rint((n_side_pixl - 1)*A_top_y).astype(np.int) pixl_matr = np.zeros( (n_side_pixl, n_side_pixl) ) # add ones inside of the bounding box i_range = range( i_left, i_right ) i_range = [i_left] if ((i_left == i_right) or (i_range == [])) else i_range # AVOID THE CASE where width is 0 AND i_range=[] (as upper bound < lower bound) j_range = range( j_top, j_low ) j_range = [j_low] if ((j_low == j_top) or (j_range == [])) else j_range # AVOID THE CASE where height is 0 AND i_range=[] (as upper bound < lower bound) pixl_matr[ np.array(i_range)[:, None], np.array(j_range)] = 1 # (IMPORTANT: indices must be np.arrays) put a 1 everywhere 
inside of the bounding box pixl_matr = pixl_matr.reshape((-1)) return pixl_matr def pixl_idx2coord_all_examples(y_pixl): ''' Transforms the whole set of predicted matrices y_pixl into their continuous CENTER coordinates (Obj_ctr) :param y_pixl: array of MATRICES with predicted heatmaps (pixels). Each matrix = 1 example :return: PRED_obj_ctr_x, PRED_obj_ctr_y: arrays of length = number of examples ''' PRED_obj_ctr_x, PRED_obj_ctr_y = [], [] n_side_pixl = y_pixl.shape[1] #get automatically the number of pixels from the pixel matrix side for i in range( y_pixl.shape[0] ): # loop on number of examples idx_maximums = get_maximums_idx(y_pixl[i]) # get indices of maximum (allow for multiple of them) ctr_x, ctr_y = pixl_idx2coord_indiv(idx_maximums, n_side_pixl) # transform pixel indices into continuous coordinates PRED_obj_ctr_x.append(ctr_x) PRED_obj_ctr_y.append(ctr_y) PRED_obj_ctr_x, PRED_obj_ctr_y = np.array(PRED_obj_ctr_x), np.array(PRED_obj_ctr_y) return PRED_obj_ctr_x, PRED_obj_ctr_y def get_maximums_idx( heat_matrix ): # Given a matrix of activations, it outputs the indices corresponding to its maximum values # INPUT: heat_matrix: matrix of continuous activations (within [0,1]) of size n_side_pixl x n_side_pixl # OUTPUT: maximums: indices corresponding to where the activations are maximum (accounts for multiple maximums) #maximums = np.unravel_index(np.argmax(heat_matrix), heat_matrix.shape) # gives the index of the FIRST largest element. Doesn't account for multiple maximums! maximums = np.where(heat_matrix == heat_matrix.max()) # This one accounts for multiple maximums! return np.array(maximums) def pixl_idx2coord_indiv(idx_maximums, n_side_pixl): ''' This function receives input from get_maximums_indices() Given discrete pixels indices (i,j) where i,j = 0,...,n_side_pixl (where activations are maximal), it transforms them to (continuous) coordinates in [0,1] IMPORTANT: It only computes the CENTER of the Obj (not sd's). 
So it's useful for measures that only use Obj_ctr :param idx_maximums: index of maximums from get_maximums_idx() :param n_side_pixl: side of the activation matrix (necessary to transform indices to coordinates) :return pred_obj_ctr_x, pred_obj_ctr_y: predicted (continuous) coordinates in [0,1] (Obj_ctr) ''' coord = np.mean(idx_maximums, axis = 1) PRED_coord = coord.astype(np.float)/float(n_side_pixl - 1) # Transform pixel indices to (continuous) coordinates pred_obj_ctr_x, pred_obj_ctr_y = PRED_coord[0], PRED_coord[1] return pred_obj_ctr_x, pred_obj_ctr_y def get_folds(n_samples, n_folds): indices = np.random.permutation(np.arange(n_samples)) n_test = int(np.floor(n_samples / n_folds)) kf = [(np.delete(indices, np.arange(i * n_test, (i + 1) * n_test)), # train indices[i * n_test:(i + 1) * n_test]) for i in range(n_folds)] # test return kf def mirror_x(subj_ctr_x, obj_ctr_x): # Computes the absolute value of the obj_ctr_x variable (to make it symmetric) aux_obj_ctr_x = [ (1 - float(obj_ctr_x[i])) if float(obj_ctr_x[i]) <= float(subj_ctr_x[i]) else float(obj_ctr_x[i]) for i in range(len(obj_ctr_x)) ] aux_subj_ctr_x = [ (1 - float(subj_ctr_x[i])) if float(obj_ctr_x[i]) <= float(subj_ctr_x[i]) else float(subj_ctr_x[i]) for i in range(len(obj_ctr_x)) ] subj_ctr_x, obj_ctr_x = aux_subj_ctr_x, aux_obj_ctr_x return subj_ctr_x, obj_ctr_x def build_emb_dict(words, EMB): #Input: words= word list, EMB= embeddings in a np.array format #Output: Dictionary of embeddings
def wordlist2emb_matrix(words_to_get, EMB_dict): # Input: words_to_get = word list from the EMB matrix, EMB_dict= dictionary of embeddings # Output: MATRIX of embeddings # IMPORTANT: it preserves the order of the words_to_get list EMB_matrix = [] for i in range(len(words_to_get)): try: EMB_matrix.append( EMB_dict[words_to_get[i]] ) except KeyError: pass #print 'WARNING! word ' + words_to_get[i] + ' not found in our embeddings! (and it should be!!)' EMB_matrix = np.array(EMB_matrix) return EMB_matrix def get_GEN(TRAIN_relevant, train_idx, test_idx, subj, obj, pred): # Gives the generalized triplets (combination) and the generalized words # IMPORTANT: The outputted indices (idx_gen_tuples) are over the instances of the TEST SET! print ('Getting generalized instances') train_tuples = [(TRAIN_relevant[subj][train_idx[ii]],TRAIN_relevant[pred][train_idx[ii]], TRAIN_relevant[obj][ train_idx[ii] ]) for ii in range(len(train_idx)) ] test_tuples = [(TRAIN_relevant[subj][test_idx[ii]],TRAIN_relevant[pred][test_idx[ii]], TRAIN_relevant[obj][test_idx[ii]]) for ii in range(len(test_idx)) ] # 2. get GENERALIZED TUPLES (its index) #train_tuples, test_tuples = tuples[train_idx], tuples[test_idx] idx_gen_tuples = [i for i, x in enumerate(test_tuples) if x not in train_tuples]#IMPORTANT!!! This index (idx_gen_tuples) is over the instances of the TEST SET! # 3.get GENARALIZED WORDS (its index) subjs, preds, objs = np.array(TRAIN_relevant[subj]), np.array(TRAIN_relevant[pred]), np.array(TRAIN_relevant[obj]) # get unique wordlist in test_tuples[idx_gen_tuples] --> TRICK! 
if there are gen-words, they MUST be within gen-tuples) and in train tuples allwords_train = np.concatenate((subjs[train_idx], preds[train_idx],objs[train_idx]), axis=0) allwords_test = np.concatenate((subjs[test_idx], preds[test_idx],objs[test_idx]), axis=0) unique_words_train = list(set(allwords_train)) unique_words_test = list(set(allwords_test)) # get generalized words gen_words = [ unique_words_test[j] for j in range(len(unique_words_test)) if unique_words_test[j] not in unique_words_train] # intersect unique_words_train and unique_words_test and get the complementary # find INDICES of the tuples in the test set that contain any of the gen-words idx_gen_words = [ i for i,x in enumerate(test_tuples) if any(word in gen_words for word in x) ] # x=tuple, word= word within the tuple, i=index of the tuple x return idx_gen_tuples, idx_gen_words, gen_words def get_CLEAN_train_test_idx(TRAIN_relevant, train_idx, test_idx, clean_eval): # Gives the indices of the triplets that we want to get as a clean test set # clean_eval contains the triplets, etc. of our clean selection of instances # IMPORTANT: idx_clean_train and idx_clean_test are over the instances of the TRAIN and TEST SETS respectively! print ('Getting *clean* train and test instances') #0. get either tuples or triplets from our train AND test data if (clean_eval['eval'] == 'triplets') or (clean_eval['eval'] == 'words'): #if we want clean words, we search them among the whole triplet train_tuples = [(TRAIN_relevant['subj'][train_idx[ii]],TRAIN_relevant['rel'][train_idx[ii]], TRAIN_relevant['obj'][ train_idx[ii] ]) for ii in range(len(train_idx)) ] test_tuples = [(TRAIN_relevant['subj'][test_idx[ii]],TRAIN_relevant['rel'][test_idx[ii]], TRAIN_relevant['obj'][test_idx[ii]]) for ii in range(len(test_idx)) ] #1. Decide what clean triplets/words to use if clean_eval['eval'] == 'triplets': clean_tuples = clean_eval['triplets'] if clean_eval['eval'] == 'words': clean_words = clean_eval['words'] # 2. 
get INDICES of our clean instances (triplets...) in both, TRAIN and TEST data if clean_eval['eval'] == 'triplets': idx_clean_test = [ i for i,x in enumerate(test_tuples) if x in clean_tuples ] idx_clean_train = [ i for i,x in enumerate(train_tuples) if x in clean_tuples ] if clean_eval['eval'] == 'words': idx_clean_test = [ i for i,x in enumerate(test_tuples) if any(word in clean_words for word in x) ] idx_clean_train = [i for i, x in enumerate(train_tuples) if any(word in clean_words for word in x)] return idx_clean_train, idx_clean_test def get_enforce_gen(to_get): # This function can also get the clean_test instances, as there's nothing specific about generalized here import read_data as rd gen_triplets, gen_words = [],[] if to_get == 'triplets': # Read CSV GEN_INST = rd.load_training_data('../data/TRIPLETS_random.csv') # Get all triplets for i in range(len(GEN_INST['subj'])): gen_triplets.append( ( GEN_INST['subj'][i], GEN_INST['rel'][i], GEN_INST['obj'][i] ) ) gen_triplets = list(set(gen_triplets)) elif to_get == 'words': gen_words = rd.readWordlist('../data/WORDS_random.csv') gen_words = list(set(gen_words)) return gen_triplets, gen_words def aux_get_train_test_splits(X, X_extra, y, OBJ_ctr_sd, train_idx, test_idx): # This is an auxiliary function that gives back the train and test splits # Is not very elegant, but we don't create y_pixl_train and test splits because it takes too much memory # get X X_train, X_test = {},{} X_train['subj'], X_test['subj'] = X['subj'][train_idx], X['subj'][test_idx] X_train['pred'], X_test['pred'] = X['pred'][train_idx], X['pred'][test_idx] X_train['obj'], X_test['obj'] = X['obj'][train_idx], X['obj'][test_idx] # get y y_train, y_test = y[train_idx], y[test_idx] #not a dictionary!!! 
(a matrix array) OBJ_ctr_sd_train, OBJ_ctr_sd_test = OBJ_ctr_sd[train_idx], OBJ_ctr_sd[test_idx] X_extra_train, X_extra_test = X_extra[train_idx], X_extra[test_idx] return X_train, X_test, X_extra_train, X_extra_test, y_train, y_test, OBJ_ctr_sd_train, OBJ_ctr_sd_test def compute_centers(x_subj, y_subj, width_subj, height_subj, x_obj, y_obj, width_obj, height_obj): ''' Notice: the (0,0) coordinates of the image correspond to the TOP left corner (not bottom left) INPUT: absolute positions of: x_obj (x of top left corner of bounding box), y_obj (y of top left...), width_obj (width bounding box), height_obj (height bounding box), width_img (width img), height_img (height img) OUTPUT: centered positions (center of bounding box and standard dev. of bounding box): obj_ctr_x, obj_ctr_y, obj_sd_x, obj_sd_y ''' # OBJECT: obj_ctr_x = float(x_obj) + (float(width_obj)/2) obj_ctr_y = float(y_obj) + (float(height_obj)/2) obj_sd_x = (float(width_obj)/2) # after simplifying in the formula of SD, it gives this obj_sd_y = (float(height_obj)/2) # after simplifying in the formula of SD, it gives this # SUBJECT: subj_ctr_x = float(x_subj) + (float(width_subj)/2) subj_ctr_y = float(y_subj) + (float(height_subj)/2) subj_sd_x = (float(width_subj)/2) #after simplifying in the formula of SD, it gives this subj_sd_y = (float(height_subj)/2) # after simplifying in the formula of SD, it gives this return subj_ctr_x, subj_ctr_y, subj_sd_x, subj_sd_y, obj_ctr_x, obj_ctr_y, obj_sd_x, obj_sd_y
EMB_dict = {} for i in range(len(words)): EMB_dict[words[i]] = EMB[i,:] return EMB_dict
identifier_body
data_tools.py
import numpy as np def get_data(model_type, TRAIN, words, EMB, enforce_gen, n_side_pixl): import numpy as np EMBEDDINGS, OBJ_ctr_sd_enf_gen = {}, [] # 0. Get dictionary of ALL our embedding words EMB_dict = build_emb_dict(words, EMB) # 1. Get the RELEVANT training instances (filtering for 'predicates' and 'complete_only' variables) OBJ_ctr_sd, rel_ids, TRAIN_relevant = get_TRAIN_relevant(TRAIN, words) # 2. get dictionaries WORDLISTS (INDICES for the embedding layer!) EMBEDDINGS['obj_list'] = list(set(TRAIN_relevant['obj'])) EMBEDDINGS['subj_list'] = list(set(TRAIN_relevant['subj'])) EMBEDDINGS['pred_list'] = list(set(TRAIN_relevant['rel'])) allwords = np.concatenate((EMBEDDINGS['subj_list'], EMBEDDINGS['pred_list'], EMBEDDINGS['obj_list']), axis=0) EMBEDDINGS['allwords_list'] = list( set(allwords)) # IMPORTANT: The order of this list is what prevails later on as index for embeddings # 3. Get INITIALIZATION embeddings EMBEDDINGS['subj_EMB'] = wordlist2emb_matrix(EMBEDDINGS['subj_list'], EMB_dict) EMBEDDINGS['pred_EMB'] = wordlist2emb_matrix(EMBEDDINGS['pred_list'], EMB_dict) EMBEDDINGS['obj_EMB'] = wordlist2emb_matrix(EMBEDDINGS['obj_list'], EMB_dict) EMBEDDINGS['allwords_EMB'] = wordlist2emb_matrix(EMBEDDINGS['allwords_list'],EMB_dict) # 3.1. Get RANDOM embeddings (of the size of allwords_EMB) EMBEDDINGS['allwords_EMB_rnd'] = get_random_EMB(EMBEDDINGS['allwords_EMB']) EMBEDDINGS['subj_EMB_rnd'] = get_random_EMB(EMBEDDINGS['subj_EMB']) EMBEDDINGS['pred_EMB_rnd'] = get_random_EMB(EMBEDDINGS['pred_EMB']) EMBEDDINGS['obj_EMB_rnd'] = get_random_EMB(EMBEDDINGS['obj_EMB']) # 3.2. get ONE-HOT embeddings: EMBEDDINGS['subj_EMB_onehot'] = np.identity(len(EMBEDDINGS['subj_list'])) EMBEDDINGS['pred_EMB_onehot'] = np.identity(len(EMBEDDINGS['pred_list'])) EMBEDDINGS['obj_EMB_onehot'] = np.identity(len(EMBEDDINGS['obj_list'])) EMBEDDINGS['allwords_EMB_onehot'] = np.identity(len(EMBEDDINGS['allwords_list'])) # 4. 
Get X data (i.e., get the SEQUENCES of INDICES for the embedding layer) X, X_extra, y, y_pixl, X_extra_enf_gen, X_enf_gen, y_enf_gen, y_enf_gen_pixl, \ idx_IN_X_and_y, idx_enf_gen = relevant_instances2X_and_y(model_type, TRAIN_relevant, EMBEDDINGS, enforce_gen, n_side_pixl) # 5. Get the OBJ_ctr_sd_enf_gen that we need for some performance measures! if enforce_gen['eval'] is not None: OBJ_ctr_sd_enf_gen = OBJ_ctr_sd[idx_enf_gen] # 6. Finally, if we have REDUCED the X and y data by ENFORCING generalization (excluding instances) we have to reduce OBJ_ctr_sd and TRAIN_relevant accordingly if enforce_gen['eval'] is not None: for key in TRAIN_relevant: TRAIN_relevant[key] = np.array(TRAIN_relevant[key]) TRAIN_relevant[key] = TRAIN_relevant[key][idx_IN_X_and_y] OBJ_ctr_sd = OBJ_ctr_sd[idx_IN_X_and_y] rel_ids = np.array(rel_ids) rel_ids = rel_ids[idx_IN_X_and_y] return X, X_extra, y, y_pixl, X_extra_enf_gen, X_enf_gen, y_enf_gen, y_enf_gen_pixl, rel_ids, OBJ_ctr_sd, OBJ_ctr_sd_enf_gen, EMBEDDINGS, TRAIN_relevant def relevant_instances2X_and_y(model_type, TRAIN_relevant, EMBEDDINGS, enforce_gen, n_side_pixl): # OUTPUT: the X and y data, gotten by converting each word into its corresponding index print('Getting X and y data') X_vars = ['subj_ctr_x', 'subj_ctr_y', 'subj_sd_x', 'subj_sd_y'] y_vars = ['obj_sd_x', 'obj_sd_y', 'obj_ctr_x', 'obj_ctr_y'] subj_list, pred_list, obj_list, allwords_list = EMBEDDINGS['subj_list'], EMBEDDINGS['pred_list'], EMBEDDINGS[ 'obj_list'], EMBEDDINGS['allwords_list'] # get X: X, X_enf_gen = {}, {} X['subj'], X['pred'], X['obj'] = [], [], [] X_enf_gen['subj'], X_enf_gen['pred'], X_enf_gen['obj'] = [], [], [] for i in range(len(TRAIN_relevant['subj'])): triplet = (TRAIN_relevant['subj'][i], TRAIN_relevant['rel'][i], TRAIN_relevant['obj'][i]) # append to the GENERALIZED set if (enforce_gen['eval'] == 'triplets') and (triplet in enforce_gen['triplets']): X_enf_gen['subj'].append(subj_list.index(TRAIN_relevant['subj'][i])) 
X_enf_gen['pred'].append(pred_list.index(TRAIN_relevant['rel'][i])) X_enf_gen['obj'].append(obj_list.index(TRAIN_relevant['obj'][i])) elif (enforce_gen['eval'] == 'words') and any(word in enforce_gen['words'] for word in triplet): X_enf_gen['subj'].append(subj_list.index(TRAIN_relevant['subj'][i])) X_enf_gen['pred'].append(pred_list.index(TRAIN_relevant['rel'][i])) X_enf_gen['obj'].append(obj_list.index(TRAIN_relevant['obj'][i])) else: # if either the triplet/word is not generalized or we aren't enforcing generalization X['subj'].append(subj_list.index(TRAIN_relevant['subj'][i])) X['pred'].append(pred_list.index(TRAIN_relevant['rel'][i])) X['obj'].append(obj_list.index(TRAIN_relevant['obj'][i])) # Reshape X['subj'] = np.array(X['subj']).reshape((-1, 1)) X['pred'] = np.array(X['pred']).reshape((-1, 1)) X['obj'] = np.array(X['obj']).reshape((-1, 1)) # FORMAT: if we have gotten some zero shot instances if X_enf_gen['subj'] != []: X_enf_gen['subj'] = np.array(X_enf_gen['subj']).reshape( (-1, 1)) # get them in the right FORMAT for the merged (SEP) model! X_enf_gen['pred'] = np.array(X_enf_gen['pred']).reshape((-1, 1)) X_enf_gen['obj'] = np.array(X_enf_gen['obj']).reshape((-1, 1)) else: X_enf_gen['subj'], X_enf_gen['pred'], X_enf_gen['obj'] = None, None, None # Get Y (if model_type = PIX we output the regular y besides y_pixl!) 
y, y_pixl, y_enf_gen, idx_IN_X_and_y, idx_enf_gen, y_enf_gen_pixl = [], [], [], [], [], [] for i in range(len(TRAIN_relevant['subj'])): y_new_row = [] for k in range(len(y_vars)): y_new_row.extend([float(TRAIN_relevant[y_vars[k]][i])]) # IMPORTANT: We assume that the variables are NUMERIC if model_type == 'PIX': obj_sd_x, obj_sd_y = float(TRAIN_relevant['obj_sd_x'][i]), float(TRAIN_relevant['obj_sd_y'][i]) obj_ctr_x, obj_ctr_y = float(TRAIN_relevant['obj_ctr_x'][i]), float(TRAIN_relevant['obj_ctr_y'][i]) y_pixl_new_row = coord2pixel_indiv(obj_sd_x, obj_sd_y, obj_ctr_x, obj_ctr_y, n_side_pixl) # get stuff for the generalzed setting: triplet = (TRAIN_relevant['subj'][i], TRAIN_relevant['rel'][i], TRAIN_relevant['obj'][i]) if (enforce_gen['eval'] == 'triplets') and (triplet in enforce_gen['triplets']): y_enf_gen.append(y_new_row) if model_type == 'PIX': y_enf_gen_pixl.append(y_pixl_new_row) idx_enf_gen.append(i) elif (enforce_gen['eval'] == 'words') and any(word in enforce_gen['words'] for word in triplet): y_enf_gen.append(y_new_row) if model_type == 'PIX': y_enf_gen_pixl.append(y_pixl_new_row) idx_enf_gen.append(i) else: # NON GENERALIZED y.append(y_new_row) if model_type == 'PIX': y_pixl.append(y_pixl_new_row) idx_IN_X_and_y.append(i) y = np.array(y) y_enf_gen = np.array(y_enf_gen) if y_enf_gen != [] else None if model_type == 'PIX': y_pixl = np.array(y_pixl) y_enf_gen_pixl = np.array(y_enf_gen_pixl) if y_enf_gen_pixl != [] else None else: y_pixl = [[[]]] # necessary because we get the index 0 of y_pixl (if model_type != 'PIX') to save memory in learn_and_evaluate() print('We have gotten ' + str(len(idx_IN_X_and_y)) + ' instances (for both, train & test)') # Get X_extra X_extra, X_extra_enf_gen = [], [] if X_vars != []: for i in range(len(TRAIN_relevant['subj'])): X_extra_new_row = [] for k in range(len(X_vars)): # we already ASSUME that we have at least one y-variable X_extra_new_row.extend( [float(TRAIN_relevant[X_vars[k]][i])]) # IMPORTANT: We assume that the 
variables are NUMERIC # get stuff for the generalized: triplet = (TRAIN_relevant['subj'][i], TRAIN_relevant['rel'][i], TRAIN_relevant['obj'][i]) if (enforce_gen['eval'] == 'triplets') and (triplet in enforce_gen['triplets']): X_extra_enf_gen.append(X_extra_new_row) elif (enforce_gen['eval'] == 'words') and any(word in enforce_gen['words'] for word in triplet): X_extra_enf_gen.append(X_extra_new_row) else: X_extra.append(X_extra_new_row) X_extra = np.array(X_extra) if X_extra != [] else None # IMPORTANT: we only make it a numpy array if we have something, because we use == [] as condition in models_learn X_extra_enf_gen = np.array(X_extra_enf_gen) if X_extra_enf_gen != [] else None return X, X_extra, y, y_pixl, X_extra_enf_gen, X_enf_gen, y_enf_gen, y_enf_gen_pixl, idx_IN_X_and_y, idx_enf_gen def get_TRAIN_relevant(TRAIN, words): # IMPORTANT: we preserve the ORDER of TRAIN (so that we can recover information afterwards) TRAIN_relevant, rel_ids, OBJ_ctr_sd = {}, [], [] print('Getting *relevant* instances, from a total of: ' + str(len(TRAIN['subj']))) var_names = [key for key in TRAIN] # INITIALIZE TRAIN_relavant for varname in var_names: TRAIN_relevant[varname] = [] for i in range(len( TRAIN['subj'] )): # Samples loop we_have_it = True if ((TRAIN['subj'][i] in words) and (TRAIN['rel'][i] in words) and (TRAIN['obj'][i] in words)) else False # if we have the complete triplet if we_have_it == True: for varname in var_names: TRAIN_relevant[varname].append(TRAIN[varname][i]) rel_ids.append(TRAIN['rel_id'][i]) OBJ_ctr_sd.append([TRAIN['img_idx'][i], TRAIN['rel_id'][i], TRAIN['subj'][i], TRAIN['rel'][i], TRAIN['obj'][i], TRAIN['subj_sd_x'][i], TRAIN['subj_sd_y'][i], TRAIN['subj_ctr_x'][i], TRAIN['subj_ctr_y'][i], TRAIN['obj_sd_x'][i], TRAIN['obj_sd_y'][i], TRAIN['obj_ctr_x'][i], TRAIN['obj_ctr_y'][i]]) OBJ_ctr_sd = np.array(OBJ_ctr_sd) print('We have gotten ' + str(len(TRAIN_relevant['subj'])) + ' RELEVANT instances') return OBJ_ctr_sd, rel_ids, TRAIN_relevant def 
get_random_EMB(actual_EMB): # Returns embedding matrix of the original shape with random normal vectors (dimension-wise) mu, sigma, vec_size = np.mean(actual_EMB), np.mean(np.std(actual_EMB, axis=0)), len(actual_EMB[0, :]) rand_EMB = [] for i in range(actual_EMB.shape[0]): # build a dictionary of random vectors rand_EMB.append(np.random.normal(mu, sigma, vec_size)) rand_EMB = np.array(rand_EMB) return rand_EMB
:param n_side_pixl: number of pixels as output (hyperparameter) :return y_pixl: matrix of pixels, i.e., a 2D tensor (n_side_pixl, n_side_pixl) ''' # continuous bounding box corners (prevent problems of predictions outside [0,1]) A_left_x, A_right_x = max((obj_ctr_x - obj_sd_x), 0), min((obj_ctr_x + obj_sd_x), 1) A_low_y, A_top_y = min((obj_ctr_y + obj_sd_y), 1), max((obj_ctr_y - obj_sd_y), 0) # translate continuous bounding box corners into indices in a n_side_pixl x n_side_pixl matrix i_left, i_right = np.rint( (n_side_pixl - 1)*A_left_x).astype(np.int), np.rint((n_side_pixl - 1)*A_right_x).astype(np.int) j_low, j_top = np.rint((n_side_pixl - 1)*A_low_y).astype(np.int), np.rint((n_side_pixl - 1)*A_top_y).astype(np.int) pixl_matr = np.zeros( (n_side_pixl, n_side_pixl) ) # add ones inside of the bounding box i_range = range( i_left, i_right ) i_range = [i_left] if ((i_left == i_right) or (i_range == [])) else i_range # AVOID THE CASE where width is 0 AND i_range=[] (as upper bound < lower bound) j_range = range( j_top, j_low ) j_range = [j_low] if ((j_low == j_top) or (j_range == [])) else j_range # AVOID THE CASE where height is 0 AND i_range=[] (as upper bound < lower bound) pixl_matr[ np.array(i_range)[:, None], np.array(j_range)] = 1 # (IMPORTANT: indices must be np.arrays) put a 1 everywhere inside of the bounding box pixl_matr = pixl_matr.reshape((-1)) return pixl_matr def pixl_idx2coord_all_examples(y_pixl): ''' Transforms the whole set of predicted matrices y_pixl into their continuous CENTER coordinates (Obj_ctr) :param y_pixl: array of MATRICES with predicted heatmaps (pixels). 
Each matrix = 1 example :return: PRED_obj_ctr_x, PRED_obj_ctr_y: arrays of length = number of examples ''' PRED_obj_ctr_x, PRED_obj_ctr_y = [], [] n_side_pixl = y_pixl.shape[1] #get automatically the number of pixels from the pixel matrix side for i in range( y_pixl.shape[0] ): # loop on number of examples idx_maximums = get_maximums_idx(y_pixl[i]) # get indices of maximum (allow for multiple of them) ctr_x, ctr_y = pixl_idx2coord_indiv(idx_maximums, n_side_pixl) # transform pixel indices into continuous coordinates PRED_obj_ctr_x.append(ctr_x) PRED_obj_ctr_y.append(ctr_y) PRED_obj_ctr_x, PRED_obj_ctr_y = np.array(PRED_obj_ctr_x), np.array(PRED_obj_ctr_y) return PRED_obj_ctr_x, PRED_obj_ctr_y def get_maximums_idx( heat_matrix ): # Given a matrix of activations, it outputs the indices corresponding to its maximum values # INPUT: heat_matrix: matrix of continuous activations (within [0,1]) of size n_side_pixl x n_side_pixl # OUTPUT: maximums: indices corresponding to where the activations are maximum (accounts for multiple maximums) #maximums = np.unravel_index(np.argmax(heat_matrix), heat_matrix.shape) # gives the index of the FIRST largest element. Doesn't account for multiple maximums! maximums = np.where(heat_matrix == heat_matrix.max()) # This one accounts for multiple maximums! return np.array(maximums) def pixl_idx2coord_indiv(idx_maximums, n_side_pixl): ''' This function receives input from get_maximums_indices() Given discrete pixels indices (i,j) where i,j = 0,...,n_side_pixl (where activations are maximal), it transforms them to (continuous) coordinates in [0,1] IMPORTANT: It only computes the CENTER of the Obj (not sd's). 
So it's useful for measures that only use Obj_ctr :param idx_maximums: index of maximums from get_maximums_idx() :param n_side_pixl: side of the activation matrix (necessary to transform indices to coordinates) :return pred_obj_ctr_x, pred_obj_ctr_y: predicted (continuous) coordinates in [0,1] (Obj_ctr) ''' coord = np.mean(idx_maximums, axis = 1) PRED_coord = coord.astype(np.float)/float(n_side_pixl - 1) # Transform pixel indices to (continuous) coordinates pred_obj_ctr_x, pred_obj_ctr_y = PRED_coord[0], PRED_coord[1] return pred_obj_ctr_x, pred_obj_ctr_y def get_folds(n_samples, n_folds): indices = np.random.permutation(np.arange(n_samples)) n_test = int(np.floor(n_samples / n_folds)) kf = [(np.delete(indices, np.arange(i * n_test, (i + 1) * n_test)), # train indices[i * n_test:(i + 1) * n_test]) for i in range(n_folds)] # test return kf def mirror_x(subj_ctr_x, obj_ctr_x): # Computes the absolute value of the obj_ctr_x variable (to make it symmetric) aux_obj_ctr_x = [ (1 - float(obj_ctr_x[i])) if float(obj_ctr_x[i]) <= float(subj_ctr_x[i]) else float(obj_ctr_x[i]) for i in range(len(obj_ctr_x)) ] aux_subj_ctr_x = [ (1 - float(subj_ctr_x[i])) if float(obj_ctr_x[i]) <= float(subj_ctr_x[i]) else float(subj_ctr_x[i]) for i in range(len(obj_ctr_x)) ] subj_ctr_x, obj_ctr_x = aux_subj_ctr_x, aux_obj_ctr_x return subj_ctr_x, obj_ctr_x def build_emb_dict(words, EMB): #Input: words= word list, EMB= embeddings in a np.array format #Output: Dictionary of embeddings EMB_dict = {} for i in range(len(words)): EMB_dict[words[i]] = EMB[i,:] return EMB_dict def wordlist2emb_matrix(words_to_get, EMB_dict): # Input: words_to_get = word list from the EMB matrix, EMB_dict= dictionary of embeddings # Output: MATRIX of embeddings # IMPORTANT: it preserves the order of the words_to_get list EMB_matrix = [] for i in range(len(words_to_get)): try: EMB_matrix.append( EMB_dict[words_to_get[i]] ) except KeyError: pass #print 'WARNING! word ' + words_to_get[i] + ' not found in our embeddings! 
(and it should be!!)' EMB_matrix = np.array(EMB_matrix) return EMB_matrix def get_GEN(TRAIN_relevant, train_idx, test_idx, subj, obj, pred): # Gives the generalized triplets (combination) and the generalized words # IMPORTANT: The outputted indices (idx_gen_tuples) are over the instances of the TEST SET! print ('Getting generalized instances') train_tuples = [(TRAIN_relevant[subj][train_idx[ii]],TRAIN_relevant[pred][train_idx[ii]], TRAIN_relevant[obj][ train_idx[ii] ]) for ii in range(len(train_idx)) ] test_tuples = [(TRAIN_relevant[subj][test_idx[ii]],TRAIN_relevant[pred][test_idx[ii]], TRAIN_relevant[obj][test_idx[ii]]) for ii in range(len(test_idx)) ] # 2. get GENERALIZED TUPLES (its index) #train_tuples, test_tuples = tuples[train_idx], tuples[test_idx] idx_gen_tuples = [i for i, x in enumerate(test_tuples) if x not in train_tuples]#IMPORTANT!!! This index (idx_gen_tuples) is over the instances of the TEST SET! # 3.get GENARALIZED WORDS (its index) subjs, preds, objs = np.array(TRAIN_relevant[subj]), np.array(TRAIN_relevant[pred]), np.array(TRAIN_relevant[obj]) # get unique wordlist in test_tuples[idx_gen_tuples] --> TRICK! 
if there are gen-words, they MUST be within gen-tuples) and in train tuples allwords_train = np.concatenate((subjs[train_idx], preds[train_idx],objs[train_idx]), axis=0) allwords_test = np.concatenate((subjs[test_idx], preds[test_idx],objs[test_idx]), axis=0) unique_words_train = list(set(allwords_train)) unique_words_test = list(set(allwords_test)) # get generalized words gen_words = [ unique_words_test[j] for j in range(len(unique_words_test)) if unique_words_test[j] not in unique_words_train] # intersect unique_words_train and unique_words_test and get the complementary # find INDICES of the tuples in the test set that contain any of the gen-words idx_gen_words = [ i for i,x in enumerate(test_tuples) if any(word in gen_words for word in x) ] # x=tuple, word= word within the tuple, i=index of the tuple x return idx_gen_tuples, idx_gen_words, gen_words def get_CLEAN_train_test_idx(TRAIN_relevant, train_idx, test_idx, clean_eval): # Gives the indices of the triplets that we want to get as a clean test set # clean_eval contains the triplets, etc. of our clean selection of instances # IMPORTANT: idx_clean_train and idx_clean_test are over the instances of the TRAIN and TEST SETS respectively! print ('Getting *clean* train and test instances') #0. get either tuples or triplets from our train AND test data if (clean_eval['eval'] == 'triplets') or (clean_eval['eval'] == 'words'): #if we want clean words, we search them among the whole triplet train_tuples = [(TRAIN_relevant['subj'][train_idx[ii]],TRAIN_relevant['rel'][train_idx[ii]], TRAIN_relevant['obj'][ train_idx[ii] ]) for ii in range(len(train_idx)) ] test_tuples = [(TRAIN_relevant['subj'][test_idx[ii]],TRAIN_relevant['rel'][test_idx[ii]], TRAIN_relevant['obj'][test_idx[ii]]) for ii in range(len(test_idx)) ] #1. Decide what clean triplets/words to use if clean_eval['eval'] == 'triplets': clean_tuples = clean_eval['triplets'] if clean_eval['eval'] == 'words': clean_words = clean_eval['words'] # 2. 
get INDICES of our clean instances (triplets...) in both, TRAIN and TEST data if clean_eval['eval'] == 'triplets': idx_clean_test = [ i for i,x in enumerate(test_tuples) if x in clean_tuples ] idx_clean_train = [ i for i,x in enumerate(train_tuples) if x in clean_tuples ] if clean_eval['eval'] == 'words': idx_clean_test = [ i for i,x in enumerate(test_tuples) if any(word in clean_words for word in x) ] idx_clean_train = [i for i, x in enumerate(train_tuples) if any(word in clean_words for word in x)] return idx_clean_train, idx_clean_test def get_enforce_gen(to_get): # This function can also get the clean_test instances, as there's nothing specific about generalized here import read_data as rd gen_triplets, gen_words = [],[] if to_get == 'triplets': # Read CSV GEN_INST = rd.load_training_data('../data/TRIPLETS_random.csv') # Get all triplets for i in range(len(GEN_INST['subj'])): gen_triplets.append( ( GEN_INST['subj'][i], GEN_INST['rel'][i], GEN_INST['obj'][i] ) ) gen_triplets = list(set(gen_triplets)) elif to_get == 'words': gen_words = rd.readWordlist('../data/WORDS_random.csv') gen_words = list(set(gen_words)) return gen_triplets, gen_words def aux_get_train_test_splits(X, X_extra, y, OBJ_ctr_sd, train_idx, test_idx): # This is an auxiliary function that gives back the train and test splits # Is not very elegant, but we don't create y_pixl_train and test splits because it takes too much memory # get X X_train, X_test = {},{} X_train['subj'], X_test['subj'] = X['subj'][train_idx], X['subj'][test_idx] X_train['pred'], X_test['pred'] = X['pred'][train_idx], X['pred'][test_idx] X_train['obj'], X_test['obj'] = X['obj'][train_idx], X['obj'][test_idx] # get y y_train, y_test = y[train_idx], y[test_idx] #not a dictionary!!! 
(a matrix array) OBJ_ctr_sd_train, OBJ_ctr_sd_test = OBJ_ctr_sd[train_idx], OBJ_ctr_sd[test_idx] X_extra_train, X_extra_test = X_extra[train_idx], X_extra[test_idx] return X_train, X_test, X_extra_train, X_extra_test, y_train, y_test, OBJ_ctr_sd_train, OBJ_ctr_sd_test def compute_centers(x_subj, y_subj, width_subj, height_subj, x_obj, y_obj, width_obj, height_obj): ''' Notice: the (0,0) coordinates of the image correspond to the TOP left corner (not bottom left) INPUT: absolute positions of: x_obj (x of top left corner of bounding box), y_obj (y of top left...), width_obj (width bounding box), height_obj (height bounding box), width_img (width img), height_img (height img) OUTPUT: centered positions (center of bounding box and standard dev. of bounding box): obj_ctr_x, obj_ctr_y, obj_sd_x, obj_sd_y ''' # OBJECT: obj_ctr_x = float(x_obj) + (float(width_obj)/2) obj_ctr_y = float(y_obj) + (float(height_obj)/2) obj_sd_x = (float(width_obj)/2) # after simplifying in the formula of SD, it gives this obj_sd_y = (float(height_obj)/2) # after simplifying in the formula of SD, it gives this # SUBJECT: subj_ctr_x = float(x_subj) + (float(width_subj)/2) subj_ctr_y = float(y_subj) + (float(height_subj)/2) subj_sd_x = (float(width_subj)/2) #after simplifying in the formula of SD, it gives this subj_sd_y = (float(height_subj)/2) # after simplifying in the formula of SD, it gives this return subj_ctr_x, subj_ctr_y, subj_sd_x, subj_sd_y, obj_ctr_x, obj_ctr_y, obj_sd_x, obj_sd_y
def coord2pixel_indiv(obj_sd_x, obj_sd_y, obj_ctr_x, obj_ctr_y, n_side_pixl): ''' This function works with an individual example (extending it to many examples, where e.g., obj_sd_x is a vector, is easy) :param obj_sd_x (and the rest): real number (not vectors!)
random_line_split
data_tools.py
import numpy as np def get_data(model_type, TRAIN, words, EMB, enforce_gen, n_side_pixl): import numpy as np EMBEDDINGS, OBJ_ctr_sd_enf_gen = {}, [] # 0. Get dictionary of ALL our embedding words EMB_dict = build_emb_dict(words, EMB) # 1. Get the RELEVANT training instances (filtering for 'predicates' and 'complete_only' variables) OBJ_ctr_sd, rel_ids, TRAIN_relevant = get_TRAIN_relevant(TRAIN, words) # 2. get dictionaries WORDLISTS (INDICES for the embedding layer!) EMBEDDINGS['obj_list'] = list(set(TRAIN_relevant['obj'])) EMBEDDINGS['subj_list'] = list(set(TRAIN_relevant['subj'])) EMBEDDINGS['pred_list'] = list(set(TRAIN_relevant['rel'])) allwords = np.concatenate((EMBEDDINGS['subj_list'], EMBEDDINGS['pred_list'], EMBEDDINGS['obj_list']), axis=0) EMBEDDINGS['allwords_list'] = list( set(allwords)) # IMPORTANT: The order of this list is what prevails later on as index for embeddings # 3. Get INITIALIZATION embeddings EMBEDDINGS['subj_EMB'] = wordlist2emb_matrix(EMBEDDINGS['subj_list'], EMB_dict) EMBEDDINGS['pred_EMB'] = wordlist2emb_matrix(EMBEDDINGS['pred_list'], EMB_dict) EMBEDDINGS['obj_EMB'] = wordlist2emb_matrix(EMBEDDINGS['obj_list'], EMB_dict) EMBEDDINGS['allwords_EMB'] = wordlist2emb_matrix(EMBEDDINGS['allwords_list'],EMB_dict) # 3.1. Get RANDOM embeddings (of the size of allwords_EMB) EMBEDDINGS['allwords_EMB_rnd'] = get_random_EMB(EMBEDDINGS['allwords_EMB']) EMBEDDINGS['subj_EMB_rnd'] = get_random_EMB(EMBEDDINGS['subj_EMB']) EMBEDDINGS['pred_EMB_rnd'] = get_random_EMB(EMBEDDINGS['pred_EMB']) EMBEDDINGS['obj_EMB_rnd'] = get_random_EMB(EMBEDDINGS['obj_EMB']) # 3.2. get ONE-HOT embeddings: EMBEDDINGS['subj_EMB_onehot'] = np.identity(len(EMBEDDINGS['subj_list'])) EMBEDDINGS['pred_EMB_onehot'] = np.identity(len(EMBEDDINGS['pred_list'])) EMBEDDINGS['obj_EMB_onehot'] = np.identity(len(EMBEDDINGS['obj_list'])) EMBEDDINGS['allwords_EMB_onehot'] = np.identity(len(EMBEDDINGS['allwords_list'])) # 4. 
Get X data (i.e., get the SEQUENCES of INDICES for the embedding layer) X, X_extra, y, y_pixl, X_extra_enf_gen, X_enf_gen, y_enf_gen, y_enf_gen_pixl, \ idx_IN_X_and_y, idx_enf_gen = relevant_instances2X_and_y(model_type, TRAIN_relevant, EMBEDDINGS, enforce_gen, n_side_pixl) # 5. Get the OBJ_ctr_sd_enf_gen that we need for some performance measures! if enforce_gen['eval'] is not None: OBJ_ctr_sd_enf_gen = OBJ_ctr_sd[idx_enf_gen] # 6. Finally, if we have REDUCED the X and y data by ENFORCING generalization (excluding instances) we have to reduce OBJ_ctr_sd and TRAIN_relevant accordingly if enforce_gen['eval'] is not None: for key in TRAIN_relevant: TRAIN_relevant[key] = np.array(TRAIN_relevant[key]) TRAIN_relevant[key] = TRAIN_relevant[key][idx_IN_X_and_y] OBJ_ctr_sd = OBJ_ctr_sd[idx_IN_X_and_y] rel_ids = np.array(rel_ids) rel_ids = rel_ids[idx_IN_X_and_y] return X, X_extra, y, y_pixl, X_extra_enf_gen, X_enf_gen, y_enf_gen, y_enf_gen_pixl, rel_ids, OBJ_ctr_sd, OBJ_ctr_sd_enf_gen, EMBEDDINGS, TRAIN_relevant def relevant_instances2X_and_y(model_type, TRAIN_relevant, EMBEDDINGS, enforce_gen, n_side_pixl): # OUTPUT: the X and y data, gotten by converting each word into its corresponding index print('Getting X and y data') X_vars = ['subj_ctr_x', 'subj_ctr_y', 'subj_sd_x', 'subj_sd_y'] y_vars = ['obj_sd_x', 'obj_sd_y', 'obj_ctr_x', 'obj_ctr_y'] subj_list, pred_list, obj_list, allwords_list = EMBEDDINGS['subj_list'], EMBEDDINGS['pred_list'], EMBEDDINGS[ 'obj_list'], EMBEDDINGS['allwords_list'] # get X: X, X_enf_gen = {}, {} X['subj'], X['pred'], X['obj'] = [], [], [] X_enf_gen['subj'], X_enf_gen['pred'], X_enf_gen['obj'] = [], [], [] for i in range(len(TRAIN_relevant['subj'])): triplet = (TRAIN_relevant['subj'][i], TRAIN_relevant['rel'][i], TRAIN_relevant['obj'][i]) # append to the GENERALIZED set if (enforce_gen['eval'] == 'triplets') and (triplet in enforce_gen['triplets']): X_enf_gen['subj'].append(subj_list.index(TRAIN_relevant['subj'][i])) 
X_enf_gen['pred'].append(pred_list.index(TRAIN_relevant['rel'][i])) X_enf_gen['obj'].append(obj_list.index(TRAIN_relevant['obj'][i])) elif (enforce_gen['eval'] == 'words') and any(word in enforce_gen['words'] for word in triplet): X_enf_gen['subj'].append(subj_list.index(TRAIN_relevant['subj'][i])) X_enf_gen['pred'].append(pred_list.index(TRAIN_relevant['rel'][i])) X_enf_gen['obj'].append(obj_list.index(TRAIN_relevant['obj'][i])) else: # if either the triplet/word is not generalized or we aren't enforcing generalization X['subj'].append(subj_list.index(TRAIN_relevant['subj'][i])) X['pred'].append(pred_list.index(TRAIN_relevant['rel'][i])) X['obj'].append(obj_list.index(TRAIN_relevant['obj'][i])) # Reshape X['subj'] = np.array(X['subj']).reshape((-1, 1)) X['pred'] = np.array(X['pred']).reshape((-1, 1)) X['obj'] = np.array(X['obj']).reshape((-1, 1)) # FORMAT: if we have gotten some zero shot instances if X_enf_gen['subj'] != []: X_enf_gen['subj'] = np.array(X_enf_gen['subj']).reshape( (-1, 1)) # get them in the right FORMAT for the merged (SEP) model! X_enf_gen['pred'] = np.array(X_enf_gen['pred']).reshape((-1, 1)) X_enf_gen['obj'] = np.array(X_enf_gen['obj']).reshape((-1, 1)) else: X_enf_gen['subj'], X_enf_gen['pred'], X_enf_gen['obj'] = None, None, None # Get Y (if model_type = PIX we output the regular y besides y_pixl!) 
y, y_pixl, y_enf_gen, idx_IN_X_and_y, idx_enf_gen, y_enf_gen_pixl = [], [], [], [], [], [] for i in range(len(TRAIN_relevant['subj'])): y_new_row = [] for k in range(len(y_vars)): y_new_row.extend([float(TRAIN_relevant[y_vars[k]][i])]) # IMPORTANT: We assume that the variables are NUMERIC if model_type == 'PIX': obj_sd_x, obj_sd_y = float(TRAIN_relevant['obj_sd_x'][i]), float(TRAIN_relevant['obj_sd_y'][i]) obj_ctr_x, obj_ctr_y = float(TRAIN_relevant['obj_ctr_x'][i]), float(TRAIN_relevant['obj_ctr_y'][i]) y_pixl_new_row = coord2pixel_indiv(obj_sd_x, obj_sd_y, obj_ctr_x, obj_ctr_y, n_side_pixl) # get stuff for the generalzed setting: triplet = (TRAIN_relevant['subj'][i], TRAIN_relevant['rel'][i], TRAIN_relevant['obj'][i]) if (enforce_gen['eval'] == 'triplets') and (triplet in enforce_gen['triplets']): y_enf_gen.append(y_new_row) if model_type == 'PIX': y_enf_gen_pixl.append(y_pixl_new_row) idx_enf_gen.append(i) elif (enforce_gen['eval'] == 'words') and any(word in enforce_gen['words'] for word in triplet): y_enf_gen.append(y_new_row) if model_type == 'PIX': y_enf_gen_pixl.append(y_pixl_new_row) idx_enf_gen.append(i) else: # NON GENERALIZED
y = np.array(y) y_enf_gen = np.array(y_enf_gen) if y_enf_gen != [] else None if model_type == 'PIX': y_pixl = np.array(y_pixl) y_enf_gen_pixl = np.array(y_enf_gen_pixl) if y_enf_gen_pixl != [] else None else: y_pixl = [[[]]] # necessary because we get the index 0 of y_pixl (if model_type != 'PIX') to save memory in learn_and_evaluate() print('We have gotten ' + str(len(idx_IN_X_and_y)) + ' instances (for both, train & test)') # Get X_extra X_extra, X_extra_enf_gen = [], [] if X_vars != []: for i in range(len(TRAIN_relevant['subj'])): X_extra_new_row = [] for k in range(len(X_vars)): # we already ASSUME that we have at least one y-variable X_extra_new_row.extend( [float(TRAIN_relevant[X_vars[k]][i])]) # IMPORTANT: We assume that the variables are NUMERIC # get stuff for the generalized: triplet = (TRAIN_relevant['subj'][i], TRAIN_relevant['rel'][i], TRAIN_relevant['obj'][i]) if (enforce_gen['eval'] == 'triplets') and (triplet in enforce_gen['triplets']): X_extra_enf_gen.append(X_extra_new_row) elif (enforce_gen['eval'] == 'words') and any(word in enforce_gen['words'] for word in triplet): X_extra_enf_gen.append(X_extra_new_row) else: X_extra.append(X_extra_new_row) X_extra = np.array(X_extra) if X_extra != [] else None # IMPORTANT: we only make it a numpy array if we have something, because we use == [] as condition in models_learn X_extra_enf_gen = np.array(X_extra_enf_gen) if X_extra_enf_gen != [] else None return X, X_extra, y, y_pixl, X_extra_enf_gen, X_enf_gen, y_enf_gen, y_enf_gen_pixl, idx_IN_X_and_y, idx_enf_gen def get_TRAIN_relevant(TRAIN, words): # IMPORTANT: we preserve the ORDER of TRAIN (so that we can recover information afterwards) TRAIN_relevant, rel_ids, OBJ_ctr_sd = {}, [], [] print('Getting *relevant* instances, from a total of: ' + str(len(TRAIN['subj']))) var_names = [key for key in TRAIN] # INITIALIZE TRAIN_relavant for varname in var_names: TRAIN_relevant[varname] = [] for i in range(len( TRAIN['subj'] )): # Samples loop we_have_it = True if 
((TRAIN['subj'][i] in words) and (TRAIN['rel'][i] in words) and (TRAIN['obj'][i] in words)) else False # if we have the complete triplet if we_have_it == True: for varname in var_names: TRAIN_relevant[varname].append(TRAIN[varname][i]) rel_ids.append(TRAIN['rel_id'][i]) OBJ_ctr_sd.append([TRAIN['img_idx'][i], TRAIN['rel_id'][i], TRAIN['subj'][i], TRAIN['rel'][i], TRAIN['obj'][i], TRAIN['subj_sd_x'][i], TRAIN['subj_sd_y'][i], TRAIN['subj_ctr_x'][i], TRAIN['subj_ctr_y'][i], TRAIN['obj_sd_x'][i], TRAIN['obj_sd_y'][i], TRAIN['obj_ctr_x'][i], TRAIN['obj_ctr_y'][i]]) OBJ_ctr_sd = np.array(OBJ_ctr_sd) print('We have gotten ' + str(len(TRAIN_relevant['subj'])) + ' RELEVANT instances') return OBJ_ctr_sd, rel_ids, TRAIN_relevant def get_random_EMB(actual_EMB): # Returns embedding matrix of the original shape with random normal vectors (dimension-wise) mu, sigma, vec_size = np.mean(actual_EMB), np.mean(np.std(actual_EMB, axis=0)), len(actual_EMB[0, :]) rand_EMB = [] for i in range(actual_EMB.shape[0]): # build a dictionary of random vectors rand_EMB.append(np.random.normal(mu, sigma, vec_size)) rand_EMB = np.array(rand_EMB) return rand_EMB def coord2pixel_indiv(obj_sd_x, obj_sd_y, obj_ctr_x, obj_ctr_y, n_side_pixl): ''' This function works with an individual example (extending it to many examples, where e.g., obj_sd_x is a vector, is easy) :param obj_sd_x (and the rest): real number (not vectors!) 
:param n_side_pixl: number of pixels as output (hyperparameter) :return y_pixl: matrix of pixels, i.e., a 2D tensor (n_side_pixl, n_side_pixl) ''' # continuous bounding box corners (prevent problems of predictions outside [0,1]) A_left_x, A_right_x = max((obj_ctr_x - obj_sd_x), 0), min((obj_ctr_x + obj_sd_x), 1) A_low_y, A_top_y = min((obj_ctr_y + obj_sd_y), 1), max((obj_ctr_y - obj_sd_y), 0) # translate continuous bounding box corners into indices in a n_side_pixl x n_side_pixl matrix i_left, i_right = np.rint( (n_side_pixl - 1)*A_left_x).astype(np.int), np.rint((n_side_pixl - 1)*A_right_x).astype(np.int) j_low, j_top = np.rint((n_side_pixl - 1)*A_low_y).astype(np.int), np.rint((n_side_pixl - 1)*A_top_y).astype(np.int) pixl_matr = np.zeros( (n_side_pixl, n_side_pixl) ) # add ones inside of the bounding box i_range = range( i_left, i_right ) i_range = [i_left] if ((i_left == i_right) or (i_range == [])) else i_range # AVOID THE CASE where width is 0 AND i_range=[] (as upper bound < lower bound) j_range = range( j_top, j_low ) j_range = [j_low] if ((j_low == j_top) or (j_range == [])) else j_range # AVOID THE CASE where height is 0 AND i_range=[] (as upper bound < lower bound) pixl_matr[ np.array(i_range)[:, None], np.array(j_range)] = 1 # (IMPORTANT: indices must be np.arrays) put a 1 everywhere inside of the bounding box pixl_matr = pixl_matr.reshape((-1)) return pixl_matr def pixl_idx2coord_all_examples(y_pixl): ''' Transforms the whole set of predicted matrices y_pixl into their continuous CENTER coordinates (Obj_ctr) :param y_pixl: array of MATRICES with predicted heatmaps (pixels). 
Each matrix = 1 example :return: PRED_obj_ctr_x, PRED_obj_ctr_y: arrays of length = number of examples ''' PRED_obj_ctr_x, PRED_obj_ctr_y = [], [] n_side_pixl = y_pixl.shape[1] #get automatically the number of pixels from the pixel matrix side for i in range( y_pixl.shape[0] ): # loop on number of examples idx_maximums = get_maximums_idx(y_pixl[i]) # get indices of maximum (allow for multiple of them) ctr_x, ctr_y = pixl_idx2coord_indiv(idx_maximums, n_side_pixl) # transform pixel indices into continuous coordinates PRED_obj_ctr_x.append(ctr_x) PRED_obj_ctr_y.append(ctr_y) PRED_obj_ctr_x, PRED_obj_ctr_y = np.array(PRED_obj_ctr_x), np.array(PRED_obj_ctr_y) return PRED_obj_ctr_x, PRED_obj_ctr_y def get_maximums_idx( heat_matrix ): # Given a matrix of activations, it outputs the indices corresponding to its maximum values # INPUT: heat_matrix: matrix of continuous activations (within [0,1]) of size n_side_pixl x n_side_pixl # OUTPUT: maximums: indices corresponding to where the activations are maximum (accounts for multiple maximums) #maximums = np.unravel_index(np.argmax(heat_matrix), heat_matrix.shape) # gives the index of the FIRST largest element. Doesn't account for multiple maximums! maximums = np.where(heat_matrix == heat_matrix.max()) # This one accounts for multiple maximums! return np.array(maximums) def pixl_idx2coord_indiv(idx_maximums, n_side_pixl): ''' This function receives input from get_maximums_indices() Given discrete pixels indices (i,j) where i,j = 0,...,n_side_pixl (where activations are maximal), it transforms them to (continuous) coordinates in [0,1] IMPORTANT: It only computes the CENTER of the Obj (not sd's). 
So it's useful for measures that only use Obj_ctr :param idx_maximums: index of maximums from get_maximums_idx() :param n_side_pixl: side of the activation matrix (necessary to transform indices to coordinates) :return pred_obj_ctr_x, pred_obj_ctr_y: predicted (continuous) coordinates in [0,1] (Obj_ctr) ''' coord = np.mean(idx_maximums, axis = 1) PRED_coord = coord.astype(np.float)/float(n_side_pixl - 1) # Transform pixel indices to (continuous) coordinates pred_obj_ctr_x, pred_obj_ctr_y = PRED_coord[0], PRED_coord[1] return pred_obj_ctr_x, pred_obj_ctr_y def get_folds(n_samples, n_folds): indices = np.random.permutation(np.arange(n_samples)) n_test = int(np.floor(n_samples / n_folds)) kf = [(np.delete(indices, np.arange(i * n_test, (i + 1) * n_test)), # train indices[i * n_test:(i + 1) * n_test]) for i in range(n_folds)] # test return kf def mirror_x(subj_ctr_x, obj_ctr_x): # Computes the absolute value of the obj_ctr_x variable (to make it symmetric) aux_obj_ctr_x = [ (1 - float(obj_ctr_x[i])) if float(obj_ctr_x[i]) <= float(subj_ctr_x[i]) else float(obj_ctr_x[i]) for i in range(len(obj_ctr_x)) ] aux_subj_ctr_x = [ (1 - float(subj_ctr_x[i])) if float(obj_ctr_x[i]) <= float(subj_ctr_x[i]) else float(subj_ctr_x[i]) for i in range(len(obj_ctr_x)) ] subj_ctr_x, obj_ctr_x = aux_subj_ctr_x, aux_obj_ctr_x return subj_ctr_x, obj_ctr_x def build_emb_dict(words, EMB): #Input: words= word list, EMB= embeddings in a np.array format #Output: Dictionary of embeddings EMB_dict = {} for i in range(len(words)): EMB_dict[words[i]] = EMB[i,:] return EMB_dict def wordlist2emb_matrix(words_to_get, EMB_dict): # Input: words_to_get = word list from the EMB matrix, EMB_dict= dictionary of embeddings # Output: MATRIX of embeddings # IMPORTANT: it preserves the order of the words_to_get list EMB_matrix = [] for i in range(len(words_to_get)): try: EMB_matrix.append( EMB_dict[words_to_get[i]] ) except KeyError: pass #print 'WARNING! word ' + words_to_get[i] + ' not found in our embeddings! 
(and it should be!!)' EMB_matrix = np.array(EMB_matrix) return EMB_matrix def get_GEN(TRAIN_relevant, train_idx, test_idx, subj, obj, pred): # Gives the generalized triplets (combination) and the generalized words # IMPORTANT: The outputted indices (idx_gen_tuples) are over the instances of the TEST SET! print ('Getting generalized instances') train_tuples = [(TRAIN_relevant[subj][train_idx[ii]],TRAIN_relevant[pred][train_idx[ii]], TRAIN_relevant[obj][ train_idx[ii] ]) for ii in range(len(train_idx)) ] test_tuples = [(TRAIN_relevant[subj][test_idx[ii]],TRAIN_relevant[pred][test_idx[ii]], TRAIN_relevant[obj][test_idx[ii]]) for ii in range(len(test_idx)) ] # 2. get GENERALIZED TUPLES (its index) #train_tuples, test_tuples = tuples[train_idx], tuples[test_idx] idx_gen_tuples = [i for i, x in enumerate(test_tuples) if x not in train_tuples]#IMPORTANT!!! This index (idx_gen_tuples) is over the instances of the TEST SET! # 3.get GENARALIZED WORDS (its index) subjs, preds, objs = np.array(TRAIN_relevant[subj]), np.array(TRAIN_relevant[pred]), np.array(TRAIN_relevant[obj]) # get unique wordlist in test_tuples[idx_gen_tuples] --> TRICK! 
if there are gen-words, they MUST be within gen-tuples) and in train tuples allwords_train = np.concatenate((subjs[train_idx], preds[train_idx],objs[train_idx]), axis=0) allwords_test = np.concatenate((subjs[test_idx], preds[test_idx],objs[test_idx]), axis=0) unique_words_train = list(set(allwords_train)) unique_words_test = list(set(allwords_test)) # get generalized words gen_words = [ unique_words_test[j] for j in range(len(unique_words_test)) if unique_words_test[j] not in unique_words_train] # intersect unique_words_train and unique_words_test and get the complementary # find INDICES of the tuples in the test set that contain any of the gen-words idx_gen_words = [ i for i,x in enumerate(test_tuples) if any(word in gen_words for word in x) ] # x=tuple, word= word within the tuple, i=index of the tuple x return idx_gen_tuples, idx_gen_words, gen_words def get_CLEAN_train_test_idx(TRAIN_relevant, train_idx, test_idx, clean_eval): # Gives the indices of the triplets that we want to get as a clean test set # clean_eval contains the triplets, etc. of our clean selection of instances # IMPORTANT: idx_clean_train and idx_clean_test are over the instances of the TRAIN and TEST SETS respectively! print ('Getting *clean* train and test instances') #0. get either tuples or triplets from our train AND test data if (clean_eval['eval'] == 'triplets') or (clean_eval['eval'] == 'words'): #if we want clean words, we search them among the whole triplet train_tuples = [(TRAIN_relevant['subj'][train_idx[ii]],TRAIN_relevant['rel'][train_idx[ii]], TRAIN_relevant['obj'][ train_idx[ii] ]) for ii in range(len(train_idx)) ] test_tuples = [(TRAIN_relevant['subj'][test_idx[ii]],TRAIN_relevant['rel'][test_idx[ii]], TRAIN_relevant['obj'][test_idx[ii]]) for ii in range(len(test_idx)) ] #1. Decide what clean triplets/words to use if clean_eval['eval'] == 'triplets': clean_tuples = clean_eval['triplets'] if clean_eval['eval'] == 'words': clean_words = clean_eval['words'] # 2. 
get INDICES of our clean instances (triplets...) in both, TRAIN and TEST data if clean_eval['eval'] == 'triplets': idx_clean_test = [ i for i,x in enumerate(test_tuples) if x in clean_tuples ] idx_clean_train = [ i for i,x in enumerate(train_tuples) if x in clean_tuples ] if clean_eval['eval'] == 'words': idx_clean_test = [ i for i,x in enumerate(test_tuples) if any(word in clean_words for word in x) ] idx_clean_train = [i for i, x in enumerate(train_tuples) if any(word in clean_words for word in x)] return idx_clean_train, idx_clean_test def get_enforce_gen(to_get): # This function can also get the clean_test instances, as there's nothing specific about generalized here import read_data as rd gen_triplets, gen_words = [],[] if to_get == 'triplets': # Read CSV GEN_INST = rd.load_training_data('../data/TRIPLETS_random.csv') # Get all triplets for i in range(len(GEN_INST['subj'])): gen_triplets.append( ( GEN_INST['subj'][i], GEN_INST['rel'][i], GEN_INST['obj'][i] ) ) gen_triplets = list(set(gen_triplets)) elif to_get == 'words': gen_words = rd.readWordlist('../data/WORDS_random.csv') gen_words = list(set(gen_words)) return gen_triplets, gen_words def aux_get_train_test_splits(X, X_extra, y, OBJ_ctr_sd, train_idx, test_idx): # This is an auxiliary function that gives back the train and test splits # Is not very elegant, but we don't create y_pixl_train and test splits because it takes too much memory # get X X_train, X_test = {},{} X_train['subj'], X_test['subj'] = X['subj'][train_idx], X['subj'][test_idx] X_train['pred'], X_test['pred'] = X['pred'][train_idx], X['pred'][test_idx] X_train['obj'], X_test['obj'] = X['obj'][train_idx], X['obj'][test_idx] # get y y_train, y_test = y[train_idx], y[test_idx] #not a dictionary!!! 
(a matrix array) OBJ_ctr_sd_train, OBJ_ctr_sd_test = OBJ_ctr_sd[train_idx], OBJ_ctr_sd[test_idx] X_extra_train, X_extra_test = X_extra[train_idx], X_extra[test_idx] return X_train, X_test, X_extra_train, X_extra_test, y_train, y_test, OBJ_ctr_sd_train, OBJ_ctr_sd_test def compute_centers(x_subj, y_subj, width_subj, height_subj, x_obj, y_obj, width_obj, height_obj): ''' Notice: the (0,0) coordinates of the image correspond to the TOP left corner (not bottom left) INPUT: absolute positions of: x_obj (x of top left corner of bounding box), y_obj (y of top left...), width_obj (width bounding box), height_obj (height bounding box), width_img (width img), height_img (height img) OUTPUT: centered positions (center of bounding box and standard dev. of bounding box): obj_ctr_x, obj_ctr_y, obj_sd_x, obj_sd_y ''' # OBJECT: obj_ctr_x = float(x_obj) + (float(width_obj)/2) obj_ctr_y = float(y_obj) + (float(height_obj)/2) obj_sd_x = (float(width_obj)/2) # after simplifying in the formula of SD, it gives this obj_sd_y = (float(height_obj)/2) # after simplifying in the formula of SD, it gives this # SUBJECT: subj_ctr_x = float(x_subj) + (float(width_subj)/2) subj_ctr_y = float(y_subj) + (float(height_subj)/2) subj_sd_x = (float(width_subj)/2) #after simplifying in the formula of SD, it gives this subj_sd_y = (float(height_subj)/2) # after simplifying in the formula of SD, it gives this return subj_ctr_x, subj_ctr_y, subj_sd_x, subj_sd_y, obj_ctr_x, obj_ctr_y, obj_sd_x, obj_sd_y
y.append(y_new_row) if model_type == 'PIX': y_pixl.append(y_pixl_new_row) idx_IN_X_and_y.append(i)
conditional_block
data_tools.py
import numpy as np def get_data(model_type, TRAIN, words, EMB, enforce_gen, n_side_pixl): import numpy as np EMBEDDINGS, OBJ_ctr_sd_enf_gen = {}, [] # 0. Get dictionary of ALL our embedding words EMB_dict = build_emb_dict(words, EMB) # 1. Get the RELEVANT training instances (filtering for 'predicates' and 'complete_only' variables) OBJ_ctr_sd, rel_ids, TRAIN_relevant = get_TRAIN_relevant(TRAIN, words) # 2. get dictionaries WORDLISTS (INDICES for the embedding layer!) EMBEDDINGS['obj_list'] = list(set(TRAIN_relevant['obj'])) EMBEDDINGS['subj_list'] = list(set(TRAIN_relevant['subj'])) EMBEDDINGS['pred_list'] = list(set(TRAIN_relevant['rel'])) allwords = np.concatenate((EMBEDDINGS['subj_list'], EMBEDDINGS['pred_list'], EMBEDDINGS['obj_list']), axis=0) EMBEDDINGS['allwords_list'] = list( set(allwords)) # IMPORTANT: The order of this list is what prevails later on as index for embeddings # 3. Get INITIALIZATION embeddings EMBEDDINGS['subj_EMB'] = wordlist2emb_matrix(EMBEDDINGS['subj_list'], EMB_dict) EMBEDDINGS['pred_EMB'] = wordlist2emb_matrix(EMBEDDINGS['pred_list'], EMB_dict) EMBEDDINGS['obj_EMB'] = wordlist2emb_matrix(EMBEDDINGS['obj_list'], EMB_dict) EMBEDDINGS['allwords_EMB'] = wordlist2emb_matrix(EMBEDDINGS['allwords_list'],EMB_dict) # 3.1. Get RANDOM embeddings (of the size of allwords_EMB) EMBEDDINGS['allwords_EMB_rnd'] = get_random_EMB(EMBEDDINGS['allwords_EMB']) EMBEDDINGS['subj_EMB_rnd'] = get_random_EMB(EMBEDDINGS['subj_EMB']) EMBEDDINGS['pred_EMB_rnd'] = get_random_EMB(EMBEDDINGS['pred_EMB']) EMBEDDINGS['obj_EMB_rnd'] = get_random_EMB(EMBEDDINGS['obj_EMB']) # 3.2. get ONE-HOT embeddings: EMBEDDINGS['subj_EMB_onehot'] = np.identity(len(EMBEDDINGS['subj_list'])) EMBEDDINGS['pred_EMB_onehot'] = np.identity(len(EMBEDDINGS['pred_list'])) EMBEDDINGS['obj_EMB_onehot'] = np.identity(len(EMBEDDINGS['obj_list'])) EMBEDDINGS['allwords_EMB_onehot'] = np.identity(len(EMBEDDINGS['allwords_list'])) # 4. 
Get X data (i.e., get the SEQUENCES of INDICES for the embedding layer) X, X_extra, y, y_pixl, X_extra_enf_gen, X_enf_gen, y_enf_gen, y_enf_gen_pixl, \ idx_IN_X_and_y, idx_enf_gen = relevant_instances2X_and_y(model_type, TRAIN_relevant, EMBEDDINGS, enforce_gen, n_side_pixl) # 5. Get the OBJ_ctr_sd_enf_gen that we need for some performance measures! if enforce_gen['eval'] is not None: OBJ_ctr_sd_enf_gen = OBJ_ctr_sd[idx_enf_gen] # 6. Finally, if we have REDUCED the X and y data by ENFORCING generalization (excluding instances) we have to reduce OBJ_ctr_sd and TRAIN_relevant accordingly if enforce_gen['eval'] is not None: for key in TRAIN_relevant: TRAIN_relevant[key] = np.array(TRAIN_relevant[key]) TRAIN_relevant[key] = TRAIN_relevant[key][idx_IN_X_and_y] OBJ_ctr_sd = OBJ_ctr_sd[idx_IN_X_and_y] rel_ids = np.array(rel_ids) rel_ids = rel_ids[idx_IN_X_and_y] return X, X_extra, y, y_pixl, X_extra_enf_gen, X_enf_gen, y_enf_gen, y_enf_gen_pixl, rel_ids, OBJ_ctr_sd, OBJ_ctr_sd_enf_gen, EMBEDDINGS, TRAIN_relevant def relevant_instances2X_and_y(model_type, TRAIN_relevant, EMBEDDINGS, enforce_gen, n_side_pixl): # OUTPUT: the X and y data, gotten by converting each word into its corresponding index print('Getting X and y data') X_vars = ['subj_ctr_x', 'subj_ctr_y', 'subj_sd_x', 'subj_sd_y'] y_vars = ['obj_sd_x', 'obj_sd_y', 'obj_ctr_x', 'obj_ctr_y'] subj_list, pred_list, obj_list, allwords_list = EMBEDDINGS['subj_list'], EMBEDDINGS['pred_list'], EMBEDDINGS[ 'obj_list'], EMBEDDINGS['allwords_list'] # get X: X, X_enf_gen = {}, {} X['subj'], X['pred'], X['obj'] = [], [], [] X_enf_gen['subj'], X_enf_gen['pred'], X_enf_gen['obj'] = [], [], [] for i in range(len(TRAIN_relevant['subj'])): triplet = (TRAIN_relevant['subj'][i], TRAIN_relevant['rel'][i], TRAIN_relevant['obj'][i]) # append to the GENERALIZED set if (enforce_gen['eval'] == 'triplets') and (triplet in enforce_gen['triplets']): X_enf_gen['subj'].append(subj_list.index(TRAIN_relevant['subj'][i])) 
X_enf_gen['pred'].append(pred_list.index(TRAIN_relevant['rel'][i])) X_enf_gen['obj'].append(obj_list.index(TRAIN_relevant['obj'][i])) elif (enforce_gen['eval'] == 'words') and any(word in enforce_gen['words'] for word in triplet): X_enf_gen['subj'].append(subj_list.index(TRAIN_relevant['subj'][i])) X_enf_gen['pred'].append(pred_list.index(TRAIN_relevant['rel'][i])) X_enf_gen['obj'].append(obj_list.index(TRAIN_relevant['obj'][i])) else: # if either the triplet/word is not generalized or we aren't enforcing generalization X['subj'].append(subj_list.index(TRAIN_relevant['subj'][i])) X['pred'].append(pred_list.index(TRAIN_relevant['rel'][i])) X['obj'].append(obj_list.index(TRAIN_relevant['obj'][i])) # Reshape X['subj'] = np.array(X['subj']).reshape((-1, 1)) X['pred'] = np.array(X['pred']).reshape((-1, 1)) X['obj'] = np.array(X['obj']).reshape((-1, 1)) # FORMAT: if we have gotten some zero shot instances if X_enf_gen['subj'] != []: X_enf_gen['subj'] = np.array(X_enf_gen['subj']).reshape( (-1, 1)) # get them in the right FORMAT for the merged (SEP) model! X_enf_gen['pred'] = np.array(X_enf_gen['pred']).reshape((-1, 1)) X_enf_gen['obj'] = np.array(X_enf_gen['obj']).reshape((-1, 1)) else: X_enf_gen['subj'], X_enf_gen['pred'], X_enf_gen['obj'] = None, None, None # Get Y (if model_type = PIX we output the regular y besides y_pixl!) 
y, y_pixl, y_enf_gen, idx_IN_X_and_y, idx_enf_gen, y_enf_gen_pixl = [], [], [], [], [], [] for i in range(len(TRAIN_relevant['subj'])): y_new_row = [] for k in range(len(y_vars)): y_new_row.extend([float(TRAIN_relevant[y_vars[k]][i])]) # IMPORTANT: We assume that the variables are NUMERIC if model_type == 'PIX': obj_sd_x, obj_sd_y = float(TRAIN_relevant['obj_sd_x'][i]), float(TRAIN_relevant['obj_sd_y'][i]) obj_ctr_x, obj_ctr_y = float(TRAIN_relevant['obj_ctr_x'][i]), float(TRAIN_relevant['obj_ctr_y'][i]) y_pixl_new_row = coord2pixel_indiv(obj_sd_x, obj_sd_y, obj_ctr_x, obj_ctr_y, n_side_pixl) # get stuff for the generalzed setting: triplet = (TRAIN_relevant['subj'][i], TRAIN_relevant['rel'][i], TRAIN_relevant['obj'][i]) if (enforce_gen['eval'] == 'triplets') and (triplet in enforce_gen['triplets']): y_enf_gen.append(y_new_row) if model_type == 'PIX': y_enf_gen_pixl.append(y_pixl_new_row) idx_enf_gen.append(i) elif (enforce_gen['eval'] == 'words') and any(word in enforce_gen['words'] for word in triplet): y_enf_gen.append(y_new_row) if model_type == 'PIX': y_enf_gen_pixl.append(y_pixl_new_row) idx_enf_gen.append(i) else: # NON GENERALIZED y.append(y_new_row) if model_type == 'PIX': y_pixl.append(y_pixl_new_row) idx_IN_X_and_y.append(i) y = np.array(y) y_enf_gen = np.array(y_enf_gen) if y_enf_gen != [] else None if model_type == 'PIX': y_pixl = np.array(y_pixl) y_enf_gen_pixl = np.array(y_enf_gen_pixl) if y_enf_gen_pixl != [] else None else: y_pixl = [[[]]] # necessary because we get the index 0 of y_pixl (if model_type != 'PIX') to save memory in learn_and_evaluate() print('We have gotten ' + str(len(idx_IN_X_and_y)) + ' instances (for both, train & test)') # Get X_extra X_extra, X_extra_enf_gen = [], [] if X_vars != []: for i in range(len(TRAIN_relevant['subj'])): X_extra_new_row = [] for k in range(len(X_vars)): # we already ASSUME that we have at least one y-variable X_extra_new_row.extend( [float(TRAIN_relevant[X_vars[k]][i])]) # IMPORTANT: We assume that the 
variables are NUMERIC # get stuff for the generalized: triplet = (TRAIN_relevant['subj'][i], TRAIN_relevant['rel'][i], TRAIN_relevant['obj'][i]) if (enforce_gen['eval'] == 'triplets') and (triplet in enforce_gen['triplets']): X_extra_enf_gen.append(X_extra_new_row) elif (enforce_gen['eval'] == 'words') and any(word in enforce_gen['words'] for word in triplet): X_extra_enf_gen.append(X_extra_new_row) else: X_extra.append(X_extra_new_row) X_extra = np.array(X_extra) if X_extra != [] else None # IMPORTANT: we only make it a numpy array if we have something, because we use == [] as condition in models_learn X_extra_enf_gen = np.array(X_extra_enf_gen) if X_extra_enf_gen != [] else None return X, X_extra, y, y_pixl, X_extra_enf_gen, X_enf_gen, y_enf_gen, y_enf_gen_pixl, idx_IN_X_and_y, idx_enf_gen def get_TRAIN_relevant(TRAIN, words): # IMPORTANT: we preserve the ORDER of TRAIN (so that we can recover information afterwards) TRAIN_relevant, rel_ids, OBJ_ctr_sd = {}, [], [] print('Getting *relevant* instances, from a total of: ' + str(len(TRAIN['subj']))) var_names = [key for key in TRAIN] # INITIALIZE TRAIN_relavant for varname in var_names: TRAIN_relevant[varname] = [] for i in range(len( TRAIN['subj'] )): # Samples loop we_have_it = True if ((TRAIN['subj'][i] in words) and (TRAIN['rel'][i] in words) and (TRAIN['obj'][i] in words)) else False # if we have the complete triplet if we_have_it == True: for varname in var_names: TRAIN_relevant[varname].append(TRAIN[varname][i]) rel_ids.append(TRAIN['rel_id'][i]) OBJ_ctr_sd.append([TRAIN['img_idx'][i], TRAIN['rel_id'][i], TRAIN['subj'][i], TRAIN['rel'][i], TRAIN['obj'][i], TRAIN['subj_sd_x'][i], TRAIN['subj_sd_y'][i], TRAIN['subj_ctr_x'][i], TRAIN['subj_ctr_y'][i], TRAIN['obj_sd_x'][i], TRAIN['obj_sd_y'][i], TRAIN['obj_ctr_x'][i], TRAIN['obj_ctr_y'][i]]) OBJ_ctr_sd = np.array(OBJ_ctr_sd) print('We have gotten ' + str(len(TRAIN_relevant['subj'])) + ' RELEVANT instances') return OBJ_ctr_sd, rel_ids, TRAIN_relevant def 
get_random_EMB(actual_EMB): # Returns embedding matrix of the original shape with random normal vectors (dimension-wise) mu, sigma, vec_size = np.mean(actual_EMB), np.mean(np.std(actual_EMB, axis=0)), len(actual_EMB[0, :]) rand_EMB = [] for i in range(actual_EMB.shape[0]): # build a dictionary of random vectors rand_EMB.append(np.random.normal(mu, sigma, vec_size)) rand_EMB = np.array(rand_EMB) return rand_EMB def coord2pixel_indiv(obj_sd_x, obj_sd_y, obj_ctr_x, obj_ctr_y, n_side_pixl): ''' This function works with an individual example (extending it to many examples, where e.g., obj_sd_x is a vector, is easy) :param obj_sd_x (and the rest): real number (not vectors!) :param n_side_pixl: number of pixels as output (hyperparameter) :return y_pixl: matrix of pixels, i.e., a 2D tensor (n_side_pixl, n_side_pixl) ''' # continuous bounding box corners (prevent problems of predictions outside [0,1]) A_left_x, A_right_x = max((obj_ctr_x - obj_sd_x), 0), min((obj_ctr_x + obj_sd_x), 1) A_low_y, A_top_y = min((obj_ctr_y + obj_sd_y), 1), max((obj_ctr_y - obj_sd_y), 0) # translate continuous bounding box corners into indices in a n_side_pixl x n_side_pixl matrix i_left, i_right = np.rint( (n_side_pixl - 1)*A_left_x).astype(np.int), np.rint((n_side_pixl - 1)*A_right_x).astype(np.int) j_low, j_top = np.rint((n_side_pixl - 1)*A_low_y).astype(np.int), np.rint((n_side_pixl - 1)*A_top_y).astype(np.int) pixl_matr = np.zeros( (n_side_pixl, n_side_pixl) ) # add ones inside of the bounding box i_range = range( i_left, i_right ) i_range = [i_left] if ((i_left == i_right) or (i_range == [])) else i_range # AVOID THE CASE where width is 0 AND i_range=[] (as upper bound < lower bound) j_range = range( j_top, j_low ) j_range = [j_low] if ((j_low == j_top) or (j_range == [])) else j_range # AVOID THE CASE where height is 0 AND i_range=[] (as upper bound < lower bound) pixl_matr[ np.array(i_range)[:, None], np.array(j_range)] = 1 # (IMPORTANT: indices must be np.arrays) put a 1 everywhere 
inside of the bounding box pixl_matr = pixl_matr.reshape((-1)) return pixl_matr def pixl_idx2coord_all_examples(y_pixl): ''' Transforms the whole set of predicted matrices y_pixl into their continuous CENTER coordinates (Obj_ctr) :param y_pixl: array of MATRICES with predicted heatmaps (pixels). Each matrix = 1 example :return: PRED_obj_ctr_x, PRED_obj_ctr_y: arrays of length = number of examples ''' PRED_obj_ctr_x, PRED_obj_ctr_y = [], [] n_side_pixl = y_pixl.shape[1] #get automatically the number of pixels from the pixel matrix side for i in range( y_pixl.shape[0] ): # loop on number of examples idx_maximums = get_maximums_idx(y_pixl[i]) # get indices of maximum (allow for multiple of them) ctr_x, ctr_y = pixl_idx2coord_indiv(idx_maximums, n_side_pixl) # transform pixel indices into continuous coordinates PRED_obj_ctr_x.append(ctr_x) PRED_obj_ctr_y.append(ctr_y) PRED_obj_ctr_x, PRED_obj_ctr_y = np.array(PRED_obj_ctr_x), np.array(PRED_obj_ctr_y) return PRED_obj_ctr_x, PRED_obj_ctr_y def get_maximums_idx( heat_matrix ): # Given a matrix of activations, it outputs the indices corresponding to its maximum values # INPUT: heat_matrix: matrix of continuous activations (within [0,1]) of size n_side_pixl x n_side_pixl # OUTPUT: maximums: indices corresponding to where the activations are maximum (accounts for multiple maximums) #maximums = np.unravel_index(np.argmax(heat_matrix), heat_matrix.shape) # gives the index of the FIRST largest element. Doesn't account for multiple maximums! maximums = np.where(heat_matrix == heat_matrix.max()) # This one accounts for multiple maximums! return np.array(maximums) def pixl_idx2coord_indiv(idx_maximums, n_side_pixl): ''' This function receives input from get_maximums_indices() Given discrete pixels indices (i,j) where i,j = 0,...,n_side_pixl (where activations are maximal), it transforms them to (continuous) coordinates in [0,1] IMPORTANT: It only computes the CENTER of the Obj (not sd's). 
So it's useful for measures that only use Obj_ctr :param idx_maximums: index of maximums from get_maximums_idx() :param n_side_pixl: side of the activation matrix (necessary to transform indices to coordinates) :return pred_obj_ctr_x, pred_obj_ctr_y: predicted (continuous) coordinates in [0,1] (Obj_ctr) ''' coord = np.mean(idx_maximums, axis = 1) PRED_coord = coord.astype(np.float)/float(n_side_pixl - 1) # Transform pixel indices to (continuous) coordinates pred_obj_ctr_x, pred_obj_ctr_y = PRED_coord[0], PRED_coord[1] return pred_obj_ctr_x, pred_obj_ctr_y def get_folds(n_samples, n_folds): indices = np.random.permutation(np.arange(n_samples)) n_test = int(np.floor(n_samples / n_folds)) kf = [(np.delete(indices, np.arange(i * n_test, (i + 1) * n_test)), # train indices[i * n_test:(i + 1) * n_test]) for i in range(n_folds)] # test return kf def mirror_x(subj_ctr_x, obj_ctr_x): # Computes the absolute value of the obj_ctr_x variable (to make it symmetric) aux_obj_ctr_x = [ (1 - float(obj_ctr_x[i])) if float(obj_ctr_x[i]) <= float(subj_ctr_x[i]) else float(obj_ctr_x[i]) for i in range(len(obj_ctr_x)) ] aux_subj_ctr_x = [ (1 - float(subj_ctr_x[i])) if float(obj_ctr_x[i]) <= float(subj_ctr_x[i]) else float(subj_ctr_x[i]) for i in range(len(obj_ctr_x)) ] subj_ctr_x, obj_ctr_x = aux_subj_ctr_x, aux_obj_ctr_x return subj_ctr_x, obj_ctr_x def build_emb_dict(words, EMB): #Input: words= word list, EMB= embeddings in a np.array format #Output: Dictionary of embeddings EMB_dict = {} for i in range(len(words)): EMB_dict[words[i]] = EMB[i,:] return EMB_dict def
(words_to_get, EMB_dict): # Input: words_to_get = word list from the EMB matrix, EMB_dict= dictionary of embeddings # Output: MATRIX of embeddings # IMPORTANT: it preserves the order of the words_to_get list EMB_matrix = [] for i in range(len(words_to_get)): try: EMB_matrix.append( EMB_dict[words_to_get[i]] ) except KeyError: pass #print 'WARNING! word ' + words_to_get[i] + ' not found in our embeddings! (and it should be!!)' EMB_matrix = np.array(EMB_matrix) return EMB_matrix def get_GEN(TRAIN_relevant, train_idx, test_idx, subj, obj, pred): # Gives the generalized triplets (combination) and the generalized words # IMPORTANT: The outputted indices (idx_gen_tuples) are over the instances of the TEST SET! print ('Getting generalized instances') train_tuples = [(TRAIN_relevant[subj][train_idx[ii]],TRAIN_relevant[pred][train_idx[ii]], TRAIN_relevant[obj][ train_idx[ii] ]) for ii in range(len(train_idx)) ] test_tuples = [(TRAIN_relevant[subj][test_idx[ii]],TRAIN_relevant[pred][test_idx[ii]], TRAIN_relevant[obj][test_idx[ii]]) for ii in range(len(test_idx)) ] # 2. get GENERALIZED TUPLES (its index) #train_tuples, test_tuples = tuples[train_idx], tuples[test_idx] idx_gen_tuples = [i for i, x in enumerate(test_tuples) if x not in train_tuples]#IMPORTANT!!! This index (idx_gen_tuples) is over the instances of the TEST SET! # 3.get GENARALIZED WORDS (its index) subjs, preds, objs = np.array(TRAIN_relevant[subj]), np.array(TRAIN_relevant[pred]), np.array(TRAIN_relevant[obj]) # get unique wordlist in test_tuples[idx_gen_tuples] --> TRICK! 
if there are gen-words, they MUST be within gen-tuples) and in train tuples allwords_train = np.concatenate((subjs[train_idx], preds[train_idx],objs[train_idx]), axis=0) allwords_test = np.concatenate((subjs[test_idx], preds[test_idx],objs[test_idx]), axis=0) unique_words_train = list(set(allwords_train)) unique_words_test = list(set(allwords_test)) # get generalized words gen_words = [ unique_words_test[j] for j in range(len(unique_words_test)) if unique_words_test[j] not in unique_words_train] # intersect unique_words_train and unique_words_test and get the complementary # find INDICES of the tuples in the test set that contain any of the gen-words idx_gen_words = [ i for i,x in enumerate(test_tuples) if any(word in gen_words for word in x) ] # x=tuple, word= word within the tuple, i=index of the tuple x return idx_gen_tuples, idx_gen_words, gen_words def get_CLEAN_train_test_idx(TRAIN_relevant, train_idx, test_idx, clean_eval): # Gives the indices of the triplets that we want to get as a clean test set # clean_eval contains the triplets, etc. of our clean selection of instances # IMPORTANT: idx_clean_train and idx_clean_test are over the instances of the TRAIN and TEST SETS respectively! print ('Getting *clean* train and test instances') #0. get either tuples or triplets from our train AND test data if (clean_eval['eval'] == 'triplets') or (clean_eval['eval'] == 'words'): #if we want clean words, we search them among the whole triplet train_tuples = [(TRAIN_relevant['subj'][train_idx[ii]],TRAIN_relevant['rel'][train_idx[ii]], TRAIN_relevant['obj'][ train_idx[ii] ]) for ii in range(len(train_idx)) ] test_tuples = [(TRAIN_relevant['subj'][test_idx[ii]],TRAIN_relevant['rel'][test_idx[ii]], TRAIN_relevant['obj'][test_idx[ii]]) for ii in range(len(test_idx)) ] #1. Decide what clean triplets/words to use if clean_eval['eval'] == 'triplets': clean_tuples = clean_eval['triplets'] if clean_eval['eval'] == 'words': clean_words = clean_eval['words'] # 2. 
get INDICES of our clean instances (triplets...) in both, TRAIN and TEST data if clean_eval['eval'] == 'triplets': idx_clean_test = [ i for i,x in enumerate(test_tuples) if x in clean_tuples ] idx_clean_train = [ i for i,x in enumerate(train_tuples) if x in clean_tuples ] if clean_eval['eval'] == 'words': idx_clean_test = [ i for i,x in enumerate(test_tuples) if any(word in clean_words for word in x) ] idx_clean_train = [i for i, x in enumerate(train_tuples) if any(word in clean_words for word in x)] return idx_clean_train, idx_clean_test def get_enforce_gen(to_get): # This function can also get the clean_test instances, as there's nothing specific about generalized here import read_data as rd gen_triplets, gen_words = [],[] if to_get == 'triplets': # Read CSV GEN_INST = rd.load_training_data('../data/TRIPLETS_random.csv') # Get all triplets for i in range(len(GEN_INST['subj'])): gen_triplets.append( ( GEN_INST['subj'][i], GEN_INST['rel'][i], GEN_INST['obj'][i] ) ) gen_triplets = list(set(gen_triplets)) elif to_get == 'words': gen_words = rd.readWordlist('../data/WORDS_random.csv') gen_words = list(set(gen_words)) return gen_triplets, gen_words def aux_get_train_test_splits(X, X_extra, y, OBJ_ctr_sd, train_idx, test_idx): # This is an auxiliary function that gives back the train and test splits # Is not very elegant, but we don't create y_pixl_train and test splits because it takes too much memory # get X X_train, X_test = {},{} X_train['subj'], X_test['subj'] = X['subj'][train_idx], X['subj'][test_idx] X_train['pred'], X_test['pred'] = X['pred'][train_idx], X['pred'][test_idx] X_train['obj'], X_test['obj'] = X['obj'][train_idx], X['obj'][test_idx] # get y y_train, y_test = y[train_idx], y[test_idx] #not a dictionary!!! 
(a matrix array) OBJ_ctr_sd_train, OBJ_ctr_sd_test = OBJ_ctr_sd[train_idx], OBJ_ctr_sd[test_idx] X_extra_train, X_extra_test = X_extra[train_idx], X_extra[test_idx] return X_train, X_test, X_extra_train, X_extra_test, y_train, y_test, OBJ_ctr_sd_train, OBJ_ctr_sd_test def compute_centers(x_subj, y_subj, width_subj, height_subj, x_obj, y_obj, width_obj, height_obj): ''' Notice: the (0,0) coordinates of the image correspond to the TOP left corner (not bottom left) INPUT: absolute positions of: x_obj (x of top left corner of bounding box), y_obj (y of top left...), width_obj (width bounding box), height_obj (height bounding box), width_img (width img), height_img (height img) OUTPUT: centered positions (center of bounding box and standard dev. of bounding box): obj_ctr_x, obj_ctr_y, obj_sd_x, obj_sd_y ''' # OBJECT: obj_ctr_x = float(x_obj) + (float(width_obj)/2) obj_ctr_y = float(y_obj) + (float(height_obj)/2) obj_sd_x = (float(width_obj)/2) # after simplifying in the formula of SD, it gives this obj_sd_y = (float(height_obj)/2) # after simplifying in the formula of SD, it gives this # SUBJECT: subj_ctr_x = float(x_subj) + (float(width_subj)/2) subj_ctr_y = float(y_subj) + (float(height_subj)/2) subj_sd_x = (float(width_subj)/2) #after simplifying in the formula of SD, it gives this subj_sd_y = (float(height_subj)/2) # after simplifying in the formula of SD, it gives this return subj_ctr_x, subj_ctr_y, subj_sd_x, subj_sd_y, obj_ctr_x, obj_ctr_y, obj_sd_x, obj_sd_y
wordlist2emb_matrix
identifier_name
optimizer-output.component.ts
import { Router } from '@angular/router'; import { Sort } from '@angular/material/sort'; import { FormControl } from '@angular/forms'; import { Component, EventEmitter, OnInit, Output, ViewChild } from '@angular/core'; import { MatTableDataSource } from '@angular/material/table'; import { SelectionModel } from '@angular/cdk/collections'; import { NgbModal, NgbModalOptions } from '@ng-bootstrap/ng-bootstrap'; import {ModalDismissReasons} from '@ng-bootstrap/ng-bootstrap'; import { MatPaginator } from '@angular/material/paginator'; import { groupByJson } from '../../planner/scenario-planning/scenario-planning.component'; import { Input } from '@angular/core'; import { Angular5Csv } from 'angular5-csv/dist/Angular5-csv'; import { ScenarioPlannerService } from '../../backend-services/scenario-planner.service'; import * as Notiflix from 'notiflix'; import { environment } from 'src/environments/environment'; import { DataControllerService } from 'src/app/base/data-controller/data-controller.service'; import { trigger, state, style, animate, transition } from "@angular/animations"; export interface ScenarioPlanner { product_tpn: number; total_incremental_sales: string; total_activation_cost:number; pack_type: string; product_name:string; activation_type:string; processed_lift: number; } export interface ScenarioPlannerConstraint { pack_type:string fsi: boolean; fai: boolean; search: boolean; sot: boolean; bpp: boolean; } Notiflix.Notify.init({ position:'right-bottom', timeout:3000 }) @Component({ selector: 'app-optimizer-output', templateUrl: './optimizer-output.component.html', styleUrls: ['./optimizer-output.component.scss'], animations: [ trigger("changeDivSize", [ state( "initial", style({ backgroundColor: "green", width: "100px", height: "100px" }) ), state( "final", style({ backgroundColor: "red", width: "200px", height: "200px" }) ), transition("initial=>final", animate("1500ms")), transition("final=>initial", animate("1000ms")) ]), trigger("balloonEffect", [ 
state( "initial", style({ backgroundColor: "green", transform: "scale(1)" }) ), state( "final", style({ backgroundColor: "red", transform: "scale(1.5)" }) ), transition("final=>initial", animate("1000ms")), transition("initial=>final", animate("1500ms")) ]), trigger("fadeInOut", [ state( "void", style({ opacity: 0 }) ), transition("void <=> *", animate(1000)) ]), trigger("EnterLeave", [ state("flyIn", style({ transform: "translateX(0)" })), transition(":enter", [ style({ transform: "translateX(-100%)" }), animate("0.5s 300ms ease-in") ]), transition(":leave", [ animate("0.3s ease-out", style({ transform: "translateX(100%)" })) ]) ]) ] }) export class OptimizerOutputComponent implements OnInit { response_data:any; SOURCE: any; valueSelected:any=0; modalOptions:NgbModalOptions | undefined; filterData: any; defaultData:any datastream:any; reload1: boolean=true; TATSPack_ARRAY: any=[]; currencySymbol: any; optimizedLift:any=0; totalLift:any=0; incremantalCSV: number=0; totalscvROAS:number=0; totalActivationCost:number=0; Ratecardjson: any; budgetConstraintSubscribe: any; totalBudget: any=0; constructor(private modalService: NgbModal, private dataservice:DataControllerService, private routes:Router,private apiServices:ScenarioPlannerService) { // console.log(this.route.getCurrentNavigation()?.extras.state); this.datastream=this.routes.getCurrentNavigation()?.extras.state; this.currencySymbol=environment.currencySymbol; this.modalOptions = { backdrop:'static', backdropClass:'customBackdrop' } }; ELEMENT_DATA: ScenarioPlanner[] = []; activationLIB:any={}; TATS:any={}; packTypeList:any; TATS_ARRAY:any=[]; DynActivationColumns:any=[]; TATS_BY_PACK:any={}; Chartpoints_pla_rev:any={}; FileName:string=''; activationLIBSelected:any={}; binaryOption=[ {id: 'Yes', name: "Yes"}, {id: 'No', name: "No"},]; reload:boolean=true; ELEMENT_DATA_CONSTRAINTS:any=[]; //displayedColumnsConstraints: string[] = ['pack_type','fsi', 'fai','search', 'sot', 'bpp']; //dataSourceConstraints = new 
MatTableDataSource<ScenarioPlannerConstraint>(this.ELEMENT_DATA_CONSTRAINTS); PlacementLabel:any=[]; @Input() dataSetLabel:any=[ 'FAI', 'FSI', 'SOT', 'BBP','Search']; @Input() dataSet:any={ data: [0, 0, 0, 0, 0], title: { text: 'Incremental Revenue by Placements', display: true } }; dataSetLabel1:any=[]; saveList:any=[{'name':'SELECT','id':0}, {'name':'Load1','id':1}] selectedplacementTypes=''; dataSet1:any={ data: [], label: 'Expected Lift by Pack type' }; //'total_activation_cost','total_incremental_sales','processed_lift' displayedColumns: string[] = ['pack_sub_type','pack_type','activation_type','total_activation_cost','total_incremental_sales','csv_roas','processed_lift',]; dataSource = new MatTableDataSource<ScenarioPlanner>(this.ELEMENT_DATA); selection = new SelectionModel<ScenarioPlanner>(true, []); sortedData: ScenarioPlanner[]=[]; selectedData:any=[]; skuList: ScenarioPlanner[] = []; activityType: ScenarioPlanner[] = []; activityLift:any = ''; activityROI:any = ''; renderedData: any; closeModal: any; liftSliderValue:any = [5,60]; roiSliderValue:any = [5,40]; groupedOnPackType=[]; // Configuration for the filters skuSelected:any = []; placementTypes = new FormControl(); //segment Segment = new FormControl(); segmentList: any[] = []; selectedSegmentList: any = []; constraint_list=[] ngOnInit(): void { Notiflix.Loading.dots('Loading...'); this.budgetConstraintSubscribe = this.dataservice.BudgetConstraintOb.subscribe((constraint:any) => { if(constraint){ this.totalBudget=constraint['total']; } console.log(constraint,"constraintz"); console.log(this.totalBudget,"totalbudget") }); this.apiServices.getActivationList().subscribe((res:any)=>{ console.log(res,"RES"); Notiflix.Loading.remove(); if(res.code==200){ this.DynActivationColumns=res.data; for(let [key,value] of Object.entries(res.data)){ let values:any=value; this.activationLIB[values.value]=values.name; this.PlacementLabel.push(values.name); } if(this.datastream){ this.SOURCE=this.datastream.source 
if(this.datastream.source=='from_opt_activation'){ this.ELEMENT_DATA_CONSTRAINTS=this.datastream.data[0] || []; this.selectedData=this.datastream.data[1] || []; this.response_data=this.datastream.data[2] || []; this.filterData=this.datastream.data[3] || []; this.defaultData=this.datastream.data[3] || []; this.Ratecardjson=this.datastream.data[4] || []; this.ELEMENT_DATA_CONSTRAINTS.forEach((element:any) => { let itemlist=[]; for( const [key,value] of Object.entries(element)){ if((value) && (this.activationLIB[key]!=undefined)){ itemlist.push(this.activationLIB[key]); } } this.activationLIBSelected[element.pack_type]=itemlist; }); } this.ELEMENT_DATA=this.filterData; this.ngAfterViewInit(); this.getSavedData(); this.groupedOnPackType=groupByJson(this.filterData,'pack_type'); this.segmentList=Object.keys(this.groupedOnPackType); this.selectedSegmentList = this.segmentList; this.chartInit(this.ELEMENT_DATA); }else{ this.routes.navigate(['/planner']); } } }); } @ViewChild(MatPaginator) paginator: any; ngAfterViewInit() { console.log(this.ELEMENT_DATA,"this.ELEMENT_DATA__"); this.ELEMENT_DATA=this.ELEMENT_DATA.sort((a:any, b:any) => b.processed_lift - a.processed_lift); this.ELEMENT_DATA.forEach((element:any) => { element['csv_roas']=((element.total_incremental_sales/element.total_activation_cost)*100).toFixed() }); this.dataSource= new MatTableDataSource<ScenarioPlanner>(this.ELEMENT_DATA); this.dataSource.paginator = this.paginator; this.dataSource.connect().subscribe(d => { this.renderedData = d}); } // File Reader ( EXCEL OR CSV) to JSON Format // Input Handler for the promocode upload async testData(event:any){ // let promoList:any=await this.onFileChange(event); // let FilteredSet=promoList['sheet1']; // this.ELEMENT_DATA=FilteredSet; // this.dataSource= new MatTableDataSource<ScenarioPlanner>(this.ELEMENT_DATA); // this.ngAfterViewInit(); } saveScenarioTrigger(content:any) { this.modalService.open(content, this.modalOptions).result.then((result) => { }); } 
deleteSavedList(){ let that=this; Notiflix.Confirm.show('Confirm Delete','Are you sure you want to delete this item?','Yes','No', ()=>{ //scenario_planner_listdelete this.apiServices.scenario_planner_listdelete(this.valueSelected).subscribe((res:any)=>{ if(res.code==200 && res.status=='success'){ that.getSavedData(); Notiflix.Notify.success('Deleted Successfully ! '); } }); }); } LoadSaveList(){ this.incremantalCSV=0; this.totalActivationCost=0; this.totalscvROAS=0; if(this.valueSelected!=0){ //load data Notiflix.Loading.dots('Loading...'); this.apiServices.scenario_planner_listdetails(this.valueSelected).subscribe((res:any)=>{ console.log(res,"listDetails"); Notiflix.Loading.remove(); let response=res; if(res.code==200 && res.status=='success'){ this.resetFilter(); let filterData:any=response['data'][0].json_data; this.groupedOnPackType=groupByJson(filterData,'pack_type'); this.segmentList=Object.keys(this.groupedOnPackType); this.selectedSegmentList = this.segmentList; filterData = filterData.filter((data:any) => this.selectedSegmentList.includes(data["pack_type"])); if(this.selectedplacementTypes.length!=0){ let to_find:any=[...this.selectedplacementTypes]; console.log(to_find,"to_find"); filterData=recursiveFind(filterData,to_find); console.log(to_find,"to_find") } filterData=filterData.sort((a:any, b:any) => b.processed_lift - a.processed_lift); console.log(filterData,"filterData"); this.dataSource = new MatTableDataSource<ScenarioPlanner>(filterData); this.dataSource.paginator = this.paginator; this.chartInit(filterData); Notiflix.Notify.success('Senario is loaded successfully !!!'); this.filterData=filterData; this.modalService.dismissAll(); } }); }else{ //load default data let filterData:any = this.defaultData.filter((data:any) => this.selectedSegmentList.includes(data["pack_type"])); if(this.selectedplacementTypes.length!=0){ let to_find:any=[...this.selectedplacementTypes]; filterData=recursiveFind(filterData,to_find); } this.dataSource = new 
MatTableDataSource<ScenarioPlanner>(filterData); this.dataSource.paginator = this.paginator; this.chartInit(filterData); this.modalService.dismissAll(); } } saveScenario(){ let planner_type=''; if(this.SOURCE=='from_opt_activation'){ planner_type='optimizer'; }else{ planner_type='simulation' } let payload={ "name":this.FileName, "json_data":this.filterData, "planner_type":planner_type } if(this.FileName.trim()!=''){ this.apiServices.scenario_planner_simulate_save(payload).subscribe((res:any)=>{ console.log(res,"res") if(res.code==200){ this.modalService.dismissAll(); Notiflix.Notify.success('Simulation is Saved Successfully'); this.getSavedData(); this.FileName=''; }else{ if(res.status=='Failed'){ Notiflix.Notify.failure('Failed to save record'); } } }); }else{ Notiflix.Notify.failure('Please Enter The Scenario Name') } } getSavedData(){ this.apiServices.scenario_planner_list().subscribe((res:any) =>{ console.log(res,"scenatio_list"); this.saveList=[]; if(res.code==200 && res.status=='success'){ if(this.SOURCE=='from_opt_activation'){ // planner_type='optimizer'; this.saveList=[{'name':'Default','id':0}]; this.saveList.push(...res.data['optimizer']); }else{ // planner_type='simulation' //this.saveList=res.data['simulation']; this.saveList=[{'name':'Default','id':0}]; this.saveList.push(...res.data['simulation']); } } console.log(this.saveList,"saveList"); }); } getpackTypeList(filterData:any,byPacktype:any){ this.TATS_ARRAY=[]; for(let [key,value] of Object.entries(this.activationLIB)){ this.TATS_ARRAY.push({'name':value,'value':this.TATS[key]}) } this.TATSPack_ARRAY=[]; if(this.packTypeList){ for(let [key,value] of Object.entries(this.packTypeList)){ let values:any=value; this.TATSPack_ARRAY.push({'name':values.name,'value':this.TATS[key]}) } for(let [key,value] of Object.entries(byPacktype)){ let lvalue:any=value; this.TATS_BY_PACK[key.toLowerCase()]=lvalue.length; } } } downloadProducts(){ let filename="Scenario-Planner - OPTIMIZER" var options = { 
fieldSeparator: ',', quoteStrings: '"', decimalseparator: '.', showLabels: true, showTitle: true, title: filename, useBom: true, noDownload: false, headers: ['Pack Type', 'Product Sub Type', 'Activity','Cost','Incremental Sales','Expected Lift','CSV ROAS'], nullToEmptyString: true, }; this.renderedData.map((item:any)=> { for(let [key,value] of Object.entries(item)){ let values:any=value; if(!this.displayedColumns.includes(key)){ delete item[key]; }else{ if(key=='processed_lift'){ item[key]=values.toFixed(2)+"%"; } else if(key=='csv_roas'){ item[key]=values+"%"; } else if(key=='total_activation_cost'){ item[key]=values.toFixed(2); } else if(key=='total_incremental_sales'){ item[key]=values.toFixed(2); } //'total_activation_cost','total_incremental_sales' } } }); new Angular5Csv(this.renderedData, filename, options); } test_filter(){ } decrementRange(value:any){ value.discount=value.discount-5; } incrementRange(value:any){ value.discount=value.discount+5; } goBack(){ console.log(this.SOURCE,"this.SOURCE") if(this.SOURCE=='from_opt_activation'){ this.routes.navigate(['/optimizer'],{ state: {'source':'from_output','data':[this.ELEMENT_DATA_CONSTRAINTS,this.selectedData,this.response_data,this.Ratecardjson]}}); }else{ this.routes.navigate(['/simulator'],{ state: {'source':'from_output','data':[this.ELEMENT_DATA_CONSTRAINTS,this.selectedData,this.response_data,this.Ratecardjson]}}); } } resetFilter(){ this.dataSource = new MatTableDataSource<ScenarioPlanner>(this.ELEMENT_DATA); this.dataSource.paginator = this.paginator; this.chartInit(this.ELEMENT_DATA); } doFilter(){ this.incremantalCSV=0; console.log(this.selectedSegmentList,"Segmentedlist") let filterData:any = this.ELEMENT_DATA.filter((data:any) => this.selectedSegmentList.includes(data["pack_type"])); if(this.selectedplacementTypes.length!=0){ let to_find:any=[...this.selectedplacementTypes]; filterData=recursiveFind(filterData,to_find); } this.dataSource = new MatTableDataSource<ScenarioPlanner>(filterData); 
this.dataSource.paginator = this.paginator; this.chartInit(filterData); } chartInit(filterData:any){ this.TATS={}; this.incremantalCSV=0; this.totalActivationCost=0; this.totalscvROAS=0; this.optimizedLift=0; this.totalLift=0; this.DynActivationColumns.forEach((element:any) => { this.TATS[element.value]=0; //this.Chartpoints_pla_rev[element.value]=0; //this.incremantalCSV+=element.total_incremental_sales; }); let gbActivity=groupByJson(filterData,'activation_type'); console.log(gbActivity,"gbActivity") let gbActivityList=Object.keys(gbActivity); gbActivityList.forEach((item)=>{ this.Chartpoints_pla_rev[item]=0; }); let predictedSales=0; filterData.forEach((element:any)=>{ this.incremantalCSV+=element.total_incremental_sales; this.totalActivationCost+=element.total_activation_cost; this.totalscvROAS+=element.total_incremental_sales/element.total_activation_cost; this.optimizedLift+=element.total_activation_cost; //this.totalLift+=element.processed_lift; // calculation = item["total_incremental_sales"] /(item["predicted_sales"] - item["total_incremental_sales"]) predictedSales+=element.predicted_sales; }); this.totalLift=this.incremantalCSV/(predictedSales-this.incremantalCSV)*100; this.optimizedLift=this.optimizedLift.toFixed() this.optimizedLift= numberWithCommas(this.optimizedLift); gbActivityList.forEach((item)=>{ filterData.forEach((element:any)=>{ if(element.activation_type.includes(item)){ this.Chartpoints_pla_rev[item]=element.total_incremental_sales } }); }); for(let [key,value] of Object.entries(this.activationLIB)){ filterData.forEach((element:any)=>{ if(element.activation_type.includes(value)){ this.TATS[key]+=1; //this.Chartpoints_pla_rev[key]+=element.total_incremental_sales.toFixed(2); } }); } console.log(this.Chartpoints_pla_rev,"==="); let byPacktype=groupByJson(filterData,'pack_type'); console.log(filterData,byPacktype,"1"); this.chartRender(this.Chartpoints_pla_rev,filterData); this.chartExpLift(filterData,byPacktype); 
this.getpackTypeList(filterData,byPacktype); } chartRender(data:any,filterData:any){ this.reload=false; let data_points:any=[]; this.dataSetLabel=[]; console.log(data,"data") let gbActivity=groupByJson(filterData,'activation_type'); console.log(gbActivity,"gbActivity")
let gbActivityList=Object.keys(gbActivity); gbActivityList.forEach((item)=>{ if(data[item]!=0){ this.dataSetLabel.push(item); data_points.push(data[item].toFixed(2)); } console.log( this.dataSetLabel," this.dataSetLabel",data_points); }); this.dataSet={ data: data_points, label: 'Incremental Revenue by Placement' ,backgroundColor:[ 'rgb(156, 39, 176)', 'rgb(103, 58, 183 )', 'rgb(33, 150, 243 )', 'rgb(0, 150, 136 )', 'rgb(139, 195, 74 )', 'rgb(233, 30, 99 )', 'rgb(103, 58, 183 )', ]}; setTimeout(()=>{ this.reload=true; },200); } chartExpLift(data:any,byPacktype:any){ this.reload1=false; let data_points1:any=[]; this.dataSetLabel1=[]; for(let [key,value] of Object.entries(byPacktype)){ this.dataSetLabel1.push(key); let items:any=value; let tssum=0; items.map((item:any)=>{ tssum+=parseInt(item.processed_lift); }); console.log(tssum.toFixed(2),"tssum"); data_points1.push(tssum); } console.log(data_points1,"data_points1") this.dataSet1={ data: data_points1, label: 'Expected Lift By Pack Type' ,backgroundColor:[ 'rgb(156, 39, 176)', 'rgb(103, 58, 183 )', 'rgb(33, 150, 243 )', 'rgb(0, 150, 136 )', 'rgb(139, 195, 74 )', 'rgb(233, 30, 99 )', 'rgb(103, 58, 183 )', ]}; this.apiServices.getpackTypeList().subscribe((res: any) => { console.log(res, "getpackTypeList"); if (res.code == 200 && res.status == 'success') { this.packTypeList = res.data; this.packTypeList.forEach((element:any) => { element['counts']=0; }); this.packTypeList.forEach((element:any) => { console.log(byPacktype[element.name],"byPacktype[element.name]"); element['counts']=byPacktype[element.name]?.length || 0; }); console.log(this.packTypeList,"updated"); } }); setTimeout(()=>{ this.reload1=true; console.log(this.dataSet1, this.dataSetLabel) },200); } sortData(sort: Sort) { console.log("sort"); const data = this.filterData.slice(); if (!sort.active || sort.direction === '') { this.sortedData = data; return; } this.sortedData = data.sort((a:any, b:any) => { const isAsc = sort.direction === 'desc'; switch 
(sort.active) { case 'processed_lift': return compare(a.processed_lift, b.processed_lift, isAsc); case 'total_activation_cost': return compare(a.total_activation_total_activation_cost, b.total_activation_total_activation_cost, isAsc); case 'total_incremental_sales': return compare(a.total_incremental_sales, b.total_incremental_sales, isAsc); default: return 0; } }); console.log(this.sortedData,"sortedData") this.dataSource = new MatTableDataSource<ScenarioPlanner>(this.sortedData); this.dataSource.paginator = this.paginator; this.dataSource.connect().subscribe(d => { this.renderedData = d}); // this.ngAfterViewInit(); } triggerModal(content :any) { this.modalService.open(content, {ariaLabelledBy: 'modal-basic-title'}).result.then((res) => { this.closeModal = `Closed with: ${res}`; }, (res) => { this.closeModal = `Dismissed ${this.getDismissReason(res)}`; }); } private getDismissReason(reason: any): string { if (reason === ModalDismissReasons.ESC) { return 'by pressing ESC'; } else if (reason === ModalDismissReasons.BACKDROP_CLICK) { return 'by clicking on a backdrop'; } else { return `with: ${reason}`; } } isAllSelected() { const numSelected = this.selection.selected.length; const numRows = this.dataSource.data.length; return numSelected === numRows; } masterToggle() { if (this.isAllSelected()) { this.selection.clear(); return;} this.selection.select(...this.dataSource.data); this.setActivationCounter(); } checkbox_row(row:any){ this.selection.toggle(row); this.setActivationCounter(); } checkboxLabel(row?: ScenarioPlanner): string { if (!row) { return `${this.isAllSelected() ? 'deselect' : 'select'} all`; } return `${this.selection.isSelected(row) ? 
'deselect' : 'select'} row ${row.product_tpn + 1}`; } updateProductCounter(){ //totalProducts } recountCheckbox(event:any){ event.stopPropagation(); this.setActivationCounter(); } setActivationCounter(){ setTimeout(()=>{ // this.totalActivities=this.selection.selected.length; // //console.log(this.selection.selected,"this.totalActivities"); // console.log(groupByJson(this.selection.selected,'sku'),"SKU group") // this.totalProducts=Object.keys(groupByJson(this.selection.selected,'sku')).length; },200); } } // Used For Datatable sorting function compare(a: number | string, b: number | string, isAsc: boolean) { return (a < b ? -1 : 1) * (isAsc ? 1 : -1); } function recursiveFind(inputArr:any,find:any):any{ //break-condition if(find.length==0){ return inputArr }else{ // if(find.length==1){ // inputArr=inputArr.filter((data:any) => find[0] == data["activation_type"]); // find.shift(); // }else{ // inputArr=inputArr.filter((data:any) => data["activation_type"].includes(find[0])); // find.shift(); // console.log(inputArr,"inputArr"); // } inputArr=inputArr.filter((data:any) => data["activation_type"].includes(find[0])); find.shift(); return recursiveFind(inputArr,find) } } function numberWithCommas(x:any) { return x.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ","); }
random_line_split
optimizer-output.component.ts
import { Router } from '@angular/router'; import { Sort } from '@angular/material/sort'; import { FormControl } from '@angular/forms'; import { Component, EventEmitter, OnInit, Output, ViewChild } from '@angular/core'; import { MatTableDataSource } from '@angular/material/table'; import { SelectionModel } from '@angular/cdk/collections'; import { NgbModal, NgbModalOptions } from '@ng-bootstrap/ng-bootstrap'; import {ModalDismissReasons} from '@ng-bootstrap/ng-bootstrap'; import { MatPaginator } from '@angular/material/paginator'; import { groupByJson } from '../../planner/scenario-planning/scenario-planning.component'; import { Input } from '@angular/core'; import { Angular5Csv } from 'angular5-csv/dist/Angular5-csv'; import { ScenarioPlannerService } from '../../backend-services/scenario-planner.service'; import * as Notiflix from 'notiflix'; import { environment } from 'src/environments/environment'; import { DataControllerService } from 'src/app/base/data-controller/data-controller.service'; import { trigger, state, style, animate, transition } from "@angular/animations"; export interface ScenarioPlanner { product_tpn: number; total_incremental_sales: string; total_activation_cost:number; pack_type: string; product_name:string; activation_type:string; processed_lift: number; } export interface ScenarioPlannerConstraint { pack_type:string fsi: boolean; fai: boolean; search: boolean; sot: boolean; bpp: boolean; } Notiflix.Notify.init({ position:'right-bottom', timeout:3000 }) @Component({ selector: 'app-optimizer-output', templateUrl: './optimizer-output.component.html', styleUrls: ['./optimizer-output.component.scss'], animations: [ trigger("changeDivSize", [ state( "initial", style({ backgroundColor: "green", width: "100px", height: "100px" }) ), state( "final", style({ backgroundColor: "red", width: "200px", height: "200px" }) ), transition("initial=>final", animate("1500ms")), transition("final=>initial", animate("1000ms")) ]), trigger("balloonEffect", [ 
state( "initial", style({ backgroundColor: "green", transform: "scale(1)" }) ), state( "final", style({ backgroundColor: "red", transform: "scale(1.5)" }) ), transition("final=>initial", animate("1000ms")), transition("initial=>final", animate("1500ms")) ]), trigger("fadeInOut", [ state( "void", style({ opacity: 0 }) ), transition("void <=> *", animate(1000)) ]), trigger("EnterLeave", [ state("flyIn", style({ transform: "translateX(0)" })), transition(":enter", [ style({ transform: "translateX(-100%)" }), animate("0.5s 300ms ease-in") ]), transition(":leave", [ animate("0.3s ease-out", style({ transform: "translateX(100%)" })) ]) ]) ] }) export class OptimizerOutputComponent implements OnInit { response_data:any; SOURCE: any; valueSelected:any=0; modalOptions:NgbModalOptions | undefined; filterData: any; defaultData:any datastream:any; reload1: boolean=true; TATSPack_ARRAY: any=[]; currencySymbol: any; optimizedLift:any=0; totalLift:any=0; incremantalCSV: number=0; totalscvROAS:number=0; totalActivationCost:number=0; Ratecardjson: any; budgetConstraintSubscribe: any; totalBudget: any=0; constructor(private modalService: NgbModal, private dataservice:DataControllerService, private routes:Router,private apiServices:ScenarioPlannerService) { // console.log(this.route.getCurrentNavigation()?.extras.state); this.datastream=this.routes.getCurrentNavigation()?.extras.state; this.currencySymbol=environment.currencySymbol; this.modalOptions = { backdrop:'static', backdropClass:'customBackdrop' } }; ELEMENT_DATA: ScenarioPlanner[] = []; activationLIB:any={}; TATS:any={}; packTypeList:any; TATS_ARRAY:any=[]; DynActivationColumns:any=[]; TATS_BY_PACK:any={}; Chartpoints_pla_rev:any={}; FileName:string=''; activationLIBSelected:any={}; binaryOption=[ {id: 'Yes', name: "Yes"}, {id: 'No', name: "No"},]; reload:boolean=true; ELEMENT_DATA_CONSTRAINTS:any=[]; //displayedColumnsConstraints: string[] = ['pack_type','fsi', 'fai','search', 'sot', 'bpp']; //dataSourceConstraints = new 
MatTableDataSource<ScenarioPlannerConstraint>(this.ELEMENT_DATA_CONSTRAINTS); PlacementLabel:any=[]; @Input() dataSetLabel:any=[ 'FAI', 'FSI', 'SOT', 'BBP','Search']; @Input() dataSet:any={ data: [0, 0, 0, 0, 0], title: { text: 'Incremental Revenue by Placements', display: true } }; dataSetLabel1:any=[]; saveList:any=[{'name':'SELECT','id':0}, {'name':'Load1','id':1}] selectedplacementTypes=''; dataSet1:any={ data: [], label: 'Expected Lift by Pack type' }; //'total_activation_cost','total_incremental_sales','processed_lift' displayedColumns: string[] = ['pack_sub_type','pack_type','activation_type','total_activation_cost','total_incremental_sales','csv_roas','processed_lift',]; dataSource = new MatTableDataSource<ScenarioPlanner>(this.ELEMENT_DATA); selection = new SelectionModel<ScenarioPlanner>(true, []); sortedData: ScenarioPlanner[]=[]; selectedData:any=[]; skuList: ScenarioPlanner[] = []; activityType: ScenarioPlanner[] = []; activityLift:any = ''; activityROI:any = ''; renderedData: any; closeModal: any; liftSliderValue:any = [5,60]; roiSliderValue:any = [5,40]; groupedOnPackType=[]; // Configuration for the filters skuSelected:any = []; placementTypes = new FormControl(); //segment Segment = new FormControl(); segmentList: any[] = []; selectedSegmentList: any = []; constraint_list=[] ngOnInit(): void { Notiflix.Loading.dots('Loading...'); this.budgetConstraintSubscribe = this.dataservice.BudgetConstraintOb.subscribe((constraint:any) => { if(constraint){ this.totalBudget=constraint['total']; } console.log(constraint,"constraintz"); console.log(this.totalBudget,"totalbudget") }); this.apiServices.getActivationList().subscribe((res:any)=>{ console.log(res,"RES"); Notiflix.Loading.remove(); if(res.code==200){ this.DynActivationColumns=res.data; for(let [key,value] of Object.entries(res.data)){ let values:any=value; this.activationLIB[values.value]=values.name; this.PlacementLabel.push(values.name); } if(this.datastream){ this.SOURCE=this.datastream.source 
if(this.datastream.source=='from_opt_activation'){ this.ELEMENT_DATA_CONSTRAINTS=this.datastream.data[0] || []; this.selectedData=this.datastream.data[1] || []; this.response_data=this.datastream.data[2] || []; this.filterData=this.datastream.data[3] || []; this.defaultData=this.datastream.data[3] || []; this.Ratecardjson=this.datastream.data[4] || []; this.ELEMENT_DATA_CONSTRAINTS.forEach((element:any) => { let itemlist=[]; for( const [key,value] of Object.entries(element)){ if((value) && (this.activationLIB[key]!=undefined)){ itemlist.push(this.activationLIB[key]); } } this.activationLIBSelected[element.pack_type]=itemlist; }); } this.ELEMENT_DATA=this.filterData; this.ngAfterViewInit(); this.getSavedData(); this.groupedOnPackType=groupByJson(this.filterData,'pack_type'); this.segmentList=Object.keys(this.groupedOnPackType); this.selectedSegmentList = this.segmentList; this.chartInit(this.ELEMENT_DATA); }else{ this.routes.navigate(['/planner']); } } }); } @ViewChild(MatPaginator) paginator: any; ngAfterViewInit() { console.log(this.ELEMENT_DATA,"this.ELEMENT_DATA__"); this.ELEMENT_DATA=this.ELEMENT_DATA.sort((a:any, b:any) => b.processed_lift - a.processed_lift); this.ELEMENT_DATA.forEach((element:any) => { element['csv_roas']=((element.total_incremental_sales/element.total_activation_cost)*100).toFixed() }); this.dataSource= new MatTableDataSource<ScenarioPlanner>(this.ELEMENT_DATA); this.dataSource.paginator = this.paginator; this.dataSource.connect().subscribe(d => { this.renderedData = d}); } // File Reader ( EXCEL OR CSV) to JSON Format // Input Handler for the promocode upload async testData(event:any){ // let promoList:any=await this.onFileChange(event); // let FilteredSet=promoList['sheet1']; // this.ELEMENT_DATA=FilteredSet; // this.dataSource= new MatTableDataSource<ScenarioPlanner>(this.ELEMENT_DATA); // this.ngAfterViewInit(); } saveScenarioTrigger(content:any) { this.modalService.open(content, this.modalOptions).result.then((result) => { }); } 
deleteSavedList(){ let that=this; Notiflix.Confirm.show('Confirm Delete','Are you sure you want to delete this item?','Yes','No', ()=>{ //scenario_planner_listdelete this.apiServices.scenario_planner_listdelete(this.valueSelected).subscribe((res:any)=>{ if(res.code==200 && res.status=='success'){ that.getSavedData(); Notiflix.Notify.success('Deleted Successfully ! '); } }); }); } LoadSaveList(){ this.incremantalCSV=0; this.totalActivationCost=0; this.totalscvROAS=0; if(this.valueSelected!=0){ //load data Notiflix.Loading.dots('Loading...'); this.apiServices.scenario_planner_listdetails(this.valueSelected).subscribe((res:any)=>{ console.log(res,"listDetails"); Notiflix.Loading.remove(); let response=res; if(res.code==200 && res.status=='success'){ this.resetFilter(); let filterData:any=response['data'][0].json_data; this.groupedOnPackType=groupByJson(filterData,'pack_type'); this.segmentList=Object.keys(this.groupedOnPackType); this.selectedSegmentList = this.segmentList; filterData = filterData.filter((data:any) => this.selectedSegmentList.includes(data["pack_type"])); if(this.selectedplacementTypes.length!=0){ let to_find:any=[...this.selectedplacementTypes]; console.log(to_find,"to_find"); filterData=recursiveFind(filterData,to_find); console.log(to_find,"to_find") } filterData=filterData.sort((a:any, b:any) => b.processed_lift - a.processed_lift); console.log(filterData,"filterData"); this.dataSource = new MatTableDataSource<ScenarioPlanner>(filterData); this.dataSource.paginator = this.paginator; this.chartInit(filterData); Notiflix.Notify.success('Senario is loaded successfully !!!'); this.filterData=filterData; this.modalService.dismissAll(); } }); }else{ //load default data let filterData:any = this.defaultData.filter((data:any) => this.selectedSegmentList.includes(data["pack_type"])); if(this.selectedplacementTypes.length!=0){ let to_find:any=[...this.selectedplacementTypes]; filterData=recursiveFind(filterData,to_find); } this.dataSource = new 
MatTableDataSource<ScenarioPlanner>(filterData); this.dataSource.paginator = this.paginator; this.chartInit(filterData); this.modalService.dismissAll(); } } saveScenario(){ let planner_type=''; if(this.SOURCE=='from_opt_activation'){ planner_type='optimizer'; }else{ planner_type='simulation' } let payload={ "name":this.FileName, "json_data":this.filterData, "planner_type":planner_type } if(this.FileName.trim()!=''){ this.apiServices.scenario_planner_simulate_save(payload).subscribe((res:any)=>{ console.log(res,"res") if(res.code==200){ this.modalService.dismissAll(); Notiflix.Notify.success('Simulation is Saved Successfully'); this.getSavedData(); this.FileName=''; }else{ if(res.status=='Failed'){ Notiflix.Notify.failure('Failed to save record'); } } }); }else{ Notiflix.Notify.failure('Please Enter The Scenario Name') } } getSavedData(){ this.apiServices.scenario_planner_list().subscribe((res:any) =>{ console.log(res,"scenatio_list"); this.saveList=[]; if(res.code==200 && res.status=='success'){ if(this.SOURCE=='from_opt_activation'){ // planner_type='optimizer'; this.saveList=[{'name':'Default','id':0}]; this.saveList.push(...res.data['optimizer']); }else{ // planner_type='simulation' //this.saveList=res.data['simulation']; this.saveList=[{'name':'Default','id':0}]; this.saveList.push(...res.data['simulation']); } } console.log(this.saveList,"saveList"); }); } getpackTypeList(filterData:any,byPacktype:any)
downloadProducts(){ let filename="Scenario-Planner - OPTIMIZER" var options = { fieldSeparator: ',', quoteStrings: '"', decimalseparator: '.', showLabels: true, showTitle: true, title: filename, useBom: true, noDownload: false, headers: ['Pack Type', 'Product Sub Type', 'Activity','Cost','Incremental Sales','Expected Lift','CSV ROAS'], nullToEmptyString: true, }; this.renderedData.map((item:any)=> { for(let [key,value] of Object.entries(item)){ let values:any=value; if(!this.displayedColumns.includes(key)){ delete item[key]; }else{ if(key=='processed_lift'){ item[key]=values.toFixed(2)+"%"; } else if(key=='csv_roas'){ item[key]=values+"%"; } else if(key=='total_activation_cost'){ item[key]=values.toFixed(2); } else if(key=='total_incremental_sales'){ item[key]=values.toFixed(2); } //'total_activation_cost','total_incremental_sales' } } }); new Angular5Csv(this.renderedData, filename, options); } test_filter(){ } decrementRange(value:any){ value.discount=value.discount-5; } incrementRange(value:any){ value.discount=value.discount+5; } goBack(){ console.log(this.SOURCE,"this.SOURCE") if(this.SOURCE=='from_opt_activation'){ this.routes.navigate(['/optimizer'],{ state: {'source':'from_output','data':[this.ELEMENT_DATA_CONSTRAINTS,this.selectedData,this.response_data,this.Ratecardjson]}}); }else{ this.routes.navigate(['/simulator'],{ state: {'source':'from_output','data':[this.ELEMENT_DATA_CONSTRAINTS,this.selectedData,this.response_data,this.Ratecardjson]}}); } } resetFilter(){ this.dataSource = new MatTableDataSource<ScenarioPlanner>(this.ELEMENT_DATA); this.dataSource.paginator = this.paginator; this.chartInit(this.ELEMENT_DATA); } doFilter(){ this.incremantalCSV=0; console.log(this.selectedSegmentList,"Segmentedlist") let filterData:any = this.ELEMENT_DATA.filter((data:any) => this.selectedSegmentList.includes(data["pack_type"])); if(this.selectedplacementTypes.length!=0){ let to_find:any=[...this.selectedplacementTypes]; 
filterData=recursiveFind(filterData,to_find); } this.dataSource = new MatTableDataSource<ScenarioPlanner>(filterData); this.dataSource.paginator = this.paginator; this.chartInit(filterData); } chartInit(filterData:any){ this.TATS={}; this.incremantalCSV=0; this.totalActivationCost=0; this.totalscvROAS=0; this.optimizedLift=0; this.totalLift=0; this.DynActivationColumns.forEach((element:any) => { this.TATS[element.value]=0; //this.Chartpoints_pla_rev[element.value]=0; //this.incremantalCSV+=element.total_incremental_sales; }); let gbActivity=groupByJson(filterData,'activation_type'); console.log(gbActivity,"gbActivity") let gbActivityList=Object.keys(gbActivity); gbActivityList.forEach((item)=>{ this.Chartpoints_pla_rev[item]=0; }); let predictedSales=0; filterData.forEach((element:any)=>{ this.incremantalCSV+=element.total_incremental_sales; this.totalActivationCost+=element.total_activation_cost; this.totalscvROAS+=element.total_incremental_sales/element.total_activation_cost; this.optimizedLift+=element.total_activation_cost; //this.totalLift+=element.processed_lift; // calculation = item["total_incremental_sales"] /(item["predicted_sales"] - item["total_incremental_sales"]) predictedSales+=element.predicted_sales; }); this.totalLift=this.incremantalCSV/(predictedSales-this.incremantalCSV)*100; this.optimizedLift=this.optimizedLift.toFixed() this.optimizedLift= numberWithCommas(this.optimizedLift); gbActivityList.forEach((item)=>{ filterData.forEach((element:any)=>{ if(element.activation_type.includes(item)){ this.Chartpoints_pla_rev[item]=element.total_incremental_sales } }); }); for(let [key,value] of Object.entries(this.activationLIB)){ filterData.forEach((element:any)=>{ if(element.activation_type.includes(value)){ this.TATS[key]+=1; //this.Chartpoints_pla_rev[key]+=element.total_incremental_sales.toFixed(2); } }); } console.log(this.Chartpoints_pla_rev,"==="); let byPacktype=groupByJson(filterData,'pack_type'); console.log(filterData,byPacktype,"1"); 
this.chartRender(this.Chartpoints_pla_rev,filterData); this.chartExpLift(filterData,byPacktype); this.getpackTypeList(filterData,byPacktype); } chartRender(data:any,filterData:any){ this.reload=false; let data_points:any=[]; this.dataSetLabel=[]; console.log(data,"data") let gbActivity=groupByJson(filterData,'activation_type'); console.log(gbActivity,"gbActivity") let gbActivityList=Object.keys(gbActivity); gbActivityList.forEach((item)=>{ if(data[item]!=0){ this.dataSetLabel.push(item); data_points.push(data[item].toFixed(2)); } console.log( this.dataSetLabel," this.dataSetLabel",data_points); }); this.dataSet={ data: data_points, label: 'Incremental Revenue by Placement' ,backgroundColor:[ 'rgb(156, 39, 176)', 'rgb(103, 58, 183 )', 'rgb(33, 150, 243 )', 'rgb(0, 150, 136 )', 'rgb(139, 195, 74 )', 'rgb(233, 30, 99 )', 'rgb(103, 58, 183 )', ]}; setTimeout(()=>{ this.reload=true; },200); } chartExpLift(data:any,byPacktype:any){ this.reload1=false; let data_points1:any=[]; this.dataSetLabel1=[]; for(let [key,value] of Object.entries(byPacktype)){ this.dataSetLabel1.push(key); let items:any=value; let tssum=0; items.map((item:any)=>{ tssum+=parseInt(item.processed_lift); }); console.log(tssum.toFixed(2),"tssum"); data_points1.push(tssum); } console.log(data_points1,"data_points1") this.dataSet1={ data: data_points1, label: 'Expected Lift By Pack Type' ,backgroundColor:[ 'rgb(156, 39, 176)', 'rgb(103, 58, 183 )', 'rgb(33, 150, 243 )', 'rgb(0, 150, 136 )', 'rgb(139, 195, 74 )', 'rgb(233, 30, 99 )', 'rgb(103, 58, 183 )', ]}; this.apiServices.getpackTypeList().subscribe((res: any) => { console.log(res, "getpackTypeList"); if (res.code == 200 && res.status == 'success') { this.packTypeList = res.data; this.packTypeList.forEach((element:any) => { element['counts']=0; }); this.packTypeList.forEach((element:any) => { console.log(byPacktype[element.name],"byPacktype[element.name]"); element['counts']=byPacktype[element.name]?.length || 0; }); 
console.log(this.packTypeList,"updated"); } }); setTimeout(()=>{ this.reload1=true; console.log(this.dataSet1, this.dataSetLabel) },200); } sortData(sort: Sort) { console.log("sort"); const data = this.filterData.slice(); if (!sort.active || sort.direction === '') { this.sortedData = data; return; } this.sortedData = data.sort((a:any, b:any) => { const isAsc = sort.direction === 'desc'; switch (sort.active) { case 'processed_lift': return compare(a.processed_lift, b.processed_lift, isAsc); case 'total_activation_cost': return compare(a.total_activation_total_activation_cost, b.total_activation_total_activation_cost, isAsc); case 'total_incremental_sales': return compare(a.total_incremental_sales, b.total_incremental_sales, isAsc); default: return 0; } }); console.log(this.sortedData,"sortedData") this.dataSource = new MatTableDataSource<ScenarioPlanner>(this.sortedData); this.dataSource.paginator = this.paginator; this.dataSource.connect().subscribe(d => { this.renderedData = d}); // this.ngAfterViewInit(); } triggerModal(content :any) { this.modalService.open(content, {ariaLabelledBy: 'modal-basic-title'}).result.then((res) => { this.closeModal = `Closed with: ${res}`; }, (res) => { this.closeModal = `Dismissed ${this.getDismissReason(res)}`; }); } private getDismissReason(reason: any): string { if (reason === ModalDismissReasons.ESC) { return 'by pressing ESC'; } else if (reason === ModalDismissReasons.BACKDROP_CLICK) { return 'by clicking on a backdrop'; } else { return `with: ${reason}`; } } isAllSelected() { const numSelected = this.selection.selected.length; const numRows = this.dataSource.data.length; return numSelected === numRows; } masterToggle() { if (this.isAllSelected()) { this.selection.clear(); return;} this.selection.select(...this.dataSource.data); this.setActivationCounter(); } checkbox_row(row:any){ this.selection.toggle(row); this.setActivationCounter(); } checkboxLabel(row?: ScenarioPlanner): string { if (!row) { return `${this.isAllSelected() 
? 'deselect' : 'select'} all`; } return `${this.selection.isSelected(row) ? 'deselect' : 'select'} row ${row.product_tpn + 1}`; } updateProductCounter(){ //totalProducts } recountCheckbox(event:any){ event.stopPropagation(); this.setActivationCounter(); } setActivationCounter(){ setTimeout(()=>{ // this.totalActivities=this.selection.selected.length; // //console.log(this.selection.selected,"this.totalActivities"); // console.log(groupByJson(this.selection.selected,'sku'),"SKU group") // this.totalProducts=Object.keys(groupByJson(this.selection.selected,'sku')).length; },200); } } // Used For Datatable sorting function compare(a: number | string, b: number | string, isAsc: boolean) { return (a < b ? -1 : 1) * (isAsc ? 1 : -1); } function recursiveFind(inputArr:any,find:any):any{ //break-condition if(find.length==0){ return inputArr }else{ // if(find.length==1){ // inputArr=inputArr.filter((data:any) => find[0] == data["activation_type"]); // find.shift(); // }else{ // inputArr=inputArr.filter((data:any) => data["activation_type"].includes(find[0])); // find.shift(); // console.log(inputArr,"inputArr"); // } inputArr=inputArr.filter((data:any) => data["activation_type"].includes(find[0])); find.shift(); return recursiveFind(inputArr,find) } } function numberWithCommas(x:any) { return x.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ","); }
{ this.TATS_ARRAY=[]; for(let [key,value] of Object.entries(this.activationLIB)){ this.TATS_ARRAY.push({'name':value,'value':this.TATS[key]}) } this.TATSPack_ARRAY=[]; if(this.packTypeList){ for(let [key,value] of Object.entries(this.packTypeList)){ let values:any=value; this.TATSPack_ARRAY.push({'name':values.name,'value':this.TATS[key]}) } for(let [key,value] of Object.entries(byPacktype)){ let lvalue:any=value; this.TATS_BY_PACK[key.toLowerCase()]=lvalue.length; } } }
identifier_body
optimizer-output.component.ts
import { Router } from '@angular/router'; import { Sort } from '@angular/material/sort'; import { FormControl } from '@angular/forms'; import { Component, EventEmitter, OnInit, Output, ViewChild } from '@angular/core'; import { MatTableDataSource } from '@angular/material/table'; import { SelectionModel } from '@angular/cdk/collections'; import { NgbModal, NgbModalOptions } from '@ng-bootstrap/ng-bootstrap'; import {ModalDismissReasons} from '@ng-bootstrap/ng-bootstrap'; import { MatPaginator } from '@angular/material/paginator'; import { groupByJson } from '../../planner/scenario-planning/scenario-planning.component'; import { Input } from '@angular/core'; import { Angular5Csv } from 'angular5-csv/dist/Angular5-csv'; import { ScenarioPlannerService } from '../../backend-services/scenario-planner.service'; import * as Notiflix from 'notiflix'; import { environment } from 'src/environments/environment'; import { DataControllerService } from 'src/app/base/data-controller/data-controller.service'; import { trigger, state, style, animate, transition } from "@angular/animations"; export interface ScenarioPlanner { product_tpn: number; total_incremental_sales: string; total_activation_cost:number; pack_type: string; product_name:string; activation_type:string; processed_lift: number; } export interface ScenarioPlannerConstraint { pack_type:string fsi: boolean; fai: boolean; search: boolean; sot: boolean; bpp: boolean; } Notiflix.Notify.init({ position:'right-bottom', timeout:3000 }) @Component({ selector: 'app-optimizer-output', templateUrl: './optimizer-output.component.html', styleUrls: ['./optimizer-output.component.scss'], animations: [ trigger("changeDivSize", [ state( "initial", style({ backgroundColor: "green", width: "100px", height: "100px" }) ), state( "final", style({ backgroundColor: "red", width: "200px", height: "200px" }) ), transition("initial=>final", animate("1500ms")), transition("final=>initial", animate("1000ms")) ]), trigger("balloonEffect", [ 
state( "initial", style({ backgroundColor: "green", transform: "scale(1)" }) ), state( "final", style({ backgroundColor: "red", transform: "scale(1.5)" }) ), transition("final=>initial", animate("1000ms")), transition("initial=>final", animate("1500ms")) ]), trigger("fadeInOut", [ state( "void", style({ opacity: 0 }) ), transition("void <=> *", animate(1000)) ]), trigger("EnterLeave", [ state("flyIn", style({ transform: "translateX(0)" })), transition(":enter", [ style({ transform: "translateX(-100%)" }), animate("0.5s 300ms ease-in") ]), transition(":leave", [ animate("0.3s ease-out", style({ transform: "translateX(100%)" })) ]) ]) ] }) export class OptimizerOutputComponent implements OnInit { response_data:any; SOURCE: any; valueSelected:any=0; modalOptions:NgbModalOptions | undefined; filterData: any; defaultData:any datastream:any; reload1: boolean=true; TATSPack_ARRAY: any=[]; currencySymbol: any; optimizedLift:any=0; totalLift:any=0; incremantalCSV: number=0; totalscvROAS:number=0; totalActivationCost:number=0; Ratecardjson: any; budgetConstraintSubscribe: any; totalBudget: any=0; constructor(private modalService: NgbModal, private dataservice:DataControllerService, private routes:Router,private apiServices:ScenarioPlannerService) { // console.log(this.route.getCurrentNavigation()?.extras.state); this.datastream=this.routes.getCurrentNavigation()?.extras.state; this.currencySymbol=environment.currencySymbol; this.modalOptions = { backdrop:'static', backdropClass:'customBackdrop' } }; ELEMENT_DATA: ScenarioPlanner[] = []; activationLIB:any={}; TATS:any={}; packTypeList:any; TATS_ARRAY:any=[]; DynActivationColumns:any=[]; TATS_BY_PACK:any={}; Chartpoints_pla_rev:any={}; FileName:string=''; activationLIBSelected:any={}; binaryOption=[ {id: 'Yes', name: "Yes"}, {id: 'No', name: "No"},]; reload:boolean=true; ELEMENT_DATA_CONSTRAINTS:any=[]; //displayedColumnsConstraints: string[] = ['pack_type','fsi', 'fai','search', 'sot', 'bpp']; //dataSourceConstraints = new 
MatTableDataSource<ScenarioPlannerConstraint>(this.ELEMENT_DATA_CONSTRAINTS); PlacementLabel:any=[]; @Input() dataSetLabel:any=[ 'FAI', 'FSI', 'SOT', 'BBP','Search']; @Input() dataSet:any={ data: [0, 0, 0, 0, 0], title: { text: 'Incremental Revenue by Placements', display: true } }; dataSetLabel1:any=[]; saveList:any=[{'name':'SELECT','id':0}, {'name':'Load1','id':1}] selectedplacementTypes=''; dataSet1:any={ data: [], label: 'Expected Lift by Pack type' }; //'total_activation_cost','total_incremental_sales','processed_lift' displayedColumns: string[] = ['pack_sub_type','pack_type','activation_type','total_activation_cost','total_incremental_sales','csv_roas','processed_lift',]; dataSource = new MatTableDataSource<ScenarioPlanner>(this.ELEMENT_DATA); selection = new SelectionModel<ScenarioPlanner>(true, []); sortedData: ScenarioPlanner[]=[]; selectedData:any=[]; skuList: ScenarioPlanner[] = []; activityType: ScenarioPlanner[] = []; activityLift:any = ''; activityROI:any = ''; renderedData: any; closeModal: any; liftSliderValue:any = [5,60]; roiSliderValue:any = [5,40]; groupedOnPackType=[]; // Configuration for the filters skuSelected:any = []; placementTypes = new FormControl(); //segment Segment = new FormControl(); segmentList: any[] = []; selectedSegmentList: any = []; constraint_list=[] ngOnInit(): void { Notiflix.Loading.dots('Loading...'); this.budgetConstraintSubscribe = this.dataservice.BudgetConstraintOb.subscribe((constraint:any) => { if(constraint){ this.totalBudget=constraint['total']; } console.log(constraint,"constraintz"); console.log(this.totalBudget,"totalbudget") }); this.apiServices.getActivationList().subscribe((res:any)=>{ console.log(res,"RES"); Notiflix.Loading.remove(); if(res.code==200){ this.DynActivationColumns=res.data; for(let [key,value] of Object.entries(res.data)){ let values:any=value; this.activationLIB[values.value]=values.name; this.PlacementLabel.push(values.name); } if(this.datastream){ this.SOURCE=this.datastream.source 
if(this.datastream.source=='from_opt_activation'){ this.ELEMENT_DATA_CONSTRAINTS=this.datastream.data[0] || []; this.selectedData=this.datastream.data[1] || []; this.response_data=this.datastream.data[2] || []; this.filterData=this.datastream.data[3] || []; this.defaultData=this.datastream.data[3] || []; this.Ratecardjson=this.datastream.data[4] || []; this.ELEMENT_DATA_CONSTRAINTS.forEach((element:any) => { let itemlist=[]; for( const [key,value] of Object.entries(element)){ if((value) && (this.activationLIB[key]!=undefined)){ itemlist.push(this.activationLIB[key]); } } this.activationLIBSelected[element.pack_type]=itemlist; }); } this.ELEMENT_DATA=this.filterData; this.ngAfterViewInit(); this.getSavedData(); this.groupedOnPackType=groupByJson(this.filterData,'pack_type'); this.segmentList=Object.keys(this.groupedOnPackType); this.selectedSegmentList = this.segmentList; this.chartInit(this.ELEMENT_DATA); }else{ this.routes.navigate(['/planner']); } } }); } @ViewChild(MatPaginator) paginator: any; ngAfterViewInit() { console.log(this.ELEMENT_DATA,"this.ELEMENT_DATA__"); this.ELEMENT_DATA=this.ELEMENT_DATA.sort((a:any, b:any) => b.processed_lift - a.processed_lift); this.ELEMENT_DATA.forEach((element:any) => { element['csv_roas']=((element.total_incremental_sales/element.total_activation_cost)*100).toFixed() }); this.dataSource= new MatTableDataSource<ScenarioPlanner>(this.ELEMENT_DATA); this.dataSource.paginator = this.paginator; this.dataSource.connect().subscribe(d => { this.renderedData = d}); } // File Reader ( EXCEL OR CSV) to JSON Format // Input Handler for the promocode upload async testData(event:any){ // let promoList:any=await this.onFileChange(event); // let FilteredSet=promoList['sheet1']; // this.ELEMENT_DATA=FilteredSet; // this.dataSource= new MatTableDataSource<ScenarioPlanner>(this.ELEMENT_DATA); // this.ngAfterViewInit(); } saveScenarioTrigger(content:any) { this.modalService.open(content, this.modalOptions).result.then((result) => { }); } 
deleteSavedList(){ let that=this; Notiflix.Confirm.show('Confirm Delete','Are you sure you want to delete this item?','Yes','No', ()=>{ //scenario_planner_listdelete this.apiServices.scenario_planner_listdelete(this.valueSelected).subscribe((res:any)=>{ if(res.code==200 && res.status=='success'){ that.getSavedData(); Notiflix.Notify.success('Deleted Successfully ! '); } }); }); } LoadSaveList(){ this.incremantalCSV=0; this.totalActivationCost=0; this.totalscvROAS=0; if(this.valueSelected!=0){ //load data Notiflix.Loading.dots('Loading...'); this.apiServices.scenario_planner_listdetails(this.valueSelected).subscribe((res:any)=>{ console.log(res,"listDetails"); Notiflix.Loading.remove(); let response=res; if(res.code==200 && res.status=='success'){ this.resetFilter(); let filterData:any=response['data'][0].json_data; this.groupedOnPackType=groupByJson(filterData,'pack_type'); this.segmentList=Object.keys(this.groupedOnPackType); this.selectedSegmentList = this.segmentList; filterData = filterData.filter((data:any) => this.selectedSegmentList.includes(data["pack_type"])); if(this.selectedplacementTypes.length!=0){ let to_find:any=[...this.selectedplacementTypes]; console.log(to_find,"to_find"); filterData=recursiveFind(filterData,to_find); console.log(to_find,"to_find") } filterData=filterData.sort((a:any, b:any) => b.processed_lift - a.processed_lift); console.log(filterData,"filterData"); this.dataSource = new MatTableDataSource<ScenarioPlanner>(filterData); this.dataSource.paginator = this.paginator; this.chartInit(filterData); Notiflix.Notify.success('Senario is loaded successfully !!!'); this.filterData=filterData; this.modalService.dismissAll(); } }); }else{ //load default data let filterData:any = this.defaultData.filter((data:any) => this.selectedSegmentList.includes(data["pack_type"])); if(this.selectedplacementTypes.length!=0){ let to_find:any=[...this.selectedplacementTypes]; filterData=recursiveFind(filterData,to_find); } this.dataSource = new 
MatTableDataSource<ScenarioPlanner>(filterData); this.dataSource.paginator = this.paginator; this.chartInit(filterData); this.modalService.dismissAll(); } } saveScenario(){ let planner_type=''; if(this.SOURCE=='from_opt_activation'){ planner_type='optimizer'; }else
let payload={ "name":this.FileName, "json_data":this.filterData, "planner_type":planner_type } if(this.FileName.trim()!=''){ this.apiServices.scenario_planner_simulate_save(payload).subscribe((res:any)=>{ console.log(res,"res") if(res.code==200){ this.modalService.dismissAll(); Notiflix.Notify.success('Simulation is Saved Successfully'); this.getSavedData(); this.FileName=''; }else{ if(res.status=='Failed'){ Notiflix.Notify.failure('Failed to save record'); } } }); }else{ Notiflix.Notify.failure('Please Enter The Scenario Name') } } getSavedData(){ this.apiServices.scenario_planner_list().subscribe((res:any) =>{ console.log(res,"scenatio_list"); this.saveList=[]; if(res.code==200 && res.status=='success'){ if(this.SOURCE=='from_opt_activation'){ // planner_type='optimizer'; this.saveList=[{'name':'Default','id':0}]; this.saveList.push(...res.data['optimizer']); }else{ // planner_type='simulation' //this.saveList=res.data['simulation']; this.saveList=[{'name':'Default','id':0}]; this.saveList.push(...res.data['simulation']); } } console.log(this.saveList,"saveList"); }); } getpackTypeList(filterData:any,byPacktype:any){ this.TATS_ARRAY=[]; for(let [key,value] of Object.entries(this.activationLIB)){ this.TATS_ARRAY.push({'name':value,'value':this.TATS[key]}) } this.TATSPack_ARRAY=[]; if(this.packTypeList){ for(let [key,value] of Object.entries(this.packTypeList)){ let values:any=value; this.TATSPack_ARRAY.push({'name':values.name,'value':this.TATS[key]}) } for(let [key,value] of Object.entries(byPacktype)){ let lvalue:any=value; this.TATS_BY_PACK[key.toLowerCase()]=lvalue.length; } } } downloadProducts(){ let filename="Scenario-Planner - OPTIMIZER" var options = { fieldSeparator: ',', quoteStrings: '"', decimalseparator: '.', showLabels: true, showTitle: true, title: filename, useBom: true, noDownload: false, headers: ['Pack Type', 'Product Sub Type', 'Activity','Cost','Incremental Sales','Expected Lift','CSV ROAS'], nullToEmptyString: true, }; 
this.renderedData.map((item:any)=> { for(let [key,value] of Object.entries(item)){ let values:any=value; if(!this.displayedColumns.includes(key)){ delete item[key]; }else{ if(key=='processed_lift'){ item[key]=values.toFixed(2)+"%"; } else if(key=='csv_roas'){ item[key]=values+"%"; } else if(key=='total_activation_cost'){ item[key]=values.toFixed(2); } else if(key=='total_incremental_sales'){ item[key]=values.toFixed(2); } //'total_activation_cost','total_incremental_sales' } } }); new Angular5Csv(this.renderedData, filename, options); } test_filter(){ } decrementRange(value:any){ value.discount=value.discount-5; } incrementRange(value:any){ value.discount=value.discount+5; } goBack(){ console.log(this.SOURCE,"this.SOURCE") if(this.SOURCE=='from_opt_activation'){ this.routes.navigate(['/optimizer'],{ state: {'source':'from_output','data':[this.ELEMENT_DATA_CONSTRAINTS,this.selectedData,this.response_data,this.Ratecardjson]}}); }else{ this.routes.navigate(['/simulator'],{ state: {'source':'from_output','data':[this.ELEMENT_DATA_CONSTRAINTS,this.selectedData,this.response_data,this.Ratecardjson]}}); } } resetFilter(){ this.dataSource = new MatTableDataSource<ScenarioPlanner>(this.ELEMENT_DATA); this.dataSource.paginator = this.paginator; this.chartInit(this.ELEMENT_DATA); } doFilter(){ this.incremantalCSV=0; console.log(this.selectedSegmentList,"Segmentedlist") let filterData:any = this.ELEMENT_DATA.filter((data:any) => this.selectedSegmentList.includes(data["pack_type"])); if(this.selectedplacementTypes.length!=0){ let to_find:any=[...this.selectedplacementTypes]; filterData=recursiveFind(filterData,to_find); } this.dataSource = new MatTableDataSource<ScenarioPlanner>(filterData); this.dataSource.paginator = this.paginator; this.chartInit(filterData); } chartInit(filterData:any){ this.TATS={}; this.incremantalCSV=0; this.totalActivationCost=0; this.totalscvROAS=0; this.optimizedLift=0; this.totalLift=0; this.DynActivationColumns.forEach((element:any) => { 
this.TATS[element.value]=0; //this.Chartpoints_pla_rev[element.value]=0; //this.incremantalCSV+=element.total_incremental_sales; }); let gbActivity=groupByJson(filterData,'activation_type'); console.log(gbActivity,"gbActivity") let gbActivityList=Object.keys(gbActivity); gbActivityList.forEach((item)=>{ this.Chartpoints_pla_rev[item]=0; }); let predictedSales=0; filterData.forEach((element:any)=>{ this.incremantalCSV+=element.total_incremental_sales; this.totalActivationCost+=element.total_activation_cost; this.totalscvROAS+=element.total_incremental_sales/element.total_activation_cost; this.optimizedLift+=element.total_activation_cost; //this.totalLift+=element.processed_lift; // calculation = item["total_incremental_sales"] /(item["predicted_sales"] - item["total_incremental_sales"]) predictedSales+=element.predicted_sales; }); this.totalLift=this.incremantalCSV/(predictedSales-this.incremantalCSV)*100; this.optimizedLift=this.optimizedLift.toFixed() this.optimizedLift= numberWithCommas(this.optimizedLift); gbActivityList.forEach((item)=>{ filterData.forEach((element:any)=>{ if(element.activation_type.includes(item)){ this.Chartpoints_pla_rev[item]=element.total_incremental_sales } }); }); for(let [key,value] of Object.entries(this.activationLIB)){ filterData.forEach((element:any)=>{ if(element.activation_type.includes(value)){ this.TATS[key]+=1; //this.Chartpoints_pla_rev[key]+=element.total_incremental_sales.toFixed(2); } }); } console.log(this.Chartpoints_pla_rev,"==="); let byPacktype=groupByJson(filterData,'pack_type'); console.log(filterData,byPacktype,"1"); this.chartRender(this.Chartpoints_pla_rev,filterData); this.chartExpLift(filterData,byPacktype); this.getpackTypeList(filterData,byPacktype); } chartRender(data:any,filterData:any){ this.reload=false; let data_points:any=[]; this.dataSetLabel=[]; console.log(data,"data") let gbActivity=groupByJson(filterData,'activation_type'); console.log(gbActivity,"gbActivity") let 
gbActivityList=Object.keys(gbActivity); gbActivityList.forEach((item)=>{ if(data[item]!=0){ this.dataSetLabel.push(item); data_points.push(data[item].toFixed(2)); } console.log( this.dataSetLabel," this.dataSetLabel",data_points); }); this.dataSet={ data: data_points, label: 'Incremental Revenue by Placement' ,backgroundColor:[ 'rgb(156, 39, 176)', 'rgb(103, 58, 183 )', 'rgb(33, 150, 243 )', 'rgb(0, 150, 136 )', 'rgb(139, 195, 74 )', 'rgb(233, 30, 99 )', 'rgb(103, 58, 183 )', ]}; setTimeout(()=>{ this.reload=true; },200); } chartExpLift(data:any,byPacktype:any){ this.reload1=false; let data_points1:any=[]; this.dataSetLabel1=[]; for(let [key,value] of Object.entries(byPacktype)){ this.dataSetLabel1.push(key); let items:any=value; let tssum=0; items.map((item:any)=>{ tssum+=parseInt(item.processed_lift); }); console.log(tssum.toFixed(2),"tssum"); data_points1.push(tssum); } console.log(data_points1,"data_points1") this.dataSet1={ data: data_points1, label: 'Expected Lift By Pack Type' ,backgroundColor:[ 'rgb(156, 39, 176)', 'rgb(103, 58, 183 )', 'rgb(33, 150, 243 )', 'rgb(0, 150, 136 )', 'rgb(139, 195, 74 )', 'rgb(233, 30, 99 )', 'rgb(103, 58, 183 )', ]}; this.apiServices.getpackTypeList().subscribe((res: any) => { console.log(res, "getpackTypeList"); if (res.code == 200 && res.status == 'success') { this.packTypeList = res.data; this.packTypeList.forEach((element:any) => { element['counts']=0; }); this.packTypeList.forEach((element:any) => { console.log(byPacktype[element.name],"byPacktype[element.name]"); element['counts']=byPacktype[element.name]?.length || 0; }); console.log(this.packTypeList,"updated"); } }); setTimeout(()=>{ this.reload1=true; console.log(this.dataSet1, this.dataSetLabel) },200); } sortData(sort: Sort) { console.log("sort"); const data = this.filterData.slice(); if (!sort.active || sort.direction === '') { this.sortedData = data; return; } this.sortedData = data.sort((a:any, b:any) => { const isAsc = sort.direction === 'desc'; switch 
(sort.active) { case 'processed_lift': return compare(a.processed_lift, b.processed_lift, isAsc); case 'total_activation_cost': return compare(a.total_activation_total_activation_cost, b.total_activation_total_activation_cost, isAsc); case 'total_incremental_sales': return compare(a.total_incremental_sales, b.total_incremental_sales, isAsc); default: return 0; } }); console.log(this.sortedData,"sortedData") this.dataSource = new MatTableDataSource<ScenarioPlanner>(this.sortedData); this.dataSource.paginator = this.paginator; this.dataSource.connect().subscribe(d => { this.renderedData = d}); // this.ngAfterViewInit(); } triggerModal(content :any) { this.modalService.open(content, {ariaLabelledBy: 'modal-basic-title'}).result.then((res) => { this.closeModal = `Closed with: ${res}`; }, (res) => { this.closeModal = `Dismissed ${this.getDismissReason(res)}`; }); } private getDismissReason(reason: any): string { if (reason === ModalDismissReasons.ESC) { return 'by pressing ESC'; } else if (reason === ModalDismissReasons.BACKDROP_CLICK) { return 'by clicking on a backdrop'; } else { return `with: ${reason}`; } } isAllSelected() { const numSelected = this.selection.selected.length; const numRows = this.dataSource.data.length; return numSelected === numRows; } masterToggle() { if (this.isAllSelected()) { this.selection.clear(); return;} this.selection.select(...this.dataSource.data); this.setActivationCounter(); } checkbox_row(row:any){ this.selection.toggle(row); this.setActivationCounter(); } checkboxLabel(row?: ScenarioPlanner): string { if (!row) { return `${this.isAllSelected() ? 'deselect' : 'select'} all`; } return `${this.selection.isSelected(row) ? 
'deselect' : 'select'} row ${row.product_tpn + 1}`; } updateProductCounter(){ //totalProducts } recountCheckbox(event:any){ event.stopPropagation(); this.setActivationCounter(); } setActivationCounter(){ setTimeout(()=>{ // this.totalActivities=this.selection.selected.length; // //console.log(this.selection.selected,"this.totalActivities"); // console.log(groupByJson(this.selection.selected,'sku'),"SKU group") // this.totalProducts=Object.keys(groupByJson(this.selection.selected,'sku')).length; },200); } } // Used For Datatable sorting function compare(a: number | string, b: number | string, isAsc: boolean) { return (a < b ? -1 : 1) * (isAsc ? 1 : -1); } function recursiveFind(inputArr:any,find:any):any{ //break-condition if(find.length==0){ return inputArr }else{ // if(find.length==1){ // inputArr=inputArr.filter((data:any) => find[0] == data["activation_type"]); // find.shift(); // }else{ // inputArr=inputArr.filter((data:any) => data["activation_type"].includes(find[0])); // find.shift(); // console.log(inputArr,"inputArr"); // } inputArr=inputArr.filter((data:any) => data["activation_type"].includes(find[0])); find.shift(); return recursiveFind(inputArr,find) } } function numberWithCommas(x:any) { return x.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ","); }
{ planner_type='simulation' }
// conditional_block  (stray fill-in-the-middle dataset label — not part of the component source)