file_name large_stringlengths 4 140 | prefix large_stringlengths 0 12.1k | suffix large_stringlengths 0 12k | middle large_stringlengths 0 7.51k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
process_node.go | if manual == nil && len(parentNodeRuns) == 1 && parentNodeRuns[0].Manual != nil {
n := wr.Workflow.WorkflowData.NodeByID(parentNodeRuns[0].WorkflowNodeID)
// If fork or JOIN and No run conditions
if (n.Type == sdk.NodeTypeJoin || n.Type == sdk.NodeTypeFork) &&
(n.Context == nil || (n.Context.Conditions.LuaScript == "" && len(n.Context.Conditions.PlainConditions) == 0)) {
manual = parentNodeRuns[0].Manual
}
}
switch n.Type {
case sdk.NodeTypeFork, sdk.NodeTypePipeline, sdk.NodeTypeJoin:
r1, conditionOK, errT := processNode(ctx, db, store, proj, wr, mapNodes, n, subNumber, parentNodeRuns, hookEvent, manual)
if errT != nil {
return nil, false, sdk.WrapError(errT, "Unable to processNode")
}
report.Merge(r1, nil) // nolint
return report, conditionOK, nil
case sdk.NodeTypeOutGoingHook:
r1, conditionOK, errO := processNodeOutGoingHook(ctx, db, store, proj, wr, mapNodes, parentNodeRuns, n, subNumber)
if errO != nil {
return nil, false, sdk.WrapError(errO, "Unable to processNodeOutGoingHook")
}
report.Merge(r1, nil) // nolint
return report, conditionOK, nil
}
return nil, false, nil
}
func processNode(ctx context.Context, db gorp.SqlExecutor, store cache.Store, proj *sdk.Project, wr *sdk.WorkflowRun, mapNodes map[int64]*sdk.Node, n *sdk.Node, subNumber int, parents []*sdk.WorkflowNodeRun, hookEvent *sdk.WorkflowNodeRunHookEvent, manual *sdk.WorkflowNodeRunManual) (*ProcessorReport, bool, error) {
report := new(ProcessorReport)
//TODO: Check user for manual done but check permission also for automatic trigger and hooks (with system to authenticate a webhook)
if n.Context == nil {
n.Context = &sdk.NodeContext{}
}
if n.Context.PipelineID == 0 && n.Type == sdk.NodeTypePipeline {
return nil, false, sdk.ErrPipelineNotFound
}
var runPayload map[string]string
var errPayload error
runPayload, errPayload = n.Context.DefaultPayloadToMap()
if errPayload != nil {
return nil, false, sdk.WrapError(errPayload, "Default payload is malformatted")
}
isDefaultPayload := true
// For node with pipeline
var stages []sdk.Stage
var pip sdk.Pipeline
if n.Context.PipelineID > 0 {
var has bool
pip, has = wr.Workflow.Pipelines[n.Context.PipelineID]
if !has {
return nil, false, fmt.Errorf("pipeline %d not found in workflow", n.Context.PipelineID)
}
stages = make([]sdk.Stage, len(pip.Stages))
copy(stages, pip.Stages)
//If the pipeline has parameter but none are defined on context, use the defaults
if len(pip.Parameter) > 0 && len(n.Context.DefaultPipelineParameters) == 0 {
n.Context.DefaultPipelineParameters = pip.Parameter
}
}
// Create run
run := &sdk.WorkflowNodeRun{
WorkflowID: wr.WorkflowID,
LastModified: time.Now(),
Start: time.Now(),
Number: wr.Number,
SubNumber: int64(subNumber),
WorkflowRunID: wr.ID,
WorkflowNodeID: n.ID,
WorkflowNodeName: n.Name,
Status: string(sdk.StatusWaiting),
Stages: stages,
Header: wr.Header,
}
if run.SubNumber >= wr.LastSubNumber {
wr.LastSubNumber = run.SubNumber
}
if n.Context.ApplicationID != 0 {
run.ApplicationID = n.Context.ApplicationID
}
parentsIDs := make([]int64, len(parents))
for i := range parents {
parentsIDs[i] = parents[i].ID
}
parentStatus := sdk.StatusSuccess.String()
run.SourceNodeRuns = parentsIDs
if parents != nil {
for _, p := range parents {
for _, v := range wr.WorkflowNodeRuns {
for _, run := range v {
if p.ID == run.ID {
if run.Status == sdk.StatusFail.String() || run.Status == sdk.StatusStopped.String() {
parentStatus = run.Status
}
}
}
}
}
//Merge the payloads from all the sources
_, next := observability.Span(ctx, "workflow.processNode.mergePayload")
for _, r := range parents {
e := dump.NewDefaultEncoder()
e.Formatters = []dump.KeyFormatterFunc{dump.WithDefaultLowerCaseFormatter()}
e.ExtraFields.DetailedMap = false
e.ExtraFields.DetailedStruct = false
e.ExtraFields.Len = false
e.ExtraFields.Type = false
m1, errm1 := e.ToStringMap(r.Payload)
if errm1 != nil {
AddWorkflowRunInfo(wr, true, sdk.SpawnMsg{
ID: sdk.MsgWorkflowError.ID,
Args: []interface{}{errm1.Error()},
})
log.Error("processNode> Unable to compute hook payload: %v", errm1)
}
if isDefaultPayload {
// Check if we try to merge for the first time so try to merge the default payload with the first parent run found
// if it is the default payload then we have to take the previous git values
runPayload = sdk.ParametersMapMerge(runPayload, m1)
isDefaultPayload = false
} else {
runPayload = sdk.ParametersMapMerge(runPayload, m1, sdk.MapMergeOptions.ExcludeGitParams)
}
}
run.Payload = runPayload
run.PipelineParameters = sdk.ParametersMerge(pip.Parameter, n.Context.DefaultPipelineParameters)
// Take first value in pipeline parameter list if no default value is set
for i := range run.PipelineParameters {
if run.PipelineParameters[i].Type == sdk.ListParameter && strings.Contains(run.PipelineParameters[i].Value, ";") {
run.PipelineParameters[i].Value = strings.Split(run.PipelineParameters[i].Value, ";")[0]
}
}
next()
}
run.HookEvent = hookEvent
if hookEvent != nil {
runPayload = sdk.ParametersMapMerge(runPayload, hookEvent.Payload)
run.Payload = runPayload
run.PipelineParameters = sdk.ParametersMerge(pip.Parameter, n.Context.DefaultPipelineParameters)
}
run.BuildParameters = append(run.BuildParameters, sdk.Parameter{
Name: "cds.node",
Type: sdk.StringParameter,
Value: run.WorkflowNodeName,
})
run.Manual = manual
if manual != nil {
payloadStr, err := json.Marshal(manual.Payload)
if err != nil {
log.Error("processNode> Unable to marshal payload: %v", err)
}
run.BuildParameters = append(run.BuildParameters, sdk.Parameter{
Name: "payload",
Type: sdk.TextParameter,
Value: string(payloadStr),
})
e := dump.NewDefaultEncoder()
e.Formatters = []dump.KeyFormatterFunc{dump.WithDefaultLowerCaseFormatter()}
e.ExtraFields.DetailedMap = false
e.ExtraFields.DetailedStruct = false
e.ExtraFields.Len = false
e.ExtraFields.Type = false
m1, errm1 := e.ToStringMap(manual.Payload)
if errm1 != nil {
return report, false, sdk.WrapError(errm1, "processNode> Unable to compute payload")
}
runPayload = sdk.ParametersMapMerge(runPayload, m1)
run.Payload = runPayload
run.PipelineParameters = sdk.ParametersMerge(n.Context.DefaultPipelineParameters, manual.PipelineParameters)
run.BuildParameters = append(run.BuildParameters, sdk.Parameter{
Name: "cds.triggered_by.email",
Type: sdk.StringParameter,
Value: manual.User.Email,
}, sdk.Parameter{
Name: "cds.triggered_by.fullname",
Type: sdk.StringParameter,
Value: manual.User.Fullname,
}, sdk.Parameter{
Name: "cds.triggered_by.username",
Type: | {
report := new(ProcessorReport)
exist, errN := nodeRunExist(db, wr.ID, n.ID, wr.Number, subNumber)
if errN != nil {
return nil, false, sdk.WrapError(errN, "processNodeRun> unable to check if node run exist")
}
if exist {
return nil, false, nil
}
var end func()
ctx, end = observability.Span(ctx, "workflow.processNodeRun",
observability.Tag(observability.TagWorkflow, wr.Workflow.Name),
observability.Tag(observability.TagWorkflowRun, wr.Number),
observability.Tag(observability.TagWorkflowNode, n.Name),
)
defer end()
// Keep old model behaviour on fork and join
// Send manual event to join and fork children when it was a manual run and when fork and join don't have run condition | identifier_body | |
process_node.go | (ctx context.Context, db gorp.SqlExecutor, store cache.Store, proj *sdk.Project, wr *sdk.WorkflowRun, mapNodes map[int64]*sdk.Node, n *sdk.Node, subNumber int, parentNodeRuns []*sdk.WorkflowNodeRun, hookEvent *sdk.WorkflowNodeRunHookEvent, manual *sdk.WorkflowNodeRunManual) (*ProcessorReport, bool, error) {
report := new(ProcessorReport)
exist, errN := nodeRunExist(db, wr.ID, n.ID, wr.Number, subNumber)
if errN != nil {
return nil, false, sdk.WrapError(errN, "processNodeRun> unable to check if node run exist")
}
if exist {
return nil, false, nil
}
var end func()
ctx, end = observability.Span(ctx, "workflow.processNodeRun",
observability.Tag(observability.TagWorkflow, wr.Workflow.Name),
observability.Tag(observability.TagWorkflowRun, wr.Number),
observability.Tag(observability.TagWorkflowNode, n.Name),
)
defer end()
// Keep old model behaviour on fork and join
// Send manual event to join and fork children when it was a manual run and when fork and join don't have run condition
if manual == nil && len(parentNodeRuns) == 1 && parentNodeRuns[0].Manual != nil {
n := wr.Workflow.WorkflowData.NodeByID(parentNodeRuns[0].WorkflowNodeID)
// If fork or JOIN and No run conditions
if (n.Type == sdk.NodeTypeJoin || n.Type == sdk.NodeTypeFork) &&
(n.Context == nil || (n.Context.Conditions.LuaScript == "" && len(n.Context.Conditions.PlainConditions) == 0)) {
manual = parentNodeRuns[0].Manual
}
}
switch n.Type {
case sdk.NodeTypeFork, sdk.NodeTypePipeline, sdk.NodeTypeJoin:
r1, conditionOK, errT := processNode(ctx, db, store, proj, wr, mapNodes, n, subNumber, parentNodeRuns, hookEvent, manual)
if errT != nil {
return nil, false, sdk.WrapError(errT, "Unable to processNode")
}
report.Merge(r1, nil) // nolint
return report, conditionOK, nil
case sdk.NodeTypeOutGoingHook:
r1, conditionOK, errO := processNodeOutGoingHook(ctx, db, store, proj, wr, mapNodes, parentNodeRuns, n, subNumber)
if errO != nil {
return nil, false, sdk.WrapError(errO, "Unable to processNodeOutGoingHook")
}
report.Merge(r1, nil) // nolint
return report, conditionOK, nil
}
return nil, false, nil
}
func processNode(ctx context.Context, db gorp.SqlExecutor, store cache.Store, proj *sdk.Project, wr *sdk.WorkflowRun, mapNodes map[int64]*sdk.Node, n *sdk.Node, subNumber int, parents []*sdk.WorkflowNodeRun, hookEvent *sdk.WorkflowNodeRunHookEvent, manual *sdk.WorkflowNodeRunManual) (*ProcessorReport, bool, error) {
report := new(ProcessorReport)
//TODO: Check user for manual done but check permission also for automatic trigger and hooks (with system to authenticate a webhook)
if n.Context == nil {
n.Context = &sdk.NodeContext{}
}
if n.Context.PipelineID == 0 && n.Type == sdk.NodeTypePipeline {
return nil, false, sdk.ErrPipelineNotFound
}
var runPayload map[string]string
var errPayload error
runPayload, errPayload = n.Context.DefaultPayloadToMap()
if errPayload != nil {
return nil, false, sdk.WrapError(errPayload, "Default payload is malformatted")
}
isDefaultPayload := true
// For node with pipeline
var stages []sdk.Stage
var pip sdk.Pipeline
if n.Context.PipelineID > 0 {
var has bool
pip, has = wr.Workflow.Pipelines[n.Context.PipelineID]
if !has {
return nil, false, fmt.Errorf("pipeline %d not found in workflow", n.Context.PipelineID)
}
stages = make([]sdk.Stage, len(pip.Stages))
copy(stages, pip.Stages)
//If the pipeline has parameter but none are defined on context, use the defaults
if len(pip.Parameter) > 0 && len(n.Context.DefaultPipelineParameters) == 0 {
n.Context.DefaultPipelineParameters = pip.Parameter
}
}
// Create run
run := &sdk.WorkflowNodeRun{
WorkflowID: wr.WorkflowID,
LastModified: time.Now(),
Start: time.Now(),
Number: wr.Number,
SubNumber: int64(subNumber),
WorkflowRunID: wr.ID,
WorkflowNodeID: n.ID,
WorkflowNodeName: n.Name,
Status: string(sdk.StatusWaiting),
Stages: stages,
Header: wr.Header,
}
if run.SubNumber >= wr.LastSubNumber {
wr.LastSubNumber = run.SubNumber
}
if n.Context.ApplicationID != 0 {
run.ApplicationID = n.Context.ApplicationID
}
parentsIDs := make([]int64, len(parents))
for i := range parents {
parentsIDs[i] = parents[i].ID
}
parentStatus := sdk.StatusSuccess.String()
run.SourceNodeRuns = parentsIDs
if parents != nil {
for _, p := range parents {
for _, v := range wr.WorkflowNodeRuns {
for _, run := range v {
if p.ID == run.ID {
if run.Status == sdk.StatusFail.String() || run.Status == sdk.StatusStopped.String() {
parentStatus = run.Status
}
}
}
}
}
//Merge the payloads from all the sources
_, next := observability.Span(ctx, "workflow.processNode.mergePayload")
for _, r := range parents {
e := dump.NewDefaultEncoder()
e.Formatters = []dump.KeyFormatterFunc{dump.WithDefaultLowerCaseFormatter()}
e.ExtraFields.DetailedMap = false
e.ExtraFields.DetailedStruct = false
e.ExtraFields.Len = false
e.ExtraFields.Type = false
m1, errm1 := e.ToStringMap(r.Payload)
if errm1 != nil {
AddWorkflowRunInfo(wr, true, sdk.SpawnMsg{
ID: sdk.MsgWorkflowError.ID,
Args: []interface{}{errm1.Error()},
})
log.Error("processNode> Unable to compute hook payload: %v", errm1)
}
if isDefaultPayload {
// Check if we try to merge for the first time so try to merge the default payload with the first parent run found
// if it is the default payload then we have to take the previous git values
runPayload = sdk.ParametersMapMerge(runPayload, m1)
isDefaultPayload = false
} else {
runPayload = sdk.ParametersMapMerge(runPayload, m1, sdk.MapMergeOptions.ExcludeGitParams)
}
}
run.Payload = runPayload
run.PipelineParameters = sdk.ParametersMerge(pip.Parameter, n.Context.DefaultPipelineParameters)
// Take first value in pipeline parameter list if no default value is set
for i := range run.PipelineParameters {
if run.PipelineParameters[i].Type == sdk.ListParameter && strings.Contains(run.PipelineParameters[i].Value, ";") {
run.PipelineParameters[i].Value = strings.Split(run.PipelineParameters[i].Value, ";")[0]
}
}
next()
}
run.HookEvent = hookEvent
if hookEvent != nil {
runPayload = sdk.ParametersMapMerge(runPayload, hookEvent.Payload)
run.Payload = runPayload
run.PipelineParameters = sdk.ParametersMerge(pip.Parameter, n.Context.DefaultPipelineParameters)
}
run.BuildParameters = append(run.BuildParameters, sdk.Parameter{
Name: "cds.node",
Type: sdk.StringParameter,
Value: run.WorkflowNodeName,
})
run.Manual = manual
if manual != nil {
payloadStr, err := json.Marshal(manual.Payload)
if err != nil {
log.Error("processNode> Unable to marshal payload: %v", err)
}
run.BuildParameters = append(run.BuildParameters, sdk.Parameter{
Name: "payload",
Type: sdk.TextParameter,
Value: string(payloadStr),
})
e := dump.NewDefaultEncoder()
e.Formatters = []dump.KeyFormatterFunc{dump.WithDefaultLowerCaseFormatter()}
e.ExtraFields.DetailedMap = false
e.ExtraFields.DetailedStruct = false
e.ExtraFields.Len = false
e.ExtraFields.Type = false
m1, errm1 := e.ToStringMap(manual.Payload)
if errm1 != nil {
return report, false, sdk.WrapError(errm1, "processNode> Unable to compute payload")
}
runPayload = sdk.ParametersMapMerge(runPayload, m1)
run.Payload = runPayload
run.PipelineParameters = sdk.ParametersMerge(n.Context.DefaultPipelineParameters, manual.PipelineParameters)
run.BuildParameters = append(run.BuildParameters, | processNodeRun | identifier_name | |
process_node.go | if (n.Type == sdk.NodeTypeJoin || n.Type == sdk.NodeTypeFork) &&
(n.Context == nil || (n.Context.Conditions.LuaScript == "" && len(n.Context.Conditions.PlainConditions) == 0)) {
manual = parentNodeRuns[0].Manual
}
}
switch n.Type {
case sdk.NodeTypeFork, sdk.NodeTypePipeline, sdk.NodeTypeJoin:
r1, conditionOK, errT := processNode(ctx, db, store, proj, wr, mapNodes, n, subNumber, parentNodeRuns, hookEvent, manual)
if errT != nil {
return nil, false, sdk.WrapError(errT, "Unable to processNode")
}
report.Merge(r1, nil) // nolint
return report, conditionOK, nil
case sdk.NodeTypeOutGoingHook:
r1, conditionOK, errO := processNodeOutGoingHook(ctx, db, store, proj, wr, mapNodes, parentNodeRuns, n, subNumber)
if errO != nil {
return nil, false, sdk.WrapError(errO, "Unable to processNodeOutGoingHook")
}
report.Merge(r1, nil) // nolint
return report, conditionOK, nil
}
return nil, false, nil
}
func processNode(ctx context.Context, db gorp.SqlExecutor, store cache.Store, proj *sdk.Project, wr *sdk.WorkflowRun, mapNodes map[int64]*sdk.Node, n *sdk.Node, subNumber int, parents []*sdk.WorkflowNodeRun, hookEvent *sdk.WorkflowNodeRunHookEvent, manual *sdk.WorkflowNodeRunManual) (*ProcessorReport, bool, error) {
report := new(ProcessorReport)
//TODO: Check user for manual done but check permission also for automatic trigger and hooks (with system to authenticate a webhook)
if n.Context == nil {
n.Context = &sdk.NodeContext{}
}
if n.Context.PipelineID == 0 && n.Type == sdk.NodeTypePipeline {
return nil, false, sdk.ErrPipelineNotFound
}
var runPayload map[string]string
var errPayload error
runPayload, errPayload = n.Context.DefaultPayloadToMap()
if errPayload != nil {
return nil, false, sdk.WrapError(errPayload, "Default payload is malformatted")
}
isDefaultPayload := true
// For node with pipeline
var stages []sdk.Stage
var pip sdk.Pipeline
if n.Context.PipelineID > 0 {
var has bool
pip, has = wr.Workflow.Pipelines[n.Context.PipelineID]
if !has {
return nil, false, fmt.Errorf("pipeline %d not found in workflow", n.Context.PipelineID)
}
stages = make([]sdk.Stage, len(pip.Stages))
copy(stages, pip.Stages)
//If the pipeline has parameter but none are defined on context, use the defaults
if len(pip.Parameter) > 0 && len(n.Context.DefaultPipelineParameters) == 0 {
n.Context.DefaultPipelineParameters = pip.Parameter
}
}
// Create run
run := &sdk.WorkflowNodeRun{
WorkflowID: wr.WorkflowID,
LastModified: time.Now(),
Start: time.Now(),
Number: wr.Number,
SubNumber: int64(subNumber),
WorkflowRunID: wr.ID,
WorkflowNodeID: n.ID,
WorkflowNodeName: n.Name,
Status: string(sdk.StatusWaiting),
Stages: stages,
Header: wr.Header,
}
if run.SubNumber >= wr.LastSubNumber {
wr.LastSubNumber = run.SubNumber
}
if n.Context.ApplicationID != 0 {
run.ApplicationID = n.Context.ApplicationID
}
parentsIDs := make([]int64, len(parents))
for i := range parents {
parentsIDs[i] = parents[i].ID
}
parentStatus := sdk.StatusSuccess.String()
run.SourceNodeRuns = parentsIDs
if parents != nil {
for _, p := range parents {
for _, v := range wr.WorkflowNodeRuns {
for _, run := range v {
if p.ID == run.ID {
if run.Status == sdk.StatusFail.String() || run.Status == sdk.StatusStopped.String() {
parentStatus = run.Status
}
}
}
}
}
//Merge the payloads from all the sources
_, next := observability.Span(ctx, "workflow.processNode.mergePayload")
for _, r := range parents {
e := dump.NewDefaultEncoder()
e.Formatters = []dump.KeyFormatterFunc{dump.WithDefaultLowerCaseFormatter()}
e.ExtraFields.DetailedMap = false
e.ExtraFields.DetailedStruct = false
e.ExtraFields.Len = false
e.ExtraFields.Type = false
m1, errm1 := e.ToStringMap(r.Payload)
if errm1 != nil {
AddWorkflowRunInfo(wr, true, sdk.SpawnMsg{
ID: sdk.MsgWorkflowError.ID,
Args: []interface{}{errm1.Error()},
})
log.Error("processNode> Unable to compute hook payload: %v", errm1)
}
if isDefaultPayload {
// Check if we try to merge for the first time so try to merge the default payload with the first parent run found
// if it is the default payload then we have to take the previous git values
runPayload = sdk.ParametersMapMerge(runPayload, m1)
isDefaultPayload = false
} else {
runPayload = sdk.ParametersMapMerge(runPayload, m1, sdk.MapMergeOptions.ExcludeGitParams)
}
}
run.Payload = runPayload
run.PipelineParameters = sdk.ParametersMerge(pip.Parameter, n.Context.DefaultPipelineParameters)
// Take first value in pipeline parameter list if no default value is set
for i := range run.PipelineParameters {
if run.PipelineParameters[i].Type == sdk.ListParameter && strings.Contains(run.PipelineParameters[i].Value, ";") {
run.PipelineParameters[i].Value = strings.Split(run.PipelineParameters[i].Value, ";")[0]
}
}
next()
}
run.HookEvent = hookEvent
if hookEvent != nil {
runPayload = sdk.ParametersMapMerge(runPayload, hookEvent.Payload)
run.Payload = runPayload
run.PipelineParameters = sdk.ParametersMerge(pip.Parameter, n.Context.DefaultPipelineParameters)
}
run.BuildParameters = append(run.BuildParameters, sdk.Parameter{
Name: "cds.node",
Type: sdk.StringParameter,
Value: run.WorkflowNodeName,
})
run.Manual = manual
if manual != nil {
payloadStr, err := json.Marshal(manual.Payload)
if err != nil {
log.Error("processNode> Unable to marshal payload: %v", err)
}
run.BuildParameters = append(run.BuildParameters, sdk.Parameter{
Name: "payload",
Type: sdk.TextParameter,
Value: string(payloadStr),
})
e := dump.NewDefaultEncoder()
e.Formatters = []dump.KeyFormatterFunc{dump.WithDefaultLowerCaseFormatter()}
e.ExtraFields.DetailedMap = false
e.ExtraFields.DetailedStruct = false
e.ExtraFields.Len = false
e.ExtraFields.Type = false
m1, errm1 := e.ToStringMap(manual.Payload)
if errm1 != nil {
return report, false, sdk.WrapError(errm1, "processNode> Unable to compute payload")
}
runPayload = sdk.ParametersMapMerge(runPayload, m1)
run.Payload = runPayload
run.PipelineParameters = sdk.ParametersMerge(n.Context.DefaultPipelineParameters, manual.PipelineParameters)
run.BuildParameters = append(run.BuildParameters, sdk.Parameter{
Name: "cds.triggered_by.email",
Type: sdk.StringParameter,
Value: manual.User.Email,
}, sdk.Parameter{
Name: "cds.triggered_by.fullname",
Type: sdk.StringParameter,
Value: manual.User.Fullname,
}, sdk.Parameter{
Name: "cds.triggered_by.username",
Type: sdk.StringParameter,
Value: manual.User.Username,
}, sdk.Parameter{
Name: "cds.manual",
Type: sdk.StringParameter,
Value: "true",
})
} else {
run.BuildParameters = append(run.BuildParameters, sdk.Parameter{
Name: "cds.manual",
Type: sdk.StringParameter,
Value: "false",
})
}
cdsStatusParam := sdk.Parameter{
Name: "cds.status",
Type: sdk.StringParameter,
Value: parentStatus,
}
run.BuildParameters = sdk.ParametersFromMap(
sdk.ParametersMapMerge(
sdk.ParametersToMap(run.BuildParameters),
sdk.ParametersToMap([]sdk.Parameter{cdsStatusParam}),
sdk.MapMergeOptions.ExcludeGitParams,
),
)
// Process parameters for the jobs
runContext := nodeRunContext{}
if n.Context.PipelineID != 0 {
runContext.Pipeline = wr.Workflow.Pipelines[n.Context.PipelineID]
}
if n.Context.ApplicationID != 0 {
runContext.Application = wr.Workflow | random_line_split | ||
client.go | // resource is already deleted (or never existsed)
// so we're done here
return nil
} else if err != nil {
// failed to get stack status
return err
}
if *state.StackStatus == DeleteComplete {
// resource already deleted
return nil
}
// trigger a delete unless we're already in a deleting state
if *state.StackStatus != DeleteInProgress {
_, err := r.Client.DeleteStackWithContext(ctx, &DeleteStackInput{
StackName: aws.String(stack.GetStackName()),
})
if err != nil {
return err
}
}
_, err = r.waitUntilDestroyedState(ctx, stack)
if err != nil {
return err
}
return nil
}
// Outputs fetches the cloudformation outputs for the given stack.
// Returns ErrStackNotFound if the stack does not exist.
func (r *Client) Outputs(ctx context.Context, stack Stack) (Outputs, error) {
	current, err := r.get(ctx, stack)
	if err != nil {
		return nil, err
	}
	return r.resolveOutputs(ctx, current.Outputs)
}
// resolveOutputs returns cloudformation outputs in a map format and resolves
// any values that are stored in AWS Secrets Manager.
// Outputs with a nil key or value are silently skipped.
func (r *Client) resolveOutputs(ctx context.Context, list []*Output) (Outputs, error) {
	// compile the reference matcher once per call instead of once per
	// secret-backed output inside the loop
	secretRefMatcher := regexp.MustCompile(`{{resolve:secretsmanager:(.*):SecretString:(.*)}}`)
	outputs := Outputs{}
	for _, item := range list {
		if item.OutputKey == nil || item.OutputValue == nil {
			continue
		}
		key := *item.OutputKey
		value := *item.OutputValue
		// we automatically resolve references to AWS Secrets Manager
		// secrets here, so that we are able to make use of encrypted
		// sensitive values in cloudformation templates
		if strings.HasPrefix(value, "{{resolve:secretsmanager:") {
			// extract ARN and key name from reference
			matches := secretRefMatcher.FindStringSubmatch(value)
			if len(matches) == 0 {
				// report the offending value (previously this printed the
				// regexp itself, which made the error useless for debugging)
				return nil, fmt.Errorf("failed to extract ARN and key name from secretsmanager value: %s", value)
			}
			arn := matches[1]
			subkey := matches[2]
			v, err := r.Client.GetSecretValueWithContext(ctx, &GetSecretValueInput{
				SecretId: aws.String(arn),
			})
			if err != nil {
				return nil, err
			}
			if v.SecretString == nil {
				return nil, fmt.Errorf("unexpected nil value in SecretString of %s", arn)
			}
			// SecretString is expected to be a JSON object keyed by subkey
			secrets := map[string]interface{}{}
			err = json.Unmarshal([]byte(*v.SecretString), &secrets)
			if err != nil {
				return nil, err
			}
			subval, haveSubkey := secrets[subkey]
			if !haveSubkey {
				return nil, fmt.Errorf("could not find subkey %s in SecretString of %s", subkey, arn)
			}
			subvalString, ok := subval.(string)
			if !ok {
				return nil, fmt.Errorf("subval at subkey %s in SecretString of %s is not a string", subkey, arn)
			}
			value = subvalString
		}
		outputs[key] = value
	}
	return outputs, nil
}
// get fetches the current cloudformation state for the given stack.
// Returns ErrStackNotFound if the stack does not exist.
func (r *Client) get(ctx context.Context, stack Stack) (*State, error) {
	resp, err := r.Client.DescribeStacksWithContext(ctx, &DescribeStacksInput{
		StackName: aws.String(stack.GetStackName()),
	})
	if err != nil {
		if IsNotFoundError(err) {
			return nil, ErrStackNotFound
		}
		return nil, err
	}
	// defensive checks: DescribeStacks called with a StackName must return
	// exactly one stack, anything else indicates a client-level problem
	switch {
	case resp == nil:
		return nil, fmt.Errorf("describeOutput was nil, potential issue with AWS Client")
	case len(resp.Stacks) == 0:
		return nil, fmt.Errorf("describeOutput contained no Stacks, potential issue with AWS Client")
	case len(resp.Stacks) > 1:
		return nil, fmt.Errorf("describeOutput contained multiple Stacks which is unexpected when calling with StackName, potential issue with AWS Client")
	}
	current := resp.Stacks[0]
	if current.StackStatus == nil {
		return nil, fmt.Errorf("describeOutput contained a nil StackStatus, potential issue with AWS Client")
	}
	return current, nil
}
// updateStatus mutates the stack's status with current state, events and any
// whitelisted outputs. It ignores any errors encountered and just updates
// whatever it can, with the intention of getting as much info visible as
// possible even under error conditions.
func (r *Client) updateStatus(stack Stack) {
	// use a fresh context or we might not be able to update status after
	// deadline is hit, but this feels a little wrong
	ctx := context.Background()
	// best-effort lookups: errors are deliberately discarded, leaving
	// state nil / events empty so the guards below simply skip them
	state, _ := r.get(ctx, stack)
	events, _ := r.events(ctx, stack)
	s := stack.GetStatus()
	// update aws specific state; every field is pointer-guarded because
	// the AWS SDK leaves unset response fields nil
	if state != nil {
		if state.StackId != nil {
			s.AWS.ID = *state.StackId
		}
		if state.StackName != nil {
			s.AWS.Name = *state.StackName
		}
		if state.StackStatus != nil {
			s.AWS.Status = *state.StackStatus
		}
		if state.StackStatusReason != nil {
			s.AWS.Reason = *state.StackStatusReason
		}
	}
	// add any event details, replacing the previous event list wholesale
	if len(events) > 0 {
		s.AWS.Events = []object.AWSEvent{}
		for _, event := range events {
			// "-" is the placeholder shown when AWS gives no reason
			reason := "-"
			if event.ResourceStatusReason != nil {
				reason = *event.ResourceStatusReason
			}
			s.AWS.Events = append(s.AWS.Events, object.AWSEvent{
				Status: *event.ResourceStatus,
				Reason: reason,
				Time:   &metav1.Time{Time: *event.Timestamp},
			})
		}
	}
	// update generic state: map the AWS-specific status string onto the
	// object's coarse lifecycle states
	switch s.AWS.Status {
	case DeleteFailed, CreateFailed, RollbackFailed, UpdateRollbackFailed, RollbackComplete, UpdateRollbackComplete:
		s.State = object.ErrorState
	case DeleteInProgress, DeleteComplete:
		s.State = object.DeletingState
	case CreateComplete, UpdateComplete:
		s.State = object.ReadyState
	default:
		s.State = object.ReconcilingState
	}
	// if object implements whitelisting of output keys, then copy only the
	// whitelisted cloudformation outputs into the visible Info map
	if w, ok := stack.(StackOutputWhitelister); ok {
		if s.AWS.Info == nil {
			s.AWS.Info = map[string]string{}
		}
		outputs, _ := r.Outputs(ctx, stack)
		for _, whitelistedKey := range w.GetStackOutputWhitelist() {
			if val, ok := outputs[whitelistedKey]; ok {
				s.AWS.Info[whitelistedKey] = val
			}
		}
	}
	stack.SetStatus(s)
}
// events fetches the cloudformation event history for the given stack.
// A nil SDK response is normalised to an empty (non-nil) slice.
func (r *Client) events(ctx context.Context, stack Stack) ([]*StateEvent, error) {
	eventsOutput, err := r.Client.DescribeStackEventsWithContext(ctx, &DescribeStackEventsInput{
		StackName: aws.String(stack.GetStackName()),
	})
	if err != nil {
		return nil, err
	}
	if eventsOutput == nil {
		return []*StateEvent{}, nil
	}
	return eventsOutput.StackEvents, nil
}
// exists reports whether the stack has been provisioned. A missing stack is
// not an error: it yields (false, nil).
func (r *Client) exists(ctx context.Context, stack Stack) (bool, error) {
	_, err := r.get(ctx, stack)
	switch err {
	case nil:
		return true, nil
	case ErrStackNotFound:
		return false, nil
	default:
		return false, err
	}
}
// waitUntilCompleteState blocks until the stack reaches any terminal
// "complete" status (including rollback completions) or the context is done.
func (r *Client) waitUntilCompleteState(ctx context.Context, stack Stack) (*State, error) {
	return r.waitUntilState(ctx, stack, []string{
		CreateComplete,
		UpdateComplete,
		UpdateRollbackComplete,
		RollbackComplete,
	})
}
// waitUntilDestroyedState blocks until the stack reaches DeleteComplete
// (a stack that has disappeared entirely is treated the same way by
// waitUntilState) or the context is done.
func (r *Client) waitUntilDestroyedState(ctx context.Context, stack Stack) (*State, error) {
	return r.waitUntilState(ctx, stack, []string{
		DeleteComplete,
	})
}
// waitUntilState polls the stack state every PollingInterval until it reaches
// one of desiredStates, the context is done, or the lookup fails.
// If DeleteComplete is desired and the stack has gone missing, the stack is
// considered deleted and an empty state is returned.
func (r *Client) waitUntilState(ctx context.Context, stack Stack, desiredStates []string) (*State, error) {
	for {
		select {
		case <-ctx.Done():
			// report the actual cause (DeadlineExceeded or Canceled)
			// instead of always claiming a deadline was exceeded
			return nil, ctx.Err()
		default:
		}
		// NOTE(review): r.get returns ErrStackNotFound rather than the raw
		// AWS error; confirm IsNotFoundError matches that sentinel too.
		state, err := r.get(ctx, stack)
		if IsNotFoundError(err) && in(DeleteComplete, desiredStates) {
			// If we are waiting for DeleteComplete state and the
			// stack has gone missing, consider this DeleteComplete
			return &State{}, nil
		} else if err != nil {
			return nil, err
		}
		if in(*state.StackStatus, desiredStates) {
			return state, nil
		}
		// sleep between polls, but wake immediately on cancellation so we
		// never block past the caller's deadline
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(r.PollingInterval):
		}
	}
}
// in reports whether needle is present in haystack.
func in(needle string, haystack []string) bool {
	for i := range haystack {
		if haystack[i] == needle {
			return true
		}
	}
	return false
}
func IsNoUpdateError(err error) bool {
if err == nil | {
return false
} | conditional_block | |
client.go |
var Ref = goformation.Ref
var Sub = goformation.Sub
const CreateInProgress = cloudformation.StackStatusCreateInProgress
const DeleteInProgress = cloudformation.StackStatusDeleteInProgress
const UpdateInProgress = cloudformation.StackStatusUpdateInProgress
const ReviewInProgress = cloudformation.StackStatusReviewInProgress
const CreateComplete = cloudformation.StackStatusCreateComplete
const DeleteComplete = cloudformation.StackStatusDeleteComplete
const UpdateComplete = cloudformation.StackStatusUpdateComplete
const CreateFailed = cloudformation.StackStatusCreateFailed
const DeleteFailed = cloudformation.StackStatusDeleteFailed
const RollbackFailed = cloudformation.StackStatusRollbackFailed
const RollbackInProgress = cloudformation.StackStatusRollbackInProgress
const UpdateRollbackFailed = cloudformation.StackStatusUpdateRollbackFailed
const RollbackComplete = cloudformation.StackStatusRollbackComplete
const UpdateRollbackInProgress = cloudformation.StackStatusRollbackInProgress
const UpdateRollbackComplete = cloudformation.StackStatusUpdateRollbackComplete
const UpdateRollbackCompleteCleanupInProgress = cloudformation.StackStatusUpdateRollbackCompleteCleanupInProgress
var (
// capabilities required by cloudformation
capabilities = []*string{
aws.String("CAPABILITY_NAMED_IAM"),
}
// ErrStackNotFound returned when stack does not exist, or has been deleted
ErrStackNotFound = fmt.Errorf("STACK_NOT_FOUND")
// NoUpdatesErrMatch is string to match in error from aws to detect if nothing to update
NoUpdatesErrMatch = "No updates"
// NoExistErrMatch is a string to match if stack does not exist
NoExistErrMatch = "does not exist"
)
// Outputs is used as a more friendly version of cloudformation.Output:
// a plain map from output key to (resolved) output value.
type Outputs map[string]string

// Client performs cloudformation operations on objects that implement the Stack interface
type Client struct {
	// ClusterName is used to prefix any generated names to avoid clashes
	ClusterName string
	// Client is the AWS SDK Client implementation to use
	Client sdk.Client
	// PollingInterval is the duration between calls to check state when waiting for apply/destroy to complete
	PollingInterval time.Duration
}
// Apply reconciles the state of the remote cloudformation stack and blocks
// until the stack is no longer in a creating/applying state or a ctx timeout is hit.
// Calls should be retried if DeadlineExceeded errors are hit.
// Returns any outputs on successful apply.
// Will update stack status (via updateStatus) even when returning an error.
func (r *Client) Apply(ctx context.Context, stack Stack, params ...*Parameter) (Outputs, error) {
	// always update stack status, whatever the outcome
	defer r.updateStatus(stack)
	// check if exists
	exists, err := r.exists(ctx, stack)
	if err != nil {
		return nil, err
	}
	if !exists {
		err := r.create(ctx, stack, params...)
		if err != nil {
			return nil, err
		}
	}
	// wait for any in-flight operation (including the create above) to settle
	_, err = r.waitUntilCompleteState(ctx, stack)
	if err != nil {
		return nil, err
	}
	err = r.update(ctx, stack, params...)
	if err != nil {
		return nil, err
	}
	// wait for the update to settle, then read outputs from the final state
	state, err := r.waitUntilCompleteState(ctx, stack)
	if err != nil {
		return nil, err
	}
	return r.resolveOutputs(ctx, state.Outputs)
}
// validateParams checks for any unset template parameters
func (r *Client) validateTemplateParams(t *Template, params []*Parameter) error {
missing := map[string]interface{}{}
// copy all wanted params into missing
for k, v := range t.Parameters {
missing[k] = v
}
// remove items from missing list as found
for wantedKey := range t.Parameters {
for _, param := range params {
if param.ParameterKey == nil {
continue
}
// phew found it
if *param.ParameterKey == wantedKey {
delete(missing, wantedKey)
break
}
}
}
// if any left, then we have an issue
if len(missing) > 0 {
keys := []string{}
for k := range missing {
keys = append(keys, k)
}
keysCSV := strings.Join(keys, ",")
return fmt.Errorf("missing required input parameters: [%s]", keysCSV)
}
return nil
}
// create initiates a cloudformation create passing in the given params
func (r *Client) create(ctx context.Context, stack Stack, params ...*Parameter) error {
// fetch and validate template
t, err := stack.GetStackTemplate()
if err != nil {
return err
}
err = r.validateTemplateParams(t, params)
if err != nil {
return err
}
yaml, err := t.YAML()
if err != nil {
return err
}
stackPolicy, err := getStackPolicy(stack)
if err != nil {
return err
}
_, err = r.Client.CreateStackWithContext(ctx, &CreateStackInput{
Capabilities: capabilities,
TemplateBody: aws.String(string(yaml)),
StackName: aws.String(stack.GetStackName()),
StackPolicyBody: stackPolicy,
Parameters: params,
})
if err != nil {
return err
}
return nil
}
// Update the stack and wait for update to complete.
func (r *Client) update(ctx context.Context, stack Stack, params ...*Parameter) error {
// fetch and validate template params
t, err := stack.GetStackTemplate()
if err != nil {
return err
}
err = r.validateTemplateParams(t, params)
if err != nil {
return err
}
yaml, err := t.YAML()
if err != nil {
return err
}
stackPolicy, err := getStackPolicy(stack)
if err != nil {
return err
}
_, err = r.Client.UpdateStackWithContext(ctx, &UpdateStackInput{
Capabilities: capabilities,
TemplateBody: aws.String(string(yaml)),
StackName: aws.String(stack.GetStackName()),
StackPolicyBody: stackPolicy,
Parameters: params,
})
if err != nil && !IsNoUpdateError(err) {
return err
}
return nil
}
// Destroy will attempt to deprovision the cloudformation stack and block until complete or the ctx Deadline expires
// Calls should be retried if DeadlineExceeded errors are hit
// Will update stack with current status
func (r *Client) Destroy(ctx context.Context, stack Stack) error {
// always update stack status
defer r.updateStatus(stack)
// fetch current state
state, err := r.get(ctx, stack)
if err == ErrStackNotFound {
// resource is already deleted (or never existsed)
// so we're done here
return nil
} else if err != nil {
// failed to get stack status
return err
}
if *state.StackStatus == DeleteComplete {
// resource already deleted
return nil
}
// trigger a delete unless we're already in a deleting state
if *state.StackStatus != DeleteInProgress {
_, err := r.Client.DeleteStackWithContext(ctx, &DeleteStackInput{
StackName: aws.String(stack.GetStackName()),
})
if err != nil {
return err
}
}
_, err = r.waitUntilDestroyedState(ctx, stack)
if err != nil {
return err
}
return nil
}
// Outputs fetches the cloudformation outputs for the given stack
// Returns ErrStackNotFound if stack does not exist
func (r *Client) | (ctx context.Context, stack Stack) (Outputs, error) {
state, err := r.get(ctx, stack)
if err != nil {
return nil, err
}
return r.resolveOutputs(ctx, state.Outputs)
}
// resolveOutputs returns cloudformation outputs in a map format and resolves
// any values that are stored in AWS Secrets Manager
func (r *Client) resolveOutputs(ctx context.Context, list []*Output) (Outputs, error) {
outputs := Outputs{}
for _, item := range list {
if item.OutputKey == nil || item.OutputValue == nil {
continue
}
key := *item.OutputKey
value := *item.OutputValue
// we automatically resolve references to AWS Secrets Manager
// secrets here, so that we are able to make use of encrypted
// sensitive values in cloudformation templates
if strings.HasPrefix(value, "{{resolve:secretsmanager:") {
// extract ARN and key name from reference
secretARNMatcher := regexp.MustCompile(`{{resolve:secretsmanager:(.*):SecretString:(.*)}}`)
matches := secretARNMatcher.FindStringSubmatch(value)
if len(matches) == 0 {
return nil, fmt.Errorf("failed to extract ARN and key name from secretsmanager value: %s", secretARNMatcher)
}
arn := matches[1]
subkey := matches[2]
v, err := r.Client.GetSecretValueWithContext(ctx, &GetSecretValueInput{
SecretId: aws.String(arn),
})
if err != nil {
return nil, err
}
if v.SecretString == nil {
return nil, fmt.Errorf("unexpected nil value in SecretString of %s", arn)
}
secrets := map[string]interface{}{}
err = json.Unmarshal([]byte(*v.SecretString), &secrets)
if err != nil {
return nil, err
}
subval, haveSub | Outputs | identifier_name |
client.go | return nil, err
}
}
_, err = r.waitUntilCompleteState(ctx, stack)
if err != nil {
return nil, err
}
err = r.update(ctx, stack, params...)
if err != nil {
return nil, err
}
state, err := r.waitUntilCompleteState(ctx, stack)
if err != nil {
return nil, err
}
return r.resolveOutputs(ctx, state.Outputs)
}
// validateParams checks for any unset template parameters
func (r *Client) validateTemplateParams(t *Template, params []*Parameter) error {
missing := map[string]interface{}{}
// copy all wanted params into missing
for k, v := range t.Parameters {
missing[k] = v
}
// remove items from missing list as found
for wantedKey := range t.Parameters {
for _, param := range params {
if param.ParameterKey == nil {
continue
}
// phew found it
if *param.ParameterKey == wantedKey {
delete(missing, wantedKey)
break
}
}
}
// if any left, then we have an issue
if len(missing) > 0 {
keys := []string{}
for k := range missing {
keys = append(keys, k)
}
keysCSV := strings.Join(keys, ",")
return fmt.Errorf("missing required input parameters: [%s]", keysCSV)
}
return nil
}
// create initiates a cloudformation create passing in the given params
func (r *Client) create(ctx context.Context, stack Stack, params ...*Parameter) error {
// fetch and validate template
t, err := stack.GetStackTemplate()
if err != nil {
return err
}
err = r.validateTemplateParams(t, params)
if err != nil {
return err
}
yaml, err := t.YAML()
if err != nil {
return err
}
stackPolicy, err := getStackPolicy(stack)
if err != nil {
return err
}
_, err = r.Client.CreateStackWithContext(ctx, &CreateStackInput{
Capabilities: capabilities,
TemplateBody: aws.String(string(yaml)),
StackName: aws.String(stack.GetStackName()),
StackPolicyBody: stackPolicy,
Parameters: params,
})
if err != nil {
return err
}
return nil
}
// Update the stack and wait for update to complete.
func (r *Client) update(ctx context.Context, stack Stack, params ...*Parameter) error {
// fetch and validate template params
t, err := stack.GetStackTemplate()
if err != nil {
return err
}
err = r.validateTemplateParams(t, params)
if err != nil {
return err
}
yaml, err := t.YAML()
if err != nil {
return err
}
stackPolicy, err := getStackPolicy(stack)
if err != nil {
return err
}
_, err = r.Client.UpdateStackWithContext(ctx, &UpdateStackInput{
Capabilities: capabilities,
TemplateBody: aws.String(string(yaml)),
StackName: aws.String(stack.GetStackName()),
StackPolicyBody: stackPolicy,
Parameters: params,
})
if err != nil && !IsNoUpdateError(err) {
return err
}
return nil
}
// Destroy will attempt to deprovision the cloudformation stack and block until complete or the ctx Deadline expires
// Calls should be retried if DeadlineExceeded errors are hit
// Will update stack with current status
func (r *Client) Destroy(ctx context.Context, stack Stack) error {
// always update stack status
defer r.updateStatus(stack)
// fetch current state
state, err := r.get(ctx, stack)
if err == ErrStackNotFound {
// resource is already deleted (or never existsed)
// so we're done here
return nil
} else if err != nil {
// failed to get stack status
return err
}
if *state.StackStatus == DeleteComplete {
// resource already deleted
return nil
}
// trigger a delete unless we're already in a deleting state
if *state.StackStatus != DeleteInProgress {
_, err := r.Client.DeleteStackWithContext(ctx, &DeleteStackInput{
StackName: aws.String(stack.GetStackName()),
})
if err != nil {
return err
}
}
_, err = r.waitUntilDestroyedState(ctx, stack)
if err != nil {
return err
}
return nil
}
// Outputs fetches the cloudformation outputs for the given stack
// Returns ErrStackNotFound if stack does not exist
func (r *Client) Outputs(ctx context.Context, stack Stack) (Outputs, error) {
state, err := r.get(ctx, stack)
if err != nil {
return nil, err
}
return r.resolveOutputs(ctx, state.Outputs)
}
// resolveOutputs returns cloudformation outputs in a map format and resolves
// any values that are stored in AWS Secrets Manager
func (r *Client) resolveOutputs(ctx context.Context, list []*Output) (Outputs, error) {
outputs := Outputs{}
for _, item := range list {
if item.OutputKey == nil || item.OutputValue == nil {
continue
}
key := *item.OutputKey
value := *item.OutputValue
// we automatically resolve references to AWS Secrets Manager
// secrets here, so that we are able to make use of encrypted
// sensitive values in cloudformation templates
if strings.HasPrefix(value, "{{resolve:secretsmanager:") {
// extract ARN and key name from reference
secretARNMatcher := regexp.MustCompile(`{{resolve:secretsmanager:(.*):SecretString:(.*)}}`)
matches := secretARNMatcher.FindStringSubmatch(value)
if len(matches) == 0 {
return nil, fmt.Errorf("failed to extract ARN and key name from secretsmanager value: %s", secretARNMatcher)
}
arn := matches[1]
subkey := matches[2]
v, err := r.Client.GetSecretValueWithContext(ctx, &GetSecretValueInput{
SecretId: aws.String(arn),
})
if err != nil {
return nil, err
}
if v.SecretString == nil {
return nil, fmt.Errorf("unexpected nil value in SecretString of %s", arn)
}
secrets := map[string]interface{}{}
err = json.Unmarshal([]byte(*v.SecretString), &secrets)
if err != nil {
return nil, err
}
subval, haveSubkey := secrets[subkey]
if !haveSubkey {
return nil, fmt.Errorf("could not find subkey %s in SecretString of %s", subkey, arn)
}
subvalString, ok := subval.(string)
if !ok {
return nil, fmt.Errorf("subval at subkey %s in SecretString of %s is not a string", subkey, arn)
}
value = subvalString
}
outputs[key] = value
}
return outputs, nil
}
// get fetches the cloudformation stack state
// Returns ErrStackNotFound if stack does not exist
func (r *Client) get(ctx context.Context, stack Stack) (*State, error) {
describeOutput, err := r.Client.DescribeStacksWithContext(ctx, &DescribeStacksInput{
StackName: aws.String(stack.GetStackName()),
})
if err != nil {
if IsNotFoundError(err) {
return nil, ErrStackNotFound
}
return nil, err
}
if describeOutput == nil {
return nil, fmt.Errorf("describeOutput was nil, potential issue with AWS Client")
}
if len(describeOutput.Stacks) == 0 {
return nil, fmt.Errorf("describeOutput contained no Stacks, potential issue with AWS Client")
}
if len(describeOutput.Stacks) > 1 {
return nil, fmt.Errorf("describeOutput contained multiple Stacks which is unexpected when calling with StackName, potential issue with AWS Client")
}
state := describeOutput.Stacks[0]
if state.StackStatus == nil {
return nil, fmt.Errorf("describeOutput contained a nil StackStatus, potential issue with AWS Client")
}
return state, nil
}
// update mutates the stack's status with current state, events and any
// whitelisted outputs. ignores any errors encountered and just updates
// whatever it can with the intension of getting as much info visible as
// possible even under error conditions.
func (r *Client) updateStatus(stack Stack) | {
// use a fresh context or we might not be able to update status after
// deadline is hit, but this feels a little wrong
ctx := context.Background()
state, _ := r.get(ctx, stack)
events, _ := r.events(ctx, stack)
s := stack.GetStatus()
// update aws specific state
if state != nil {
if state.StackId != nil {
s.AWS.ID = *state.StackId
}
if state.StackName != nil {
s.AWS.Name = *state.StackName
}
if state.StackStatus != nil {
s.AWS.Status = *state.StackStatus
}
if state.StackStatusReason != nil {
s.AWS.Reason = *state.StackStatusReason | identifier_body | |
client.go |
var Ref = goformation.Ref
var Sub = goformation.Sub
const CreateInProgress = cloudformation.StackStatusCreateInProgress
const DeleteInProgress = cloudformation.StackStatusDeleteInProgress
const UpdateInProgress = cloudformation.StackStatusUpdateInProgress
const ReviewInProgress = cloudformation.StackStatusReviewInProgress
const CreateComplete = cloudformation.StackStatusCreateComplete
const DeleteComplete = cloudformation.StackStatusDeleteComplete
const UpdateComplete = cloudformation.StackStatusUpdateComplete
const CreateFailed = cloudformation.StackStatusCreateFailed
const DeleteFailed = cloudformation.StackStatusDeleteFailed
const RollbackFailed = cloudformation.StackStatusRollbackFailed
const RollbackInProgress = cloudformation.StackStatusRollbackInProgress
const UpdateRollbackFailed = cloudformation.StackStatusUpdateRollbackFailed
const RollbackComplete = cloudformation.StackStatusRollbackComplete
const UpdateRollbackInProgress = cloudformation.StackStatusRollbackInProgress
const UpdateRollbackComplete = cloudformation.StackStatusUpdateRollbackComplete
const UpdateRollbackCompleteCleanupInProgress = cloudformation.StackStatusUpdateRollbackCompleteCleanupInProgress
var (
// capabilities required by cloudformation
capabilities = []*string{
aws.String("CAPABILITY_NAMED_IAM"),
}
// ErrStackNotFound returned when stack does not exist, or has been deleted
ErrStackNotFound = fmt.Errorf("STACK_NOT_FOUND")
// NoUpdatesErrMatch is string to match in error from aws to detect if nothing to update
NoUpdatesErrMatch = "No updates"
// NoExistErrMatch is a string to match if stack does not exist
NoExistErrMatch = "does not exist"
)
// Outputs is used as a more friendly version of cloudformation.Output
type Outputs map[string]string
// Client performs cloudformation operations on objects that implement the Stack interface
type Client struct {
// ClusterName is used to prefix any generated names to avoid clashes
ClusterName string
// Client is the AWS SDK Client implementation to use
Client sdk.Client
// PollingInterval is the duration between calls to check state when waiting for apply/destroy to complete
PollingInterval time.Duration
}
// Apply reconciles the state of the remote cloudformation stack and blocks
// until the stack is no longer in an creating/applying state or a ctx timeout is hit
// Calls should be retried if DeadlineExceeded errors are hit
// Returns any outputs on successful apply.
// Will update stack with current status
func (r *Client) Apply(ctx context.Context, stack Stack, params ...*Parameter) (Outputs, error) {
// always update stack status
defer r.updateStatus(stack)
// check if exists
exists, err := r.exists(ctx, stack)
if err != nil {
return nil, err
}
if !exists {
err := r.create(ctx, stack, params...)
if err != nil {
return nil, err
}
}
_, err = r.waitUntilCompleteState(ctx, stack)
if err != nil {
return nil, err
}
err = r.update(ctx, stack, params...)
if err != nil {
return nil, err
}
state, err := r.waitUntilCompleteState(ctx, stack)
if err != nil {
return nil, err
}
return r.resolveOutputs(ctx, state.Outputs)
}
// validateParams checks for any unset template parameters
func (r *Client) validateTemplateParams(t *Template, params []*Parameter) error {
missing := map[string]interface{}{}
// copy all wanted params into missing
for k, v := range t.Parameters {
missing[k] = v
}
// remove items from missing list as found
for wantedKey := range t.Parameters {
for _, param := range params {
if param.ParameterKey == nil {
continue
}
// phew found it
if *param.ParameterKey == wantedKey {
delete(missing, wantedKey)
break
}
}
}
// if any left, then we have an issue
if len(missing) > 0 {
keys := []string{}
for k := range missing {
keys = append(keys, k)
}
keysCSV := strings.Join(keys, ",")
return fmt.Errorf("missing required input parameters: [%s]", keysCSV)
}
return nil
}
// create initiates a cloudformation create passing in the given params
func (r *Client) create(ctx context.Context, stack Stack, params ...*Parameter) error {
// fetch and validate template
t, err := stack.GetStackTemplate()
if err != nil {
return err
}
err = r.validateTemplateParams(t, params)
if err != nil {
return err
}
yaml, err := t.YAML()
if err != nil {
return err
}
stackPolicy, err := getStackPolicy(stack)
if err != nil {
return err
}
_, err = r.Client.CreateStackWithContext(ctx, &CreateStackInput{
Capabilities: capabilities,
TemplateBody: aws.String(string(yaml)),
StackName: aws.String(stack.GetStackName()),
StackPolicyBody: stackPolicy,
Parameters: params,
})
if err != nil {
return err
}
return nil
}
// Update the stack and wait for update to complete.
func (r *Client) update(ctx context.Context, stack Stack, params ...*Parameter) error {
// fetch and validate template params
t, err := stack.GetStackTemplate()
if err != nil {
return err
}
err = r.validateTemplateParams(t, params)
if err != nil {
return err
}
yaml, err := t.YAML()
if err != nil {
return err
}
stackPolicy, err := getStackPolicy(stack)
if err != nil {
return err
}
_, err = r.Client.UpdateStackWithContext(ctx, &UpdateStackInput{
Capabilities: capabilities,
TemplateBody: aws.String(string(yaml)),
StackName: aws.String(stack.GetStackName()),
StackPolicyBody: stackPolicy,
Parameters: params,
})
if err != nil && !IsNoUpdateError(err) {
return err
}
return nil
}
// Destroy will attempt to deprovision the cloudformation stack and block until complete or the ctx Deadline expires
// Calls should be retried if DeadlineExceeded errors are hit
// Will update stack with current status
func (r *Client) Destroy(ctx context.Context, stack Stack) error {
// always update stack status
defer r.updateStatus(stack)
// fetch current state
state, err := r.get(ctx, stack)
if err == ErrStackNotFound {
// resource is already deleted (or never existsed)
// so we're done here
return nil
} else if err != nil {
// failed to get stack status
return err
}
if *state.StackStatus == DeleteComplete {
// resource already deleted
return nil
}
// trigger a delete unless we're already in a deleting state
if *state.StackStatus != DeleteInProgress {
_, err := r.Client.DeleteStackWithContext(ctx, &DeleteStackInput{
StackName: aws.String(stack.GetStackName()),
})
if err != nil {
return err
}
}
_, err = r.waitUntilDestroyedState(ctx, stack)
if err != nil {
return err
}
return nil
}
// Outputs fetches the cloudformation outputs for the given stack
// Returns ErrStackNotFound if stack does not exist
func (r *Client) Outputs(ctx context.Context, stack Stack) (Outputs, error) {
state, err := r.get(ctx, stack)
if err != nil {
return nil, err
}
return r.resolveOutputs(ctx, state.Outputs)
}
// resolveOutputs returns cloudformation outputs in a map format and resolves
// any values that are stored in AWS Secrets Manager
func (r *Client) resolveOutputs(ctx context.Context, list []*Output) (Outputs, error) {
outputs := Outputs{}
for _, item := range list {
if item.OutputKey == nil || item.OutputValue == nil {
continue
}
key := *item.OutputKey
value := *item.OutputValue
// we automatically resolve references to AWS Secrets Manager
// secrets here, so that we are able to make use of encrypted
// sensitive values in cloudformation templates
if strings.HasPrefix(value, "{{resolve:secretsmanager:") {
// extract ARN and key name from reference
secretARNMatcher := regexp.MustCompile(`{{resolve:secretsmanager:(.*):SecretString:(.*)}}`)
matches := secretARNMatcher.FindStringSubmatch(value)
if len(matches) == 0 {
return nil, fmt.Errorf("failed to extract ARN and key name from secretsmanager value: %s", secretARNMatcher)
}
arn := matches[1]
subkey := matches[2]
v, err := r.Client.GetSecretValueWithContext(ctx, &GetSecretValueInput{
SecretId: aws.String(arn),
})
if err != nil {
return nil, err
}
if v.SecretString == nil { | if err != nil {
return nil, err
}
subval, haveSubkey | return nil, fmt.Errorf("unexpected nil value in SecretString of %s", arn)
}
secrets := map[string]interface{}{}
err = json.Unmarshal([]byte(*v.SecretString), &secrets) | random_line_split |
runtime.rs | dpListen`]) use this to make sure they have a runtime to handle the sockets
/// on.
///
/// If you prefer to specify configuration of the runtime to use, instead of the default one, you
/// can create an instance of this extension yourself and register it *before registering any socket
/// pipelines*, which will take precedence and the sockets will use the one provided by you. You
/// must register it using the [`with_singleton`] method.
///
/// Similarly, if all the pipelines are registered within the [`run`] method (or generally, after
/// building is done), you need to install this manually *before* doing [`run`].
///
/// Note that the provided closures are `FnMut` mostly because `Box<FnOnce>` doesn't work. They
/// will be called just once, so you can use `Option<T>` inside and consume the value by
/// `take.unwrap()`.
///
/// # Runtime configuration
///
/// You may have noticed the callbacks here don't have access to configuration. If you intend to
/// configure eg. the number of threads from user configuration, use the [`ThreadPoolConfig`]
/// instead.
///
/// # Future compatibility
///
/// More variants may be added into the enum at any time. Such change will not be considered a
/// breaking change.
///
/// # Examples
///
/// ```
/// extern crate failure;
/// extern crate serde;
/// #[macro_use]
/// extern crate serde_derive;
/// extern crate spirit;
/// extern crate spirit_tokio;
/// extern crate tokio;
///
/// use std::sync::Arc;
///
/// use failure::Error;
/// use spirit::prelude::*;
/// use spirit_tokio::{HandleListener, TcpListen};
/// use spirit_tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// #[derive(Default, Deserialize)]
/// struct Config {
/// #[serde(default)]
/// listening_socket: Vec<TcpListen>,
/// }
///
/// impl Config {
/// fn listener(&self) -> Vec<TcpListen> {
/// self.listening_socket.clone()
/// }
/// }
///
/// fn connection() -> impl Future<Item = (), Error = Error> {
/// future::ok(()) // Just a dummy implementation
/// }
///
/// fn main() {
/// Spirit::<Empty, Config>::new()
/// // Uses the current thread runtime instead of the default threadpool. This'll create
/// // smaller number of threads.
/// .with_singleton(Runtime::CurrentThread(Box::new(|_| ())))
/// .with(
/// Pipeline::new("listener")
/// .extract_cfg(Config::listener)
/// .transform(HandleListener(|_conn, _cfg: &_| connection()))
/// )
/// .run(|spirit| {
/// # let spirit = Arc::clone(spirit);
/// # std::thread::spawn(move || spirit.terminate());
/// Ok(())
/// });
/// }
/// ```
///
/// [`TcpListen`]: crate::TcpListen
/// [`UdpListen`]: crate::UdpListen
/// [`FutureInstaller`]: crate::installer::FutureInstaller
/// [`Fragment`]: spirit::Fragment
/// [`run`]: spirit::SpiritBuilder::run
/// [`with_singleton`]: spirit::extension::Extension::with_singleton
pub enum | {
/// Use the threadpool runtime.
///
/// The threadpool runtime is the default (both in tokio and spirit).
///
/// This allows you to modify the builder prior to starting it, specifying custom options like
/// number of threads.
ThreadPool(Box<dyn FnMut(&mut runtime::Builder) + Send>),
/// Use the current thread runtime.
///
/// If you prefer to run everything in a single thread, use this variant. The provided closure
/// can modify the builder prior to starting it.
CurrentThread(Box<dyn FnMut(&mut runtime::current_thread::Builder) + Send>),
/// Use completely custom runtime.
///
/// The provided closure should start the runtime and execute the provided future on it,
/// blocking until the runtime becomes empty.
///
/// This allows combining arbitrary runtimes that are not directly supported by either tokio or
/// spirit.
Custom(Box<dyn FnMut(TokioBody) -> Result<(), Error> + Send>),
#[doc(hidden)]
__NonExhaustive__,
// TODO: Support loading this from configuration? But it won't be possible to modify at
// runtime, will it?
}
impl Default for Runtime {
fn default() -> Self {
Runtime::ThreadPool(Box::new(|_| {}))
}
}
impl Runtime {
fn execute<O, C>(self, spirit: &Arc<Spirit<O, C>>, inner: InnerBody) -> Result<(), Error>
where
C: DeserializeOwned + Send + Sync + 'static,
O: StructOpt + Send + Sync + 'static,
{
let spirit = Arc::clone(spirit);
let fut = future::lazy(move || {
inner.run().map_err(move |e| {
spirit.terminate();
e
})
});
match self {
Runtime::ThreadPool(mut mod_builder) => {
let mut builder = runtime::Builder::new();
mod_builder(&mut builder);
let mut runtime = builder.build()?;
runtime.block_on(fut)?;
runtime.block_on_all(future::lazy(|| Ok(())))
}
Runtime::CurrentThread(mut mod_builder) => {
let mut builder = runtime::current_thread::Builder::new();
mod_builder(&mut builder);
let mut runtime = builder.build()?;
runtime.block_on(fut)?;
runtime.run().map_err(Error::from)
}
Runtime::Custom(mut callback) => callback(Box::new(fut)),
Runtime::__NonExhaustive__ => unreachable!(),
}
}
}
impl<E> Extension<E> for Runtime
where
E: Extensible<Ok = E>,
E::Config: DeserializeOwned + Send + Sync + 'static,
E::Opts: StructOpt + Send + Sync + 'static,
{
fn apply(self, ext: E) -> Result<E, Error> {
trace!("Wrapping in tokio runtime");
ext.run_around(|spirit, inner| self.execute(spirit, inner))
}
}
/// A configuration extension for the Tokio Threadpool runtime.
///
/// Using the [`extension`][ThreadPoolConfig::extension] or the
/// [`postprocess_extension`][ThreadPoolConfig::postprocess_extension] provides the [`Runtime`] to
/// the spirit application. However, this allows reading the parameters of the threadpool (mostly
/// number of threads) from the configuration instead of hardcoding it into the application.
///
/// # Panics
///
/// If this is inserted after something already registered a [`Runtime`].
///
/// # Examples
///
/// ```rust
/// use serde::Deserialize;
/// use spirit::prelude::*;
/// use spirit_tokio::runtime::ThreadPoolConfig;
///
/// #[derive(Debug, Default, Deserialize)]
/// struct Cfg {
/// #[serde(default)] // Allow empty configuration with default runtime
/// threadpool: ThreadPoolConfig,
/// }
///
/// impl Cfg {
/// fn threadpool(&self) -> ThreadPoolConfig {
/// self.threadpool.clone()
/// }
/// }
///
/// fn main() {
/// Spirit::<Empty, Cfg>::new()
/// .with(ThreadPoolConfig::extension(Cfg::threadpool))
/// .run(|_| {
/// // This runs inside a configured runtime
/// Ok(())
/// });
/// }
/// ```
#[derive(
Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize, StructDoc, Ord, PartialOrd, Hash,
)]
#[serde(rename_all = "kebab-case")]
pub struct ThreadPoolConfig {
/// Maximum number of asynchronous worker threads.
///
/// These do most of the work. There's little reason to set it to more than number of CPUs, but
/// it may make sense to set it lower.
///
/// If not set, the application will start with number of CPUs available in the system.
#[serde(skip_serializing_if = "Option::is_none")]
pub async_threads: Option<usize>,
/// Maximum number of blocking worker threads.
///
/// These do tasks that take longer time. This includes file IO and CPU intensive tasks.
///
/// If not set, defaults to 100.
///
/// Often, the application doesn't start these threads as they might not always be needed.
#[serde(skip_serializing_if = "Option::is_none")]
pub blocking_threads: Option<usize>,
#[serde(
skip_serializing_if = "Option::is_none",
serialize_with = "spirit::utils::serialize_opt_duration",
deserialize_with = "spirit::utils::deserialize_opt_duration",
default
)]
/// How long to keep an idle thread around.
///
/// A thread will be shut down if it sits around idle for this long. The default (unset) is
/// never to shut it down.
///
/// Accepts human-parsable times, like „3days“ or „5s“.
pub keep_alive: Option<Duration>,
#[serde(skip)]
_sentinel: (),
}
impl ThreadPoolConfig {
/// The extension to be plugged in with [`with`].
///
/// See the [example](#examples).
///
/// [`with`]: spirit::extension::Extension::with
pub fn extension<O, C, F>(extract: F) -> impl Extension<Builder<O, C>>
where
F: Fn(&C) -> Self + Clone + Send + Sync + 'static,
O: Debug + StructOpt + | Runtime | identifier_name |
runtime.rs | dpListen`]) use this to make sure they have a runtime to handle the sockets
/// on.
///
/// If you prefer to specify configuration of the runtime to use, instead of the default one, you
/// can create an instance of this extension yourself and register it *before registering any socket
/// pipelines*, which will take precedence and the sockets will use the one provided by you. You
/// must register it using the [`with_singleton`] method.
///
/// Similarly, if all the pipelines are registered within the [`run`] method (or generally, after
/// building is done), you need to install this manually *before* doing [`run`].
///
/// Note that the provided closures are `FnMut` mostly because `Box<FnOnce>` doesn't work. They
/// will be called just once, so you can use `Option<T>` inside and consume the value by
/// `take.unwrap()`.
///
/// # Runtime configuration
///
/// You may have noticed the callbacks here don't have access to configuration. If you intend to
/// configure eg. the number of threads from user configuration, use the [`ThreadPoolConfig`]
/// instead.
///
/// # Future compatibility
///
/// More variants may be added into the enum at any time. Such change will not be considered a
/// breaking change.
///
/// # Examples
///
/// ```
/// extern crate failure;
/// extern crate serde;
/// #[macro_use]
/// extern crate serde_derive;
/// extern crate spirit;
/// extern crate spirit_tokio;
/// extern crate tokio;
///
/// use std::sync::Arc;
///
/// use failure::Error;
/// use spirit::prelude::*;
/// use spirit_tokio::{HandleListener, TcpListen};
/// use spirit_tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// #[derive(Default, Deserialize)]
/// struct Config {
/// #[serde(default)]
/// listening_socket: Vec<TcpListen>,
/// }
///
/// impl Config {
/// fn listener(&self) -> Vec<TcpListen> {
/// self.listening_socket.clone()
/// }
/// }
///
/// fn connection() -> impl Future<Item = (), Error = Error> {
/// future::ok(()) // Just a dummy implementation
/// }
///
/// fn main() {
/// Spirit::<Empty, Config>::new()
/// // Uses the current thread runtime instead of the default threadpool. This'll create
/// // smaller number of threads.
/// .with_singleton(Runtime::CurrentThread(Box::new(|_| ())))
/// .with(
/// Pipeline::new("listener")
/// .extract_cfg(Config::listener)
/// .transform(HandleListener(|_conn, _cfg: &_| connection()))
/// )
/// .run(|spirit| {
/// # let spirit = Arc::clone(spirit);
/// # std::thread::spawn(move || spirit.terminate());
/// Ok(())
/// });
/// }
/// ```
///
/// [`TcpListen`]: crate::TcpListen
/// [`UdpListen`]: crate::UdpListen
/// [`FutureInstaller`]: crate::installer::FutureInstaller
/// [`Fragment`]: spirit::Fragment
/// [`run`]: spirit::SpiritBuilder::run
/// [`with_singleton`]: spirit::extension::Extension::with_singleton
pub enum Runtime {
/// Use the threadpool runtime.
///
/// The threadpool runtime is the default (both in tokio and spirit).
///
/// This allows you to modify the builder prior to starting it, specifying custom options like
/// number of threads.
ThreadPool(Box<dyn FnMut(&mut runtime::Builder) + Send>),
/// Use the current thread runtime.
///
/// If you prefer to run everything in a single thread, use this variant. The provided closure
/// can modify the builder prior to starting it.
CurrentThread(Box<dyn FnMut(&mut runtime::current_thread::Builder) + Send>),
/// Use completely custom runtime.
///
/// The provided closure should start the runtime and execute the provided future on it,
/// blocking until the runtime becomes empty.
///
/// This allows combining arbitrary runtimes that are not directly supported by either tokio or
/// spirit.
Custom(Box<dyn FnMut(TokioBody) -> Result<(), Error> + Send>),
#[doc(hidden)]
__NonExhaustive__,
// TODO: Support loading this from configuration? But it won't be possible to modify at
// runtime, will it?
}
impl Default for Runtime {
fn default() -> Self {
Runtime::ThreadPool(Box::new(|_| {}))
}
}
impl Runtime {
fn execute<O, C>(self, spirit: &Arc<Spirit<O, C>>, inner: InnerBody) -> Result<(), Error>
where
C: DeserializeOwned + Send + Sync + 'static,
O: StructOpt + Send + Sync + 'static,
| runtime.block_on(fut)?;
runtime.run().map_err(Error::from)
}
Runtime::Custom(mut callback) => callback(Box::new(fut)),
Runtime::__NonExhaustive__ => unreachable!(),
}
}
}
impl<E> Extension<E> for Runtime
where
E: Extensible<Ok = E>,
E::Config: DeserializeOwned + Send + Sync + 'static,
E::Opts: StructOpt + Send + Sync + 'static,
{
fn apply(self, ext: E) -> Result<E, Error> {
trace!("Wrapping in tokio runtime");
ext.run_around(|spirit, inner| self.execute(spirit, inner))
}
}
/// A configuration extension for the Tokio Threadpool runtime.
///
/// Using the [`extension`][ThreadPoolConfig::extension] or the
/// [`postprocess_extension`][ThreadPoolConfig::postprocess_extension] provides the [`Runtime`] to
/// the spirit application. However, this allows reading the parameters of the threadpool (mostly
/// number of threads) from the configuration instead of hardcoding it into the application.
///
/// # Panics
///
/// If this is inserted after something already registered a [`Runtime`].
///
/// # Examples
///
/// ```rust
/// use serde::Deserialize;
/// use spirit::prelude::*;
/// use spirit_tokio::runtime::ThreadPoolConfig;
///
/// #[derive(Debug, Default, Deserialize)]
/// struct Cfg {
/// #[serde(default)] // Allow empty configuration with default runtime
/// threadpool: ThreadPoolConfig,
/// }
///
/// impl Cfg {
/// fn threadpool(&self) -> ThreadPoolConfig {
/// self.threadpool.clone()
/// }
/// }
///
/// fn main() {
/// Spirit::<Empty, Cfg>::new()
/// .with(ThreadPoolConfig::extension(Cfg::threadpool))
/// .run(|_| {
/// // This runs inside a configured runtime
/// Ok(())
/// });
/// }
/// ```
#[derive(
Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize, StructDoc, Ord, PartialOrd, Hash,
)]
#[serde(rename_all = "kebab-case")]
pub struct ThreadPoolConfig {
/// Maximum number of asynchronous worker threads.
///
/// These do most of the work. There's little reason to set it to more than number of CPUs, but
/// it may make sense to set it lower.
///
/// If not set, the application will start with number of CPUs available in the system.
#[serde(skip_serializing_if = "Option::is_none")]
pub async_threads: Option<usize>,
/// Maximum number of blocking worker threads.
///
/// These do tasks that take longer time. This includes file IO and CPU intensive tasks.
///
/// If not set, defaults to 100.
///
/// Often, the application doesn't start these threads as they might not always be needed.
#[serde(skip_serializing_if = "Option::is_none")]
pub blocking_threads: Option<usize>,
#[serde(
skip_serializing_if = "Option::is_none",
serialize_with = "spirit::utils::serialize_opt_duration",
deserialize_with = "spirit::utils::deserialize_opt_duration",
default
)]
/// How long to keep an idle thread around.
///
/// A thread will be shut down if it sits around idle for this long. The default (unset) is
/// never to shut it down.
///
/// Accepts human-parsable times, like „3days“ or „5s“.
pub keep_alive: Option<Duration>,
#[serde(skip)]
_sentinel: (),
}
impl ThreadPoolConfig {
/// The extension to be plugged in with [`with`].
///
/// See the [example](#examples).
///
/// [`with`]: spirit::extension::Extension::with
pub fn extension<O, C, F>(extract: F) -> impl Extension<Builder<O, C>>
where
F: Fn(&C) -> Self + Clone + Send + Sync + 'static,
O: Debug + StructOpt + | {
let spirit = Arc::clone(spirit);
let fut = future::lazy(move || {
inner.run().map_err(move |e| {
spirit.terminate();
e
})
});
match self {
Runtime::ThreadPool(mut mod_builder) => {
let mut builder = runtime::Builder::new();
mod_builder(&mut builder);
let mut runtime = builder.build()?;
runtime.block_on(fut)?;
runtime.block_on_all(future::lazy(|| Ok(())))
}
Runtime::CurrentThread(mut mod_builder) => {
let mut builder = runtime::current_thread::Builder::new();
mod_builder(&mut builder);
let mut runtime = builder.build()?; | identifier_body |
runtime.rs | dpListen`]) use this to make sure they have a runtime to handle the sockets
/// on.
///
/// If you prefer to specify configuration of the runtime to use, instead of the default one, you
/// can create an instance of this extension yourself and register it *before registering any socket
/// pipelines*, which will take precedence and the sockets will use the one provided by you. You
/// must register it using the [`with_singleton`] method.
///
/// Similarly, if all the pipelines are registered within the [`run`] method (or generally, after
/// building is done), you need to install this manually *before* doing [`run`].
///
/// Note that the provided closures are `FnMut` mostly because `Box<FnOnce>` doesn't work. They
/// will be called just once, so you can use `Option<T>` inside and consume the value by
/// `take.unwrap()`.
///
/// # Runtime configuration
///
/// You may have noticed the callbacks here don't have access to configuration. If you intend to
/// configure eg. the number of threads from user configuration, use the [`ThreadPoolConfig`]
/// instead.
///
/// # Future compatibility
///
/// More variants may be added into the enum at any time. Such change will not be considered a
/// breaking change.
///
/// # Examples
///
/// ```
/// extern crate failure;
/// extern crate serde;
/// #[macro_use]
/// extern crate serde_derive;
/// extern crate spirit;
/// extern crate spirit_tokio;
/// extern crate tokio;
///
/// use std::sync::Arc;
///
/// use failure::Error;
/// use spirit::prelude::*;
/// use spirit_tokio::{HandleListener, TcpListen};
/// use spirit_tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// #[derive(Default, Deserialize)]
/// struct Config {
/// #[serde(default)]
/// listening_socket: Vec<TcpListen>,
/// }
///
/// impl Config {
/// fn listener(&self) -> Vec<TcpListen> {
/// self.listening_socket.clone()
/// }
/// }
///
/// fn connection() -> impl Future<Item = (), Error = Error> {
/// future::ok(()) // Just a dummy implementation
/// }
///
/// fn main() {
/// Spirit::<Empty, Config>::new()
/// // Uses the current thread runtime instead of the default threadpool. This'll create
/// // smaller number of threads.
/// .with_singleton(Runtime::CurrentThread(Box::new(|_| ())))
/// .with(
/// Pipeline::new("listener")
/// .extract_cfg(Config::listener)
/// .transform(HandleListener(|_conn, _cfg: &_| connection()))
/// )
/// .run(|spirit| {
/// # let spirit = Arc::clone(spirit);
/// # std::thread::spawn(move || spirit.terminate());
/// Ok(())
/// });
/// }
/// ```
///
/// [`TcpListen`]: crate::TcpListen
/// [`UdpListen`]: crate::UdpListen
/// [`FutureInstaller`]: crate::installer::FutureInstaller
/// [`Fragment`]: spirit::Fragment
/// [`run`]: spirit::SpiritBuilder::run
/// [`with_singleton`]: spirit::extension::Extension::with_singleton
pub enum Runtime {
/// Use the threadpool runtime.
///
/// The threadpool runtime is the default (both in tokio and spirit).
///
/// This allows you to modify the builder prior to starting it, specifying custom options like
/// number of threads.
ThreadPool(Box<dyn FnMut(&mut runtime::Builder) + Send>),
/// Use the current thread runtime.
///
/// If you prefer to run everything in a single thread, use this variant. The provided closure
/// can modify the builder prior to starting it.
CurrentThread(Box<dyn FnMut(&mut runtime::current_thread::Builder) + Send>),
/// Use completely custom runtime.
///
/// The provided closure should start the runtime and execute the provided future on it,
/// blocking until the runtime becomes empty.
///
/// This allows combining arbitrary runtimes that are not directly supported by either tokio or
/// spirit.
Custom(Box<dyn FnMut(TokioBody) -> Result<(), Error> + Send>),
#[doc(hidden)]
__NonExhaustive__,
// TODO: Support loading this from configuration? But it won't be possible to modify at
// runtime, will it?
}
impl Default for Runtime {
fn default() -> Self {
Runtime::ThreadPool(Box::new(|_| {}))
}
}
impl Runtime {
fn execute<O, C>(self, spirit: &Arc<Spirit<O, C>>, inner: InnerBody) -> Result<(), Error>
where
C: DeserializeOwned + Send + Sync + 'static,
O: StructOpt + Send + Sync + 'static,
{
let spirit = Arc::clone(spirit);
let fut = future::lazy(move || {
inner.run().map_err(move |e| {
spirit.terminate();
e
})
});
match self {
Runtime::ThreadPool(mut mod_builder) => {
let mut builder = runtime::Builder::new();
mod_builder(&mut builder);
let mut runtime = builder.build()?;
runtime.block_on(fut)?;
runtime.block_on_all(future::lazy(|| Ok(())))
}
Runtime::CurrentThread(mut mod_builder) => {
let mut builder = runtime::current_thread::Builder::new();
mod_builder(&mut builder);
let mut runtime = builder.build()?;
runtime.block_on(fut)?;
runtime.run().map_err(Error::from)
}
Runtime::Custom(mut callback) => callback(Box::new(fut)),
Runtime::__NonExhaustive__ => unreachable!(),
}
}
}
impl<E> Extension<E> for Runtime
where
E: Extensible<Ok = E>,
E::Config: DeserializeOwned + Send + Sync + 'static,
E::Opts: StructOpt + Send + Sync + 'static,
{
fn apply(self, ext: E) -> Result<E, Error> {
trace!("Wrapping in tokio runtime");
ext.run_around(|spirit, inner| self.execute(spirit, inner))
}
}
/// A configuration extension for the Tokio Threadpool runtime.
///
/// Using the [`extension`][ThreadPoolConfig::extension] or the
/// [`postprocess_extension`][ThreadPoolConfig::postprocess_extension] provides the [`Runtime`] to
/// the spirit application. However, this allows reading the parameters of the threadpool (mostly
/// number of threads) from the configuration instead of hardcoding it into the application.
///
/// # Panics
///
/// If this is inserted after something already registered a [`Runtime`].
///
/// # Examples
///
/// ```rust
/// use serde::Deserialize;
/// use spirit::prelude::*;
/// use spirit_tokio::runtime::ThreadPoolConfig;
///
/// #[derive(Debug, Default, Deserialize)]
/// struct Cfg {
/// #[serde(default)] // Allow empty configuration with default runtime
/// threadpool: ThreadPoolConfig,
/// }
///
/// impl Cfg {
/// fn threadpool(&self) -> ThreadPoolConfig {
/// self.threadpool.clone()
/// }
/// }
///
/// fn main() {
/// Spirit::<Empty, Cfg>::new()
/// .with(ThreadPoolConfig::extension(Cfg::threadpool))
/// .run(|_| {
/// // This runs inside a configured runtime
/// Ok(())
/// });
/// }
/// ```
#[derive(
Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize, StructDoc, Ord, PartialOrd, Hash,
)]
#[serde(rename_all = "kebab-case")]
pub struct ThreadPoolConfig {
/// Maximum number of asynchronous worker threads.
///
/// These do most of the work. There's little reason to set it to more than number of CPUs, but |
/// Maximum number of blocking worker threads.
///
/// These do tasks that take longer time. This includes file IO and CPU intensive tasks.
///
/// If not set, defaults to 100.
///
/// Often, the application doesn't start these threads as they might not always be needed.
#[serde(skip_serializing_if = "Option::is_none")]
pub blocking_threads: Option<usize>,
#[serde(
skip_serializing_if = "Option::is_none",
serialize_with = "spirit::utils::serialize_opt_duration",
deserialize_with = "spirit::utils::deserialize_opt_duration",
default
)]
/// How long to keep an idle thread around.
///
/// A thread will be shut down if it sits around idle for this long. The default (unset) is
/// never to shut it down.
///
/// Accepts human-parsable times, like „3days“ or „5s“.
pub keep_alive: Option<Duration>,
#[serde(skip)]
_sentinel: (),
}
impl ThreadPoolConfig {
/// The extension to be plugged in with [`with`].
///
/// See the [example](#examples).
///
/// [`with`]: spirit::extension::Extension::with
pub fn extension<O, C, F>(extract: F) -> impl Extension<Builder<O, C>>
where
F: Fn(&C) -> Self + Clone + Send + Sync + 'static,
O: Debug + StructOpt + Send + | /// it may make sense to set it lower.
///
/// If not set, the application will start with number of CPUs available in the system.
#[serde(skip_serializing_if = "Option::is_none")]
pub async_threads: Option<usize>, | random_line_split |
init.rs | , Service>,
}
impl InitServer {
fn new(hostname: &str) -> Result<InitServer> {
Self::check_pid1()?;
let hostname = hostname.to_string();
let cmdline = CmdLine::load()?;
let homedir = cmdline.lookup("phinit.home")
.unwrap_or("/home/user".to_string());
let rootfs = RootFS::load(&cmdline)?;
let services = BTreeMap::new();
Ok(InitServer {
hostname,
homedir,
cmdline,
rootfs,
services,
})
}
pub fn create(hostname: &str) -> Result<InitServer> {
let init = Self::new(hostname)?;
init.initialize()?;
Ok(init)
}
fn initialize(&self) -> Result<()> {
self.set_loglevel();
umask(0);
sethostname(&self.hostname)?;
setsid()?;
set_controlling_tty(0, true)?;
Ok(())
}
fn check_pid1() -> Result<()> {
if getpid() == 1 {
Ok(())
} else {
Err(Error::Pid1)
}
}
fn homedir(&self) -> &str {
&self.homedir
}
pub fn set_loglevel(&self) {
if self.cmdline.has_var("phinit.verbose") {
Logger::set_log_level(LogLevel::Verbose);
} else if self.cmdline.has_var("phinit.debug") {
Logger::set_log_level(LogLevel::Debug);
} else {
Logger::set_log_level(LogLevel::Info);
}
}
pub fn setup_filesystem(&self) -> Result<()> {
sys::set_umask(0o022);
//mount_devtmpfs()?;
mount_tmpfs("/tmp")?;
mkdir("/tmp/sysroot")?;
if self.rootfs.read_only() {
self.setup_readonly_root()?;
} else {
self.setup_writeable_root()?;
}
fs::write("/etc/hosts", format!("127.0.0.1 {} localhost\n", self.hostname))
.map_err(Error::WriteEtcHosts)?;
umount("/opt/ph/tmp")?;
umount("/opt/ph/proc")?;
umount("/opt/ph/dev")?;
mount_sysfs()?;
mount_cgroup()?;
mount_procfs()?;
mount_devtmpfs()?;
mount_devpts()?;
mount_tmpfs("/run")?;
mount_tmpdir("/tmp")?;
mkdir("/dev/shm")?;
mount_tmpdir("/dev/shm")?;
mkdir("/run/user")?;
mkdir("/run/user/1000")?;
chown("/run/user/1000", 1000,1000)?;
AudioSupport::setup()?;
self.mount_home_if_exists()?;
Logger::set_file_output("/run/phinit.log")
.map_err(Error::OpenLogFailed)?;
Ok(())
}
fn setup_readonly_root(&self) -> Result<()> {
create_directories(&[
"/tmp/ro",
"/tmp/rw",
"/tmp/rw/upper",
"/tmp/rw/work",
])?;
mount_tmpfs("/tmp/rw")?;
create_directories(&["/tmp/rw/upper", "/tmp/rw/work"])?;
self.rootfs.mount("/tmp/ro")?;
mount_overlay("/tmp/sysroot",
"lowerdir=/tmp/ro,upperdir=/tmp/rw/upper,workdir=/tmp/rw/work")?;
create_directories(&[
"/tmp/sysroot/ro",
"/tmp/sysroot/rw"
])?;
move_mount("/tmp/ro", "/tmp/sysroot/ro")?;
move_mount("/tmp/rw", "/tmp/sysroot/rw")?;
let toolsdir = Path::new("/tmp/sysroot/opt/ph");
if !toolsdir.exists() {
fs::create_dir_all(toolsdir)
.map_err(|e| Error::MkDir(String::from("/tmp/sysroot/opt/ph"), e))?;
}
pivot_root("/tmp/sysroot", "/tmp/sysroot/opt/ph")?;
Ok(())
}
fn setup_writeable_root(&self) -> Result<()> {
self.rootfs.mount("/tmp/sysroot")?;
let toolsdir = Path::new("/tmp/sysroot/opt/ph");
if !toolsdir.exists() {
fs::create_dir_all(toolsdir)
.map_err(|e| Error::MkDir(String::from("/tmp/sysroot/opt/ph"), e))?;
}
pivot_root("/tmp/sysroot", "/tmp/sysroot/opt/ph")?;
Ok(())
}
fn has_9p_home(&self) -> bool {
// XXX
// /sys/bus/virtio/drivers/9pnet_virtio/virtio*/mount_tag
true
}
pub fn mount_home_if_exists(&self) -> Result<()> {
if self.has_9p_home() {
let homedir = Path::new(self.homedir());
if !homedir.exists() {
mkdir(homedir)?;
}
mount_9p("home", self.homedir())?;
}
Ok(())
}
pub fn run_daemons(&mut self) -> Result<()> {
if !Path::new("/dev/wl0").exists() {
return Ok(());
}
chmod("/dev/wl0", 0o666)?;
let dbus = ServiceLaunch::new("dbus-daemon", "/usr/bin/dbus-daemon")
.base_environment()
.uidgid(1000,1000)
.env("HOME", self.homedir())
.env("NO_AT_BRIDGE", "1")
.env("QT_ACCESSIBILITY", "1")
.env("SHELL", "/bin/bash")
.env("USER", "user")
.env("WAYLAND_DISPLAY", "wayland-0")
.arg("--session")
.arg("--nosyslog")
.arg("--address=unix:path=/run/user/1000/bus")
.arg("--print-address")
.pipe_output()
.launch()?;
self.services.insert(dbus.pid(), dbus);
let sommelier = ServiceLaunch::new("sommelier", "/opt/ph/usr/bin/sommelier")
.base_environment()
.uidgid(1000,1000)
.arg("--parent")
.pipe_output()
.launch()?;
self.services.insert(sommelier.pid(), sommelier);
if self.cmdline.has_var("phinit.no_x11") {
return Ok(());
}
mkdir("/tmp/.X11-unix")?;
chmod("/tmp/.X11-unix", 0o1777)?;
self.write_xauth().map_err(Error::XAuthFail)?;
let sommelierx = ServiceLaunch::new("sommelier-x", "/opt/ph/usr/bin/sommelier")
.base_environment()
.uidgid(1000,1000)
.arg("-X")
.arg("--x-display=0")
.arg("--no-exit-with-child")
.arg(format!("--x-auth={}/.Xauthority", self.homedir()))
.arg("/bin/true")
.pipe_output()
.launch()?;
self.services.insert(sommelierx.pid(), sommelierx);
Ok(())
}
pub fn setup_network(&self) -> Result<()> {
if let Some(val) = self.cmdline.lookup("phinit.ip") {
if let Ok(ip) = Ipv4Addr::from_str(&val) {
self.configure_network(ip)
.map_err(Error::NetworkConfigure)?;
}
sys::bind_mount("/opt/ph/etc/resolv.conf", "/etc/resolv.conf")?;
}
Ok(())
}
fn configure_network(&self, ip: Ipv4Addr) -> netlink::Result<()> |
fn write_xauth(&self) -> io::Result<()> {
let xauth_path = format!("{}/.Xauthority", self.homedir());
let mut randbuf = [0; 16];
let mut file = fs::File::open("/dev/urandom")?;
file.read_exact(&mut randbuf)?;
let mut v: Vec<u8> = Vec::new();
// ???
v.extend_from_slice(&[0x01, 0x00]);
// "airwolf".len()
v.extend_from_slice(&[0x00, 0x07]);
v.extend_from_slice(b"airwolf");
// "0".len() (DISPLAY=:0)
v.extend_from_slice(&[0x00, 0x01]);
v.extend_from_slice(b"0");
// "MIT-MAGIC-COOKIE-a".len()
v.extend_from_slice(&[0x00, 0x12]);
v.extend_from_slice(b"MIT-MAGIC-COOKIE-1");
// randbuf | {
let mut octets = ip.octets();
octets[3] = 1;
let gw = Ipv4Addr::from(octets);
let nl = NetlinkSocket::open()?;
if !nl.interface_exists("eth0") {
}
nl.add_ip_address("eth0", ip, 24)?;
nl.set_interface_up("eth0")?;
nl.add_default_route(gw)?;
Ok(())
} | identifier_body |
init.rs | , Service>,
}
impl InitServer {
fn new(hostname: &str) -> Result<InitServer> {
Self::check_pid1()?;
let hostname = hostname.to_string();
let cmdline = CmdLine::load()?;
let homedir = cmdline.lookup("phinit.home")
.unwrap_or("/home/user".to_string());
let rootfs = RootFS::load(&cmdline)?;
let services = BTreeMap::new();
Ok(InitServer {
hostname,
homedir,
cmdline,
rootfs,
services,
})
}
pub fn create(hostname: &str) -> Result<InitServer> {
let init = Self::new(hostname)?;
init.initialize()?;
Ok(init)
}
fn initialize(&self) -> Result<()> {
self.set_loglevel();
umask(0);
sethostname(&self.hostname)?;
setsid()?;
set_controlling_tty(0, true)?;
Ok(())
}
fn check_pid1() -> Result<()> {
if getpid() == 1 {
Ok(())
} else {
Err(Error::Pid1)
}
}
fn homedir(&self) -> &str {
&self.homedir
}
pub fn set_loglevel(&self) {
if self.cmdline.has_var("phinit.verbose") {
Logger::set_log_level(LogLevel::Verbose);
} else if self.cmdline.has_var("phinit.debug") {
Logger::set_log_level(LogLevel::Debug);
} else |
}
pub fn setup_filesystem(&self) -> Result<()> {
sys::set_umask(0o022);
//mount_devtmpfs()?;
mount_tmpfs("/tmp")?;
mkdir("/tmp/sysroot")?;
if self.rootfs.read_only() {
self.setup_readonly_root()?;
} else {
self.setup_writeable_root()?;
}
fs::write("/etc/hosts", format!("127.0.0.1 {} localhost\n", self.hostname))
.map_err(Error::WriteEtcHosts)?;
umount("/opt/ph/tmp")?;
umount("/opt/ph/proc")?;
umount("/opt/ph/dev")?;
mount_sysfs()?;
mount_cgroup()?;
mount_procfs()?;
mount_devtmpfs()?;
mount_devpts()?;
mount_tmpfs("/run")?;
mount_tmpdir("/tmp")?;
mkdir("/dev/shm")?;
mount_tmpdir("/dev/shm")?;
mkdir("/run/user")?;
mkdir("/run/user/1000")?;
chown("/run/user/1000", 1000,1000)?;
AudioSupport::setup()?;
self.mount_home_if_exists()?;
Logger::set_file_output("/run/phinit.log")
.map_err(Error::OpenLogFailed)?;
Ok(())
}
fn setup_readonly_root(&self) -> Result<()> {
create_directories(&[
"/tmp/ro",
"/tmp/rw",
"/tmp/rw/upper",
"/tmp/rw/work",
])?;
mount_tmpfs("/tmp/rw")?;
create_directories(&["/tmp/rw/upper", "/tmp/rw/work"])?;
self.rootfs.mount("/tmp/ro")?;
mount_overlay("/tmp/sysroot",
"lowerdir=/tmp/ro,upperdir=/tmp/rw/upper,workdir=/tmp/rw/work")?;
create_directories(&[
"/tmp/sysroot/ro",
"/tmp/sysroot/rw"
])?;
move_mount("/tmp/ro", "/tmp/sysroot/ro")?;
move_mount("/tmp/rw", "/tmp/sysroot/rw")?;
let toolsdir = Path::new("/tmp/sysroot/opt/ph");
if !toolsdir.exists() {
fs::create_dir_all(toolsdir)
.map_err(|e| Error::MkDir(String::from("/tmp/sysroot/opt/ph"), e))?;
}
pivot_root("/tmp/sysroot", "/tmp/sysroot/opt/ph")?;
Ok(())
}
fn setup_writeable_root(&self) -> Result<()> {
self.rootfs.mount("/tmp/sysroot")?;
let toolsdir = Path::new("/tmp/sysroot/opt/ph");
if !toolsdir.exists() {
fs::create_dir_all(toolsdir)
.map_err(|e| Error::MkDir(String::from("/tmp/sysroot/opt/ph"), e))?;
}
pivot_root("/tmp/sysroot", "/tmp/sysroot/opt/ph")?;
Ok(())
}
fn has_9p_home(&self) -> bool {
// XXX
// /sys/bus/virtio/drivers/9pnet_virtio/virtio*/mount_tag
true
}
pub fn mount_home_if_exists(&self) -> Result<()> {
if self.has_9p_home() {
let homedir = Path::new(self.homedir());
if !homedir.exists() {
mkdir(homedir)?;
}
mount_9p("home", self.homedir())?;
}
Ok(())
}
pub fn run_daemons(&mut self) -> Result<()> {
if !Path::new("/dev/wl0").exists() {
return Ok(());
}
chmod("/dev/wl0", 0o666)?;
let dbus = ServiceLaunch::new("dbus-daemon", "/usr/bin/dbus-daemon")
.base_environment()
.uidgid(1000,1000)
.env("HOME", self.homedir())
.env("NO_AT_BRIDGE", "1")
.env("QT_ACCESSIBILITY", "1")
.env("SHELL", "/bin/bash")
.env("USER", "user")
.env("WAYLAND_DISPLAY", "wayland-0")
.arg("--session")
.arg("--nosyslog")
.arg("--address=unix:path=/run/user/1000/bus")
.arg("--print-address")
.pipe_output()
.launch()?;
self.services.insert(dbus.pid(), dbus);
let sommelier = ServiceLaunch::new("sommelier", "/opt/ph/usr/bin/sommelier")
.base_environment()
.uidgid(1000,1000)
.arg("--parent")
.pipe_output()
.launch()?;
self.services.insert(sommelier.pid(), sommelier);
if self.cmdline.has_var("phinit.no_x11") {
return Ok(());
}
mkdir("/tmp/.X11-unix")?;
chmod("/tmp/.X11-unix", 0o1777)?;
self.write_xauth().map_err(Error::XAuthFail)?;
let sommelierx = ServiceLaunch::new("sommelier-x", "/opt/ph/usr/bin/sommelier")
.base_environment()
.uidgid(1000,1000)
.arg("-X")
.arg("--x-display=0")
.arg("--no-exit-with-child")
.arg(format!("--x-auth={}/.Xauthority", self.homedir()))
.arg("/bin/true")
.pipe_output()
.launch()?;
self.services.insert(sommelierx.pid(), sommelierx);
Ok(())
}
pub fn setup_network(&self) -> Result<()> {
if let Some(val) = self.cmdline.lookup("phinit.ip") {
if let Ok(ip) = Ipv4Addr::from_str(&val) {
self.configure_network(ip)
.map_err(Error::NetworkConfigure)?;
}
sys::bind_mount("/opt/ph/etc/resolv.conf", "/etc/resolv.conf")?;
}
Ok(())
}
fn configure_network(&self, ip: Ipv4Addr) -> netlink::Result<()> {
let mut octets = ip.octets();
octets[3] = 1;
let gw = Ipv4Addr::from(octets);
let nl = NetlinkSocket::open()?;
if !nl.interface_exists("eth0") {
}
nl.add_ip_address("eth0", ip, 24)?;
nl.set_interface_up("eth0")?;
nl.add_default_route(gw)?;
Ok(())
}
fn write_xauth(&self) -> io::Result<()> {
let xauth_path = format!("{}/.Xauthority", self.homedir());
let mut randbuf = [0; 16];
let mut file = fs::File::open("/dev/urandom")?;
file.read_exact(&mut randbuf)?;
let mut v: Vec<u8> = Vec::new();
// ???
v.extend_from_slice(&[0x01, 0x00]);
// "airwolf".len()
v.extend_from_slice(&[0x00, 0x07]);
v.extend_from_slice(b"airwolf");
// "0".len() (DISPLAY=:0)
v.extend_from_slice(&[0x00, 0x01]);
v.extend_from_slice(b"0");
// "MIT-MAGIC-COOKIE-a".len()
v.extend_from_slice(&[0x00, 0x12]);
v.extend_from_slice(b"MIT-MAGIC-COOKIE-1");
// randbuf | {
Logger::set_log_level(LogLevel::Info);
} | conditional_block |
init.rs | Service>,
}
impl InitServer {
fn new(hostname: &str) -> Result<InitServer> {
Self::check_pid1()?;
let hostname = hostname.to_string();
let cmdline = CmdLine::load()?;
let homedir = cmdline.lookup("phinit.home")
.unwrap_or("/home/user".to_string());
let rootfs = RootFS::load(&cmdline)?;
let services = BTreeMap::new();
Ok(InitServer {
hostname,
homedir,
cmdline,
rootfs,
services,
})
}
pub fn create(hostname: &str) -> Result<InitServer> {
let init = Self::new(hostname)?;
init.initialize()?;
Ok(init)
}
fn initialize(&self) -> Result<()> {
self.set_loglevel();
umask(0);
sethostname(&self.hostname)?;
setsid()?;
set_controlling_tty(0, true)?;
Ok(())
}
fn check_pid1() -> Result<()> {
if getpid() == 1 {
Ok(())
} else {
Err(Error::Pid1)
}
}
fn homedir(&self) -> &str {
&self.homedir
}
pub fn set_loglevel(&self) {
if self.cmdline.has_var("phinit.verbose") {
Logger::set_log_level(LogLevel::Verbose);
} else if self.cmdline.has_var("phinit.debug") {
Logger::set_log_level(LogLevel::Debug);
} else {
Logger::set_log_level(LogLevel::Info);
}
}
pub fn setup_filesystem(&self) -> Result<()> {
sys::set_umask(0o022);
//mount_devtmpfs()?;
mount_tmpfs("/tmp")?;
mkdir("/tmp/sysroot")?;
if self.rootfs.read_only() {
self.setup_readonly_root()?;
} else {
self.setup_writeable_root()?;
}
fs::write("/etc/hosts", format!("127.0.0.1 {} localhost\n", self.hostname))
.map_err(Error::WriteEtcHosts)?;
umount("/opt/ph/tmp")?;
umount("/opt/ph/proc")?;
umount("/opt/ph/dev")?;
mount_sysfs()?;
mount_cgroup()?;
mount_procfs()?;
mount_devtmpfs()?;
mount_devpts()?;
mount_tmpfs("/run")?;
mount_tmpdir("/tmp")?;
mkdir("/dev/shm")?;
mount_tmpdir("/dev/shm")?;
mkdir("/run/user")?;
mkdir("/run/user/1000")?;
chown("/run/user/1000", 1000,1000)?;
AudioSupport::setup()?;
self.mount_home_if_exists()?;
Logger::set_file_output("/run/phinit.log")
.map_err(Error::OpenLogFailed)?;
Ok(())
}
fn setup_readonly_root(&self) -> Result<()> {
create_directories(&[
"/tmp/ro",
"/tmp/rw",
"/tmp/rw/upper",
"/tmp/rw/work",
])?;
mount_tmpfs("/tmp/rw")?;
create_directories(&["/tmp/rw/upper", "/tmp/rw/work"])?;
self.rootfs.mount("/tmp/ro")?;
mount_overlay("/tmp/sysroot",
"lowerdir=/tmp/ro,upperdir=/tmp/rw/upper,workdir=/tmp/rw/work")?;
create_directories(&[
"/tmp/sysroot/ro",
"/tmp/sysroot/rw"
])?;
move_mount("/tmp/ro", "/tmp/sysroot/ro")?;
move_mount("/tmp/rw", "/tmp/sysroot/rw")?;
let toolsdir = Path::new("/tmp/sysroot/opt/ph");
if !toolsdir.exists() {
fs::create_dir_all(toolsdir)
.map_err(|e| Error::MkDir(String::from("/tmp/sysroot/opt/ph"), e))?;
}
pivot_root("/tmp/sysroot", "/tmp/sysroot/opt/ph")?;
Ok(())
}
fn setup_writeable_root(&self) -> Result<()> {
self.rootfs.mount("/tmp/sysroot")?;
let toolsdir = Path::new("/tmp/sysroot/opt/ph");
if !toolsdir.exists() {
fs::create_dir_all(toolsdir)
.map_err(|e| Error::MkDir(String::from("/tmp/sysroot/opt/ph"), e))?;
}
pivot_root("/tmp/sysroot", "/tmp/sysroot/opt/ph")?;
Ok(())
}
fn has_9p_home(&self) -> bool {
// XXX
// /sys/bus/virtio/drivers/9pnet_virtio/virtio*/mount_tag
true
}
pub fn mount_home_if_exists(&self) -> Result<()> {
if self.has_9p_home() {
let homedir = Path::new(self.homedir());
if !homedir.exists() {
mkdir(homedir)?;
}
mount_9p("home", self.homedir())?;
}
Ok(())
}
pub fn run_daemons(&mut self) -> Result<()> {
if !Path::new("/dev/wl0").exists() {
return Ok(()); |
chmod("/dev/wl0", 0o666)?;
let dbus = ServiceLaunch::new("dbus-daemon", "/usr/bin/dbus-daemon")
.base_environment()
.uidgid(1000,1000)
.env("HOME", self.homedir())
.env("NO_AT_BRIDGE", "1")
.env("QT_ACCESSIBILITY", "1")
.env("SHELL", "/bin/bash")
.env("USER", "user")
.env("WAYLAND_DISPLAY", "wayland-0")
.arg("--session")
.arg("--nosyslog")
.arg("--address=unix:path=/run/user/1000/bus")
.arg("--print-address")
.pipe_output()
.launch()?;
self.services.insert(dbus.pid(), dbus);
let sommelier = ServiceLaunch::new("sommelier", "/opt/ph/usr/bin/sommelier")
.base_environment()
.uidgid(1000,1000)
.arg("--parent")
.pipe_output()
.launch()?;
self.services.insert(sommelier.pid(), sommelier);
if self.cmdline.has_var("phinit.no_x11") {
return Ok(());
}
mkdir("/tmp/.X11-unix")?;
chmod("/tmp/.X11-unix", 0o1777)?;
self.write_xauth().map_err(Error::XAuthFail)?;
let sommelierx = ServiceLaunch::new("sommelier-x", "/opt/ph/usr/bin/sommelier")
.base_environment()
.uidgid(1000,1000)
.arg("-X")
.arg("--x-display=0")
.arg("--no-exit-with-child")
.arg(format!("--x-auth={}/.Xauthority", self.homedir()))
.arg("/bin/true")
.pipe_output()
.launch()?;
self.services.insert(sommelierx.pid(), sommelierx);
Ok(())
}
pub fn setup_network(&self) -> Result<()> {
if let Some(val) = self.cmdline.lookup("phinit.ip") {
if let Ok(ip) = Ipv4Addr::from_str(&val) {
self.configure_network(ip)
.map_err(Error::NetworkConfigure)?;
}
sys::bind_mount("/opt/ph/etc/resolv.conf", "/etc/resolv.conf")?;
}
Ok(())
}
fn configure_network(&self, ip: Ipv4Addr) -> netlink::Result<()> {
let mut octets = ip.octets();
octets[3] = 1;
let gw = Ipv4Addr::from(octets);
let nl = NetlinkSocket::open()?;
if !nl.interface_exists("eth0") {
}
nl.add_ip_address("eth0", ip, 24)?;
nl.set_interface_up("eth0")?;
nl.add_default_route(gw)?;
Ok(())
}
fn write_xauth(&self) -> io::Result<()> {
let xauth_path = format!("{}/.Xauthority", self.homedir());
let mut randbuf = [0; 16];
let mut file = fs::File::open("/dev/urandom")?;
file.read_exact(&mut randbuf)?;
let mut v: Vec<u8> = Vec::new();
// ???
v.extend_from_slice(&[0x01, 0x00]);
// "airwolf".len()
v.extend_from_slice(&[0x00, 0x07]);
v.extend_from_slice(b"airwolf");
// "0".len() (DISPLAY=:0)
v.extend_from_slice(&[0x00, 0x01]);
v.extend_from_slice(b"0");
// "MIT-MAGIC-COOKIE-a".len()
v.extend_from_slice(&[0x00, 0x12]);
v.extend_from_slice(b"MIT-MAGIC-COOKIE-1");
// randbuf.len()
| } | random_line_split |
init.rs | , Service>,
}
impl InitServer {
fn new(hostname: &str) -> Result<InitServer> {
Self::check_pid1()?;
let hostname = hostname.to_string();
let cmdline = CmdLine::load()?;
let homedir = cmdline.lookup("phinit.home")
.unwrap_or("/home/user".to_string());
let rootfs = RootFS::load(&cmdline)?;
let services = BTreeMap::new();
Ok(InitServer {
hostname,
homedir,
cmdline,
rootfs,
services,
})
}
pub fn create(hostname: &str) -> Result<InitServer> {
let init = Self::new(hostname)?;
init.initialize()?;
Ok(init)
}
fn initialize(&self) -> Result<()> {
self.set_loglevel();
umask(0);
sethostname(&self.hostname)?;
setsid()?;
set_controlling_tty(0, true)?;
Ok(())
}
fn check_pid1() -> Result<()> {
if getpid() == 1 {
Ok(())
} else {
Err(Error::Pid1)
}
}
fn homedir(&self) -> &str {
&self.homedir
}
pub fn set_loglevel(&self) {
if self.cmdline.has_var("phinit.verbose") {
Logger::set_log_level(LogLevel::Verbose);
} else if self.cmdline.has_var("phinit.debug") {
Logger::set_log_level(LogLevel::Debug);
} else {
Logger::set_log_level(LogLevel::Info);
}
}
pub fn | (&self) -> Result<()> {
sys::set_umask(0o022);
//mount_devtmpfs()?;
mount_tmpfs("/tmp")?;
mkdir("/tmp/sysroot")?;
if self.rootfs.read_only() {
self.setup_readonly_root()?;
} else {
self.setup_writeable_root()?;
}
fs::write("/etc/hosts", format!("127.0.0.1 {} localhost\n", self.hostname))
.map_err(Error::WriteEtcHosts)?;
umount("/opt/ph/tmp")?;
umount("/opt/ph/proc")?;
umount("/opt/ph/dev")?;
mount_sysfs()?;
mount_cgroup()?;
mount_procfs()?;
mount_devtmpfs()?;
mount_devpts()?;
mount_tmpfs("/run")?;
mount_tmpdir("/tmp")?;
mkdir("/dev/shm")?;
mount_tmpdir("/dev/shm")?;
mkdir("/run/user")?;
mkdir("/run/user/1000")?;
chown("/run/user/1000", 1000,1000)?;
AudioSupport::setup()?;
self.mount_home_if_exists()?;
Logger::set_file_output("/run/phinit.log")
.map_err(Error::OpenLogFailed)?;
Ok(())
}
fn setup_readonly_root(&self) -> Result<()> {
create_directories(&[
"/tmp/ro",
"/tmp/rw",
"/tmp/rw/upper",
"/tmp/rw/work",
])?;
mount_tmpfs("/tmp/rw")?;
create_directories(&["/tmp/rw/upper", "/tmp/rw/work"])?;
self.rootfs.mount("/tmp/ro")?;
mount_overlay("/tmp/sysroot",
"lowerdir=/tmp/ro,upperdir=/tmp/rw/upper,workdir=/tmp/rw/work")?;
create_directories(&[
"/tmp/sysroot/ro",
"/tmp/sysroot/rw"
])?;
move_mount("/tmp/ro", "/tmp/sysroot/ro")?;
move_mount("/tmp/rw", "/tmp/sysroot/rw")?;
let toolsdir = Path::new("/tmp/sysroot/opt/ph");
if !toolsdir.exists() {
fs::create_dir_all(toolsdir)
.map_err(|e| Error::MkDir(String::from("/tmp/sysroot/opt/ph"), e))?;
}
pivot_root("/tmp/sysroot", "/tmp/sysroot/opt/ph")?;
Ok(())
}
fn setup_writeable_root(&self) -> Result<()> {
self.rootfs.mount("/tmp/sysroot")?;
let toolsdir = Path::new("/tmp/sysroot/opt/ph");
if !toolsdir.exists() {
fs::create_dir_all(toolsdir)
.map_err(|e| Error::MkDir(String::from("/tmp/sysroot/opt/ph"), e))?;
}
pivot_root("/tmp/sysroot", "/tmp/sysroot/opt/ph")?;
Ok(())
}
fn has_9p_home(&self) -> bool {
// XXX
// /sys/bus/virtio/drivers/9pnet_virtio/virtio*/mount_tag
true
}
pub fn mount_home_if_exists(&self) -> Result<()> {
if self.has_9p_home() {
let homedir = Path::new(self.homedir());
if !homedir.exists() {
mkdir(homedir)?;
}
mount_9p("home", self.homedir())?;
}
Ok(())
}
pub fn run_daemons(&mut self) -> Result<()> {
if !Path::new("/dev/wl0").exists() {
return Ok(());
}
chmod("/dev/wl0", 0o666)?;
let dbus = ServiceLaunch::new("dbus-daemon", "/usr/bin/dbus-daemon")
.base_environment()
.uidgid(1000,1000)
.env("HOME", self.homedir())
.env("NO_AT_BRIDGE", "1")
.env("QT_ACCESSIBILITY", "1")
.env("SHELL", "/bin/bash")
.env("USER", "user")
.env("WAYLAND_DISPLAY", "wayland-0")
.arg("--session")
.arg("--nosyslog")
.arg("--address=unix:path=/run/user/1000/bus")
.arg("--print-address")
.pipe_output()
.launch()?;
self.services.insert(dbus.pid(), dbus);
let sommelier = ServiceLaunch::new("sommelier", "/opt/ph/usr/bin/sommelier")
.base_environment()
.uidgid(1000,1000)
.arg("--parent")
.pipe_output()
.launch()?;
self.services.insert(sommelier.pid(), sommelier);
if self.cmdline.has_var("phinit.no_x11") {
return Ok(());
}
mkdir("/tmp/.X11-unix")?;
chmod("/tmp/.X11-unix", 0o1777)?;
self.write_xauth().map_err(Error::XAuthFail)?;
let sommelierx = ServiceLaunch::new("sommelier-x", "/opt/ph/usr/bin/sommelier")
.base_environment()
.uidgid(1000,1000)
.arg("-X")
.arg("--x-display=0")
.arg("--no-exit-with-child")
.arg(format!("--x-auth={}/.Xauthority", self.homedir()))
.arg("/bin/true")
.pipe_output()
.launch()?;
self.services.insert(sommelierx.pid(), sommelierx);
Ok(())
}
pub fn setup_network(&self) -> Result<()> {
if let Some(val) = self.cmdline.lookup("phinit.ip") {
if let Ok(ip) = Ipv4Addr::from_str(&val) {
self.configure_network(ip)
.map_err(Error::NetworkConfigure)?;
}
sys::bind_mount("/opt/ph/etc/resolv.conf", "/etc/resolv.conf")?;
}
Ok(())
}
fn configure_network(&self, ip: Ipv4Addr) -> netlink::Result<()> {
let mut octets = ip.octets();
octets[3] = 1;
let gw = Ipv4Addr::from(octets);
let nl = NetlinkSocket::open()?;
if !nl.interface_exists("eth0") {
}
nl.add_ip_address("eth0", ip, 24)?;
nl.set_interface_up("eth0")?;
nl.add_default_route(gw)?;
Ok(())
}
fn write_xauth(&self) -> io::Result<()> {
let xauth_path = format!("{}/.Xauthority", self.homedir());
let mut randbuf = [0; 16];
let mut file = fs::File::open("/dev/urandom")?;
file.read_exact(&mut randbuf)?;
let mut v: Vec<u8> = Vec::new();
// ???
v.extend_from_slice(&[0x01, 0x00]);
// "airwolf".len()
v.extend_from_slice(&[0x00, 0x07]);
v.extend_from_slice(b"airwolf");
// "0".len() (DISPLAY=:0)
v.extend_from_slice(&[0x00, 0x01]);
v.extend_from_slice(b"0");
// "MIT-MAGIC-COOKIE-a".len()
v.extend_from_slice(&[0x00, 0x12]);
v.extend_from_slice(b"MIT-MAGIC-COOKIE-1");
// randbuf.len | setup_filesystem | identifier_name |
main.go | add channel error")
}
}
common.Band = bandConfig
common.BandName = band.Name(c.String("band"))
return nil
}
func setDeduplicationDelay(c *cli.Context) error {
common.DeduplicationDelay = c.Duration("deduplication-delay")
return nil
}
func setGetDownlinkDataDelay(c *cli.Context) error {
common.GetDownlinkDataDelay = c.Duration("get-downlink-data-delay")
return nil
}
func setCreateGatewayOnStats(c *cli.Context) error {
common.CreateGatewayOnStats = c.Bool("gw-create-on-stats")
return nil
}
func setNodeSessionTTL(c *cli.Context) error {
common.NodeSessionTTL = c.Duration("node-session-ttl")
return nil
}
func setLogNodeFrames(c *cli.Context) error {
common.LogNodeFrames = c.Bool("log-node-frames")
return nil
}
func setGatewayServerJWTSecret(c *cli.Context) error {
common.GatewayServerJWTSecret = c.String("gw-server-jwt-secret")
return nil
}
func setStatsAggregationIntervals(c *cli.Context) error {
// get the gw stats aggregation intervals
gateway.MustSetStatsAggregationIntervals(strings.Split(c.String("gw-stats-aggregation-intervals"), ","))
return nil
}
func setTimezone(c *cli.Context) error {
// get the timezone
if c.String("timezone") != "" {
l, err := time.LoadLocation(c.String("timezone"))
if err != nil {
return errors.Wrap(err, "load timezone location error")
}
common.TimeLocation = l
}
return nil
}
func printStartMessage(c *cli.Context) error {
log.WithFields(log.Fields{
"version": version,
"net_id": common.NetID.String(),
"band": c.String("band"),
"docs": "https://docs.loraserver.io/",
}).Info("starting LoRa Server")
return nil
}
func enableUplinkChannels(c *cli.Context) error {
if c.String("enable-uplink-channels") == "" {
return nil
}
log.Info("disabling all channels")
for _, c := range common.Band.GetEnabledUplinkChannels() {
if err := common.Band.DisableUplinkChannel(c); err != nil {
return errors.Wrap(err, "disable uplink channel error")
}
}
blocks := strings.Split(c.String("enable-uplink-channels"), ",")
for _, block := range blocks {
block = strings.Trim(block, " ")
var start, end int
if _, err := fmt.Sscanf(block, "%d-%d", &start, &end); err != nil {
if _, err := fmt.Sscanf(block, "%d", &start); err != nil {
return errors.Wrap(err, "parse channel range error")
}
end = start
}
log.WithFields(log.Fields{
"first_channel": start,
"last_channel": end,
}).Info("enabling channel block")
for ; start <= end; start++ {
if err := common.Band.EnableUplinkChannel(start); err != nil {
errors.Wrap(err, "enable uplink channel error")
}
}
}
return nil
}
func setRedisPool(c *cli.Context) error {
log.WithField("url", c.String("redis-url")).Info("setup redis connection pool")
common.RedisPool = common.NewRedisPool(c.String("redis-url"))
return nil
}
func setPostgreSQLConnection(c *cli.Context) error {
log.Info("connecting to postgresql")
db, err := common.OpenDatabase(c.String("postgres-dsn"))
if err != nil {
return errors.Wrap(err, "database connection error")
}
common.DB = db
return nil
}
func setGatewayBackend(c *cli.Context) error {
gw, err := gwBackend.NewBackend(c.String("gw-mqtt-server"), c.String("gw-mqtt-username"), c.String("gw-mqtt-password"), c.String("gw-mqtt-ca-cert"))
if err != nil {
return errors.Wrap(err, "gateway-backend setup failed")
}
common.Gateway = gw
return nil
}
func setApplicationServer(c *cli.Context) error {
log.WithFields(log.Fields{
"server": c.String("as-server"),
"ca-cert": c.String("as-ca-cert"),
"tls-cert": c.String("as-tls-cert"),
"tls-key": c.String("as-tls-key"),
}).Info("connecting to application-server")
var asDialOptions []grpc.DialOption
if c.String("as-tls-cert") != "" && c.String("as-tls-key") != "" {
asDialOptions = append(asDialOptions, grpc.WithTransportCredentials(
mustGetTransportCredentials(c.String("as-tls-cert"), c.String("as-tls-key"), c.String("as-ca-cert"), false),
))
} else {
asDialOptions = append(asDialOptions, grpc.WithInsecure())
}
asConn, err := grpc.Dial(c.String("as-server"), asDialOptions...)
if err != nil {
return errors.Wrap(err, "application-server dial error")
}
common.Application = as.NewApplicationServerClient(asConn)
return nil
}
func setNetworkController(c *cli.Context) error {
var ncClient nc.NetworkControllerClient
if c.String("nc-server") != "" {
// setup network-controller client
log.WithFields(log.Fields{
"server": c.String("nc-server"),
"ca-cert": c.String("nc-ca-cert"),
"tls-cert": c.String("nc-tls-cert"),
"tls-key": c.String("nc-tls-key"),
}).Info("connecting to network-controller")
var ncDialOptions []grpc.DialOption
if c.String("nc-tls-cert") != "" && c.String("nc-tls-key") != "" {
ncDialOptions = append(ncDialOptions, grpc.WithTransportCredentials(
mustGetTransportCredentials(c.String("nc-tls-cert"), c.String("nc-tls-key"), c.String("nc-ca-cert"), false),
))
} else {
ncDialOptions = append(ncDialOptions, grpc.WithInsecure())
}
ncConn, err := grpc.Dial(c.String("nc-server"), ncDialOptions...)
if err != nil {
return errors.Wrap(err, "network-controller dial error")
}
ncClient = nc.NewNetworkControllerClient(ncConn)
} else {
log.Info("no network-controller configured")
ncClient = &controller.NopNetworkControllerClient{}
}
common.Controller = ncClient
return nil
}
func runDatabaseMigrations(c *cli.Context) error {
if c.Bool("db-automigrate") {
log.Info("applying database migrations")
m := &migrate.AssetMigrationSource{
Asset: migrations.Asset,
AssetDir: migrations.AssetDir,
Dir: "",
}
n, err := migrate.Exec(common.DB.DB, "postgres", m, migrate.Up)
if err != nil {
return errors.Wrap(err, "applying migrations failed")
}
log.WithField("count", n).Info("migrations applied")
}
return nil
}
func startAPIServer(c *cli.Context) error {
log.WithFields(log.Fields{
"bind": c.String("bind"),
"ca-cert": c.String("ca-cert"),
"tls-cert": c.String("tls-cert"),
"tls-key": c.String("tls-key"),
}).Info("starting api server")
var opts []grpc.ServerOption
if c.String("tls-cert") != "" && c.String("tls-key") != "" |
gs := grpc.NewServer(opts...)
nsAPI := api.NewNetworkServerAPI()
ns.RegisterNetworkServerServer(gs, nsAPI)
ln, err := net.Listen("tcp", c.String("bind"))
if err != nil {
return errors.Wrap(err, "start api listener error")
}
go gs.Serve(ln)
return nil
}
func startGatewayAPIServer(c *cli.Context) error {
log.WithFields(log.Fields{
"bind": c.String("gw-server-bind"),
"ca-cert": c.String("gw-server-ca-cert"),
"tls-cert": c.String("gw-server-tls-cert"),
"tls-key": c.String("gw-server-tls-key"),
}).Info("starting gateway api server")
var validator auth.Validator
if c.String("gw-server-jwt-secret") != "" {
validator = auth.NewJWTValidator("HS256", c.String("gw-server-jwt-secret"))
} else {
return errors.New("--gw-server-jwt-secret must be set")
}
var opts []grpc.ServerOption
if c.String("gw-server-tls-cert") != "" && c.String("gw-server-tls-key") != "" {
creds := mustGetTransportCredentials(c.String("gw-server-tls-cert"), c.String("gw-server-tls-key"), c.String("gw-server-ca-cert"), false)
opts = append(opts, grpc.Creds(creds))
}
gs := grpc.NewServer(opts...)
gwAPI := api.NewGatewayAPI(validator)
gw.RegisterGateway | {
creds := mustGetTransportCredentials(c.String("tls-cert"), c.String("tls-key"), c.String("ca-cert"), false)
opts = append(opts, grpc.Creds(creds))
} | conditional_block |
main.go | "github.com/brocaar/lorawan"
"github.com/brocaar/lorawan/band"
)
func init() {
grpclog.SetLogger(log.StandardLogger())
}
var version string // set by the compiler
var bands = []string{
string(band.AS_923),
string(band.AU_915_928),
string(band.CN_470_510),
string(band.CN_779_787),
string(band.EU_433),
string(band.EU_863_870),
string(band.IN_865_867),
string(band.KR_920_923),
string(band.US_902_928),
}
func run(c *cli.Context) error {
var server *uplink.Server
var gwStats *gateway.StatsHandler
tasks := []func(*cli.Context) error{
setNetID,
setBandConfig,
setDeduplicationDelay,
setGetDownlinkDataDelay,
setCreateGatewayOnStats,
setNodeSessionTTL,
setLogNodeFrames,
setGatewayServerJWTSecret,
setStatsAggregationIntervals,
setTimezone,
printStartMessage,
enableUplinkChannels,
setRedisPool,
setPostgreSQLConnection,
setGatewayBackend,
setApplicationServer,
setNetworkController,
runDatabaseMigrations,
startAPIServer,
startGatewayAPIServer,
startLoRaServer(server),
startStatsServer(gwStats),
}
for _, t := range tasks {
if err := t(c); err != nil {
log.Fatal(err)
}
}
sigChan := make(chan os.Signal)
exitChan := make(chan struct{})
signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM)
log.WithField("signal", <-sigChan).Info("signal received")
go func() {
log.Warning("stopping loraserver")
if err := server.Stop(); err != nil {
log.Fatal(err)
}
if err := gwStats.Stop(); err != nil {
log.Fatal(err)
}
exitChan <- struct{}{}
}()
select {
case <-exitChan:
case s := <-sigChan:
log.WithField("signal", s).Info("signal received, stopping immediately")
}
return nil
}
func setNetID(c *cli.Context) error {
var netID lorawan.NetID
if err := netID.UnmarshalText([]byte(c.String("net-id"))); err != nil {
return errors.Wrap(err, "NetID parse error")
}
common.NetID = netID
return nil
}
func setBandConfig(c *cli.Context) error {
if c.String("band") == "" {
return fmt.Errorf("--band is undefined, valid options are: %s", strings.Join(bands, ", "))
}
dwellTime := lorawan.DwellTimeNoLimit
if c.Bool("band-dwell-time-400ms") {
dwellTime = lorawan.DwellTime400ms
}
bandConfig, err := band.GetConfig(band.Name(c.String("band")), c.Bool("band-repeater-compatible"), dwellTime)
if err != nil {
return errors.Wrap(err, "get band config error")
}
for _, f := range c.IntSlice("extra-frequencies") {
if err := bandConfig.AddChannel(f); err != nil {
return errors.Wrap(err, "add channel error")
}
}
common.Band = bandConfig
common.BandName = band.Name(c.String("band"))
return nil
}
func setDeduplicationDelay(c *cli.Context) error {
common.DeduplicationDelay = c.Duration("deduplication-delay")
return nil
}
func setGetDownlinkDataDelay(c *cli.Context) error {
common.GetDownlinkDataDelay = c.Duration("get-downlink-data-delay")
return nil
}
func setCreateGatewayOnStats(c *cli.Context) error {
common.CreateGatewayOnStats = c.Bool("gw-create-on-stats")
return nil
}
func setNodeSessionTTL(c *cli.Context) error {
common.NodeSessionTTL = c.Duration("node-session-ttl")
return nil
}
func setLogNodeFrames(c *cli.Context) error {
common.LogNodeFrames = c.Bool("log-node-frames")
return nil
}
func setGatewayServerJWTSecret(c *cli.Context) error {
common.GatewayServerJWTSecret = c.String("gw-server-jwt-secret")
return nil
}
func setStatsAggregationIntervals(c *cli.Context) error {
// get the gw stats aggregation intervals
gateway.MustSetStatsAggregationIntervals(strings.Split(c.String("gw-stats-aggregation-intervals"), ","))
return nil
}
func setTimezone(c *cli.Context) error {
// get the timezone
if c.String("timezone") != "" {
l, err := time.LoadLocation(c.String("timezone"))
if err != nil {
return errors.Wrap(err, "load timezone location error")
}
common.TimeLocation = l
}
return nil
}
func printStartMessage(c *cli.Context) error {
log.WithFields(log.Fields{
"version": version,
"net_id": common.NetID.String(),
"band": c.String("band"),
"docs": "https://docs.loraserver.io/",
}).Info("starting LoRa Server")
return nil
}
func enableUplinkChannels(c *cli.Context) error {
if c.String("enable-uplink-channels") == "" {
return nil
}
log.Info("disabling all channels")
for _, c := range common.Band.GetEnabledUplinkChannels() {
if err := common.Band.DisableUplinkChannel(c); err != nil {
return errors.Wrap(err, "disable uplink channel error")
}
}
blocks := strings.Split(c.String("enable-uplink-channels"), ",")
for _, block := range blocks {
block = strings.Trim(block, " ")
var start, end int
if _, err := fmt.Sscanf(block, "%d-%d", &start, &end); err != nil {
if _, err := fmt.Sscanf(block, "%d", &start); err != nil {
return errors.Wrap(err, "parse channel range error")
}
end = start
}
log.WithFields(log.Fields{
"first_channel": start,
"last_channel": end,
}).Info("enabling channel block")
for ; start <= end; start++ {
if err := common.Band.EnableUplinkChannel(start); err != nil {
errors.Wrap(err, "enable uplink channel error")
}
}
}
return nil
}
func setRedisPool(c *cli.Context) error {
log.WithField("url", c.String("redis-url")).Info("setup redis connection pool")
common.RedisPool = common.NewRedisPool(c.String("redis-url"))
return nil
}
func setPostgreSQLConnection(c *cli.Context) error {
log.Info("connecting to postgresql")
db, err := common.OpenDatabase(c.String("postgres-dsn"))
if err != nil {
return errors.Wrap(err, "database connection error")
}
common.DB = db
return nil
}
func setGatewayBackend(c *cli.Context) error {
gw, err := gwBackend.NewBackend(c.String("gw-mqtt-server"), c.String("gw-mqtt-username"), c.String("gw-mqtt-password"), c.String("gw-mqtt-ca-cert"))
if err != nil {
return errors.Wrap(err, "gateway-backend setup failed")
}
common.Gateway = gw
return nil
}
func setApplicationServer(c *cli.Context) error {
log.WithFields(log.Fields{
"server": c.String("as-server"),
"ca-cert": c.String("as-ca-cert"),
"tls-cert": c.String("as-tls-cert"),
"tls-key": c.String("as-tls-key"),
}).Info("connecting to application-server")
var asDialOptions []grpc.DialOption
if c.String("as-tls-cert") != "" && c.String("as-tls-key") != "" {
asDialOptions = append(asDialOptions, grpc.WithTransportCredentials(
mustGetTransportCredentials(c.String("as-tls-cert"), c.String("as-tls-key"), c.String("as-ca-cert"), false),
))
} else {
asDialOptions = append(asDialOptions, grpc.WithInsecure())
}
asConn, err := grpc.Dial(c.String("as-server"), asDialOptions...)
if err != nil {
return errors.Wrap(err, "application-server dial error")
}
common.Application = as.NewApplicationServerClient(asConn)
return nil
}
func setNetworkController(c *cli.Context) error {
var ncClient nc.NetworkControllerClient
if c.String("nc-server") != "" {
// setup network-controller client
log.WithFields(log.Fields{
"server": c.String("nc-server"),
"ca-cert": c.String("nc-ca-cert"),
"tls-cert": c.String("nc-tls-cert"),
" | "github.com/brocaar/loraserver/internal/common"
"github.com/brocaar/loraserver/internal/migrations"
// TODO: merge backend/gateway into internal/gateway?
"github.com/brocaar/loraserver/internal/gateway"
"github.com/brocaar/loraserver/internal/uplink" | random_line_split | |
main.go |
func setBandConfig(c *cli.Context) error {
if c.String("band") == "" {
return fmt.Errorf("--band is undefined, valid options are: %s", strings.Join(bands, ", "))
}
dwellTime := lorawan.DwellTimeNoLimit
if c.Bool("band-dwell-time-400ms") {
dwellTime = lorawan.DwellTime400ms
}
bandConfig, err := band.GetConfig(band.Name(c.String("band")), c.Bool("band-repeater-compatible"), dwellTime)
if err != nil {
return errors.Wrap(err, "get band config error")
}
for _, f := range c.IntSlice("extra-frequencies") {
if err := bandConfig.AddChannel(f); err != nil {
return errors.Wrap(err, "add channel error")
}
}
common.Band = bandConfig
common.BandName = band.Name(c.String("band"))
return nil
}
func setDeduplicationDelay(c *cli.Context) error {
common.DeduplicationDelay = c.Duration("deduplication-delay")
return nil
}
func setGetDownlinkDataDelay(c *cli.Context) error {
common.GetDownlinkDataDelay = c.Duration("get-downlink-data-delay")
return nil
}
func setCreateGatewayOnStats(c *cli.Context) error {
common.CreateGatewayOnStats = c.Bool("gw-create-on-stats")
return nil
}
func setNodeSessionTTL(c *cli.Context) error {
common.NodeSessionTTL = c.Duration("node-session-ttl")
return nil
}
func setLogNodeFrames(c *cli.Context) error {
common.LogNodeFrames = c.Bool("log-node-frames")
return nil
}
func setGatewayServerJWTSecret(c *cli.Context) error {
common.GatewayServerJWTSecret = c.String("gw-server-jwt-secret")
return nil
}
func setStatsAggregationIntervals(c *cli.Context) error {
// get the gw stats aggregation intervals
gateway.MustSetStatsAggregationIntervals(strings.Split(c.String("gw-stats-aggregation-intervals"), ","))
return nil
}
func setTimezone(c *cli.Context) error {
// get the timezone
if c.String("timezone") != "" {
l, err := time.LoadLocation(c.String("timezone"))
if err != nil {
return errors.Wrap(err, "load timezone location error")
}
common.TimeLocation = l
}
return nil
}
func printStartMessage(c *cli.Context) error {
log.WithFields(log.Fields{
"version": version,
"net_id": common.NetID.String(),
"band": c.String("band"),
"docs": "https://docs.loraserver.io/",
}).Info("starting LoRa Server")
return nil
}
func enableUplinkChannels(c *cli.Context) error {
if c.String("enable-uplink-channels") == "" {
return nil
}
log.Info("disabling all channels")
for _, c := range common.Band.GetEnabledUplinkChannels() {
if err := common.Band.DisableUplinkChannel(c); err != nil {
return errors.Wrap(err, "disable uplink channel error")
}
}
blocks := strings.Split(c.String("enable-uplink-channels"), ",")
for _, block := range blocks {
block = strings.Trim(block, " ")
var start, end int
if _, err := fmt.Sscanf(block, "%d-%d", &start, &end); err != nil {
if _, err := fmt.Sscanf(block, "%d", &start); err != nil {
return errors.Wrap(err, "parse channel range error")
}
end = start
}
log.WithFields(log.Fields{
"first_channel": start,
"last_channel": end,
}).Info("enabling channel block")
for ; start <= end; start++ {
if err := common.Band.EnableUplinkChannel(start); err != nil {
errors.Wrap(err, "enable uplink channel error")
}
}
}
return nil
}
func setRedisPool(c *cli.Context) error {
log.WithField("url", c.String("redis-url")).Info("setup redis connection pool")
common.RedisPool = common.NewRedisPool(c.String("redis-url"))
return nil
}
func setPostgreSQLConnection(c *cli.Context) error {
log.Info("connecting to postgresql")
db, err := common.OpenDatabase(c.String("postgres-dsn"))
if err != nil {
return errors.Wrap(err, "database connection error")
}
common.DB = db
return nil
}
func setGatewayBackend(c *cli.Context) error {
gw, err := gwBackend.NewBackend(c.String("gw-mqtt-server"), c.String("gw-mqtt-username"), c.String("gw-mqtt-password"), c.String("gw-mqtt-ca-cert"))
if err != nil {
return errors.Wrap(err, "gateway-backend setup failed")
}
common.Gateway = gw
return nil
}
func setApplicationServer(c *cli.Context) error {
log.WithFields(log.Fields{
"server": c.String("as-server"),
"ca-cert": c.String("as-ca-cert"),
"tls-cert": c.String("as-tls-cert"),
"tls-key": c.String("as-tls-key"),
}).Info("connecting to application-server")
var asDialOptions []grpc.DialOption
if c.String("as-tls-cert") != "" && c.String("as-tls-key") != "" {
asDialOptions = append(asDialOptions, grpc.WithTransportCredentials(
mustGetTransportCredentials(c.String("as-tls-cert"), c.String("as-tls-key"), c.String("as-ca-cert"), false),
))
} else {
asDialOptions = append(asDialOptions, grpc.WithInsecure())
}
asConn, err := grpc.Dial(c.String("as-server"), asDialOptions...)
if err != nil {
return errors.Wrap(err, "application-server dial error")
}
common.Application = as.NewApplicationServerClient(asConn)
return nil
}
func setNetworkController(c *cli.Context) error {
var ncClient nc.NetworkControllerClient
if c.String("nc-server") != "" {
// setup network-controller client
log.WithFields(log.Fields{
"server": c.String("nc-server"),
"ca-cert": c.String("nc-ca-cert"),
"tls-cert": c.String("nc-tls-cert"),
"tls-key": c.String("nc-tls-key"),
}).Info("connecting to network-controller")
var ncDialOptions []grpc.DialOption
if c.String("nc-tls-cert") != "" && c.String("nc-tls-key") != "" {
ncDialOptions = append(ncDialOptions, grpc.WithTransportCredentials(
mustGetTransportCredentials(c.String("nc-tls-cert"), c.String("nc-tls-key"), c.String("nc-ca-cert"), false),
))
} else {
ncDialOptions = append(ncDialOptions, grpc.WithInsecure())
}
ncConn, err := grpc.Dial(c.String("nc-server"), ncDialOptions...)
if err != nil {
return errors.Wrap(err, "network-controller dial error")
}
ncClient = nc.NewNetworkControllerClient(ncConn)
} else {
log.Info("no network-controller configured")
ncClient = &controller.NopNetworkControllerClient{}
}
common.Controller = ncClient
return nil
}
func runDatabaseMigrations(c *cli.Context) error {
if c.Bool("db-automigrate") {
log.Info("applying database migrations")
m := &migrate.AssetMigrationSource{
Asset: migrations.Asset,
AssetDir: migrations.AssetDir,
Dir: "",
}
n, err := migrate.Exec(common.DB.DB, "postgres", m, migrate.Up)
if err != nil {
return errors.Wrap(err, "applying migrations failed")
}
log.WithField("count", n).Info("migrations applied")
}
return nil
}
func startAPIServer(c *cli.Context) error {
log.WithFields(log.Fields{
"bind": c.String("bind"),
"ca-cert": c.String("ca-cert"),
"tls-cert": c.String("tls-cert"),
"tls-key": c.String("tls-key"),
}).Info("starting api server")
var opts []grpc.ServerOption
if c.String("tls-cert") != "" && c.String("tls-key") != "" {
creds := mustGetTransportCredentials(c.String("tls-cert"), c.String("tls-key"), c.String("ca-cert"), false)
opts = append(opts, grpc.Creds(creds))
}
gs := grpc.NewServer(opts...)
nsAPI := api.NewNetworkServerAPI()
ns.RegisterNetworkServerServer(gs, nsAPI)
ln, err := net.Listen("tcp", c.String("bind"))
if err != nil {
return errors.Wrap(err, "start api listener error")
}
go gs.Serve(ln)
return nil
}
func startGatewayAPIServer(c *cli.Context) error {
log.WithFields(log.Fields{
"bind": c.String("gw-server-bind | {
var netID lorawan.NetID
if err := netID.UnmarshalText([]byte(c.String("net-id"))); err != nil {
return errors.Wrap(err, "NetID parse error")
}
common.NetID = netID
return nil
} | identifier_body | |
main.go | add channel error")
}
}
common.Band = bandConfig
common.BandName = band.Name(c.String("band"))
return nil
}
func setDeduplicationDelay(c *cli.Context) error {
common.DeduplicationDelay = c.Duration("deduplication-delay")
return nil
}
func setGetDownlinkDataDelay(c *cli.Context) error {
common.GetDownlinkDataDelay = c.Duration("get-downlink-data-delay")
return nil
}
func setCreateGatewayOnStats(c *cli.Context) error {
common.CreateGatewayOnStats = c.Bool("gw-create-on-stats")
return nil
}
func setNodeSessionTTL(c *cli.Context) error {
common.NodeSessionTTL = c.Duration("node-session-ttl")
return nil
}
func setLogNodeFrames(c *cli.Context) error {
common.LogNodeFrames = c.Bool("log-node-frames")
return nil
}
func setGatewayServerJWTSecret(c *cli.Context) error {
common.GatewayServerJWTSecret = c.String("gw-server-jwt-secret")
return nil
}
func setStatsAggregationIntervals(c *cli.Context) error {
// get the gw stats aggregation intervals
gateway.MustSetStatsAggregationIntervals(strings.Split(c.String("gw-stats-aggregation-intervals"), ","))
return nil
}
func setTimezone(c *cli.Context) error {
// get the timezone
if c.String("timezone") != "" {
l, err := time.LoadLocation(c.String("timezone"))
if err != nil {
return errors.Wrap(err, "load timezone location error")
}
common.TimeLocation = l
}
return nil
}
func printStartMessage(c *cli.Context) error {
log.WithFields(log.Fields{
"version": version,
"net_id": common.NetID.String(),
"band": c.String("band"),
"docs": "https://docs.loraserver.io/",
}).Info("starting LoRa Server")
return nil
}
func enableUplinkChannels(c *cli.Context) error {
if c.String("enable-uplink-channels") == "" {
return nil
}
log.Info("disabling all channels")
for _, c := range common.Band.GetEnabledUplinkChannels() {
if err := common.Band.DisableUplinkChannel(c); err != nil {
return errors.Wrap(err, "disable uplink channel error")
}
}
blocks := strings.Split(c.String("enable-uplink-channels"), ",")
for _, block := range blocks {
block = strings.Trim(block, " ")
var start, end int
if _, err := fmt.Sscanf(block, "%d-%d", &start, &end); err != nil {
if _, err := fmt.Sscanf(block, "%d", &start); err != nil {
return errors.Wrap(err, "parse channel range error")
}
end = start
}
log.WithFields(log.Fields{
"first_channel": start,
"last_channel": end,
}).Info("enabling channel block")
for ; start <= end; start++ {
if err := common.Band.EnableUplinkChannel(start); err != nil {
errors.Wrap(err, "enable uplink channel error")
}
}
}
return nil
}
func setRedisPool(c *cli.Context) error {
log.WithField("url", c.String("redis-url")).Info("setup redis connection pool")
common.RedisPool = common.NewRedisPool(c.String("redis-url"))
return nil
}
func | (c *cli.Context) error {
log.Info("connecting to postgresql")
db, err := common.OpenDatabase(c.String("postgres-dsn"))
if err != nil {
return errors.Wrap(err, "database connection error")
}
common.DB = db
return nil
}
func setGatewayBackend(c *cli.Context) error {
gw, err := gwBackend.NewBackend(c.String("gw-mqtt-server"), c.String("gw-mqtt-username"), c.String("gw-mqtt-password"), c.String("gw-mqtt-ca-cert"))
if err != nil {
return errors.Wrap(err, "gateway-backend setup failed")
}
common.Gateway = gw
return nil
}
func setApplicationServer(c *cli.Context) error {
log.WithFields(log.Fields{
"server": c.String("as-server"),
"ca-cert": c.String("as-ca-cert"),
"tls-cert": c.String("as-tls-cert"),
"tls-key": c.String("as-tls-key"),
}).Info("connecting to application-server")
var asDialOptions []grpc.DialOption
if c.String("as-tls-cert") != "" && c.String("as-tls-key") != "" {
asDialOptions = append(asDialOptions, grpc.WithTransportCredentials(
mustGetTransportCredentials(c.String("as-tls-cert"), c.String("as-tls-key"), c.String("as-ca-cert"), false),
))
} else {
asDialOptions = append(asDialOptions, grpc.WithInsecure())
}
asConn, err := grpc.Dial(c.String("as-server"), asDialOptions...)
if err != nil {
return errors.Wrap(err, "application-server dial error")
}
common.Application = as.NewApplicationServerClient(asConn)
return nil
}
func setNetworkController(c *cli.Context) error {
var ncClient nc.NetworkControllerClient
if c.String("nc-server") != "" {
// setup network-controller client
log.WithFields(log.Fields{
"server": c.String("nc-server"),
"ca-cert": c.String("nc-ca-cert"),
"tls-cert": c.String("nc-tls-cert"),
"tls-key": c.String("nc-tls-key"),
}).Info("connecting to network-controller")
var ncDialOptions []grpc.DialOption
if c.String("nc-tls-cert") != "" && c.String("nc-tls-key") != "" {
ncDialOptions = append(ncDialOptions, grpc.WithTransportCredentials(
mustGetTransportCredentials(c.String("nc-tls-cert"), c.String("nc-tls-key"), c.String("nc-ca-cert"), false),
))
} else {
ncDialOptions = append(ncDialOptions, grpc.WithInsecure())
}
ncConn, err := grpc.Dial(c.String("nc-server"), ncDialOptions...)
if err != nil {
return errors.Wrap(err, "network-controller dial error")
}
ncClient = nc.NewNetworkControllerClient(ncConn)
} else {
log.Info("no network-controller configured")
ncClient = &controller.NopNetworkControllerClient{}
}
common.Controller = ncClient
return nil
}
func runDatabaseMigrations(c *cli.Context) error {
if c.Bool("db-automigrate") {
log.Info("applying database migrations")
m := &migrate.AssetMigrationSource{
Asset: migrations.Asset,
AssetDir: migrations.AssetDir,
Dir: "",
}
n, err := migrate.Exec(common.DB.DB, "postgres", m, migrate.Up)
if err != nil {
return errors.Wrap(err, "applying migrations failed")
}
log.WithField("count", n).Info("migrations applied")
}
return nil
}
func startAPIServer(c *cli.Context) error {
log.WithFields(log.Fields{
"bind": c.String("bind"),
"ca-cert": c.String("ca-cert"),
"tls-cert": c.String("tls-cert"),
"tls-key": c.String("tls-key"),
}).Info("starting api server")
var opts []grpc.ServerOption
if c.String("tls-cert") != "" && c.String("tls-key") != "" {
creds := mustGetTransportCredentials(c.String("tls-cert"), c.String("tls-key"), c.String("ca-cert"), false)
opts = append(opts, grpc.Creds(creds))
}
gs := grpc.NewServer(opts...)
nsAPI := api.NewNetworkServerAPI()
ns.RegisterNetworkServerServer(gs, nsAPI)
ln, err := net.Listen("tcp", c.String("bind"))
if err != nil {
return errors.Wrap(err, "start api listener error")
}
go gs.Serve(ln)
return nil
}
func startGatewayAPIServer(c *cli.Context) error {
log.WithFields(log.Fields{
"bind": c.String("gw-server-bind"),
"ca-cert": c.String("gw-server-ca-cert"),
"tls-cert": c.String("gw-server-tls-cert"),
"tls-key": c.String("gw-server-tls-key"),
}).Info("starting gateway api server")
var validator auth.Validator
if c.String("gw-server-jwt-secret") != "" {
validator = auth.NewJWTValidator("HS256", c.String("gw-server-jwt-secret"))
} else {
return errors.New("--gw-server-jwt-secret must be set")
}
var opts []grpc.ServerOption
if c.String("gw-server-tls-cert") != "" && c.String("gw-server-tls-key") != "" {
creds := mustGetTransportCredentials(c.String("gw-server-tls-cert"), c.String("gw-server-tls-key"), c.String("gw-server-ca-cert"), false)
opts = append(opts, grpc.Creds(creds))
}
gs := grpc.NewServer(opts...)
gwAPI := api.NewGatewayAPI(validator)
gw.RegisterGatewayServer | setPostgreSQLConnection | identifier_name |
SVD_getMatrixCompletion.py | gram
return " ".join(new_tokens)
def iter_folder(folder, extension, ngrams=[1]):
for subdir, dirs, files in os.walk(folder):
for file in sorted(files):
if not file.endswith(extension): continue
print file
document = open(file).readlines()
for line in document:
line = ProcessLine(line, ngrams)
#print line
# break document into utf8 tokens
yield gensim.utils.tokenize(line, lower=True, errors='ignore')
def iter_documents(outdir, types, sheets = range(0,25), np='syntax', ngrams=[1]):
"""
Generator: iterate over all relevant documents, yielding one
document (=list of utf8 tokens) at a time.
"""
print "types:", types
# find all .txt documents, no matter how deep under top_directory
for i, sheet in enumerate(sheets):
week = i + 1
dir = outdir + str(week) + '/'
for question in types:
prefix = dir + question + "." + np
filename = prefix + phraseext
if not fio.IsExist(filename): continue
document = open(prefix + phraseext).readlines()
for line in document:
line = ProcessLine(line,ngrams)
#print line
# break document into utf8 tokens
yield gensim.utils.tokenize(line, lower=True, errors='ignore')
def readbook(path, ngrams=[1]):
document = open(path).readlines()
for line in document:
line = re.sub( '\s+', ' ', line).strip()
if len(line) == 0: continue
line = ProcessLine(line, ngrams)
# break document into utf8 tokens
yield gensim.utils.tokenize(line, lower=True, errors='ignore')
class TxtSubdirsCorpus(object):
"""
Iterable: on each iteration, return bag-of-words vectors,
one vector for each document.
Process one document at a time using generators, never
load the entire corpus into RAM.
"""
def __init__(self, top_dir, types=['POI', 'MP', 'LP'], sheets = range(0,25), np='syntax', ngrams=[1]):
self.types = types
self.top_dir = top_dir
self.np = np
self.ngrams = ngrams
self.sheets = sheets
# create dictionary = mapping for documents => sparse vectors
self.dictionary = gensim.corpora.Dictionary(iter_documents(top_dir, types, sheets, np, ngrams))
def __iter__(self):
"""
Again, __iter__ is a generator => TxtSubdirsCorpus is a streamed iterable.
"""
for tokens in iter_documents(self.top_dir, self.types, self.sheets, self.np, self.ngrams):
# transform tokens (strings) into a sparse vector, one at a time
yield self.dictionary.doc2bow(tokens)
class TacCorpus(object):
def __init__(self, top_dir, ngrams=[1]):
self.top_dir = top_dir
self.dictionary = gensim.corpora.Dictionary(iter_documents(top_dir, ngrams))
def __iter__(self):
"""
Again, __iter__ is a generator => TxtSubdirsCorpus is a streamed iterable.
"""
for tokens in iter_folder(self.top_dir, self.ngrams):
# transform tokens (strings) into a sparse vector, one at a time
yield self.dictionary.doc2bow(tokens)
class BookCorpus(object):
"""
Iterable: on each iteration, return bag-of-words vectors,
one vector for each document.
Process one document at a time using generators, never
load the entire corpus into RAM.
"""
def __init__(self, path, ngrams=[1]):
self.path = path
self.ngrams = ngrams
# create dictionary = mapping for documents => sparse vectors
self.dictionary = gensim.corpora.Dictionary(readbook(path, ngrams))
def __iter__(self):
"""
Again, __iter__ is a generator => TxtSubdirsCorpus is a streamed iterable.
"""
for tokens in readbook(self.path, self.ngrams):
# transform tokens (strings) into a sparse vector, one at a time
yield self.dictionary.doc2bow(tokens)
def SaveCSC2(csc, filename):
s = csc.shape
m = s[0]
n = s[1]
body = []
for i in range(m):
row = []
for j in range(n):
row.append(csc[i, j])
body.append(row)
fio.WriteMatrix(filename, body, header=None)
def SaveCSC(csc, filename):
A = csc.toarray()
s = csc.shape
m = s[0]
n = s[1]
data = []
for i in range(m):
row = []
for j in range(n):
x = A[i][j]
if x != 0:
row.append([j, A[i][j]])
data.append(row)
with open(filename, 'w') as fin:
json.dump(data, fin, indent = 2)
def SaveSparseMatrix(A, filename):
m = len(A)
n = len(A[0])
data = []
for i in range(m):
row = []
for j in range(n):
x = A[i][j]
if x != 0:
row.append([j, A[i][j]])
data.append(row)
with open(filename, 'w') as fin:
json.dump(data, fin, indent = 2)
def SaveNewA(A, dict, path, ngrams, prefixname="", sheets = range(0,25), np='sentence', types=['POI', 'MP', 'LP']):
TotoalLine = 0
for i in sheets:
week = i + 1
dir = path + str(week) + '/'
for type in types:
prefix = dir + type + "." + np
print prefix
if not fio.IsExist(prefix + phraseext):
print prefix + phraseext
continue
document = open(prefix + phraseext).readlines()
LineRange = range(TotoalLine, TotoalLine + len(document))
TotoalLine = TotoalLine + len(document)
Bigrams = []
for line in document:
line = ProcessLine(line, ngrams)
tokens = list(gensim.utils.tokenize(line, lower=True, errors='ignore'))
Bigrams = Bigrams + tokens
PartA = {}
for bigram in set(Bigrams):
if bigram not in dict:
print "error", bigram
id = dict[bigram]
row = A[id]
PartA[bigram] = [row[x] for x in LineRange]
svdAname = dir + type + '.' +prefixname + '.softA'
print svdAname
with open(svdAname, 'w') as fout:
json.dump(PartA, fout, indent=2)
def ToBinary(csc):
A = csc.toarray()
s = csc.shape
m = s[0]
n = s[1]
m = len(A)
n = len(A[0])
for i in range(m):
row = []
for j in range(n):
if A[i][j] >= 1:
A[i][j] = 1
return A
def CheckBinary(A):
m = len(A)
n = len(A[0])
for i in range(m):
|
return True
def getSVD(prefix, np, corpusname, ngrams, rank_max, softImpute_lambda, binary_matrix, output, types = ['POI', 'MP', 'LP']):
#types = ['POI', 'MP', 'LP']
path = prefix
sheets = range(0,26)
dictname = output + "_".join(types) + '_' + corpusname + corpusdictexe
# # that's it! the streamed corpus of sparse vectors is ready
# if corpusname=='book':
# corpus = BookCorpus(np, ngrams)
# elif corpusname == 'tac':
# corpus = TacCorpus(prefix, ngrams)
# dictname = path + '_' + corpusname + corpusdictexe
# else:
# corpus = TxtSubdirsCorpus(prefix, types, sheets, np, ngrams)
#
# fio.SaveDict2Json(corpus.dictionary.token2id, dictname)
#
# # or run truncated Singular Value Decomposition (SVD) on the streamed corpus
# #from gensim.models.lsimodel import stochastic_svd as svd
# #u, s = svd(corpus, rank=300, num_terms=len(corpus.dictionary), chunksize=5000)
#
# #https://pypi.python.org/pypi/sparsesvd/
# scipy_csc_matrix = gensim.mat | row = []
for j in range(n):
if A[i][j] != 0 and A[i][j] != 1: return False | conditional_block |
SVD_getMatrixCompletion.py | gram
return " ".join(new_tokens)
def iter_folder(folder, extension, ngrams=[1]):
for subdir, dirs, files in os.walk(folder):
for file in sorted(files):
if not file.endswith(extension): continue
print file
document = open(file).readlines()
for line in document:
line = ProcessLine(line, ngrams)
#print line
# break document into utf8 tokens
yield gensim.utils.tokenize(line, lower=True, errors='ignore')
def iter_documents(outdir, types, sheets = range(0,25), np='syntax', ngrams=[1]):
"""
Generator: iterate over all relevant documents, yielding one
document (=list of utf8 tokens) at a time.
"""
print "types:", types
# find all .txt documents, no matter how deep under top_directory
for i, sheet in enumerate(sheets):
week = i + 1
dir = outdir + str(week) + '/'
for question in types:
prefix = dir + question + "." + np
filename = prefix + phraseext
if not fio.IsExist(filename): continue
document = open(prefix + phraseext).readlines()
for line in document:
line = ProcessLine(line,ngrams)
#print line
# break document into utf8 tokens
yield gensim.utils.tokenize(line, lower=True, errors='ignore')
def readbook(path, ngrams=[1]):
document = open(path).readlines()
for line in document:
line = re.sub( '\s+', ' ', line).strip()
if len(line) == 0: continue
line = ProcessLine(line, ngrams)
# break document into utf8 tokens
yield gensim.utils.tokenize(line, lower=True, errors='ignore')
class TxtSubdirsCorpus(object):
"""
Iterable: on each iteration, return bag-of-words vectors,
one vector for each document. | Process one document at a time using generators, never
load the entire corpus into RAM.
"""
def __init__(self, top_dir, types=['POI', 'MP', 'LP'], sheets = range(0,25), np='syntax', ngrams=[1]):
self.types = types
self.top_dir = top_dir
self.np = np
self.ngrams = ngrams
self.sheets = sheets
# create dictionary = mapping for documents => sparse vectors
self.dictionary = gensim.corpora.Dictionary(iter_documents(top_dir, types, sheets, np, ngrams))
def __iter__(self):
"""
Again, __iter__ is a generator => TxtSubdirsCorpus is a streamed iterable.
"""
for tokens in iter_documents(self.top_dir, self.types, self.sheets, self.np, self.ngrams):
# transform tokens (strings) into a sparse vector, one at a time
yield self.dictionary.doc2bow(tokens)
class TacCorpus(object):
def __init__(self, top_dir, ngrams=[1]):
self.top_dir = top_dir
self.dictionary = gensim.corpora.Dictionary(iter_documents(top_dir, ngrams))
def __iter__(self):
"""
Again, __iter__ is a generator => TxtSubdirsCorpus is a streamed iterable.
"""
for tokens in iter_folder(self.top_dir, self.ngrams):
# transform tokens (strings) into a sparse vector, one at a time
yield self.dictionary.doc2bow(tokens)
class BookCorpus(object):
"""
Iterable: on each iteration, return bag-of-words vectors,
one vector for each document.
Process one document at a time using generators, never
load the entire corpus into RAM.
"""
def __init__(self, path, ngrams=[1]):
self.path = path
self.ngrams = ngrams
# create dictionary = mapping for documents => sparse vectors
self.dictionary = gensim.corpora.Dictionary(readbook(path, ngrams))
def __iter__(self):
"""
Again, __iter__ is a generator => TxtSubdirsCorpus is a streamed iterable.
"""
for tokens in readbook(self.path, self.ngrams):
# transform tokens (strings) into a sparse vector, one at a time
yield self.dictionary.doc2bow(tokens)
def SaveCSC2(csc, filename):
s = csc.shape
m = s[0]
n = s[1]
body = []
for i in range(m):
row = []
for j in range(n):
row.append(csc[i, j])
body.append(row)
fio.WriteMatrix(filename, body, header=None)
def SaveCSC(csc, filename):
A = csc.toarray()
s = csc.shape
m = s[0]
n = s[1]
data = []
for i in range(m):
row = []
for j in range(n):
x = A[i][j]
if x != 0:
row.append([j, A[i][j]])
data.append(row)
with open(filename, 'w') as fin:
json.dump(data, fin, indent = 2)
def SaveSparseMatrix(A, filename):
m = len(A)
n = len(A[0])
data = []
for i in range(m):
row = []
for j in range(n):
x = A[i][j]
if x != 0:
row.append([j, A[i][j]])
data.append(row)
with open(filename, 'w') as fin:
json.dump(data, fin, indent = 2)
def SaveNewA(A, dict, path, ngrams, prefixname="", sheets = range(0,25), np='sentence', types=['POI', 'MP', 'LP']):
TotoalLine = 0
for i in sheets:
week = i + 1
dir = path + str(week) + '/'
for type in types:
prefix = dir + type + "." + np
print prefix
if not fio.IsExist(prefix + phraseext):
print prefix + phraseext
continue
document = open(prefix + phraseext).readlines()
LineRange = range(TotoalLine, TotoalLine + len(document))
TotoalLine = TotoalLine + len(document)
Bigrams = []
for line in document:
line = ProcessLine(line, ngrams)
tokens = list(gensim.utils.tokenize(line, lower=True, errors='ignore'))
Bigrams = Bigrams + tokens
PartA = {}
for bigram in set(Bigrams):
if bigram not in dict:
print "error", bigram
id = dict[bigram]
row = A[id]
PartA[bigram] = [row[x] for x in LineRange]
svdAname = dir + type + '.' +prefixname + '.softA'
print svdAname
with open(svdAname, 'w') as fout:
json.dump(PartA, fout, indent=2)
def ToBinary(csc):
A = csc.toarray()
s = csc.shape
m = s[0]
n = s[1]
m = len(A)
n = len(A[0])
for i in range(m):
row = []
for j in range(n):
if A[i][j] >= 1:
A[i][j] = 1
return A
def CheckBinary(A):
m = len(A)
n = len(A[0])
for i in range(m):
row = []
for j in range(n):
if A[i][j] != 0 and A[i][j] != 1: return False
return True
def getSVD(prefix, np, corpusname, ngrams, rank_max, softImpute_lambda, binary_matrix, output, types = ['POI', 'MP', 'LP']):
#types = ['POI', 'MP', 'LP']
path = prefix
sheets = range(0,26)
dictname = output + "_".join(types) + '_' + corpusname + corpusdictexe
# # that's it! the streamed corpus of sparse vectors is ready
# if corpusname=='book':
# corpus = BookCorpus(np, ngrams)
# elif corpusname == 'tac':
# corpus = TacCorpus(prefix, ngrams)
# dictname = path + '_' + corpusname + corpusdictexe
# else:
# corpus = TxtSubdirsCorpus(prefix, types, sheets, np, ngrams)
#
# fio.SaveDict2Json(corpus.dictionary.token2id, dictname)
#
# # or run truncated Singular Value Decomposition (SVD) on the streamed corpus
# #from gensim.models.lsimodel import stochastic_svd as svd
# #u, s = svd(corpus, rank=300, num_terms=len(corpus.dictionary), chunksize=5000)
#
# #https://pypi.python.org/pypi/sparsesvd/
# scipy_csc_matrix = gensim.mat | random_line_split | |
SVD_getMatrixCompletion.py | gram
return " ".join(new_tokens)
def iter_folder(folder, extension, ngrams=[1]):
for subdir, dirs, files in os.walk(folder):
for file in sorted(files):
if not file.endswith(extension): continue
print file
document = open(file).readlines()
for line in document:
line = ProcessLine(line, ngrams)
#print line
# break document into utf8 tokens
yield gensim.utils.tokenize(line, lower=True, errors='ignore')
def iter_documents(outdir, types, sheets = range(0,25), np='syntax', ngrams=[1]):
"""
Generator: iterate over all relevant documents, yielding one
document (=list of utf8 tokens) at a time.
"""
print "types:", types
# find all .txt documents, no matter how deep under top_directory
for i, sheet in enumerate(sheets):
week = i + 1
dir = outdir + str(week) + '/'
for question in types:
prefix = dir + question + "." + np
filename = prefix + phraseext
if not fio.IsExist(filename): continue
document = open(prefix + phraseext).readlines()
for line in document:
line = ProcessLine(line,ngrams)
#print line
# break document into utf8 tokens
yield gensim.utils.tokenize(line, lower=True, errors='ignore')
def readbook(path, ngrams=[1]):
|
class TxtSubdirsCorpus(object):
"""
Iterable: on each iteration, return bag-of-words vectors,
one vector for each document.
Process one document at a time using generators, never
load the entire corpus into RAM.
"""
def __init__(self, top_dir, types=['POI', 'MP', 'LP'], sheets = range(0,25), np='syntax', ngrams=[1]):
self.types = types
self.top_dir = top_dir
self.np = np
self.ngrams = ngrams
self.sheets = sheets
# create dictionary = mapping for documents => sparse vectors
self.dictionary = gensim.corpora.Dictionary(iter_documents(top_dir, types, sheets, np, ngrams))
def __iter__(self):
"""
Again, __iter__ is a generator => TxtSubdirsCorpus is a streamed iterable.
"""
for tokens in iter_documents(self.top_dir, self.types, self.sheets, self.np, self.ngrams):
# transform tokens (strings) into a sparse vector, one at a time
yield self.dictionary.doc2bow(tokens)
class TacCorpus(object):
def __init__(self, top_dir, ngrams=[1]):
self.top_dir = top_dir
self.dictionary = gensim.corpora.Dictionary(iter_documents(top_dir, ngrams))
def __iter__(self):
"""
Again, __iter__ is a generator => TxtSubdirsCorpus is a streamed iterable.
"""
for tokens in iter_folder(self.top_dir, self.ngrams):
# transform tokens (strings) into a sparse vector, one at a time
yield self.dictionary.doc2bow(tokens)
class BookCorpus(object):
"""
Iterable: on each iteration, return bag-of-words vectors,
one vector for each document.
Process one document at a time using generators, never
load the entire corpus into RAM.
"""
def __init__(self, path, ngrams=[1]):
self.path = path
self.ngrams = ngrams
# create dictionary = mapping for documents => sparse vectors
self.dictionary = gensim.corpora.Dictionary(readbook(path, ngrams))
def __iter__(self):
"""
Again, __iter__ is a generator => TxtSubdirsCorpus is a streamed iterable.
"""
for tokens in readbook(self.path, self.ngrams):
# transform tokens (strings) into a sparse vector, one at a time
yield self.dictionary.doc2bow(tokens)
def SaveCSC2(csc, filename):
s = csc.shape
m = s[0]
n = s[1]
body = []
for i in range(m):
row = []
for j in range(n):
row.append(csc[i, j])
body.append(row)
fio.WriteMatrix(filename, body, header=None)
def SaveCSC(csc, filename):
A = csc.toarray()
s = csc.shape
m = s[0]
n = s[1]
data = []
for i in range(m):
row = []
for j in range(n):
x = A[i][j]
if x != 0:
row.append([j, A[i][j]])
data.append(row)
with open(filename, 'w') as fin:
json.dump(data, fin, indent = 2)
def SaveSparseMatrix(A, filename):
m = len(A)
n = len(A[0])
data = []
for i in range(m):
row = []
for j in range(n):
x = A[i][j]
if x != 0:
row.append([j, A[i][j]])
data.append(row)
with open(filename, 'w') as fin:
json.dump(data, fin, indent = 2)
def SaveNewA(A, dict, path, ngrams, prefixname="", sheets = range(0,25), np='sentence', types=['POI', 'MP', 'LP']):
TotoalLine = 0
for i in sheets:
week = i + 1
dir = path + str(week) + '/'
for type in types:
prefix = dir + type + "." + np
print prefix
if not fio.IsExist(prefix + phraseext):
print prefix + phraseext
continue
document = open(prefix + phraseext).readlines()
LineRange = range(TotoalLine, TotoalLine + len(document))
TotoalLine = TotoalLine + len(document)
Bigrams = []
for line in document:
line = ProcessLine(line, ngrams)
tokens = list(gensim.utils.tokenize(line, lower=True, errors='ignore'))
Bigrams = Bigrams + tokens
PartA = {}
for bigram in set(Bigrams):
if bigram not in dict:
print "error", bigram
id = dict[bigram]
row = A[id]
PartA[bigram] = [row[x] for x in LineRange]
svdAname = dir + type + '.' +prefixname + '.softA'
print svdAname
with open(svdAname, 'w') as fout:
json.dump(PartA, fout, indent=2)
def ToBinary(csc):
A = csc.toarray()
s = csc.shape
m = s[0]
n = s[1]
m = len(A)
n = len(A[0])
for i in range(m):
row = []
for j in range(n):
if A[i][j] >= 1:
A[i][j] = 1
return A
def CheckBinary(A):
m = len(A)
n = len(A[0])
for i in range(m):
row = []
for j in range(n):
if A[i][j] != 0 and A[i][j] != 1: return False
return True
def getSVD(prefix, np, corpusname, ngrams, rank_max, softImpute_lambda, binary_matrix, output, types = ['POI', 'MP', 'LP']):
#types = ['POI', 'MP', 'LP']
path = prefix
sheets = range(0,26)
dictname = output + "_".join(types) + '_' + corpusname + corpusdictexe
# # that's it! the streamed corpus of sparse vectors is ready
# if corpusname=='book':
# corpus = BookCorpus(np, ngrams)
# elif corpusname == 'tac':
# corpus = TacCorpus(prefix, ngrams)
# dictname = path + '_' + corpusname + corpusdictexe
# else:
# corpus = TxtSubdirsCorpus(prefix, types, sheets, np, ngrams)
#
# fio.SaveDict2Json(corpus.dictionary.token2id, dictname)
#
# # or run truncated Singular Value Decomposition (SVD) on the streamed corpus
# #from gensim.models.lsimodel import stochastic_svd as svd
# #u, s = svd(corpus, rank=300, num_terms=len(corpus.dictionary), chunksize=5000)
#
# #https://pypi.python.org/pypi/sparsesvd/
# scipy_csc_matrix = gensim.mat | document = open(path).readlines()
for line in document:
line = re.sub( '\s+', ' ', line).strip()
if len(line) == 0: continue
line = ProcessLine(line, ngrams)
# break document into utf8 tokens
yield gensim.utils.tokenize(line, lower=True, errors='ignore') | identifier_body |
SVD_getMatrixCompletion.py | gram
return " ".join(new_tokens)
def iter_folder(folder, extension, ngrams=[1]):
for subdir, dirs, files in os.walk(folder):
for file in sorted(files):
if not file.endswith(extension): continue
print file
document = open(file).readlines()
for line in document:
line = ProcessLine(line, ngrams)
#print line
# break document into utf8 tokens
yield gensim.utils.tokenize(line, lower=True, errors='ignore')
def iter_documents(outdir, types, sheets = range(0,25), np='syntax', ngrams=[1]):
"""
Generator: iterate over all relevant documents, yielding one
document (=list of utf8 tokens) at a time.
"""
print "types:", types
# find all .txt documents, no matter how deep under top_directory
for i, sheet in enumerate(sheets):
week = i + 1
dir = outdir + str(week) + '/'
for question in types:
prefix = dir + question + "." + np
filename = prefix + phraseext
if not fio.IsExist(filename): continue
document = open(prefix + phraseext).readlines()
for line in document:
line = ProcessLine(line,ngrams)
#print line
# break document into utf8 tokens
yield gensim.utils.tokenize(line, lower=True, errors='ignore')
def readbook(path, ngrams=[1]):
document = open(path).readlines()
for line in document:
line = re.sub( '\s+', ' ', line).strip()
if len(line) == 0: continue
line = ProcessLine(line, ngrams)
# break document into utf8 tokens
yield gensim.utils.tokenize(line, lower=True, errors='ignore')
class TxtSubdirsCorpus(object):
"""
Iterable: on each iteration, return bag-of-words vectors,
one vector for each document.
Process one document at a time using generators, never
load the entire corpus into RAM.
"""
def __init__(self, top_dir, types=['POI', 'MP', 'LP'], sheets = range(0,25), np='syntax', ngrams=[1]):
self.types = types
self.top_dir = top_dir
self.np = np
self.ngrams = ngrams
self.sheets = sheets
# create dictionary = mapping for documents => sparse vectors
self.dictionary = gensim.corpora.Dictionary(iter_documents(top_dir, types, sheets, np, ngrams))
def __iter__(self):
"""
Again, __iter__ is a generator => TxtSubdirsCorpus is a streamed iterable.
"""
for tokens in iter_documents(self.top_dir, self.types, self.sheets, self.np, self.ngrams):
# transform tokens (strings) into a sparse vector, one at a time
yield self.dictionary.doc2bow(tokens)
class TacCorpus(object):
def __init__(self, top_dir, ngrams=[1]):
self.top_dir = top_dir
self.dictionary = gensim.corpora.Dictionary(iter_documents(top_dir, ngrams))
def __iter__(self):
"""
Again, __iter__ is a generator => TxtSubdirsCorpus is a streamed iterable.
"""
for tokens in iter_folder(self.top_dir, self.ngrams):
# transform tokens (strings) into a sparse vector, one at a time
yield self.dictionary.doc2bow(tokens)
class BookCorpus(object):
"""
Iterable: on each iteration, return bag-of-words vectors,
one vector for each document.
Process one document at a time using generators, never
load the entire corpus into RAM.
"""
def __init__(self, path, ngrams=[1]):
self.path = path
self.ngrams = ngrams
# create dictionary = mapping for documents => sparse vectors
self.dictionary = gensim.corpora.Dictionary(readbook(path, ngrams))
def __iter__(self):
"""
Again, __iter__ is a generator => TxtSubdirsCorpus is a streamed iterable.
"""
for tokens in readbook(self.path, self.ngrams):
# transform tokens (strings) into a sparse vector, one at a time
yield self.dictionary.doc2bow(tokens)
def SaveCSC2(csc, filename):
s = csc.shape
m = s[0]
n = s[1]
body = []
for i in range(m):
row = []
for j in range(n):
row.append(csc[i, j])
body.append(row)
fio.WriteMatrix(filename, body, header=None)
def SaveCSC(csc, filename):
A = csc.toarray()
s = csc.shape
m = s[0]
n = s[1]
data = []
for i in range(m):
row = []
for j in range(n):
x = A[i][j]
if x != 0:
row.append([j, A[i][j]])
data.append(row)
with open(filename, 'w') as fin:
json.dump(data, fin, indent = 2)
def SaveSparseMatrix(A, filename):
m = len(A)
n = len(A[0])
data = []
for i in range(m):
row = []
for j in range(n):
x = A[i][j]
if x != 0:
row.append([j, A[i][j]])
data.append(row)
with open(filename, 'w') as fin:
json.dump(data, fin, indent = 2)
def SaveNewA(A, dict, path, ngrams, prefixname="", sheets = range(0,25), np='sentence', types=['POI', 'MP', 'LP']):
TotoalLine = 0
for i in sheets:
week = i + 1
dir = path + str(week) + '/'
for type in types:
prefix = dir + type + "." + np
print prefix
if not fio.IsExist(prefix + phraseext):
print prefix + phraseext
continue
document = open(prefix + phraseext).readlines()
LineRange = range(TotoalLine, TotoalLine + len(document))
TotoalLine = TotoalLine + len(document)
Bigrams = []
for line in document:
line = ProcessLine(line, ngrams)
tokens = list(gensim.utils.tokenize(line, lower=True, errors='ignore'))
Bigrams = Bigrams + tokens
PartA = {}
for bigram in set(Bigrams):
if bigram not in dict:
print "error", bigram
id = dict[bigram]
row = A[id]
PartA[bigram] = [row[x] for x in LineRange]
svdAname = dir + type + '.' +prefixname + '.softA'
print svdAname
with open(svdAname, 'w') as fout:
json.dump(PartA, fout, indent=2)
def ToBinary(csc):
A = csc.toarray()
s = csc.shape
m = s[0]
n = s[1]
m = len(A)
n = len(A[0])
for i in range(m):
row = []
for j in range(n):
if A[i][j] >= 1:
A[i][j] = 1
return A
def CheckBinary(A):
m = len(A)
n = len(A[0])
for i in range(m):
row = []
for j in range(n):
if A[i][j] != 0 and A[i][j] != 1: return False
return True
def | (prefix, np, corpusname, ngrams, rank_max, softImpute_lambda, binary_matrix, output, types = ['POI', 'MP', 'LP']):
#types = ['POI', 'MP', 'LP']
path = prefix
sheets = range(0,26)
dictname = output + "_".join(types) + '_' + corpusname + corpusdictexe
# # that's it! the streamed corpus of sparse vectors is ready
# if corpusname=='book':
# corpus = BookCorpus(np, ngrams)
# elif corpusname == 'tac':
# corpus = TacCorpus(prefix, ngrams)
# dictname = path + '_' + corpusname + corpusdictexe
# else:
# corpus = TxtSubdirsCorpus(prefix, types, sheets, np, ngrams)
#
# fio.SaveDict2Json(corpus.dictionary.token2id, dictname)
#
# # or run truncated Singular Value Decomposition (SVD) on the streamed corpus
# #from gensim.models.lsimodel import stochastic_svd as svd
# #u, s = svd(corpus, rank=300, num_terms=len(corpus.dictionary), chunksize=5000)
#
# #https://pypi.python.org/pypi/sparsesvd/
# scipy_csc_matrix = gens | getSVD | identifier_name |
get.go |
return status, nil
}
// GetDiscoveredResources ... loops through all specified resourceTypes
// Lists all resources by Type (Will need to loop over all cfg.ResourceType* types)
// https://docs.aws.amazon.com/config/latest/developerguide/resource-config-reference.html#supported-resources
func (c *CfgSvc) GetDiscoveredResources() ([]*configservice.ResourceIdentifier, error) {
// List of resource types pulled from
// github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/api-2.json
var resourceTypes = [...]string{
"AWS::AppStream::DirectoryConfig",
"AWS::AppStream::Application",
"AWS::AppFlow::Flow",
"AWS::ApiGateway::Stage",
"AWS::ApiGateway::RestApi",
"AWS::ApiGatewayV2::Stage",
"AWS::ApiGatewayV2::Api",
"AWS::Athena::WorkGroup",
"AWS::Athena::DataCatalog",
"AWS::CloudFront::Distribution",
"AWS::CloudFront::StreamingDistribution",
"AWS::CloudWatch::Alarm",
"AWS::CloudWatch::MetricStream",
"AWS::RUM::AppMonitor",
"AWS::Evidently::Project",
"AWS::CodeGuruReviewer::RepositoryAssociation",
"AWS::Connect::PhoneNumber",
"AWS::CustomerProfiles::Domain",
"AWS::Detective::Graph",
"AWS::DynamoDB::Table",
"AWS::EC2::Host",
"AWS::EC2::EIP",
"AWS::EC2::Instance",
"AWS::EC2::NetworkInterface",
"AWS::EC2::SecurityGroup",
"AWS::EC2::NatGateway",
"AWS::EC2::EgressOnlyInternetGateway",
"AWS::EC2::EC2Fleet",
"AWS::EC2::SpotFleet",
"AWS::EC2::PrefixList",
"AWS::EC2::FlowLog",
"AWS::EC2::TransitGateway",
"AWS::EC2::TransitGatewayAttachment",
"AWS::EC2::TransitGatewayRouteTable",
"AWS::EC2::VPCEndpoint",
"AWS::EC2::VPCEndpointService",
"AWS::EC2::VPCPeeringConnection",
"AWS::EC2::RegisteredHAInstance",
"AWS::EC2::SubnetRouteTableAssociation",
"AWS::EC2::LaunchTemplate",
"AWS::EC2::NetworkInsightsAccessScopeAnalysis",
"AWS::EC2::TrafficMirrorTarget",
"AWS::EC2::TrafficMirrorSession",
"AWS::EC2::DHCPOptions",
"AWS::EC2::IPAM",
"AWS::EC2::NetworkInsightsPath",
"AWS::EC2::TrafficMirrorFilter",
"AWS::EC2::Volume",
"AWS::ImageBuilder::ImagePipeline",
"AWS::ImageBuilder::DistributionConfiguration",
"AWS::ImageBuilder::InfrastructureConfiguration",
"AWS::ECR::Repository",
"AWS::ECR::RegistryPolicy",
"AWS::ECR::PullThroughCacheRule",
"AWS::ECR::PublicRepository",
"AWS::ECS::Cluster",
"AWS::ECS::TaskDefinition",
"AWS::ECS::Service",
"AWS::ECS::TaskSet",
"AWS::EFS::FileSystem",
"AWS::EFS::AccessPoint",
"AWS::EKS::Cluster",
"AWS::EKS::FargateProfile",
"AWS::EKS::IdentityProviderConfig",
"AWS::EKS::Addon",
"AWS::EMR::SecurityConfiguration",
"AWS::Events::EventBus",
"AWS::Events::ApiDestination",
"AWS::Events::Archive",
"AWS::Events::Endpoint",
"AWS::Events::Connection",
"AWS::Events::Rule",
"AWS::EC2::TrafficMirrorSession",
"AWS::EventSchemas::RegistryPolicy",
"AWS::EventSchemas::Discoverer",
"AWS::EventSchemas::Schema",
"AWS::Forecast::Dataset",
"AWS::FraudDetector::Label",
"AWS::FraudDetector::EntityType",
"AWS::FraudDetector::Variable",
"AWS::FraudDetector::Outcome",
"AWS::GuardDuty::Detector",
"AWS::GuardDuty::ThreatIntelSet",
"AWS::GuardDuty::IPSet",
"AWS::GuardDuty::Filter",
"AWS::HealthLake::FHIRDatastore",
"AWS::Cassandra::Keyspace",
"AWS::IVS::Channel",
"AWS::IVS::RecordingConfiguration",
"AWS::IVS::PlaybackKeyPair",
"AWS::Elasticsearch::Domain",
"AWS::OpenSearch::Domain",
"AWS::Elasticsearch::Domain",
"AWS::Pinpoint::ApplicationSettings",
"AWS::Pinpoint::Segment",
"AWS::Pinpoint::App",
"AWS::Pinpoint::Campaign",
"AWS::Pinpoint::InAppTemplate",
"AWS::QLDB::Ledger",
"AWS::Kinesis::Stream",
"AWS::Kinesis::StreamConsumer",
"AWS::KinesisAnalyticsV2::Application",
"AWS::KinesisFirehose::DeliveryStream",
"AWS::KinesisVideo::SignalingChannel",
"AWS::Lex::BotAlias",
"AWS::Lex::Bot",
"AWS::Lightsail::Disk",
"AWS::Lightsail::Certificate",
"AWS::Lightsail::Bucket",
"AWS::Lightsail::StaticIp",
"AWS::LookoutMetrics::Alert",
"AWS::LookoutVision::Project",
"AWS::AmazonMQ::Broker",
"AWS::MSK::Cluster",
"AWS::Redshift::Cluster",
"AWS::Redshift::ClusterParameterGroup",
"AWS::Redshift::ClusterSecurityGroup",
"AWS::Redshift::ScheduledAction",
"AWS::Redshift::ClusterSnapshot",
"AWS::Redshift::ClusterSubnetGroup",
"AWS::Redshift::EventSubscription",
"AWS::RDS::DBInstance",
"AWS::RDS::DBSecurityGroup",
"AWS::RDS::DBSnapshot",
"AWS::RDS::DBSubnetGroup",
"AWS::RDS::EventSubscription",
"AWS::RDS::DBCluster",
"AWS::RDS::DBClusterSnapshot",
"AWS::RDS::GlobalCluster",
"AWS::Route53::HostedZone",
"AWS::Route53::HealthCheck",
"AWS::Route53Resolver::ResolverEndpoint",
"AWS::Route53Resolver::ResolverRule",
"AWS::Route53Resolver::ResolverRuleAssociation",
"AWS::Route53Resolver::FirewallDomainList",
"AWS::AWS::Route53Resolver::FirewallRuleGroupAssociation",
"AWS::Route53RecoveryReadiness::Cell",
"AWS::Route53RecoveryReadiness::ReadinessCheck",
"AWS::Route53RecoveryReadiness::RecoveryGroup",
"AWS::Route53RecoveryControl::Cluster",
"AWS::Route53RecoveryControl::ControlPanel",
"AWS::Route53RecoveryControl::RoutingControl",
"AWS::Route53RecoveryControl::SafetyRule",
"AWS::Route53RecoveryReadiness::ResourceSet",
"AWS::SageMaker::CodeRepository",
"AWS::SageMaker::Domain",
"AWS::SageMaker::AppImageConfig",
"AWS::SageMaker::Image",
"AWS::SageMaker::Model",
"AWS::SageMaker::NotebookInstance",
"AWS::SageMaker::NotebookInstanceLifecycleConfig",
"AWS::SageMaker::EndpointConfig",
"AWS::SageMaker::Workteam",
"AWS::SES::ConfigurationSet",
"AWS::SES::ContactList",
"AWS::SES::Template",
"AWS::SES::ReceiptFilter",
"AWS::SES::ReceiptRuleSet",
"AWS::SNS::Topic",
"AWS::SQS::Queue",
"AWS::S3::Bucket",
"AWS::S3::AccountPublic | {
params.NextToken = result.NextToken
result, err = c.Client.DescribeConfigRuleEvaluationStatus(¶ms)
if err != nil {
return nil, err
}
status = append(status, result.ConfigRulesEvaluationStatus...)
} | conditional_block | |
get.go | ",
"AWS::EFS::AccessPoint",
"AWS::EKS::Cluster",
"AWS::EKS::FargateProfile",
"AWS::EKS::IdentityProviderConfig",
"AWS::EKS::Addon",
"AWS::EMR::SecurityConfiguration",
"AWS::Events::EventBus",
"AWS::Events::ApiDestination",
"AWS::Events::Archive",
"AWS::Events::Endpoint",
"AWS::Events::Connection",
"AWS::Events::Rule",
"AWS::EC2::TrafficMirrorSession",
"AWS::EventSchemas::RegistryPolicy",
"AWS::EventSchemas::Discoverer",
"AWS::EventSchemas::Schema",
"AWS::Forecast::Dataset",
"AWS::FraudDetector::Label",
"AWS::FraudDetector::EntityType",
"AWS::FraudDetector::Variable",
"AWS::FraudDetector::Outcome",
"AWS::GuardDuty::Detector",
"AWS::GuardDuty::ThreatIntelSet",
"AWS::GuardDuty::IPSet",
"AWS::GuardDuty::Filter",
"AWS::HealthLake::FHIRDatastore",
"AWS::Cassandra::Keyspace",
"AWS::IVS::Channel",
"AWS::IVS::RecordingConfiguration",
"AWS::IVS::PlaybackKeyPair",
"AWS::Elasticsearch::Domain",
"AWS::OpenSearch::Domain",
"AWS::Elasticsearch::Domain",
"AWS::Pinpoint::ApplicationSettings",
"AWS::Pinpoint::Segment",
"AWS::Pinpoint::App",
"AWS::Pinpoint::Campaign",
"AWS::Pinpoint::InAppTemplate",
"AWS::QLDB::Ledger",
"AWS::Kinesis::Stream",
"AWS::Kinesis::StreamConsumer",
"AWS::KinesisAnalyticsV2::Application",
"AWS::KinesisFirehose::DeliveryStream",
"AWS::KinesisVideo::SignalingChannel",
"AWS::Lex::BotAlias",
"AWS::Lex::Bot",
"AWS::Lightsail::Disk",
"AWS::Lightsail::Certificate",
"AWS::Lightsail::Bucket",
"AWS::Lightsail::StaticIp",
"AWS::LookoutMetrics::Alert",
"AWS::LookoutVision::Project",
"AWS::AmazonMQ::Broker",
"AWS::MSK::Cluster",
"AWS::Redshift::Cluster",
"AWS::Redshift::ClusterParameterGroup",
"AWS::Redshift::ClusterSecurityGroup",
"AWS::Redshift::ScheduledAction",
"AWS::Redshift::ClusterSnapshot",
"AWS::Redshift::ClusterSubnetGroup",
"AWS::Redshift::EventSubscription",
"AWS::RDS::DBInstance",
"AWS::RDS::DBSecurityGroup",
"AWS::RDS::DBSnapshot",
"AWS::RDS::DBSubnetGroup",
"AWS::RDS::EventSubscription",
"AWS::RDS::DBCluster",
"AWS::RDS::DBClusterSnapshot",
"AWS::RDS::GlobalCluster",
"AWS::Route53::HostedZone",
"AWS::Route53::HealthCheck",
"AWS::Route53Resolver::ResolverEndpoint",
"AWS::Route53Resolver::ResolverRule",
"AWS::Route53Resolver::ResolverRuleAssociation",
"AWS::Route53Resolver::FirewallDomainList",
"AWS::AWS::Route53Resolver::FirewallRuleGroupAssociation",
"AWS::Route53RecoveryReadiness::Cell",
"AWS::Route53RecoveryReadiness::ReadinessCheck",
"AWS::Route53RecoveryReadiness::RecoveryGroup",
"AWS::Route53RecoveryControl::Cluster",
"AWS::Route53RecoveryControl::ControlPanel",
"AWS::Route53RecoveryControl::RoutingControl",
"AWS::Route53RecoveryControl::SafetyRule",
"AWS::Route53RecoveryReadiness::ResourceSet",
"AWS::SageMaker::CodeRepository",
"AWS::SageMaker::Domain",
"AWS::SageMaker::AppImageConfig",
"AWS::SageMaker::Image",
"AWS::SageMaker::Model",
"AWS::SageMaker::NotebookInstance",
"AWS::SageMaker::NotebookInstanceLifecycleConfig",
"AWS::SageMaker::EndpointConfig",
"AWS::SageMaker::Workteam",
"AWS::SES::ConfigurationSet",
"AWS::SES::ContactList",
"AWS::SES::Template",
"AWS::SES::ReceiptFilter",
"AWS::SES::ReceiptRuleSet",
"AWS::SNS::Topic",
"AWS::SQS::Queue",
"AWS::S3::Bucket",
"AWS::S3::AccountPublicAccessBlock",
"AWS::S3::MultiRegionAccessPoint",
"AWS::S3::StorageLens",
"AWS::EC2::CustomerGateway",
"AWS::EC2::InternetGateway",
"AWS::EC2::NetworkAcl",
"AWS::EC2::RouteTable",
"AWS::EC2::Subnet",
"AWS::EC2::VPC",
"AWS::EC2::VPNConnection",
"AWS::EC2::VPNGateway",
"AWS::NetworkManager::TransitGatewayRegistration",
"AWS::NetworkManager::Site",
"AWS::NetworkManager::Device",
"AWS::NetworkManager::Link",
"AWS::NetworkManager::GlobalNetwork",
"AWS::WorkSpaces::ConnectionAlias",
"AWS::WorkSpaces::Workspace",
"AWS::Amplify::App",
"AWS::AppConfig::Application",
"AWS::AppConfig::Environment",
"AWS::AppConfig::ConfigurationProfile",
"AWS::AppConfig::DeploymentStrategy",
"AWS::AppRunner::VpcConnector",
"AWS::AppMesh::VirtualNode",
"AWS::AppMesh::VirtualService",
"AWS::AppSync::GraphQLApi",
"AWS::AuditManager::Assessment",
"AWS::AutoScaling::AutoScalingGroup",
"AWS::AutoScaling::LaunchConfiguration",
"AWS::AutoScaling::ScalingPolicy",
"AWS::AutoScaling::ScheduledAction",
"AWS::AutoScaling::WarmPool",
"AWS::Backup::BackupPlan",
"AWS::Backup::BackupSelection",
"AWS::Backup::BackupVault",
"AWS::Backup::RecoveryPoint",
"AWS::Backup::ReportPlan",
"AWS::Backup::BackupPlan",
"AWS::Backup::BackupSelection",
"AWS::Backup::BackupVault",
"AWS::Backup::RecoveryPoint",
"AWS::Batch::JobQueue",
"AWS::Batch::ComputeEnvironment",
"AWS::Budgets::BudgetsAction",
"AWS::ACM::Certificate",
"AWS::CloudFormation::Stack",
"AWS::CloudTrail::Trail",
"AWS::Cloud9::EnvironmentEC2",
"AWS::ServiceDiscovery::Service",
"AWS::ServiceDiscovery::PublicDnsNamespace",
"AWS::ServiceDiscovery::HttpNamespace",
"AWS::CodeArtifact::Repository",
"AWS::CodeBuild::Project",
"AWS::CodeDeploy::Application",
"AWS::CodeDeploy::DeploymentConfig",
"AWS::CodeDeploy::DeploymentGroup",
"AWS::CodePipeline::Pipeline",
"AWS::Config::ResourceCompliance",
"AWS::Config::ConformancePackCompliance",
"AWS::Config::ConfigurationRecorder",
"AWS::Config::ResourceCompliance",
"AWS::Config::ConfigurationRecorder",
"AWS::Config::ConformancePackCompliance",
"AWS::Config::ConfigurationRecorder",
"AWS::DMS::EventSubscription",
"AWS::DMS::ReplicationSubnetGroup",
"AWS::DMS::ReplicationInstance",
"AWS::DMS::ReplicationTask",
"AWS::DMS::Certificate",
"AWS::DataSync::LocationSMB",
"AWS::DataSync::LocationFSxLustre",
"AWS::DataSync::LocationFSxWindows",
"AWS::DataSync::LocationS3",
"AWS::DataSync::LocationEFS",
"AWS::DataSync::LocationNFS",
"AWS::DataSync::LocationHDFS", | "AWS::DataSync::LocationObjectStorage",
"AWS::DataSync::Task", | random_line_split | |
get.go |
// GetLastExecution ... gets the time of the most recent execution of all config services
func (c *CfgSvc) GetLastExecution() (time.Time, error) {
t := time.Time{}
stats, err := c.GetStatus()
if err != nil {
return t, err
}
if len(stats) == 0 {
return t, errors.New("empty config rule evaluation status array")
}
for _, s := range stats {
if t.Before(aws.TimeValue(s.LastSuccessfulEvaluationTime)) {
t = aws.TimeValue(s.LastSuccessfulEvaluationTime)
}
}
return t, nil
}
// GetItems ... gets AWS Config Service Configuration Items from resource history pages
func (c *CfgSvc) GetItems(lastExecution time.Time) (items []*configservice.ConfigurationItem, err error) {
res, err := c.GetDiscoveredResources()
if err != nil {
log.Fatalf("Error getting discovered resources: %v\n", err)
return nil, err
}
for _, r := range res {
var results []*configservice.ConfigurationItem
input := &configservice.GetResourceConfigHistoryInput{
ResourceType: r.ResourceType,
ResourceId: r.ResourceId,
EarlierTime: aws.Time(lastExecution.Add(time.Minute * time.Duration(-window))),
LaterTime: aws.Time(lastExecution.Add(time.Minute * time.Duration(window))),
}
err := c.Client.GetResourceConfigHistoryPages(input,
func(page *configservice.GetResourceConfigHistoryOutput, lastPage bool) bool {
results = append(results, page.ConfigurationItems...)
return !lastPage
})
if err != nil {
log.Fatalf("error getting resource config history (Input: %v):\n%v\n", input, err)
return nil, err
}
items = append(items, results...)
}
sortItemSlices(items)
return items, nil
}
// GetStatus ... performs DescribeConfigRuleEvaluationStatus for all config rules
func (c *CfgSvc) GetStatus() ([]*configservice.ConfigRuleEvaluationStatus, error) {
params := configservice.DescribeConfigRuleEvaluationStatusInput{}
result, err := c.Client.DescribeConfigRuleEvaluationStatus(¶ms)
if err != nil {
return nil, err
}
status := result.ConfigRulesEvaluationStatus
for aws.StringValue(result.NextToken) != "" {
params.NextToken = result.NextToken
result, err = c.Client.DescribeConfigRuleEvaluationStatus(¶ms)
if err != nil {
return nil, err
}
status = append(status, result.ConfigRulesEvaluationStatus...)
}
return status, nil
}
// GetDiscoveredResources ... loops through all specified resourceTypes
// Lists all resources by Type (Will need to loop over all cfg.ResourceType* types)
// https://docs.aws.amazon.com/config/latest/developerguide/resource-config-reference.html#supported-resources
func (c *CfgSvc) GetDiscoveredResources() ([]*configservice.ResourceIdentifier, error) {
// List of resource types pulled from
// github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/api-2.json
var resourceTypes = [...]string{
"AWS::AppStream::DirectoryConfig",
"AWS::AppStream::Application",
"AWS::AppFlow::Flow",
"AWS::ApiGateway::Stage",
"AWS::ApiGateway::RestApi",
"AWS::ApiGatewayV2::Stage",
"AWS::ApiGatewayV2::Api",
"AWS::Athena::WorkGroup",
"AWS::Athena::DataCatalog",
"AWS::CloudFront::Distribution",
"AWS::CloudFront::StreamingDistribution",
"AWS::CloudWatch::Alarm",
"AWS::CloudWatch::MetricStream",
"AWS::RUM::AppMonitor",
"AWS::Evidently::Project",
"AWS::CodeGuruReviewer::RepositoryAssociation",
"AWS::Connect::PhoneNumber",
"AWS::CustomerProfiles::Domain",
"AWS::Detective::Graph",
"AWS::DynamoDB::Table",
"AWS::EC2::Host",
"AWS::EC2::EIP",
"AWS::EC2::Instance",
"AWS::EC2::NetworkInterface",
"AWS::EC2::SecurityGroup",
"AWS::EC2::NatGateway",
"AWS::EC2::EgressOnlyInternetGateway",
"AWS::EC2::EC2Fleet",
"AWS::EC2::SpotFleet",
"AWS::EC2::PrefixList",
"AWS::EC2::FlowLog",
"AWS::EC2::TransitGateway",
"AWS::EC2::TransitGatewayAttachment",
"AWS::EC2::TransitGatewayRouteTable",
"AWS::EC2::VPCEndpoint",
"AWS::EC2::VPCEndpointService",
"AWS::EC2::VPCPeeringConnection",
"AWS::EC2::RegisteredHAInstance",
"AWS::EC2::SubnetRouteTableAssociation",
"AWS::EC2::LaunchTemplate",
"AWS::EC2::NetworkInsightsAccessScopeAnalysis",
"AWS::EC2::TrafficMirrorTarget",
"AWS::EC2::TrafficMirrorSession",
"AWS::EC2::DHCPOptions",
"AWS::EC2::IPAM",
"AWS::EC2::NetworkInsightsPath",
"AWS::EC2::TrafficMirrorFilter",
"AWS::EC2::Volume",
"AWS::ImageBuilder::ImagePipeline",
"AWS::ImageBuilder::DistributionConfiguration",
"AWS::ImageBuilder::InfrastructureConfiguration",
"AWS::ECR::Repository",
"AWS::ECR::RegistryPolicy",
"AWS::ECR::PullThroughCacheRule",
"AWS::ECR::PublicRepository",
"AWS::ECS::Cluster",
"AWS::ECS::TaskDefinition",
"AWS::ECS::Service",
"AWS::ECS::TaskSet",
"AWS::EFS::FileSystem",
"AWS::EFS::AccessPoint",
"AWS::EKS::Cluster",
"AWS::EKS::FargateProfile",
"AWS::EKS::IdentityProviderConfig",
"AWS::EKS::Addon",
"AWS::EMR::SecurityConfiguration",
"AWS::Events::EventBus",
"AWS::Events::ApiDestination",
"AWS::Events::Archive",
"AWS::Events::Endpoint",
"AWS::Events::Connection",
"AWS::Events::Rule",
"AWS::EC2::TrafficMirrorSession",
"AWS::EventSchemas::RegistryPolicy",
"AWS::EventSchemas::Discoverer",
"AWS::EventSchemas::Schema",
"AWS::Forecast::Dataset",
"AWS::FraudDetector::Label",
"AWS::FraudDetector::EntityType",
"AWS::FraudDetector::Variable",
"AWS::FraudDetector::Outcome",
"AWS::GuardDuty::Detector",
"AWS::GuardDuty::ThreatIntelSet",
"AWS::GuardDuty::IPSet",
"AWS::GuardDuty::Filter",
"AWS::HealthLake::FHIRDatastore",
"AWS::Cassandra::Keyspace",
"AWS::IVS::Channel",
"AWS::IVS::RecordingConfiguration",
"AWS::IVS::PlaybackKeyPair",
"AWS::Elasticsearch::Domain",
"AWS::OpenSearch::Domain",
"AWS::Elasticsearch::Domain",
"AWS::Pinpoint::ApplicationSettings",
"AWS::Pinpoint::Segment",
"AWS::Pinpoint::App",
"AWS::Pinpoint::Campaign",
"AWS::Pinpoint::InAppTemplate",
"AWS::QLDB::Ledger",
"AWS::Kinesis::Stream",
"AWS::Kinesis::StreamConsumer",
"AWS::KinesisAnalyticsV2::Application",
"AWS::KinesisFirehose::DeliveryStream",
"AWS::KinesisVideo::SignalingChannel",
"AWS::Lex::BotAlias",
"AWS::Lex::Bot",
"AWS::Lightsail::Disk",
"AWS::Lightsail::Certificate",
"AWS::Lightsail::Bucket",
"AWS::Lightsail::StaticIp",
"AWS::LookoutMetrics::Alert",
"AWS::LookoutVision::Project",
"AWS::AmazonMQ::Broker",
"A | {
for _, v := range i {
e := v.RelatedEvents
r := v.Relationships
sort.SliceStable(e, func(i, j int) bool {
return sliceSorter(e[i], e[j])
})
sort.SliceStable(r, func(i, j int) bool {
return sliceSorter(r[i], r[j])
})
v.RelatedEvents = e
v.Relationships = r
}
} | identifier_body | |
get.go | essment",
"AWS::AutoScaling::AutoScalingGroup",
"AWS::AutoScaling::LaunchConfiguration",
"AWS::AutoScaling::ScalingPolicy",
"AWS::AutoScaling::ScheduledAction",
"AWS::AutoScaling::WarmPool",
"AWS::Backup::BackupPlan",
"AWS::Backup::BackupSelection",
"AWS::Backup::BackupVault",
"AWS::Backup::RecoveryPoint",
"AWS::Backup::ReportPlan",
"AWS::Backup::BackupPlan",
"AWS::Backup::BackupSelection",
"AWS::Backup::BackupVault",
"AWS::Backup::RecoveryPoint",
"AWS::Batch::JobQueue",
"AWS::Batch::ComputeEnvironment",
"AWS::Budgets::BudgetsAction",
"AWS::ACM::Certificate",
"AWS::CloudFormation::Stack",
"AWS::CloudTrail::Trail",
"AWS::Cloud9::EnvironmentEC2",
"AWS::ServiceDiscovery::Service",
"AWS::ServiceDiscovery::PublicDnsNamespace",
"AWS::ServiceDiscovery::HttpNamespace",
"AWS::CodeArtifact::Repository",
"AWS::CodeBuild::Project",
"AWS::CodeDeploy::Application",
"AWS::CodeDeploy::DeploymentConfig",
"AWS::CodeDeploy::DeploymentGroup",
"AWS::CodePipeline::Pipeline",
"AWS::Config::ResourceCompliance",
"AWS::Config::ConformancePackCompliance",
"AWS::Config::ConfigurationRecorder",
"AWS::Config::ResourceCompliance",
"AWS::Config::ConfigurationRecorder",
"AWS::Config::ConformancePackCompliance",
"AWS::Config::ConfigurationRecorder",
"AWS::DMS::EventSubscription",
"AWS::DMS::ReplicationSubnetGroup",
"AWS::DMS::ReplicationInstance",
"AWS::DMS::ReplicationTask",
"AWS::DMS::Certificate",
"AWS::DataSync::LocationSMB",
"AWS::DataSync::LocationFSxLustre",
"AWS::DataSync::LocationFSxWindows",
"AWS::DataSync::LocationS3",
"AWS::DataSync::LocationEFS",
"AWS::DataSync::LocationNFS",
"AWS::DataSync::LocationHDFS",
"AWS::DataSync::LocationObjectStorage",
"AWS::DataSync::Task",
"AWS::DeviceFarm::TestGridProject",
"AWS::DeviceFarm::InstanceProfile",
"AWS::DeviceFarm::Project",
"AWS::ElasticBeanstalk::Application",
"AWS::ElasticBeanstalk::ApplicationVersion",
"AWS::ElasticBeanstalk::Environment",
"AWS::FIS::ExperimentTemplate",
"AWS::GlobalAccelerator::Listener",
"AWS::GlobalAccelerator::EndpointGroup",
"AWS::GlobalAccelerator::Accelerator",
"AWS::Glue::Job",
"AWS::Glue::Classifier",
"AWS::Glue::MLTransform",
"AWS::GroundStation::Config",
"AWS::IAM::User",
"AWS::IAM::SAMLProvider",
"AWS::IAM::ServerCertificate",
"AWS::IAM::Group",
"AWS::IAM::Role",
"AWS::IAM::Policy",
"AWS::AccessAnalyzer::Analyzer",
"AWS::IoT::Authorizer",
"AWS::IoT::SecurityProfile",
"AWS::IoT::RoleAlias",
"AWS::IoT::Dimension",
"AWS::IoT::Policy",
"AWS::IoT::MitigationAction",
"AWS::IoT::ScheduledAudit",
"AWS::IoT::AccountAuditConfiguration",
"AWS::IoTSiteWise::Gateway",
"AWS::IoT::CustomMetric",
"AWS::IoTWireless::ServiceProfile",
"AWS::IoT::FleetMetric",
"AWS::IoTAnalytics::Datastore",
"AWS::IoTAnalytics::Dataset",
"AWS::IoTAnalytics::Pipeline",
"AWS::IoTAnalytics::Channel",
"AWS::IoTEvents::Input",
"AWS::IoTEvents::DetectorModel",
"AWS::IoTEvents::AlarmModel",
"AWS::IoTTwinMaker::Workspace",
"AWS::IoTTwinMaker::Entity",
"AWS::IoTTwinMaker::Scene",
"AWS::IoTSiteWise::Dashboard",
"AWS::IoTSiteWise::Project",
"AWS::IoTSiteWise::Portal",
"AWS::IoTSiteWise::AssetModel",
"AWS::KMS::Key",
"AWS::KMS::Alias",
"AWS::Lambda::Function",
"AWS::Lambda::Alias",
"AWS::NetworkFirewall::Firewall",
"AWS::NetworkFirewall::FirewallPolicy",
"AWS::NetworkFirewall::RuleGroup",
"AWS::NetworkFirewall::TLSInspectionConfiguration",
"AWS:Panorama::Package",
"AWS::ResilienceHub::ResiliencyPolicy",
"AWS::RoboMaker::RobotApplicationVersion",
"AWS::RoboMaker::RobotApplication",
"AWS::RoboMaker::SimulationApplication",
"AWS::Signer::SigningProfile",
"AWS::SecretsManager::Secret",
"AWS::ServiceCatalog::CloudFormationProduct",
"AWS::ServiceCatalog::CloudFormationProvisionedProduct",
"AWS::ServiceCatalog::Portfolio",
"AWS::Shield::Protection",
"AWS::ShieldRegional::Protection",
"AWS::StepFunctions::Activity",
"AWS::StepFunctions::StateMachine",
"AWS::SSM::ManagedInstanceInventory",
"AWS::SSM::PatchCompliance",
"AWS::SSM::AssociationCompliance",
"AWS::SSM::FileData",
"AWS::Transfer::Agreement",
"AWS::Transfer::Connector",
"AWS::Transfer::Workflow",
"AWS::WAF::RateBasedRule",
"AWS::WAF::Rule",
"AWS::WAF::WebACL",
"AWS::WAF::RuleGroup",
"AWS::WAFRegional::RateBasedRule",
"AWS::WAFRegional::Rule",
"AWS::WAFRegional::WebACL",
"AWS::WAFRegional::RuleGroup",
"AWS::WAFv2::WebACL",
"AWS::WAFv2::RuleGroup",
"AWS::WAFv2::ManagedRuleSet",
"AWS::WAFv2::IPSet",
"AWS::WAFv2::RegexPatternSet",
"AWS::XRay::EncryptionConfig",
"AWS::ElasticLoadBalancingV2::LoadBalancer",
"AWS::ElasticLoadBalancingV2::Listener",
"AWS::ElasticLoadBalancing::LoadBalancer",
"AWS::ElasticLoadBalancingV2::LoadBalancer",
"AWS::MediaPackage::PackagingGroup",
"AWS::MediaPackage::PackagingConfiguration",
}
// nolint: prealloc
var res []*configservice.ResourceIdentifier
for _, t := range &resourceTypes {
t := t
input := &configservice.ListDiscoveredResourcesInput{
ResourceType: aws.String(t),
}
result, err := c.Client.ListDiscoveredResources(input)
if err != nil {
log.Fatalf("Error ListDiscoveredResources (ResourceType: %s): %v\n", t, err)
return nil, err
}
res = append(res, result.ResourceIdentifiers...)
for aws.StringValue(result.NextToken) != "" {
input.NextToken = result.NextToken
result, err = c.Client.ListDiscoveredResources(input)
if err != nil {
log.Fatalf("Error ListDiscoveredResources (Input: %v): %v\n", input, err)
return nil, err
}
res = append(res, result.ResourceIdentifiers...)
}
}
return res, nil
}
// getSnapshotOfItem ... finds ConfigurationItem in Snaphot with matching ResourceId and ResourceType
func getSnapshotOfItem(item map[string]interface{}, snapshots []map[string]interface{}) map[string]interface{} {
id := item["ResourceId"]
resType := item["ResourceType"]
for _, s := range snapshots {
m := s
if id == m["ResourceId"].(string) && resType == m["ResourceType"].(string) {
return m
}
}
return nil
}
// getPreviousSnapshot ... gets the name of the config snapshot bucket object
// created prior to the lastExecution time
// Assumes snapshots are taken every three hours - gets snapshot older than
// lastExecution time but less than three hours before lastExecution time
func | getPreviousSnapshot | identifier_name | |
Server.py | :
# process the login of the client
self.process_login(msg)
else:
# process command
if self.process_command(msg):
# if the client exited, break the loop
break
except socket.error:
pass
# close the connection
self.connection.close()
# processes login command
def process_login(self, msg):
# if it is a login message ('login|username|password')
if msg.startswith(Constants.MSG_LOGIN):
# extract username and password
cmd, user, password = msg.split('|')
print 'login:', user, password
# verify the username and password
if self.server.verify_user(user, password):
if not self.server.is_online(user):
# if the user is not online
# record his login time
self.server.logins[user] = time.time()
self.user = user
self.login = True
# send back a success message
self.connection.send(Constants.MSG_SUCCESS)
# send offline messages to him
self.server.send_offline_messages(user, self.connection)
# tell other clients
self.server.broadcast('server', user + ' login', user)
else:
# if the user is already online, send back the message
self.connection.send(Constants.MSG_USER_ALREADY_LOGINED)
else:
# increment the failed times
self.failed_login_attempts += 1
# if it exceeds the maximum retry times,
if self.failed_login_attempts >= Constants.MAX_LOGIN_ATTEMPTS:
# tell the client
self.connection.send(Constants.MSG_LOGIN_EXCEED_MAX_TIMES)
# block the ip
self.server.block_client(self.address)
# disconnect the client
self.server.disconnect(self.address)
return True
else:
# send back a failed message
self.connection.send(Constants.MSG_FAILED)
else:
# send back a failed message
self.connection.send(Constants.MSG_FAILED)
return False
# processes the command
def process_command(self, msg):
exited = False
if msg == Constants.MSG_EXIT:
# client exits
exited = True
elif msg == Constants.MSG_WHO_ELSE:
# send back who else
self.connection.send('[who else] ' + ', '.join(self.server.who_else(self.address)))
elif msg == Constants.MSG_WHO_LAST_HOUR:
# send back who logined in the last hour
self.connection.send('[who last hour] ' + ', '.join(self.server.who_last_hour()))
elif msg.startswith(Constants.MSG_BROADCAST):
# extract the message
cmd, msg = msg.split('|', 1)
# broadcast the message
self.server.broadcast(self.user, msg)
elif msg.startswith(Constants.MSG_MESSAGE):
# extract the target user and message
cmd, user, msg = msg.split('|', 2)
# send message to the target user
if not self.server.message(self.user, user, msg):
if user in self.server.passwords:
self.connection.send(user + ' is offline now, and will see the message when login.')
else:
self.connection.send(user + ' doesn\'t exist.')
elif msg == Constants.MSG_LOGOUT:
# if the user want to logout, tell the other clients
self.server.broadcast('server', self.user + ' logout')
# disconnect
self.server.disconnect(self.address)
exited = True
return exited
# Server class
class Server:
# constructor
def __init__(self, port): | self.port = port
# {client address -> client threads}
self.clients = {}
# {username -> password}
self.passwords = {}
# {username -> last login time}
self.logins = {}
# {ip -> blocked time}
self.blocked_ips = {}
# {username -> [messages]}
self.offline_messages = {}
# starts the server
def start(self):
# load the password file, exit if failed.
if not self.load_passwords():
return
# start a thread to check the timeout for inactive clients.
t = threading.Thread(target=self.check_inactive_user)
t.setDaemon(True)
t.start()
# create a server socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# force to reuse the address
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# bind the address
s.bind(('127.0.0.1', self.port))
# listen to at most 10 clients
s.listen(10)
try:
# server loop
while True:
# wait for the connection of client
connection, address = s.accept()
# if a client connects, check its blocked time
block_t = self.remaining_block_time(address)
if block_t == 0:
# if no blocked time left, send an accept message to it
connection.send(Constants.MEG_ACCEPTED)
# start a thread for it
self.clients[address] = ServerThread(self, address, connection)
self.clients[address].start()
else:
# if the client is blocked, send back its remaining blocked seconds.
connection.send('|'.join([Constants.MSG_BLOCKED, str(block_t)]))
# wait for acknowledge
connection.recv(Constants.MAX_MSG_LENGTH)
# close the connection
connection.close()
except KeyboardInterrupt:
# press ctrl-c to stop the server.
self.stop_server()
# stop the server
def stop_server(self):
print 'Stop server...'
# disconnect all the clients
for address in self.clients.keys():
self.disconnect(address)
# disconnect a client
def disconnect(self, address):
# if the address is present,
if address in self.clients:
# get the client thread
t = self.clients[address]
if t.user != '':
print 'logout:', t.user
try:
# send an exit message
t.connection.send(Constants.MSG_EXIT)
# close the connection
t.connection.close()
except socket.error:
pass
# remove its thread
del self.clients[address]
# returns the remaining blocked time of the client address
def remaining_block_time(self, address):
# get the ip from the address
ip = address[0]
# if it is not in the blocked dict, return 0
if ip not in self.blocked_ips:
return 0
current_time = time.time()
block_time = self.blocked_ips[ip]
if current_time - block_time > Constants.BLOCK_TIME:
# if the difference exceeds the block time, return 0
return 0
else:
# otherwise return the remaining blocked time
return Constants.BLOCK_TIME - (current_time - block_time)
# blocks the ip of the client
def block_client(self, address):
# add the ip and blocked time to the blocked dict
self.blocked_ips[address[0]] = time.time()
# loads usernames and passwords from the password file
# return True if success or False otherwise.
def load_passwords(self):
print 'load users'
try:
# open the file
f = open(Constants.PASSWORD_FILE)
# for each line in the file
for line in f:
# remove leading and trailing spaces
line = line.strip()
# if the line contains exactly one space
if line.count(' ') == 1:
# extract the username and password
user, pwd = line.split(' ')
# add them to the password dict
self.passwords[user] = pwd
# close the file
f.close()
return True
except IOError:
print '[Error] user_pass.txt is missing.'
return False
# returns True iff the username and password are correct.
def verify_user(self, user, password):
return user in self.passwords and self.passwords[user] == password
# returns a list of online users excluding the current user
def who_else(self, current_address):
# create an empty list
users = []
# for each address of online clients
for address in self.clients:
# if it is not the address of the current client
if address != current_address:
# add its username to the list
users.append(self.clients[address].user)
return users
# returns a list of users who logined in the last hour
def who_last_hour(self):
# get the current time
current_time = time.time()
# for each user logined, if its last login time is in the last hour,
# add it to the list.
return [user for user in self.logins
if current_time - self.logins[user] <=
Constants.SEC_PER_MIN * Constants.LAST_HOUR]
# sends a message the a specified user.
# returns True iff the user is online.
def message(self, from_user, to_user, msg):
found = False
# add a message header
msg = '[' + from_user + ']: ' + msg
# for each online client
for address in self.clients:
t = self.clients[address]
# if the target user is found, send the message to him.
if t.user == to_user:
t.connection.send(msg)
found = True
if not found:
# if the user is not present, add the message to the offline messages
if to_user not in self.offline_messages | # server port | random_line_split |
Server.py | # process the login of the client
self.process_login(msg)
else:
# process command
if self.process_command(msg):
# if the client exited, break the loop
break
except socket.error:
pass
# close the connection
self.connection.close()
# processes login command
def process_login(self, msg):
# if it is a login message ('login|username|password')
if msg.startswith(Constants.MSG_LOGIN):
# extract username and password
cmd, user, password = msg.split('|')
print 'login:', user, password
# verify the username and password
if self.server.verify_user(user, password):
if not self.server.is_online(user):
# if the user is not online
# record his login time
self.server.logins[user] = time.time()
self.user = user
self.login = True
# send back a success message
self.connection.send(Constants.MSG_SUCCESS)
# send offline messages to him
self.server.send_offline_messages(user, self.connection)
# tell other clients
self.server.broadcast('server', user + ' login', user)
else:
# if the user is already online, send back the message
self.connection.send(Constants.MSG_USER_ALREADY_LOGINED)
else:
# increment the failed times
self.failed_login_attempts += 1
# if it exceeds the maximum retry times,
if self.failed_login_attempts >= Constants.MAX_LOGIN_ATTEMPTS:
# tell the client
self.connection.send(Constants.MSG_LOGIN_EXCEED_MAX_TIMES)
# block the ip
self.server.block_client(self.address)
# disconnect the client
self.server.disconnect(self.address)
return True
else:
# send back a failed message
self.connection.send(Constants.MSG_FAILED)
else:
# send back a failed message
self.connection.send(Constants.MSG_FAILED)
return False
# processes the command
def process_command(self, msg):
exited = False
if msg == Constants.MSG_EXIT:
# client exits
exited = True
elif msg == Constants.MSG_WHO_ELSE:
# send back who else
self.connection.send('[who else] ' + ', '.join(self.server.who_else(self.address)))
elif msg == Constants.MSG_WHO_LAST_HOUR:
# send back who logined in the last hour
self.connection.send('[who last hour] ' + ', '.join(self.server.who_last_hour()))
elif msg.startswith(Constants.MSG_BROADCAST):
# extract the message
cmd, msg = msg.split('|', 1)
# broadcast the message
self.server.broadcast(self.user, msg)
elif msg.startswith(Constants.MSG_MESSAGE):
# extract the target user and message
cmd, user, msg = msg.split('|', 2)
# send message to the target user
if not self.server.message(self.user, user, msg):
if user in self.server.passwords:
self.connection.send(user + ' is offline now, and will see the message when login.')
else:
self.connection.send(user + ' doesn\'t exist.')
elif msg == Constants.MSG_LOGOUT:
# if the user want to logout, tell the other clients
self.server.broadcast('server', self.user + ' logout')
# disconnect
self.server.disconnect(self.address)
exited = True
return exited
# Server class
class Server:
# constructor
def __init__(self, port):
# server port
self.port = port
# {client address -> client threads}
self.clients = {}
# {username -> password}
self.passwords = {}
# {username -> last login time}
self.logins = {}
# {ip -> blocked time}
self.blocked_ips = {}
# {username -> [messages]}
self.offline_messages = {}
# starts the server
def start(self):
# load the password file, exit if failed.
if not self.load_passwords():
return
# start a thread to check the timeout for inactive clients.
t = threading.Thread(target=self.check_inactive_user)
t.setDaemon(True)
t.start()
# create a server socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# force to reuse the address
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# bind the address
s.bind(('127.0.0.1', self.port))
# listen to at most 10 clients
s.listen(10)
try:
# server loop
while True:
# wait for the connection of client
connection, address = s.accept()
# if a client connects, check its blocked time
block_t = self.remaining_block_time(address)
if block_t == 0:
# if no blocked time left, send an accept message to it
connection.send(Constants.MEG_ACCEPTED)
# start a thread for it
self.clients[address] = ServerThread(self, address, connection)
self.clients[address].start()
else:
# if the client is blocked, send back its remaining blocked seconds.
connection.send('|'.join([Constants.MSG_BLOCKED, str(block_t)]))
# wait for acknowledge
connection.recv(Constants.MAX_MSG_LENGTH)
# close the connection
connection.close()
except KeyboardInterrupt:
# press ctrl-c to stop the server.
self.stop_server()
# stop the server
def stop_server(self):
print 'Stop server...'
# disconnect all the clients
for address in self.clients.keys():
self.disconnect(address)
# disconnect a client
def disconnect(self, address):
# if the address is present,
if address in self.clients:
# get the client thread
t = self.clients[address]
if t.user != '':
print 'logout:', t.user
try:
# send an exit message
t.connection.send(Constants.MSG_EXIT)
# close the connection
t.connection.close()
except socket.error:
pass
# remove its thread
del self.clients[address]
# returns the remaining blocked time of the client address
def remaining_block_time(self, address):
# get the ip from the address
ip = address[0]
# if it is not in the blocked dict, return 0
if ip not in self.blocked_ips:
return 0
current_time = time.time()
block_time = self.blocked_ips[ip]
if current_time - block_time > Constants.BLOCK_TIME:
# if the difference exceeds the block time, return 0
return 0
else:
# otherwise return the remaining blocked time
return Constants.BLOCK_TIME - (current_time - block_time)
# blocks the ip of the client
def block_client(self, address):
# add the ip and blocked time to the blocked dict
|
# loads usernames and passwords from the password file
# return True if success or False otherwise.
def load_passwords(self):
print 'load users'
try:
# open the file
f = open(Constants.PASSWORD_FILE)
# for each line in the file
for line in f:
# remove leading and trailing spaces
line = line.strip()
# if the line contains exactly one space
if line.count(' ') == 1:
# extract the username and password
user, pwd = line.split(' ')
# add them to the password dict
self.passwords[user] = pwd
# close the file
f.close()
return True
except IOError:
print '[Error] user_pass.txt is missing.'
return False
# returns True iff the username and password are correct.
def verify_user(self, user, password):
return user in self.passwords and self.passwords[user] == password
# returns a list of online users excluding the current user
def who_else(self, current_address):
# create an empty list
users = []
# for each address of online clients
for address in self.clients:
# if it is not the address of the current client
if address != current_address:
# add its username to the list
users.append(self.clients[address].user)
return users
# returns a list of users who logined in the last hour
def who_last_hour(self):
# get the current time
current_time = time.time()
# for each user logined, if its last login time is in the last hour,
# add it to the list.
return [user for user in self.logins
if current_time - self.logins[user] <=
Constants.SEC_PER_MIN * Constants.LAST_HOUR]
# sends a message the a specified user.
# returns True iff the user is online.
def message(self, from_user, to_user, msg):
found = False
# add a message header
msg = '[' + from_user + ']: ' + msg
# for each online client
for address in self.clients:
t = self.clients[address]
# if the target user is found, send the message to him.
if t.user == to_user:
t.connection.send(msg)
found = True
if not found:
# if the user is not present, add the message to the offline messages
if to_user not in self.offline | self.blocked_ips[address[0]] = time.time() | identifier_body |
Server.py | # process the login of the client
self.process_login(msg)
else:
# process command
if self.process_command(msg):
# if the client exited, break the loop
break
except socket.error:
pass
# close the connection
self.connection.close()
# processes login command
def process_login(self, msg):
# if it is a login message ('login|username|password')
if msg.startswith(Constants.MSG_LOGIN):
# extract username and password
cmd, user, password = msg.split('|')
print 'login:', user, password
# verify the username and password
if self.server.verify_user(user, password):
if not self.server.is_online(user):
# if the user is not online
# record his login time
self.server.logins[user] = time.time()
self.user = user
self.login = True
# send back a success message
self.connection.send(Constants.MSG_SUCCESS)
# send offline messages to him
self.server.send_offline_messages(user, self.connection)
# tell other clients
self.server.broadcast('server', user + ' login', user)
else:
# if the user is already online, send back the message
|
else:
# increment the failed times
self.failed_login_attempts += 1
# if it exceeds the maximum retry times,
if self.failed_login_attempts >= Constants.MAX_LOGIN_ATTEMPTS:
# tell the client
self.connection.send(Constants.MSG_LOGIN_EXCEED_MAX_TIMES)
# block the ip
self.server.block_client(self.address)
# disconnect the client
self.server.disconnect(self.address)
return True
else:
# send back a failed message
self.connection.send(Constants.MSG_FAILED)
else:
# send back a failed message
self.connection.send(Constants.MSG_FAILED)
return False
# processes the command
def process_command(self, msg):
exited = False
if msg == Constants.MSG_EXIT:
# client exits
exited = True
elif msg == Constants.MSG_WHO_ELSE:
# send back who else
self.connection.send('[who else] ' + ', '.join(self.server.who_else(self.address)))
elif msg == Constants.MSG_WHO_LAST_HOUR:
# send back who logined in the last hour
self.connection.send('[who last hour] ' + ', '.join(self.server.who_last_hour()))
elif msg.startswith(Constants.MSG_BROADCAST):
# extract the message
cmd, msg = msg.split('|', 1)
# broadcast the message
self.server.broadcast(self.user, msg)
elif msg.startswith(Constants.MSG_MESSAGE):
# extract the target user and message
cmd, user, msg = msg.split('|', 2)
# send message to the target user
if not self.server.message(self.user, user, msg):
if user in self.server.passwords:
self.connection.send(user + ' is offline now, and will see the message when login.')
else:
self.connection.send(user + ' doesn\'t exist.')
elif msg == Constants.MSG_LOGOUT:
# if the user want to logout, tell the other clients
self.server.broadcast('server', self.user + ' logout')
# disconnect
self.server.disconnect(self.address)
exited = True
return exited
# Server class
class Server:
# constructor
def __init__(self, port):
# server port
self.port = port
# {client address -> client threads}
self.clients = {}
# {username -> password}
self.passwords = {}
# {username -> last login time}
self.logins = {}
# {ip -> blocked time}
self.blocked_ips = {}
# {username -> [messages]}
self.offline_messages = {}
# starts the server
def start(self):
# load the password file, exit if failed.
if not self.load_passwords():
return
# start a thread to check the timeout for inactive clients.
t = threading.Thread(target=self.check_inactive_user)
t.setDaemon(True)
t.start()
# create a server socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# force to reuse the address
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# bind the address
s.bind(('127.0.0.1', self.port))
# listen to at most 10 clients
s.listen(10)
try:
# server loop
while True:
# wait for the connection of client
connection, address = s.accept()
# if a client connects, check its blocked time
block_t = self.remaining_block_time(address)
if block_t == 0:
# if no blocked time left, send an accept message to it
connection.send(Constants.MEG_ACCEPTED)
# start a thread for it
self.clients[address] = ServerThread(self, address, connection)
self.clients[address].start()
else:
# if the client is blocked, send back its remaining blocked seconds.
connection.send('|'.join([Constants.MSG_BLOCKED, str(block_t)]))
# wait for acknowledge
connection.recv(Constants.MAX_MSG_LENGTH)
# close the connection
connection.close()
except KeyboardInterrupt:
# press ctrl-c to stop the server.
self.stop_server()
# stop the server
def stop_server(self):
print 'Stop server...'
# disconnect all the clients
for address in self.clients.keys():
self.disconnect(address)
# disconnect a client
def disconnect(self, address):
# if the address is present,
if address in self.clients:
# get the client thread
t = self.clients[address]
if t.user != '':
print 'logout:', t.user
try:
# send an exit message
t.connection.send(Constants.MSG_EXIT)
# close the connection
t.connection.close()
except socket.error:
pass
# remove its thread
del self.clients[address]
# returns the remaining blocked time of the client address
def remaining_block_time(self, address):
# get the ip from the address
ip = address[0]
# if it is not in the blocked dict, return 0
if ip not in self.blocked_ips:
return 0
current_time = time.time()
block_time = self.blocked_ips[ip]
if current_time - block_time > Constants.BLOCK_TIME:
# if the difference exceeds the block time, return 0
return 0
else:
# otherwise return the remaining blocked time
return Constants.BLOCK_TIME - (current_time - block_time)
# blocks the ip of the client
def block_client(self, address):
# add the ip and blocked time to the blocked dict
self.blocked_ips[address[0]] = time.time()
# loads usernames and passwords from the password file
# return True if success or False otherwise.
def load_passwords(self):
print 'load users'
try:
# open the file
f = open(Constants.PASSWORD_FILE)
# for each line in the file
for line in f:
# remove leading and trailing spaces
line = line.strip()
# if the line contains exactly one space
if line.count(' ') == 1:
# extract the username and password
user, pwd = line.split(' ')
# add them to the password dict
self.passwords[user] = pwd
# close the file
f.close()
return True
except IOError:
print '[Error] user_pass.txt is missing.'
return False
# returns True iff the username and password are correct.
def verify_user(self, user, password):
return user in self.passwords and self.passwords[user] == password
# returns a list of online users excluding the current user
def who_else(self, current_address):
# create an empty list
users = []
# for each address of online clients
for address in self.clients:
# if it is not the address of the current client
if address != current_address:
# add its username to the list
users.append(self.clients[address].user)
return users
# returns a list of users who logined in the last hour
def who_last_hour(self):
# get the current time
current_time = time.time()
# for each user logined, if its last login time is in the last hour,
# add it to the list.
return [user for user in self.logins
if current_time - self.logins[user] <=
Constants.SEC_PER_MIN * Constants.LAST_HOUR]
# sends a message the a specified user.
# returns True iff the user is online.
def message(self, from_user, to_user, msg):
found = False
# add a message header
msg = '[' + from_user + ']: ' + msg
# for each online client
for address in self.clients:
t = self.clients[address]
# if the target user is found, send the message to him.
if t.user == to_user:
t.connection.send(msg)
found = True
if not found:
# if the user is not present, add the message to the offline messages
if to_user not in self.offline | self.connection.send(Constants.MSG_USER_ALREADY_LOGINED) | conditional_block |
Server.py | # process the login of the client
self.process_login(msg)
else:
# process command
if self.process_command(msg):
# if the client exited, break the loop
break
except socket.error:
pass
# close the connection
self.connection.close()
# processes login command
def process_login(self, msg):
# if it is a login message ('login|username|password')
if msg.startswith(Constants.MSG_LOGIN):
# extract username and password
cmd, user, password = msg.split('|')
print 'login:', user, password
# verify the username and password
if self.server.verify_user(user, password):
if not self.server.is_online(user):
# if the user is not online
# record his login time
self.server.logins[user] = time.time()
self.user = user
self.login = True
# send back a success message
self.connection.send(Constants.MSG_SUCCESS)
# send offline messages to him
self.server.send_offline_messages(user, self.connection)
# tell other clients
self.server.broadcast('server', user + ' login', user)
else:
# if the user is already online, send back the message
self.connection.send(Constants.MSG_USER_ALREADY_LOGINED)
else:
# increment the failed times
self.failed_login_attempts += 1
# if it exceeds the maximum retry times,
if self.failed_login_attempts >= Constants.MAX_LOGIN_ATTEMPTS:
# tell the client
self.connection.send(Constants.MSG_LOGIN_EXCEED_MAX_TIMES)
# block the ip
self.server.block_client(self.address)
# disconnect the client
self.server.disconnect(self.address)
return True
else:
# send back a failed message
self.connection.send(Constants.MSG_FAILED)
else:
# send back a failed message
self.connection.send(Constants.MSG_FAILED)
return False
# processes the command
def process_command(self, msg):
exited = False
if msg == Constants.MSG_EXIT:
# client exits
exited = True
elif msg == Constants.MSG_WHO_ELSE:
# send back who else
self.connection.send('[who else] ' + ', '.join(self.server.who_else(self.address)))
elif msg == Constants.MSG_WHO_LAST_HOUR:
# send back who logined in the last hour
self.connection.send('[who last hour] ' + ', '.join(self.server.who_last_hour()))
elif msg.startswith(Constants.MSG_BROADCAST):
# extract the message
cmd, msg = msg.split('|', 1)
# broadcast the message
self.server.broadcast(self.user, msg)
elif msg.startswith(Constants.MSG_MESSAGE):
# extract the target user and message
cmd, user, msg = msg.split('|', 2)
# send message to the target user
if not self.server.message(self.user, user, msg):
if user in self.server.passwords:
self.connection.send(user + ' is offline now, and will see the message when login.')
else:
self.connection.send(user + ' doesn\'t exist.')
elif msg == Constants.MSG_LOGOUT:
# if the user want to logout, tell the other clients
self.server.broadcast('server', self.user + ' logout')
# disconnect
self.server.disconnect(self.address)
exited = True
return exited
# Server class
class Server:
# constructor
def __init__(self, port):
# server port
self.port = port
# {client address -> client threads}
self.clients = {}
# {username -> password}
self.passwords = {}
# {username -> last login time}
self.logins = {}
# {ip -> blocked time}
self.blocked_ips = {}
# {username -> [messages]}
self.offline_messages = {}
# starts the server
def start(self):
# load the password file, exit if failed.
if not self.load_passwords():
return
# start a thread to check the timeout for inactive clients.
t = threading.Thread(target=self.check_inactive_user)
t.setDaemon(True)
t.start()
# create a server socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# force to reuse the address
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# bind the address
s.bind(('127.0.0.1', self.port))
# listen to at most 10 clients
s.listen(10)
try:
# server loop
while True:
# wait for the connection of client
connection, address = s.accept()
# if a client connects, check its blocked time
block_t = self.remaining_block_time(address)
if block_t == 0:
# if no blocked time left, send an accept message to it
connection.send(Constants.MEG_ACCEPTED)
# start a thread for it
self.clients[address] = ServerThread(self, address, connection)
self.clients[address].start()
else:
# if the client is blocked, send back its remaining blocked seconds.
connection.send('|'.join([Constants.MSG_BLOCKED, str(block_t)]))
# wait for acknowledge
connection.recv(Constants.MAX_MSG_LENGTH)
# close the connection
connection.close()
except KeyboardInterrupt:
# press ctrl-c to stop the server.
self.stop_server()
# stop the server
def | (self):
print 'Stop server...'
# disconnect all the clients
for address in self.clients.keys():
self.disconnect(address)
# disconnect a client
def disconnect(self, address):
# if the address is present,
if address in self.clients:
# get the client thread
t = self.clients[address]
if t.user != '':
print 'logout:', t.user
try:
# send an exit message
t.connection.send(Constants.MSG_EXIT)
# close the connection
t.connection.close()
except socket.error:
pass
# remove its thread
del self.clients[address]
# returns the remaining blocked time of the client address
def remaining_block_time(self, address):
# get the ip from the address
ip = address[0]
# if it is not in the blocked dict, return 0
if ip not in self.blocked_ips:
return 0
current_time = time.time()
block_time = self.blocked_ips[ip]
if current_time - block_time > Constants.BLOCK_TIME:
# if the difference exceeds the block time, return 0
return 0
else:
# otherwise return the remaining blocked time
return Constants.BLOCK_TIME - (current_time - block_time)
# blocks the ip of the client
def block_client(self, address):
# add the ip and blocked time to the blocked dict
self.blocked_ips[address[0]] = time.time()
# loads usernames and passwords from the password file
# return True if success or False otherwise.
def load_passwords(self):
print 'load users'
try:
# open the file
f = open(Constants.PASSWORD_FILE)
# for each line in the file
for line in f:
# remove leading and trailing spaces
line = line.strip()
# if the line contains exactly one space
if line.count(' ') == 1:
# extract the username and password
user, pwd = line.split(' ')
# add them to the password dict
self.passwords[user] = pwd
# close the file
f.close()
return True
except IOError:
print '[Error] user_pass.txt is missing.'
return False
# returns True iff the username and password are correct.
def verify_user(self, user, password):
return user in self.passwords and self.passwords[user] == password
# returns a list of online users excluding the current user
def who_else(self, current_address):
# create an empty list
users = []
# for each address of online clients
for address in self.clients:
# if it is not the address of the current client
if address != current_address:
# add its username to the list
users.append(self.clients[address].user)
return users
# returns a list of users who logined in the last hour
def who_last_hour(self):
# get the current time
current_time = time.time()
# for each user logined, if its last login time is in the last hour,
# add it to the list.
return [user for user in self.logins
if current_time - self.logins[user] <=
Constants.SEC_PER_MIN * Constants.LAST_HOUR]
# sends a message the a specified user.
# returns True iff the user is online.
def message(self, from_user, to_user, msg):
found = False
# add a message header
msg = '[' + from_user + ']: ' + msg
# for each online client
for address in self.clients:
t = self.clients[address]
# if the target user is found, send the message to him.
if t.user == to_user:
t.connection.send(msg)
found = True
if not found:
# if the user is not present, add the message to the offline messages
if to_user not in self.offline | stop_server | identifier_name |
train_stage1.py | cluded.type(torch.cuda.FloatTensor), requires_grad=False).cuda()
img = Variable(img.type(torch.cuda.FloatTensor), requires_grad=False).cuda()
lmk = Variable(lmk.type(torch.cuda.FloatTensor), requires_grad=False).cuda()
flag = Variable(flag.type(torch.cuda.FloatTensor), requires_grad=False).cuda()
coef = models['3D'].regress_3dmm(occluded[:,[2,1,0],...])
rendered, landmark, reg_loss, gamma_loss = models['3D'].reconstruct(coef)
rendered = rendered.permute(0,3,1,2).contiguous()[:,[2,1,0],:,:]
# pose_loss = criterions['L1'](angles, ang)
align_loss = torch.sum(torch.sum(torch.square(landmark-lmk), dim=2)*flag*landmark_weight, dim=1) / 68.0
align_loss = torch.sum(align_loss) / lmk.shape[0]
# coefficients regularization 1.7e-3
coef_loss = torch.norm(coef[...,:80]) + 0.1*torch.norm(coef[...,80:144]) + 1.7e-3*torch.norm(coef[...,144:224])
# For skin
parsing_input = F.interpolate((img-mean)/std, (512,512))
parsed = models['Seg'](parsing_input)
parsed = F.interpolate(parsed, (224,224))
parsed = torch.argmax(parsed, dim=1, keepdim=True)
mask = torch.zeros_like(parsed, dtype=torch.float32).cuda()
# skin 1, nose 2, eye_glass 3, r_eye 4, l_eye 5, r_brow 6, l_brow 7, r_ear 8, l_ear 9,
# inner_mouth 10, u_lip 11, l_lip 12, hair 13
indices = ((parsed>=1).type(torch.BoolTensor) & (parsed<=7).type(torch.BoolTensor) & (parsed!=3).type(torch.BoolTensor)) \
| ((parsed>=11).type(torch.BoolTensor) & (parsed<=12).type(torch.BoolTensor))
mask[indices] = 1.0
# Get vector mask
rendered_noise = torch.mean(rendered, dim=1, keepdim=True) > 0.0
vector = torch.zeros_like(rendered_noise, dtype=img.dtype).cuda()
vector[rendered_noise] = 1.0
# Synthesize background
rendered = img*(1.-vector) + rendered*vector
# Perceptual loss
affined_r = F.interpolate(rendered[:,:,15:-40,15:-15], (112,112), mode='bilinear', align_corners=True)
affined_i = F.interpolate(img[:,:,15:-40,15:-15], (112,112), mode='bilinear', align_corners=True)
emb_r = models['face']((affined_r-mean_f)/std_f)
emb_i = models['face']((affined_i-mean_f)/std_f)
id_loss = torch.mean(1. - criterions['Cos'](emb_r, emb_i))
# Reconstruction loss
rec_loss = torch.sum(torch.abs(img - rendered), dim=1)*mask
rec_loss = torch.sum(rec_loss) / torch.sum(mask)
total_loss = coef_loss*1e-4 + rec_loss*0.01 + reg_loss*0.25 + align_loss*0.007 + gamma_loss*10.0 + id_loss*0.15
total_loss.backward()
optimizer.step()
# logging
if torch.distributed.get_rank() == 0:
scheduler.step()
total_iteration = len(train_loader) * epoch + i
logger.log_training(coef_loss.item(), rec_loss.item(), reg_loss.item(), align_loss.item(), id_loss.item(), total_iteration)
if total_iteration % 250 == 0:
rendered_grid = make_grid(rendered, nrow=args.batch_size//2, normalize=True)
lmk = lmk.type(torch.LongTensor)
landmark = landmark.type(torch.LongTensor)
color1 = torch.FloatTensor([1.0,0.0,0.0]).unsqueeze(-1).unsqueeze(-1)
color2 = torch.FloatTensor([0.0,0.0,1.0]).unsqueeze(-1).unsqueeze(-1)
for b in range(img.size(0)):
for l in range(68):
occluded[b, :, lmk[b,l,1]-2:lmk[b,l,1]+2, lmk[b,l,0]-2:lmk[b,l,0]+2] = color1
occluded[b, :, landmark[b,l,1]-2:landmark[b,l,1]+2, landmark[b,l,0]-2:landmark[b,l,0]+2] = color2
input_grid = make_grid(occluded, nrow=args.batch_size//2, normalize=False)
logger.log_train_image(input_grid, rendered_grid, total_iteration)
sys.stdout.write('\r[Epoch %d/%d][Iter %d/%d][Total_iter %d]' % (epoch, args.epochs, i, len(train_loader), total_iteration))
if i!=0 and total_iteration % args.val_iters == 0:
error = validate(models, val_loader, epoch, args)
logger.log_validation(error, epoch)
torch.save(models['3D'].regressor.module.state_dict(), args.save_path+"/reg_it%d_%.4f_stage1.pth" % (total_iteration, error))
def validate(models, val_loader, epoch, args):
with torch.no_grad():
align_error = 0.0
for i, (occluded, lmk) in enumerate(val_loader):
print('\rval %d...' % (i+1), end='')
occluded = Variable(occluded.type(torch.cuda.FloatTensor)).cuda()
lmk = Variable(lmk.type(torch.cuda.FloatTensor)).cuda()
coef = models['3D'].regress_3dmm(occluded[:,[2,1,0],...])
_, landmark = models['3D'].reconstruct(coef, test=True)
align_error += torch.mean(torch.abs(landmark - lmk))
align_error /= len(val_loader)
return align_error
def | (args):
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
print('rank', args.rank)
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.cuda.set_device(args.gpu)
# Load models
estimator3d = Estimator3D(is_cuda=True, batch_size=args.batch_size, model_path=args.checkpoint, test=False, back_white=False, device_id=args.gpu)
estimator3d.regressor.cuda(args.gpu)
parsing_net = BiSeNet(n_classes=19)
parsing_net.cuda(args.gpu)
parsing_net.load_state_dict(torch.load('faceParsing/model_final_diss.pth', map_location='cuda:'+str(args.gpu)))
parsing_net.eval()
face_encoder = IR_SE_50([112,112])
face_encoder.load_state_dict(torch.load('saved_models/face_res_50.pth', map_location='cuda:'+str(args.gpu)))
face_encoder.cuda(args.gpu)
face_encoder.eval()
args.batch_size = int | main | identifier_name |
train_stage1.py | cluded.type(torch.cuda.FloatTensor), requires_grad=False).cuda()
img = Variable(img.type(torch.cuda.FloatTensor), requires_grad=False).cuda()
lmk = Variable(lmk.type(torch.cuda.FloatTensor), requires_grad=False).cuda()
flag = Variable(flag.type(torch.cuda.FloatTensor), requires_grad=False).cuda()
coef = models['3D'].regress_3dmm(occluded[:,[2,1,0],...])
rendered, landmark, reg_loss, gamma_loss = models['3D'].reconstruct(coef)
rendered = rendered.permute(0,3,1,2).contiguous()[:,[2,1,0],:,:]
# pose_loss = criterions['L1'](angles, ang)
align_loss = torch.sum(torch.sum(torch.square(landmark-lmk), dim=2)*flag*landmark_weight, dim=1) / 68.0
align_loss = torch.sum(align_loss) / lmk.shape[0]
# coefficients regularization 1.7e-3
coef_loss = torch.norm(coef[...,:80]) + 0.1*torch.norm(coef[...,80:144]) + 1.7e-3*torch.norm(coef[...,144:224])
# For skin
parsing_input = F.interpolate((img-mean)/std, (512,512))
parsed = models['Seg'](parsing_input)
parsed = F.interpolate(parsed, (224,224))
parsed = torch.argmax(parsed, dim=1, keepdim=True)
mask = torch.zeros_like(parsed, dtype=torch.float32).cuda()
# skin 1, nose 2, eye_glass 3, r_eye 4, l_eye 5, r_brow 6, l_brow 7, r_ear 8, l_ear 9,
# inner_mouth 10, u_lip 11, l_lip 12, hair 13
indices = ((parsed>=1).type(torch.BoolTensor) & (parsed<=7).type(torch.BoolTensor) & (parsed!=3).type(torch.BoolTensor)) \
| ((parsed>=11).type(torch.BoolTensor) & (parsed<=12).type(torch.BoolTensor))
mask[indices] = 1.0
# Get vector mask
rendered_noise = torch.mean(rendered, dim=1, keepdim=True) > 0.0
vector = torch.zeros_like(rendered_noise, dtype=img.dtype).cuda()
vector[rendered_noise] = 1.0
# Synthesize background
rendered = img*(1.-vector) + rendered*vector
# Perceptual loss
affined_r = F.interpolate(rendered[:,:,15:-40,15:-15], (112,112), mode='bilinear', align_corners=True)
affined_i = F.interpolate(img[:,:,15:-40,15:-15], (112,112), mode='bilinear', align_corners=True)
emb_r = models['face']((affined_r-mean_f)/std_f)
emb_i = models['face']((affined_i-mean_f)/std_f)
id_loss = torch.mean(1. - criterions['Cos'](emb_r, emb_i))
# Reconstruction loss
rec_loss = torch.sum(torch.abs(img - rendered), dim=1)*mask
rec_loss = torch.sum(rec_loss) / torch.sum(mask)
total_loss = coef_loss*1e-4 + rec_loss*0.01 + reg_loss*0.25 + align_loss*0.007 + gamma_loss*10.0 + id_loss*0.15
total_loss.backward()
optimizer.step()
# logging
if torch.distributed.get_rank() == 0:
scheduler.step()
total_iteration = len(train_loader) * epoch + i
logger.log_training(coef_loss.item(), rec_loss.item(), reg_loss.item(), align_loss.item(), id_loss.item(), total_iteration)
if total_iteration % 250 == 0:
rendered_grid = make_grid(rendered, nrow=args.batch_size//2, normalize=True)
lmk = lmk.type(torch.LongTensor)
landmark = landmark.type(torch.LongTensor)
color1 = torch.FloatTensor([1.0,0.0,0.0]).unsqueeze(-1).unsqueeze(-1)
color2 = torch.FloatTensor([0.0,0.0,1.0]).unsqueeze(-1).unsqueeze(-1)
for b in range(img.size(0)):
for l in range(68):
occluded[b, :, lmk[b,l,1]-2:lmk[b,l,1]+2, lmk[b,l,0]-2:lmk[b,l,0]+2] = color1
occluded[b, :, landmark[b,l,1]-2:landmark[b,l,1]+2, landmark[b,l,0]-2:landmark[b,l,0]+2] = color2
input_grid = make_grid(occluded, nrow=args.batch_size//2, normalize=False)
logger.log_train_image(input_grid, rendered_grid, total_iteration)
sys.stdout.write('\r[Epoch %d/%d][Iter %d/%d][Total_iter %d]' % (epoch, args.epochs, i, len(train_loader), total_iteration))
if i!=0 and total_iteration % args.val_iters == 0:
error = validate(models, val_loader, epoch, args)
logger.log_validation(error, epoch)
torch.save(models['3D'].regressor.module.state_dict(), args.save_path+"/reg_it%d_%.4f_stage1.pth" % (total_iteration, error))
def validate(models, val_loader, epoch, args):
with torch.no_grad():
align_error = 0.0
for i, (occluded, lmk) in enumerate(val_loader):
print('\rval %d...' % (i+1), end='')
occluded = Variable(occluded.type(torch.cuda.FloatTensor)).cuda()
lmk = Variable(lmk.type(torch.cuda.FloatTensor)).cuda()
coef = models['3D'].regress_3dmm(occluded[:,[2,1,0],...])
_, landmark = models['3D'].reconstruct(coef, test=True)
align_error += torch.mean(torch.abs(landmark - lmk))
align_error /= len(val_loader)
return align_error
def main(args):
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
| def main_worker(gpu, ngpus_per_node, args):
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
print('rank', args.rank)
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.cuda.set_device(args.gpu)
# Load models
estimator3d = Estimator3D(is_cuda=True, batch_size=args.batch_size, model_path=args.checkpoint, test=False, back_white=False, device_id=args.gpu)
estimator3d.regressor.cuda(args.gpu)
parsing_net = BiSeNet(n_classes=19)
parsing_net.cuda(args.gpu)
parsing_net.load_state_dict(torch.load('faceParsing/model_final_diss.pth', map_location='cuda:'+str(args.gpu)))
parsing_net.eval()
face_encoder = IR_SE_50([112,112])
face_encoder.load_state_dict(torch.load('saved_models/face_res_50.pth', map_location='cuda:'+str(args.gpu)))
face_encoder.cuda(args.gpu)
face_encoder.eval()
args.batch_size = int(args | random_line_split | |
train_stage1.py | Synthesize background
rendered = img*(1.-vector) + rendered*vector
# Perceptual loss
affined_r = F.interpolate(rendered[:,:,15:-40,15:-15], (112,112), mode='bilinear', align_corners=True)
affined_i = F.interpolate(img[:,:,15:-40,15:-15], (112,112), mode='bilinear', align_corners=True)
emb_r = models['face']((affined_r-mean_f)/std_f)
emb_i = models['face']((affined_i-mean_f)/std_f)
id_loss = torch.mean(1. - criterions['Cos'](emb_r, emb_i))
# Reconstruction loss
rec_loss = torch.sum(torch.abs(img - rendered), dim=1)*mask
rec_loss = torch.sum(rec_loss) / torch.sum(mask)
total_loss = coef_loss*1e-4 + rec_loss*0.01 + reg_loss*0.25 + align_loss*0.007 + gamma_loss*10.0 + id_loss*0.15
total_loss.backward()
optimizer.step()
# logging
if torch.distributed.get_rank() == 0:
scheduler.step()
total_iteration = len(train_loader) * epoch + i
logger.log_training(coef_loss.item(), rec_loss.item(), reg_loss.item(), align_loss.item(), id_loss.item(), total_iteration)
if total_iteration % 250 == 0:
rendered_grid = make_grid(rendered, nrow=args.batch_size//2, normalize=True)
lmk = lmk.type(torch.LongTensor)
landmark = landmark.type(torch.LongTensor)
color1 = torch.FloatTensor([1.0,0.0,0.0]).unsqueeze(-1).unsqueeze(-1)
color2 = torch.FloatTensor([0.0,0.0,1.0]).unsqueeze(-1).unsqueeze(-1)
for b in range(img.size(0)):
for l in range(68):
occluded[b, :, lmk[b,l,1]-2:lmk[b,l,1]+2, lmk[b,l,0]-2:lmk[b,l,0]+2] = color1
occluded[b, :, landmark[b,l,1]-2:landmark[b,l,1]+2, landmark[b,l,0]-2:landmark[b,l,0]+2] = color2
input_grid = make_grid(occluded, nrow=args.batch_size//2, normalize=False)
logger.log_train_image(input_grid, rendered_grid, total_iteration)
sys.stdout.write('\r[Epoch %d/%d][Iter %d/%d][Total_iter %d]' % (epoch, args.epochs, i, len(train_loader), total_iteration))
if i!=0 and total_iteration % args.val_iters == 0:
error = validate(models, val_loader, epoch, args)
logger.log_validation(error, epoch)
torch.save(models['3D'].regressor.module.state_dict(), args.save_path+"/reg_it%d_%.4f_stage1.pth" % (total_iteration, error))
def validate(models, val_loader, epoch, args):
with torch.no_grad():
align_error = 0.0
for i, (occluded, lmk) in enumerate(val_loader):
print('\rval %d...' % (i+1), end='')
occluded = Variable(occluded.type(torch.cuda.FloatTensor)).cuda()
lmk = Variable(lmk.type(torch.cuda.FloatTensor)).cuda()
coef = models['3D'].regress_3dmm(occluded[:,[2,1,0],...])
_, landmark = models['3D'].reconstruct(coef, test=True)
align_error += torch.mean(torch.abs(landmark - lmk))
align_error /= len(val_loader)
return align_error
def main(args):
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
print('rank', args.rank)
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.cuda.set_device(args.gpu)
# Load models
estimator3d = Estimator3D(is_cuda=True, batch_size=args.batch_size, model_path=args.checkpoint, test=False, back_white=False, device_id=args.gpu)
estimator3d.regressor.cuda(args.gpu)
parsing_net = BiSeNet(n_classes=19)
parsing_net.cuda(args.gpu)
parsing_net.load_state_dict(torch.load('faceParsing/model_final_diss.pth', map_location='cuda:'+str(args.gpu)))
parsing_net.eval()
face_encoder = IR_SE_50([112,112])
face_encoder.load_state_dict(torch.load('saved_models/face_res_50.pth', map_location='cuda:'+str(args.gpu)))
face_encoder.cuda(args.gpu)
face_encoder.eval()
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int(args.workers / ngpus_per_node)
estimator3d.regressor = DDP(estimator3d.regressor, device_ids=[args.gpu], broadcast_buffers=False, find_unused_parameters=True)
parsing_net = DDP(parsing_net, device_ids=[args.gpu], broadcast_buffers=False, find_unused_parameters=True)
face_encoder = DDP(face_encoder, device_ids=[args.gpu], broadcast_buffers=False, find_unused_parameters=True)
models = {}
models['3D'] = estimator3d
models['Seg'] = parsing_net
models['face'] = face_encoder
# Losses
criterions = {}
criterions['L2'] = torch.nn.MSELoss().cuda(args.gpu)
criterions['L1'] = torch.nn.L1Loss().cuda(args.gpu)
criterions['Cos'] = torch.nn.CosineSimilarity().cuda(args.gpu)
cudnn.benchmark = True
dataset = FirstStageDataset(occ_path=args.train_data_path + '/occluded', \
img_path=args.train_data_path + '/ori_img', \
lmk_path=args.train_data_path + '/landmarks')
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
else:
train_sampler = None
train_loader = DataLoader(
dataset, batch_size = args.batch_size,
shuffle = (train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True
)
val_dataset = LP_Dataset(args.val_data_path+'/occluded', args.val_data_path+'/landmarks')
val_loader = DataLoader(
val_dataset, batch_size = args.batch_size, shuffle = False,
drop_last=True, num_workers=args.workers, pin_memory=True
)
optimizer = torch.optim.AdamW(estimator3d.regressor.parameters(), lr=args.lr, betas=(0.5,0.999))
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs*(len(train_loader)))
print(len(train_loader))
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
train(models, criterions, optimizer, scheduler, train_loader, val_loader, epoch, args)
if torch.distributed.get_rank() == 0:
| error = validate(models, val_loader, epoch, args)
logger.log_validation(error, epoch)
torch.save(estimator3d.regressor.module.state_dict(), args.save_path+"/reg_ep%d_%.4f_stage1.pth" % (epoch+1, error)) | conditional_block | |
train_stage1.py | cluded.type(torch.cuda.FloatTensor), requires_grad=False).cuda()
img = Variable(img.type(torch.cuda.FloatTensor), requires_grad=False).cuda()
lmk = Variable(lmk.type(torch.cuda.FloatTensor), requires_grad=False).cuda()
flag = Variable(flag.type(torch.cuda.FloatTensor), requires_grad=False).cuda()
coef = models['3D'].regress_3dmm(occluded[:,[2,1,0],...])
rendered, landmark, reg_loss, gamma_loss = models['3D'].reconstruct(coef)
rendered = rendered.permute(0,3,1,2).contiguous()[:,[2,1,0],:,:]
# pose_loss = criterions['L1'](angles, ang)
align_loss = torch.sum(torch.sum(torch.square(landmark-lmk), dim=2)*flag*landmark_weight, dim=1) / 68.0
align_loss = torch.sum(align_loss) / lmk.shape[0]
# coefficients regularization 1.7e-3
coef_loss = torch.norm(coef[...,:80]) + 0.1*torch.norm(coef[...,80:144]) + 1.7e-3*torch.norm(coef[...,144:224])
# For skin
parsing_input = F.interpolate((img-mean)/std, (512,512))
parsed = models['Seg'](parsing_input)
parsed = F.interpolate(parsed, (224,224))
parsed = torch.argmax(parsed, dim=1, keepdim=True)
mask = torch.zeros_like(parsed, dtype=torch.float32).cuda()
# skin 1, nose 2, eye_glass 3, r_eye 4, l_eye 5, r_brow 6, l_brow 7, r_ear 8, l_ear 9,
# inner_mouth 10, u_lip 11, l_lip 12, hair 13
indices = ((parsed>=1).type(torch.BoolTensor) & (parsed<=7).type(torch.BoolTensor) & (parsed!=3).type(torch.BoolTensor)) \
| ((parsed>=11).type(torch.BoolTensor) & (parsed<=12).type(torch.BoolTensor))
mask[indices] = 1.0
# Get vector mask
rendered_noise = torch.mean(rendered, dim=1, keepdim=True) > 0.0
vector = torch.zeros_like(rendered_noise, dtype=img.dtype).cuda()
vector[rendered_noise] = 1.0
# Synthesize background
rendered = img*(1.-vector) + rendered*vector
# Perceptual loss
affined_r = F.interpolate(rendered[:,:,15:-40,15:-15], (112,112), mode='bilinear', align_corners=True)
affined_i = F.interpolate(img[:,:,15:-40,15:-15], (112,112), mode='bilinear', align_corners=True)
emb_r = models['face']((affined_r-mean_f)/std_f)
emb_i = models['face']((affined_i-mean_f)/std_f)
id_loss = torch.mean(1. - criterions['Cos'](emb_r, emb_i))
# Reconstruction loss
rec_loss = torch.sum(torch.abs(img - rendered), dim=1)*mask
rec_loss = torch.sum(rec_loss) / torch.sum(mask)
total_loss = coef_loss*1e-4 + rec_loss*0.01 + reg_loss*0.25 + align_loss*0.007 + gamma_loss*10.0 + id_loss*0.15
total_loss.backward()
optimizer.step()
# logging
if torch.distributed.get_rank() == 0:
scheduler.step()
total_iteration = len(train_loader) * epoch + i
logger.log_training(coef_loss.item(), rec_loss.item(), reg_loss.item(), align_loss.item(), id_loss.item(), total_iteration)
if total_iteration % 250 == 0:
rendered_grid = make_grid(rendered, nrow=args.batch_size//2, normalize=True)
lmk = lmk.type(torch.LongTensor)
landmark = landmark.type(torch.LongTensor)
color1 = torch.FloatTensor([1.0,0.0,0.0]).unsqueeze(-1).unsqueeze(-1)
color2 = torch.FloatTensor([0.0,0.0,1.0]).unsqueeze(-1).unsqueeze(-1)
for b in range(img.size(0)):
for l in range(68):
occluded[b, :, lmk[b,l,1]-2:lmk[b,l,1]+2, lmk[b,l,0]-2:lmk[b,l,0]+2] = color1
occluded[b, :, landmark[b,l,1]-2:landmark[b,l,1]+2, landmark[b,l,0]-2:landmark[b,l,0]+2] = color2
input_grid = make_grid(occluded, nrow=args.batch_size//2, normalize=False)
logger.log_train_image(input_grid, rendered_grid, total_iteration)
sys.stdout.write('\r[Epoch %d/%d][Iter %d/%d][Total_iter %d]' % (epoch, args.epochs, i, len(train_loader), total_iteration))
if i!=0 and total_iteration % args.val_iters == 0:
error = validate(models, val_loader, epoch, args)
logger.log_validation(error, epoch)
torch.save(models['3D'].regressor.module.state_dict(), args.save_path+"/reg_it%d_%.4f_stage1.pth" % (total_iteration, error))
def validate(models, val_loader, epoch, args):
with torch.no_grad():
align_error = 0.0
for i, (occluded, lmk) in enumerate(val_loader):
print('\rval %d...' % (i+1), end='')
occluded = Variable(occluded.type(torch.cuda.FloatTensor)).cuda()
lmk = Variable(lmk.type(torch.cuda.FloatTensor)).cuda()
coef = models['3D'].regress_3dmm(occluded[:,[2,1,0],...])
_, landmark = models['3D'].reconstruct(coef, test=True)
align_error += torch.mean(torch.abs(landmark - lmk))
align_error /= len(val_loader)
return align_error
def main(args):
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
| estimator3d.regressor.cuda(args.gpu)
parsing_net = BiSeNet(n_classes=19)
parsing_net.cuda(args.gpu)
parsing_net.load_state_dict(torch.load('faceParsing/model_final_diss.pth', map_location='cuda:'+str(args.gpu)))
parsing_net.eval()
face_encoder = IR_SE_50([112,112])
face_encoder.load_state_dict(torch.load('saved_models/face_res_50.pth', map_location='cuda:'+str(args.gpu)))
face_encoder.cuda(args.gpu)
face_encoder.eval()
args.batch_size = int(args | args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
print('rank', args.rank)
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.cuda.set_device(args.gpu)
# Load models
estimator3d = Estimator3D(is_cuda=True, batch_size=args.batch_size, model_path=args.checkpoint, test=False, back_white=False, device_id=args.gpu) | identifier_body |
atariclip.py | , self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(400, 6)
for layer in self.parameters():
layer.requires_grad = False
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = F.max_pool2d(x, 3, 3)
x = F.max_pool2d(x, 3, 3)
x = x.view(-1, 400)
x = F.relu(self.fc1(x))
return F.log_softmax(x, dim=1)
class ModelSimple(nn.Module):
def __init__(self):
super(ModelSimple, self).__init__()
#self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
#self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
#self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(100800, 6)
# self.fc2 = nn.Linear(50, 6)
for layer in self.parameters():
layer.requires_grad = False
def forward(self, x):
#x = F.relu(F.max_pool2d(self.conv1(x), 2))
#x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
#x = F.max_pool2d(x, 3, 3)
#x = F.max_pool2d(x, 3, 3)
x = x.view(-1, 100800)
x = F.relu(self.fc1(x))
#x = F.dropout(x, training=self.training)
#x = self.fc2(x)
return F.log_softmax(x, dim=1)
class ModelDummy(nn.Module):
def __init__(self):
super(ModelDummy, self).__init__()
self.fc1 = nn.Linear(1, 1)
# self.fc2 = nn.Linear(50, 6)
for layer in self.parameters():
layer.requires_grad = False
def forward(self, x):
num = x.size(0)
return convert_torch(np.array([1 for i in range(num)]))
def evaluate_model(model, xs):
"""
Args:
xs: (N, shape)
"""
res = model(convert_torch(xs))
npar = from_torch(res)
if len(npar.shape) == 1:
return npar # for dummy eval
return from_torch(res).argmax(axis=1)
def get_model(model_name="simple"):
if model_name == "big":
return ModelBig()
elif model_name == "simple":
return ModelSimple()
elif model_name == "dummy":
return ModelDummy()
class Simulator(object):
def __init__(self, args):
self._env = gym.make(args.env)
_state = self._env.reset()
self._init_state = np.array([preprocess(_state) for i in range(args.batch)])
if args.batch == 0:
self._init_state = np.array([0], dtype=np.float32)
def onestep(self, arr, start=False):
self._init_state += 0.001
# if start:
# return self._init_state
# state = self._env.step(arr)[0]
return self._init_state
| return self._init_state
class Clip(object):
def __init__(self, shape, model_name):
from clipper_admin import ClipperConnection, DockerContainerManager
from clipper_admin.deployers import python as python_deployer
from clipper_admin.deployers import pytorch as pytorch_deployer
self.clipper_conn = ClipperConnection(DockerContainerManager())
try:
self.clipper_conn.connect()
self.clipper_conn.stop_all()
except Exception:
pass
self.clipper_conn.start_clipper()
self.clipper_conn.register_application(
name="hello-world", input_type="strings",
default_output="-1.0", slo_micros=10**8)
ptmodel = get_model(model_name)
def policy(model, x):
print(len(x))
batch = (len(x))
arr = []
for j in x:
print(type(j), len(j))
res = np.frombuffer(base64.decodestring(j), dtype=np.float32)
print(res.shape)
arr += [res]
x = np.array(arr)
x = x.reshape((-1,) + shape[1:])
print("new shape", x.shape)
return evaluate_model(model, x).reshape((batch, shape[0]))
pytorch_deployer.deploy_pytorch_model(
self.clipper_conn, name="policy", version=1,
input_type="strings", func=policy, pytorch_model=ptmodel)
self.clipper_conn.link_model_to_app(
app_name="hello-world", model_name="policy")
# class PolicyActor(object):
# def __init__(self):
# self.ptmodel = Model()
#
# def query(self, state):
# state = [state]
# return evaluate_model(self.ptmodel, state)
class ClipperRunner(Simulator):
def __init__(self, args):
super(ClipperRunner, self).__init__(args)
self.shape = self.initial_state().shape
self._headers = {"Content-type": "application/json"}
def run(self, steps):
state = self.initial_state()
serialize_timer = TimerStat()
step_timer = TimerStat()
for i in range(steps):
with step_timer:
with serialize_timer:
s = base64.b64encode(state)
data = json.dumps({"input": s})
res = requests.post(
"http://localhost:1337/hello-world/predict",
headers=self._headers,
data=data).json()
out = res['output']
state = self.onestep(out)
print("Serialize", serialize_timer.mean)
print("Step", step_timer.mean)
# class RayRunner(Simulator):
# def __init__(self, env):
# super(RayRunner, self).__init__(env)
# self.shape = self.initial_state().shape
# self.timers = {"query": TimerStat(), "step": TimerStat()}
# def run(self, steps, policy_actor):
# state = self.initial_state()
# for i in range(steps):
# with self.timers["query"]:
# out = ray.get(policy_actor.query.remote(state))
# with self.timers["step"]:
# state = self.onestep(out)
# def stats(self):
# return {k: v.mean for k, v in self.timers.items()}
def eval_ray_batch(args):
model = get_model(args.model)
RemoteSimulator = ray.remote(Simulator)
simulators = [RemoteSimulator.remote(args) for i in range(args.num_sims)]
ac = [None for i in range(args.num_sims)]
init_shape = ray.get(simulators[0].initial_state.remote()).shape
start = time.time()
remaining = {sim.onestep.remote(a, i == 0): sim for a, sim in zip(ac, simulators)}
counter = {sim: 0 for sim in simulators}
timers = {k: TimerStat() for k in ["fwd", "wait", "get", "step"]}
while any(v < args.iters for v in counter.values()):
# TODO: consider evaluating as ray.wait
with timers["step"]:
with timers["wait"]:
[data_fut], _ = ray.wait(list(remaining))
with timers["get"]:
xs = ray.get(data_fut)
sim = remaining.pop(data_fut)
counter[sim] += 1
with timers["fwd"]:
ac = evaluate_model(model, xs)
if counter[sim] < args.iters:
remaining[sim.onestep.remote(ac[0], i == 0)] = sim
print("Took %f sec..." % (time.time() - start))
print(xs.shape)
print("\n".join(["%s: %0.5f" % (k, t.mean) for k, t in timers.items()]))
def eval_simple(args):
model = get_model(args.model)
sim = Simulator(args)
fwd = TimerStat()
start = time.time()
ac = [None]
for i in range(args.iters):
xs = sim.onestep(ac[0], i == 0)
with fwd:
ac = evaluate_model(model, xs)
print("Took %f sec..." % (time.time() - start))
print(fwd.mean, "Avg Fwd pass..")
# def eval_ray(args):
# RemoteRayRunner = ray.remote(RayRunner)
# simulators = [RemoteRayRunner.remote(args) for i in range(args.num_sims)]
# RemotePolicy = ray.remote(PolicyActor)
# p = RemotePolicy.remote | def initial_state(self): | random_line_split |
atariclip.py | x = x.view(-1, 400)
x = F.relu(self.fc1(x))
return F.log_softmax(x, dim=1)
class ModelSimple(nn.Module):
def __init__(self):
super(ModelSimple, self).__init__()
#self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
#self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
#self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(100800, 6)
# self.fc2 = nn.Linear(50, 6)
for layer in self.parameters():
layer.requires_grad = False
def forward(self, x):
#x = F.relu(F.max_pool2d(self.conv1(x), 2))
#x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
#x = F.max_pool2d(x, 3, 3)
#x = F.max_pool2d(x, 3, 3)
x = x.view(-1, 100800)
x = F.relu(self.fc1(x))
#x = F.dropout(x, training=self.training)
#x = self.fc2(x)
return F.log_softmax(x, dim=1)
class ModelDummy(nn.Module):
def __init__(self):
super(ModelDummy, self).__init__()
self.fc1 = nn.Linear(1, 1)
# self.fc2 = nn.Linear(50, 6)
for layer in self.parameters():
layer.requires_grad = False
def forward(self, x):
num = x.size(0)
return convert_torch(np.array([1 for i in range(num)]))
def evaluate_model(model, xs):
"""
Args:
xs: (N, shape)
"""
res = model(convert_torch(xs))
npar = from_torch(res)
if len(npar.shape) == 1:
return npar # for dummy eval
return from_torch(res).argmax(axis=1)
def get_model(model_name="simple"):
if model_name == "big":
return ModelBig()
elif model_name == "simple":
return ModelSimple()
elif model_name == "dummy":
return ModelDummy()
class Simulator(object):
def __init__(self, args):
self._env = gym.make(args.env)
_state = self._env.reset()
self._init_state = np.array([preprocess(_state) for i in range(args.batch)])
if args.batch == 0:
self._init_state = np.array([0], dtype=np.float32)
def onestep(self, arr, start=False):
self._init_state += 0.001
# if start:
# return self._init_state
# state = self._env.step(arr)[0]
return self._init_state
def initial_state(self):
return self._init_state
class Clip(object):
def __init__(self, shape, model_name):
from clipper_admin import ClipperConnection, DockerContainerManager
from clipper_admin.deployers import python as python_deployer
from clipper_admin.deployers import pytorch as pytorch_deployer
self.clipper_conn = ClipperConnection(DockerContainerManager())
try:
self.clipper_conn.connect()
self.clipper_conn.stop_all()
except Exception:
pass
self.clipper_conn.start_clipper()
self.clipper_conn.register_application(
name="hello-world", input_type="strings",
default_output="-1.0", slo_micros=10**8)
ptmodel = get_model(model_name)
def policy(model, x):
print(len(x))
batch = (len(x))
arr = []
for j in x:
print(type(j), len(j))
res = np.frombuffer(base64.decodestring(j), dtype=np.float32)
print(res.shape)
arr += [res]
x = np.array(arr)
x = x.reshape((-1,) + shape[1:])
print("new shape", x.shape)
return evaluate_model(model, x).reshape((batch, shape[0]))
pytorch_deployer.deploy_pytorch_model(
self.clipper_conn, name="policy", version=1,
input_type="strings", func=policy, pytorch_model=ptmodel)
self.clipper_conn.link_model_to_app(
app_name="hello-world", model_name="policy")
# class PolicyActor(object):
# def __init__(self):
# self.ptmodel = Model()
#
# def query(self, state):
# state = [state]
# return evaluate_model(self.ptmodel, state)
class ClipperRunner(Simulator):
def __init__(self, args):
super(ClipperRunner, self).__init__(args)
self.shape = self.initial_state().shape
self._headers = {"Content-type": "application/json"}
def run(self, steps):
state = self.initial_state()
serialize_timer = TimerStat()
step_timer = TimerStat()
for i in range(steps):
with step_timer:
with serialize_timer:
s = base64.b64encode(state)
data = json.dumps({"input": s})
res = requests.post(
"http://localhost:1337/hello-world/predict",
headers=self._headers,
data=data).json()
out = res['output']
state = self.onestep(out)
print("Serialize", serialize_timer.mean)
print("Step", step_timer.mean)
# class RayRunner(Simulator):
# def __init__(self, env):
# super(RayRunner, self).__init__(env)
# self.shape = self.initial_state().shape
# self.timers = {"query": TimerStat(), "step": TimerStat()}
# def run(self, steps, policy_actor):
# state = self.initial_state()
# for i in range(steps):
# with self.timers["query"]:
# out = ray.get(policy_actor.query.remote(state))
# with self.timers["step"]:
# state = self.onestep(out)
# def stats(self):
# return {k: v.mean for k, v in self.timers.items()}
def eval_ray_batch(args):
model = get_model(args.model)
RemoteSimulator = ray.remote(Simulator)
simulators = [RemoteSimulator.remote(args) for i in range(args.num_sims)]
ac = [None for i in range(args.num_sims)]
init_shape = ray.get(simulators[0].initial_state.remote()).shape
start = time.time()
remaining = {sim.onestep.remote(a, i == 0): sim for a, sim in zip(ac, simulators)}
counter = {sim: 0 for sim in simulators}
timers = {k: TimerStat() for k in ["fwd", "wait", "get", "step"]}
while any(v < args.iters for v in counter.values()):
# TODO: consider evaluating as ray.wait
with timers["step"]:
with timers["wait"]:
[data_fut], _ = ray.wait(list(remaining))
with timers["get"]:
xs = ray.get(data_fut)
sim = remaining.pop(data_fut)
counter[sim] += 1
with timers["fwd"]:
ac = evaluate_model(model, xs)
if counter[sim] < args.iters:
remaining[sim.onestep.remote(ac[0], i == 0)] = sim
print("Took %f sec..." % (time.time() - start))
print(xs.shape)
print("\n".join(["%s: %0.5f" % (k, t.mean) for k, t in timers.items()]))
def eval_simple(args):
model = get_model(args.model)
sim = Simulator(args)
fwd = TimerStat()
start = time.time()
ac = [None]
for i in range(args.iters):
xs = sim.onestep(ac[0], i == 0)
with fwd:
ac = evaluate_model(model, xs)
print("Took %f sec..." % (time.time() - start))
print(fwd.mean, "Avg Fwd pass..")
# def eval_ray(args):
# RemoteRayRunner = ray.remote(RayRunner)
# simulators = [RemoteRayRunner.remote(args) for i in range(args.num_sims)]
# RemotePolicy = ray.remote(PolicyActor)
# p = RemotePolicy.remote()
# start = time.time()
# ray.get([sim.run.remote(args.iters, p) for sim in simulators])
# print("Took %0.4f sec..." % (time.time() - start))
# stats = ray.get(simulators[0].stats.remote())
# print(stats)
def eval_clipper(args):
| RemoteClipperRunner = ray.remote(ClipperRunner)
simulators = [RemoteClipperRunner.remote(args) for i in range(args.num_sims)]
c = Clip(ray.get(simulators[0].initial_state.remote()).shape, args.model)
start = time.time()
ray.get([sim.run.remote(args.iters) for sim in simulators])
print("Took %f sec..." % (time.time() - start)) | identifier_body | |
atariclip.py | (self.conv2_drop(self.conv2(x)), 2))
#x = F.max_pool2d(x, 3, 3)
#x = F.max_pool2d(x, 3, 3)
x = x.view(-1, 100800)
x = F.relu(self.fc1(x))
#x = F.dropout(x, training=self.training)
#x = self.fc2(x)
return F.log_softmax(x, dim=1)
class ModelDummy(nn.Module):
def __init__(self):
super(ModelDummy, self).__init__()
self.fc1 = nn.Linear(1, 1)
# self.fc2 = nn.Linear(50, 6)
for layer in self.parameters():
layer.requires_grad = False
def forward(self, x):
num = x.size(0)
return convert_torch(np.array([1 for i in range(num)]))
def evaluate_model(model, xs):
"""
Args:
xs: (N, shape)
"""
res = model(convert_torch(xs))
npar = from_torch(res)
if len(npar.shape) == 1:
return npar # for dummy eval
return from_torch(res).argmax(axis=1)
def get_model(model_name="simple"):
if model_name == "big":
return ModelBig()
elif model_name == "simple":
return ModelSimple()
elif model_name == "dummy":
return ModelDummy()
class Simulator(object):
def __init__(self, args):
self._env = gym.make(args.env)
_state = self._env.reset()
self._init_state = np.array([preprocess(_state) for i in range(args.batch)])
if args.batch == 0:
self._init_state = np.array([0], dtype=np.float32)
def onestep(self, arr, start=False):
self._init_state += 0.001
# if start:
# return self._init_state
# state = self._env.step(arr)[0]
return self._init_state
def initial_state(self):
return self._init_state
class Clip(object):
def __init__(self, shape, model_name):
from clipper_admin import ClipperConnection, DockerContainerManager
from clipper_admin.deployers import python as python_deployer
from clipper_admin.deployers import pytorch as pytorch_deployer
self.clipper_conn = ClipperConnection(DockerContainerManager())
try:
self.clipper_conn.connect()
self.clipper_conn.stop_all()
except Exception:
pass
self.clipper_conn.start_clipper()
self.clipper_conn.register_application(
name="hello-world", input_type="strings",
default_output="-1.0", slo_micros=10**8)
ptmodel = get_model(model_name)
def policy(model, x):
print(len(x))
batch = (len(x))
arr = []
for j in x:
print(type(j), len(j))
res = np.frombuffer(base64.decodestring(j), dtype=np.float32)
print(res.shape)
arr += [res]
x = np.array(arr)
x = x.reshape((-1,) + shape[1:])
print("new shape", x.shape)
return evaluate_model(model, x).reshape((batch, shape[0]))
pytorch_deployer.deploy_pytorch_model(
self.clipper_conn, name="policy", version=1,
input_type="strings", func=policy, pytorch_model=ptmodel)
self.clipper_conn.link_model_to_app(
app_name="hello-world", model_name="policy")
# class PolicyActor(object):
# def __init__(self):
# self.ptmodel = Model()
#
# def query(self, state):
# state = [state]
# return evaluate_model(self.ptmodel, state)
class ClipperRunner(Simulator):
def __init__(self, args):
super(ClipperRunner, self).__init__(args)
self.shape = self.initial_state().shape
self._headers = {"Content-type": "application/json"}
def run(self, steps):
state = self.initial_state()
serialize_timer = TimerStat()
step_timer = TimerStat()
for i in range(steps):
with step_timer:
with serialize_timer:
s = base64.b64encode(state)
data = json.dumps({"input": s})
res = requests.post(
"http://localhost:1337/hello-world/predict",
headers=self._headers,
data=data).json()
out = res['output']
state = self.onestep(out)
print("Serialize", serialize_timer.mean)
print("Step", step_timer.mean)
# class RayRunner(Simulator):
# def __init__(self, env):
# super(RayRunner, self).__init__(env)
# self.shape = self.initial_state().shape
# self.timers = {"query": TimerStat(), "step": TimerStat()}
# def run(self, steps, policy_actor):
# state = self.initial_state()
# for i in range(steps):
# with self.timers["query"]:
# out = ray.get(policy_actor.query.remote(state))
# with self.timers["step"]:
# state = self.onestep(out)
# def stats(self):
# return {k: v.mean for k, v in self.timers.items()}
def eval_ray_batch(args):
model = get_model(args.model)
RemoteSimulator = ray.remote(Simulator)
simulators = [RemoteSimulator.remote(args) for i in range(args.num_sims)]
ac = [None for i in range(args.num_sims)]
init_shape = ray.get(simulators[0].initial_state.remote()).shape
start = time.time()
remaining = {sim.onestep.remote(a, i == 0): sim for a, sim in zip(ac, simulators)}
counter = {sim: 0 for sim in simulators}
timers = {k: TimerStat() for k in ["fwd", "wait", "get", "step"]}
while any(v < args.iters for v in counter.values()):
# TODO: consider evaluating as ray.wait
with timers["step"]:
with timers["wait"]:
[data_fut], _ = ray.wait(list(remaining))
with timers["get"]:
xs = ray.get(data_fut)
sim = remaining.pop(data_fut)
counter[sim] += 1
with timers["fwd"]:
ac = evaluate_model(model, xs)
if counter[sim] < args.iters:
remaining[sim.onestep.remote(ac[0], i == 0)] = sim
print("Took %f sec..." % (time.time() - start))
print(xs.shape)
print("\n".join(["%s: %0.5f" % (k, t.mean) for k, t in timers.items()]))
def eval_simple(args):
model = get_model(args.model)
sim = Simulator(args)
fwd = TimerStat()
start = time.time()
ac = [None]
for i in range(args.iters):
xs = sim.onestep(ac[0], i == 0)
with fwd:
ac = evaluate_model(model, xs)
print("Took %f sec..." % (time.time() - start))
print(fwd.mean, "Avg Fwd pass..")
# def eval_ray(args):
# RemoteRayRunner = ray.remote(RayRunner)
# simulators = [RemoteRayRunner.remote(args) for i in range(args.num_sims)]
# RemotePolicy = ray.remote(PolicyActor)
# p = RemotePolicy.remote()
# start = time.time()
# ray.get([sim.run.remote(args.iters, p) for sim in simulators])
# print("Took %0.4f sec..." % (time.time() - start))
# stats = ray.get(simulators[0].stats.remote())
# print(stats)
def eval_clipper(args):
RemoteClipperRunner = ray.remote(ClipperRunner)
simulators = [RemoteClipperRunner.remote(args) for i in range(args.num_sims)]
c = Clip(ray.get(simulators[0].initial_state.remote()).shape, args.model)
start = time.time()
ray.get([sim.run.remote(args.iters) for sim in simulators])
print("Took %f sec..." % (time.time() - start))
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--runtime", type=str, choices=["ray", "clipper", "simple"],
help="Choose between Ray or Clipper")
parser.add_argument("--env", type=str, default="Pong-v0",
help="Env Keyword for starting a simulator")
parser.add_argument("--batch", type=int, default=1,
help="Size of data")
parser.add_argument("--num-sims", type=int, default=1,
help="Number of simultaneous simulations to evaluate")
parser.add_argument("--iters", type=int, default=500,
help="Number of steps per sim to evaluate")
parser.add_argument("--model", type=str, default="simple",
help="Use a bigger CNN model.")
if __name__ == "__main__":
args = parser.parse_args()
if args.runtime == "ray":
| import ray
ray.init()
eval_ray_batch(args) | conditional_block | |
atariclip.py | )
for layer in self.parameters():
layer.requires_grad = False
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = F.max_pool2d(x, 3, 3)
x = F.max_pool2d(x, 3, 3)
x = x.view(-1, 400)
x = F.relu(self.fc1(x))
return F.log_softmax(x, dim=1)
class ModelSimple(nn.Module):
def __init__(self):
super(ModelSimple, self).__init__()
#self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
#self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
#self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(100800, 6)
# self.fc2 = nn.Linear(50, 6)
for layer in self.parameters():
layer.requires_grad = False
def forward(self, x):
#x = F.relu(F.max_pool2d(self.conv1(x), 2))
#x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
#x = F.max_pool2d(x, 3, 3)
#x = F.max_pool2d(x, 3, 3)
x = x.view(-1, 100800)
x = F.relu(self.fc1(x))
#x = F.dropout(x, training=self.training)
#x = self.fc2(x)
return F.log_softmax(x, dim=1)
class ModelDummy(nn.Module):
def __init__(self):
super(ModelDummy, self).__init__()
self.fc1 = nn.Linear(1, 1)
# self.fc2 = nn.Linear(50, 6)
for layer in self.parameters():
layer.requires_grad = False
def forward(self, x):
num = x.size(0)
return convert_torch(np.array([1 for i in range(num)]))
def evaluate_model(model, xs):
"""
Args:
xs: (N, shape)
"""
res = model(convert_torch(xs))
npar = from_torch(res)
if len(npar.shape) == 1:
return npar # for dummy eval
return from_torch(res).argmax(axis=1)
def get_model(model_name="simple"):
if model_name == "big":
return ModelBig()
elif model_name == "simple":
return ModelSimple()
elif model_name == "dummy":
return ModelDummy()
class Simulator(object):
def __init__(self, args):
self._env = gym.make(args.env)
_state = self._env.reset()
self._init_state = np.array([preprocess(_state) for i in range(args.batch)])
if args.batch == 0:
self._init_state = np.array([0], dtype=np.float32)
def onestep(self, arr, start=False):
self._init_state += 0.001
# if start:
# return self._init_state
# state = self._env.step(arr)[0]
return self._init_state
def initial_state(self):
return self._init_state
class Clip(object):
def __init__(self, shape, model_name):
from clipper_admin import ClipperConnection, DockerContainerManager
from clipper_admin.deployers import python as python_deployer
from clipper_admin.deployers import pytorch as pytorch_deployer
self.clipper_conn = ClipperConnection(DockerContainerManager())
try:
self.clipper_conn.connect()
self.clipper_conn.stop_all()
except Exception:
pass
self.clipper_conn.start_clipper()
self.clipper_conn.register_application(
name="hello-world", input_type="strings",
default_output="-1.0", slo_micros=10**8)
ptmodel = get_model(model_name)
def policy(model, x):
print(len(x))
batch = (len(x))
arr = []
for j in x:
print(type(j), len(j))
res = np.frombuffer(base64.decodestring(j), dtype=np.float32)
print(res.shape)
arr += [res]
x = np.array(arr)
x = x.reshape((-1,) + shape[1:])
print("new shape", x.shape)
return evaluate_model(model, x).reshape((batch, shape[0]))
pytorch_deployer.deploy_pytorch_model(
self.clipper_conn, name="policy", version=1,
input_type="strings", func=policy, pytorch_model=ptmodel)
self.clipper_conn.link_model_to_app(
app_name="hello-world", model_name="policy")
# class PolicyActor(object):
# def __init__(self):
# self.ptmodel = Model()
#
# def query(self, state):
# state = [state]
# return evaluate_model(self.ptmodel, state)
class ClipperRunner(Simulator):
def __init__(self, args):
super(ClipperRunner, self).__init__(args)
self.shape = self.initial_state().shape
self._headers = {"Content-type": "application/json"}
def run(self, steps):
state = self.initial_state()
serialize_timer = TimerStat()
step_timer = TimerStat()
for i in range(steps):
with step_timer:
with serialize_timer:
s = base64.b64encode(state)
data = json.dumps({"input": s})
res = requests.post(
"http://localhost:1337/hello-world/predict",
headers=self._headers,
data=data).json()
out = res['output']
state = self.onestep(out)
print("Serialize", serialize_timer.mean)
print("Step", step_timer.mean)
# class RayRunner(Simulator):
# def __init__(self, env):
# super(RayRunner, self).__init__(env)
# self.shape = self.initial_state().shape
# self.timers = {"query": TimerStat(), "step": TimerStat()}
# def run(self, steps, policy_actor):
# state = self.initial_state()
# for i in range(steps):
# with self.timers["query"]:
# out = ray.get(policy_actor.query.remote(state))
# with self.timers["step"]:
# state = self.onestep(out)
# def stats(self):
# return {k: v.mean for k, v in self.timers.items()}
def eval_ray_batch(args):
model = get_model(args.model)
RemoteSimulator = ray.remote(Simulator)
simulators = [RemoteSimulator.remote(args) for i in range(args.num_sims)]
ac = [None for i in range(args.num_sims)]
init_shape = ray.get(simulators[0].initial_state.remote()).shape
start = time.time()
remaining = {sim.onestep.remote(a, i == 0): sim for a, sim in zip(ac, simulators)}
counter = {sim: 0 for sim in simulators}
timers = {k: TimerStat() for k in ["fwd", "wait", "get", "step"]}
while any(v < args.iters for v in counter.values()):
# TODO: consider evaluating as ray.wait
with timers["step"]:
with timers["wait"]:
[data_fut], _ = ray.wait(list(remaining))
with timers["get"]:
xs = ray.get(data_fut)
sim = remaining.pop(data_fut)
counter[sim] += 1
with timers["fwd"]:
ac = evaluate_model(model, xs)
if counter[sim] < args.iters:
remaining[sim.onestep.remote(ac[0], i == 0)] = sim
print("Took %f sec..." % (time.time() - start))
print(xs.shape)
print("\n".join(["%s: %0.5f" % (k, t.mean) for k, t in timers.items()]))
def eval_simple(args):
model = get_model(args.model)
sim = Simulator(args)
fwd = TimerStat()
start = time.time()
ac = [None]
for i in range(args.iters):
xs = sim.onestep(ac[0], i == 0)
with fwd:
ac = evaluate_model(model, xs)
print("Took %f sec..." % (time.time() - start))
print(fwd.mean, "Avg Fwd pass..")
# def eval_ray(args):
# RemoteRayRunner = ray.remote(RayRunner)
# simulators = [RemoteRayRunner.remote(args) for i in range(args.num_sims)]
# RemotePolicy = ray.remote(PolicyActor)
# p = RemotePolicy.remote()
# start = time.time()
# ray.get([sim.run.remote(args.iters, p) for sim in simulators])
# print("Took %0.4f sec..." % (time.time() - start))
# stats = ray.get(simulators[0].stats.remote())
# print(stats)
def | eval_clipper | identifier_name | |
olm_parser.rs | ]
}
fn sub_images(image: RgbImage, chunk_size: usize) -> impl Iterator<Item=RgbImage> {
let chunk_size_32: u32 = TryFrom::try_from(chunk_size)
.expect("chunk_size too large, cannot convert to u32");
let height_iter = 0..(image.dimensions().1) - (chunk_size_32 - 1);
let width_iter = 0..(image.dimensions().0) - (chunk_size_32 - 1);
height_iter
.cartesian_product(width_iter)
.map(move |(y, x)| {
imageops::crop_imm(&image, x, y, chunk_size_32, chunk_size_32).to_image()
})
}
fn alias_sub_image(image: RgbImage, pixel_aliases: &PixelKeys) -> Vec<usize> {
image
.pixels()
.map(|p| *pixel_aliases.get_by_right(&p).unwrap())
.collect()
}
fn alias_pixels(image: &RgbImage) -> PixelKeys {
image
.pixels()
.unique()
.copied()
.enumerate()
.collect()
}
// returns the input image in unique chunks and frequencies of those chunks
fn | (
image: RgbImage,
chunk_size: usize,
pixel_aliases: &PixelKeys,
rotate: bool,
reflect_vertical: bool,
reflect_horizontal: bool,
reflect_diagonal: bool,
) -> IndexMap<Chunk, u16> {
sub_images(image, chunk_size)
.map(|sub_image| alias_sub_image(sub_image, pixel_aliases))
.fold(IndexMap::new(), |mut acc, aliases| {
let chunk = DMatrix::from_row_slice(chunk_size, chunk_size, &aliases);
if rotate {
let mut rot_chunk = chunk.clone();
for _ in 0..3 {
rot_chunk = rot_chunk.rotate_90();
push_chunk_frequency(rot_chunk.clone(), &mut acc);
}
}
if reflect_vertical {
push_chunk_frequency(chunk.reflect_vertical(), &mut acc);
}
if reflect_horizontal {
push_chunk_frequency(chunk.reflect_horizontal(), &mut acc);
}
if reflect_diagonal {
push_chunk_frequency(chunk.reflect_top_left(), &mut acc);
push_chunk_frequency(chunk.reflect_bottom_left(), &mut acc);
}
push_chunk_frequency(chunk, &mut acc);
acc
})
}
fn push_chunk_frequency(chunk: Chunk, frequencies: &mut IndexMap<Chunk, u16>) {
frequencies.entry(chunk).and_modify(|f| *f += 1).or_insert(1);
}
type Position = (usize, usize);
type Size = (usize, usize);
type Direction = u16;
fn sub_chunk_positions(chunk_size: usize) -> Vec<(Position, Size, Direction)> {
let period = chunk_size * 2 - 1;
let positions = Limit::new(chunk_size).zip(TriWave::new(chunk_size)).take(period);
let pos_cart_prod = positions.clone().cartesian_product(positions);
pos_cart_prod
.map(|((y_position, y_size), (x_position, x_size))| (
(x_position, y_position),
(x_size + 1, y_size + 1)
))
.filter(|(_, (width, height))| width != &chunk_size || height != &chunk_size)
.enumerate()
.map(|(direction, (position, size))| (
position,
size,
direction as u16
))
.collect()
}
fn overlaps(chunks: &IndexMap<Chunk, u16>, chunk_size: usize) -> Rules {
chunks
.keys()
.enumerate()
.fold(HashMap::new(), |mut rules, (label, chunk)| {
let sub_positions = sub_chunk_positions(chunk_size);
sub_positions
.iter()
.for_each(|(position, size, direction)| {
let sub_chunk = chunk.sub_matrix(*position, *size);
let reverse_index = sub_positions.len() - 1 - *direction as usize;
let (rev_pos, rev_size, _) = sub_positions[reverse_index];
chunks
.keys()
.enumerate()
.for_each(|(other_label, other_chunk)| {
// find mirrored sub chunk
let other_sub_chunk = other_chunk.sub_matrix(rev_pos, rev_size);
if sub_chunk == other_sub_chunk {
let mut set = MSu16xNU::empty();
set.insert(other_label, 1);
rules
.entry((*direction, label))
.and_modify(|l| l.add_assign(set))
.or_insert(set);
}
})
});
rules
})
}
// Create a raw graph for pruning
fn create_raw_graph(all_labels: &MSu16xNU, chunk_size: usize, (height, width): (usize, usize)) -> Graph {
// pixel based graph dimensions
let v_dim_x = (width * chunk_size) - (chunk_size - 1);
let v_dim_y = (height * chunk_size) - (chunk_size - 1);
let vertices_len = v_dim_x * v_dim_y;
let vertices: Vec<MSu16xNU> = vec![*all_labels; vertices_len];
// create negative indexed range to offset vertex centered directional field by N
let signed_chunk_size: i32 = TryFrom::try_from(chunk_size)
.expect("Cannot convert chunk_size to i32");
let range = 1 - signed_chunk_size..signed_chunk_size;
// calculate real cartesian space offest coordinates
let range_cart_prod = range.clone()
.cartesian_product(range)
.filter(|i| i != &(0, 0)); // remove 0 offset for correct directional mapping
let edges: Edges = (0..vertices_len)
.fold(HashMap::new(), |mut acc, index| {
let (x, y) = index_to_coords(index, v_dim_x);
range_cart_prod
.clone()
.map(|(y_offset, x_offset)| (y as i32 + y_offset, x as i32 + x_offset))
.enumerate()
// remove coordinates outside of graph
.filter(|(_, offsets)| is_inside(*offsets, (v_dim_x, v_dim_y)))
.for_each(|(direction, (y_offset, x_offset))| {
let other_index = coords_to_index(x_offset as usize, y_offset as usize, v_dim_x);
acc
.entry(index as u32)
.and_modify(|v| v.push((other_index as u32, direction as u16)))
.or_insert(vec![(other_index as u32, direction as u16)]);
});
acc
});
Graph::new(vertices, edges, *all_labels)
}
fn propagate_overlaps(mut graph: Graph, rules: &Rules, label: usize) -> Graph {
let central_vertex = (graph.vertices.len() - 1) / 2;
graph.vertices.index_mut(central_vertex).choose(label);
collapse::collapse(rules, &graph, None, Some(1))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::utils::hash_map;
use image::ImageBuffer;
#[test]
fn test_alias_pixels() {
let pixels = vec![255, 255, 255, 0, 0, 0, 122, 122, 122, 96, 96, 96];
let img = ImageBuffer::from_vec(2, 2, pixels).unwrap();
let pixel_aliases = alias_pixels(&img);
assert_eq!(pixel_aliases.len(), 4);
}
#[test]
fn test_chunk_image() {
let img = image::open("resources/test/chunk_image_test.png").unwrap().to_rgb8();
let mut pixel_aliases: PixelKeys = BiMap::new();
pixel_aliases.insert(0, Rgb::from([255, 255, 255]));
pixel_aliases.insert(1, Rgb::from([0, 0, 0]));
let chunk_map = chunk_image(img, 2, &pixel_aliases, true, false, false, false);
let mut expected_map: IndexMap<Chunk, u16> = IndexMap::new();
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 0, 0, 0]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 0, 0]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 0, 1, 0]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 0, 0, 1]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 1, 1]), 2);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 0, 1, 1]), 2);
expected_map.insert(DMatrix::from_row_slice(2, 2, | chunk_image | identifier_name |
olm_parser.rs | ]
}
fn sub_images(image: RgbImage, chunk_size: usize) -> impl Iterator<Item=RgbImage> {
let chunk_size_32: u32 = TryFrom::try_from(chunk_size)
.expect("chunk_size too large, cannot convert to u32");
let height_iter = 0..(image.dimensions().1) - (chunk_size_32 - 1);
let width_iter = 0..(image.dimensions().0) - (chunk_size_32 - 1);
height_iter
.cartesian_product(width_iter)
.map(move |(y, x)| {
imageops::crop_imm(&image, x, y, chunk_size_32, chunk_size_32).to_image()
})
}
fn alias_sub_image(image: RgbImage, pixel_aliases: &PixelKeys) -> Vec<usize> {
image
.pixels()
.map(|p| *pixel_aliases.get_by_right(&p).unwrap())
.collect()
}
fn alias_pixels(image: &RgbImage) -> PixelKeys {
image
.pixels()
.unique()
.copied()
.enumerate()
.collect()
}
// returns the input image in unique chunks and frequencies of those chunks
fn chunk_image(
image: RgbImage,
chunk_size: usize,
pixel_aliases: &PixelKeys,
rotate: bool,
reflect_vertical: bool,
reflect_horizontal: bool,
reflect_diagonal: bool,
) -> IndexMap<Chunk, u16> {
sub_images(image, chunk_size)
.map(|sub_image| alias_sub_image(sub_image, pixel_aliases))
.fold(IndexMap::new(), |mut acc, aliases| {
let chunk = DMatrix::from_row_slice(chunk_size, chunk_size, &aliases);
if rotate {
let mut rot_chunk = chunk.clone();
for _ in 0..3 {
rot_chunk = rot_chunk.rotate_90();
push_chunk_frequency(rot_chunk.clone(), &mut acc);
}
}
if reflect_vertical {
push_chunk_frequency(chunk.reflect_vertical(), &mut acc);
}
if reflect_horizontal {
push_chunk_frequency(chunk.reflect_horizontal(), &mut acc);
}
if reflect_diagonal {
push_chunk_frequency(chunk.reflect_top_left(), &mut acc);
push_chunk_frequency(chunk.reflect_bottom_left(), &mut acc);
}
push_chunk_frequency(chunk, &mut acc);
acc
})
}
fn push_chunk_frequency(chunk: Chunk, frequencies: &mut IndexMap<Chunk, u16>) {
frequencies.entry(chunk).and_modify(|f| *f += 1).or_insert(1);
}
type Position = (usize, usize);
type Size = (usize, usize);
type Direction = u16;
fn sub_chunk_positions(chunk_size: usize) -> Vec<(Position, Size, Direction)> {
let period = chunk_size * 2 - 1;
let positions = Limit::new(chunk_size).zip(TriWave::new(chunk_size)).take(period);
let pos_cart_prod = positions.clone().cartesian_product(positions);
pos_cart_prod
.map(|((y_position, y_size), (x_position, x_size))| (
(x_position, y_position),
(x_size + 1, y_size + 1)
))
.filter(|(_, (width, height))| width != &chunk_size || height != &chunk_size)
.enumerate()
.map(|(direction, (position, size))| (
position,
size,
direction as u16
))
.collect()
}
fn overlaps(chunks: &IndexMap<Chunk, u16>, chunk_size: usize) -> Rules {
chunks
.keys()
.enumerate()
.fold(HashMap::new(), |mut rules, (label, chunk)| {
let sub_positions = sub_chunk_positions(chunk_size);
sub_positions
.iter()
.for_each(|(position, size, direction)| {
let sub_chunk = chunk.sub_matrix(*position, *size);
let reverse_index = sub_positions.len() - 1 - *direction as usize;
let (rev_pos, rev_size, _) = sub_positions[reverse_index];
chunks
.keys()
.enumerate()
.for_each(|(other_label, other_chunk)| {
// find mirrored sub chunk
let other_sub_chunk = other_chunk.sub_matrix(rev_pos, rev_size);
if sub_chunk == other_sub_chunk |
})
});
rules
})
}
// Create a raw graph for pruning
fn create_raw_graph(all_labels: &MSu16xNU, chunk_size: usize, (height, width): (usize, usize)) -> Graph {
// pixel based graph dimensions
let v_dim_x = (width * chunk_size) - (chunk_size - 1);
let v_dim_y = (height * chunk_size) - (chunk_size - 1);
let vertices_len = v_dim_x * v_dim_y;
let vertices: Vec<MSu16xNU> = vec![*all_labels; vertices_len];
// create negative indexed range to offset vertex centered directional field by N
let signed_chunk_size: i32 = TryFrom::try_from(chunk_size)
.expect("Cannot convert chunk_size to i32");
let range = 1 - signed_chunk_size..signed_chunk_size;
// calculate real cartesian space offest coordinates
let range_cart_prod = range.clone()
.cartesian_product(range)
.filter(|i| i != &(0, 0)); // remove 0 offset for correct directional mapping
let edges: Edges = (0..vertices_len)
.fold(HashMap::new(), |mut acc, index| {
let (x, y) = index_to_coords(index, v_dim_x);
range_cart_prod
.clone()
.map(|(y_offset, x_offset)| (y as i32 + y_offset, x as i32 + x_offset))
.enumerate()
// remove coordinates outside of graph
.filter(|(_, offsets)| is_inside(*offsets, (v_dim_x, v_dim_y)))
.for_each(|(direction, (y_offset, x_offset))| {
let other_index = coords_to_index(x_offset as usize, y_offset as usize, v_dim_x);
acc
.entry(index as u32)
.and_modify(|v| v.push((other_index as u32, direction as u16)))
.or_insert(vec![(other_index as u32, direction as u16)]);
});
acc
});
Graph::new(vertices, edges, *all_labels)
}
fn propagate_overlaps(mut graph: Graph, rules: &Rules, label: usize) -> Graph {
let central_vertex = (graph.vertices.len() - 1) / 2;
graph.vertices.index_mut(central_vertex).choose(label);
collapse::collapse(rules, &graph, None, Some(1))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::utils::hash_map;
use image::ImageBuffer;
#[test]
fn test_alias_pixels() {
let pixels = vec![255, 255, 255, 0, 0, 0, 122, 122, 122, 96, 96, 96];
let img = ImageBuffer::from_vec(2, 2, pixels).unwrap();
let pixel_aliases = alias_pixels(&img);
assert_eq!(pixel_aliases.len(), 4);
}
#[test]
fn test_chunk_image() {
let img = image::open("resources/test/chunk_image_test.png").unwrap().to_rgb8();
let mut pixel_aliases: PixelKeys = BiMap::new();
pixel_aliases.insert(0, Rgb::from([255, 255, 255]));
pixel_aliases.insert(1, Rgb::from([0, 0, 0]));
let chunk_map = chunk_image(img, 2, &pixel_aliases, true, false, false, false);
let mut expected_map: IndexMap<Chunk, u16> = IndexMap::new();
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 0, 0, 0]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 0, 0]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 0, 1, 0]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 0, 0, 1]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 1, 1]), 2);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 0, 1, 1]), 2);
expected_map.insert(DMatrix::from_row_slice(2, 2 | {
let mut set = MSu16xNU::empty();
set.insert(other_label, 1);
rules
.entry((*direction, label))
.and_modify(|l| l.add_assign(set))
.or_insert(set);
} | conditional_block |
olm_parser.rs |
]
}
fn sub_images(image: RgbImage, chunk_size: usize) -> impl Iterator<Item=RgbImage> {
let chunk_size_32: u32 = TryFrom::try_from(chunk_size)
.expect("chunk_size too large, cannot convert to u32");
let height_iter = 0..(image.dimensions().1) - (chunk_size_32 - 1);
let width_iter = 0..(image.dimensions().0) - (chunk_size_32 - 1);
height_iter
.cartesian_product(width_iter)
.map(move |(y, x)| {
imageops::crop_imm(&image, x, y, chunk_size_32, chunk_size_32).to_image()
})
}
fn alias_sub_image(image: RgbImage, pixel_aliases: &PixelKeys) -> Vec<usize> {
image
.pixels()
.map(|p| *pixel_aliases.get_by_right(&p).unwrap())
.collect()
}
fn alias_pixels(image: &RgbImage) -> PixelKeys {
image
.pixels()
.unique()
.copied()
.enumerate()
.collect()
}
// returns the input image in unique chunks and frequencies of those chunks
fn chunk_image(
image: RgbImage,
chunk_size: usize,
pixel_aliases: &PixelKeys,
rotate: bool,
reflect_vertical: bool,
reflect_horizontal: bool,
reflect_diagonal: bool,
) -> IndexMap<Chunk, u16> {
sub_images(image, chunk_size)
.map(|sub_image| alias_sub_image(sub_image, pixel_aliases))
.fold(IndexMap::new(), |mut acc, aliases| {
let chunk = DMatrix::from_row_slice(chunk_size, chunk_size, &aliases);
if rotate {
let mut rot_chunk = chunk.clone();
for _ in 0..3 {
rot_chunk = rot_chunk.rotate_90();
push_chunk_frequency(rot_chunk.clone(), &mut acc);
}
}
if reflect_vertical {
push_chunk_frequency(chunk.reflect_vertical(), &mut acc);
}
if reflect_horizontal {
push_chunk_frequency(chunk.reflect_horizontal(), &mut acc);
}
if reflect_diagonal {
push_chunk_frequency(chunk.reflect_top_left(), &mut acc);
push_chunk_frequency(chunk.reflect_bottom_left(), &mut acc);
}
push_chunk_frequency(chunk, &mut acc);
acc
})
}
fn push_chunk_frequency(chunk: Chunk, frequencies: &mut IndexMap<Chunk, u16>) {
frequencies.entry(chunk).and_modify(|f| *f += 1).or_insert(1);
}
type Position = (usize, usize);
type Size = (usize, usize);
type Direction = u16;
fn sub_chunk_positions(chunk_size: usize) -> Vec<(Position, Size, Direction)> {
let period = chunk_size * 2 - 1;
let positions = Limit::new(chunk_size).zip(TriWave::new(chunk_size)).take(period);
let pos_cart_prod = positions.clone().cartesian_product(positions);
pos_cart_prod
.map(|((y_position, y_size), (x_position, x_size))| (
(x_position, y_position),
(x_size + 1, y_size + 1)
))
.filter(|(_, (width, height))| width != &chunk_size || height != &chunk_size)
.enumerate()
.map(|(direction, (position, size))| (
position,
size,
direction as u16
))
.collect()
}
fn overlaps(chunks: &IndexMap<Chunk, u16>, chunk_size: usize) -> Rules {
chunks
.keys()
.enumerate()
.fold(HashMap::new(), |mut rules, (label, chunk)| {
let sub_positions = sub_chunk_positions(chunk_size);
sub_positions
.iter()
.for_each(|(position, size, direction)| {
let sub_chunk = chunk.sub_matrix(*position, *size);
let reverse_index = sub_positions.len() - 1 - *direction as usize;
let (rev_pos, rev_size, _) = sub_positions[reverse_index];
chunks
.keys()
.enumerate()
.for_each(|(other_label, other_chunk)| {
// find mirrored sub chunk
let other_sub_chunk = other_chunk.sub_matrix(rev_pos, rev_size);
if sub_chunk == other_sub_chunk {
let mut set = MSu16xNU::empty();
set.insert(other_label, 1);
rules
.entry((*direction, label))
.and_modify(|l| l.add_assign(set)) | })
}
// Create a raw graph for pruning
fn create_raw_graph(all_labels: &MSu16xNU, chunk_size: usize, (height, width): (usize, usize)) -> Graph {
// pixel based graph dimensions
let v_dim_x = (width * chunk_size) - (chunk_size - 1);
let v_dim_y = (height * chunk_size) - (chunk_size - 1);
let vertices_len = v_dim_x * v_dim_y;
let vertices: Vec<MSu16xNU> = vec![*all_labels; vertices_len];
// create negative indexed range to offset vertex centered directional field by N
let signed_chunk_size: i32 = TryFrom::try_from(chunk_size)
.expect("Cannot convert chunk_size to i32");
let range = 1 - signed_chunk_size..signed_chunk_size;
// calculate real cartesian space offest coordinates
let range_cart_prod = range.clone()
.cartesian_product(range)
.filter(|i| i != &(0, 0)); // remove 0 offset for correct directional mapping
let edges: Edges = (0..vertices_len)
.fold(HashMap::new(), |mut acc, index| {
let (x, y) = index_to_coords(index, v_dim_x);
range_cart_prod
.clone()
.map(|(y_offset, x_offset)| (y as i32 + y_offset, x as i32 + x_offset))
.enumerate()
// remove coordinates outside of graph
.filter(|(_, offsets)| is_inside(*offsets, (v_dim_x, v_dim_y)))
.for_each(|(direction, (y_offset, x_offset))| {
let other_index = coords_to_index(x_offset as usize, y_offset as usize, v_dim_x);
acc
.entry(index as u32)
.and_modify(|v| v.push((other_index as u32, direction as u16)))
.or_insert(vec![(other_index as u32, direction as u16)]);
});
acc
});
Graph::new(vertices, edges, *all_labels)
}
fn propagate_overlaps(mut graph: Graph, rules: &Rules, label: usize) -> Graph {
let central_vertex = (graph.vertices.len() - 1) / 2;
graph.vertices.index_mut(central_vertex).choose(label);
collapse::collapse(rules, &graph, None, Some(1))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::utils::hash_map;
use image::ImageBuffer;
#[test]
fn test_alias_pixels() {
let pixels = vec![255, 255, 255, 0, 0, 0, 122, 122, 122, 96, 96, 96];
let img = ImageBuffer::from_vec(2, 2, pixels).unwrap();
let pixel_aliases = alias_pixels(&img);
assert_eq!(pixel_aliases.len(), 4);
}
#[test]
fn test_chunk_image() {
let img = image::open("resources/test/chunk_image_test.png").unwrap().to_rgb8();
let mut pixel_aliases: PixelKeys = BiMap::new();
pixel_aliases.insert(0, Rgb::from([255, 255, 255]));
pixel_aliases.insert(1, Rgb::from([0, 0, 0]));
let chunk_map = chunk_image(img, 2, &pixel_aliases, true, false, false, false);
let mut expected_map: IndexMap<Chunk, u16> = IndexMap::new();
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 0, 0, 0]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 0, 0]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 0, 1, 0]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 0, 0, 1]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 1, 1]), 2);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 0, 1, 1]), 2);
expected_map.insert(DMatrix::from_row_slice(2, 2, & | .or_insert(set);
}
})
});
rules | random_line_split |
olm_parser.rs | height_iter = 0..(image.dimensions().1) - (chunk_size_32 - 1);
let width_iter = 0..(image.dimensions().0) - (chunk_size_32 - 1);
height_iter
.cartesian_product(width_iter)
.map(move |(y, x)| {
imageops::crop_imm(&image, x, y, chunk_size_32, chunk_size_32).to_image()
})
}
fn alias_sub_image(image: RgbImage, pixel_aliases: &PixelKeys) -> Vec<usize> {
image
.pixels()
.map(|p| *pixel_aliases.get_by_right(&p).unwrap())
.collect()
}
fn alias_pixels(image: &RgbImage) -> PixelKeys {
image
.pixels()
.unique()
.copied()
.enumerate()
.collect()
}
// returns the input image in unique chunks and frequencies of those chunks
fn chunk_image(
image: RgbImage,
chunk_size: usize,
pixel_aliases: &PixelKeys,
rotate: bool,
reflect_vertical: bool,
reflect_horizontal: bool,
reflect_diagonal: bool,
) -> IndexMap<Chunk, u16> {
sub_images(image, chunk_size)
.map(|sub_image| alias_sub_image(sub_image, pixel_aliases))
.fold(IndexMap::new(), |mut acc, aliases| {
let chunk = DMatrix::from_row_slice(chunk_size, chunk_size, &aliases);
if rotate {
let mut rot_chunk = chunk.clone();
for _ in 0..3 {
rot_chunk = rot_chunk.rotate_90();
push_chunk_frequency(rot_chunk.clone(), &mut acc);
}
}
if reflect_vertical {
push_chunk_frequency(chunk.reflect_vertical(), &mut acc);
}
if reflect_horizontal {
push_chunk_frequency(chunk.reflect_horizontal(), &mut acc);
}
if reflect_diagonal {
push_chunk_frequency(chunk.reflect_top_left(), &mut acc);
push_chunk_frequency(chunk.reflect_bottom_left(), &mut acc);
}
push_chunk_frequency(chunk, &mut acc);
acc
})
}
fn push_chunk_frequency(chunk: Chunk, frequencies: &mut IndexMap<Chunk, u16>) {
frequencies.entry(chunk).and_modify(|f| *f += 1).or_insert(1);
}
type Position = (usize, usize);
type Size = (usize, usize);
type Direction = u16;
fn sub_chunk_positions(chunk_size: usize) -> Vec<(Position, Size, Direction)> {
let period = chunk_size * 2 - 1;
let positions = Limit::new(chunk_size).zip(TriWave::new(chunk_size)).take(period);
let pos_cart_prod = positions.clone().cartesian_product(positions);
pos_cart_prod
.map(|((y_position, y_size), (x_position, x_size))| (
(x_position, y_position),
(x_size + 1, y_size + 1)
))
.filter(|(_, (width, height))| width != &chunk_size || height != &chunk_size)
.enumerate()
.map(|(direction, (position, size))| (
position,
size,
direction as u16
))
.collect()
}
fn overlaps(chunks: &IndexMap<Chunk, u16>, chunk_size: usize) -> Rules {
chunks
.keys()
.enumerate()
.fold(HashMap::new(), |mut rules, (label, chunk)| {
let sub_positions = sub_chunk_positions(chunk_size);
sub_positions
.iter()
.for_each(|(position, size, direction)| {
let sub_chunk = chunk.sub_matrix(*position, *size);
let reverse_index = sub_positions.len() - 1 - *direction as usize;
let (rev_pos, rev_size, _) = sub_positions[reverse_index];
chunks
.keys()
.enumerate()
.for_each(|(other_label, other_chunk)| {
// find mirrored sub chunk
let other_sub_chunk = other_chunk.sub_matrix(rev_pos, rev_size);
if sub_chunk == other_sub_chunk {
let mut set = MSu16xNU::empty();
set.insert(other_label, 1);
rules
.entry((*direction, label))
.and_modify(|l| l.add_assign(set))
.or_insert(set);
}
})
});
rules
})
}
// Create a raw graph for pruning
fn create_raw_graph(all_labels: &MSu16xNU, chunk_size: usize, (height, width): (usize, usize)) -> Graph {
// pixel based graph dimensions
let v_dim_x = (width * chunk_size) - (chunk_size - 1);
let v_dim_y = (height * chunk_size) - (chunk_size - 1);
let vertices_len = v_dim_x * v_dim_y;
let vertices: Vec<MSu16xNU> = vec![*all_labels; vertices_len];
// create negative indexed range to offset vertex centered directional field by N
let signed_chunk_size: i32 = TryFrom::try_from(chunk_size)
.expect("Cannot convert chunk_size to i32");
let range = 1 - signed_chunk_size..signed_chunk_size;
// calculate real cartesian space offest coordinates
let range_cart_prod = range.clone()
.cartesian_product(range)
.filter(|i| i != &(0, 0)); // remove 0 offset for correct directional mapping
let edges: Edges = (0..vertices_len)
.fold(HashMap::new(), |mut acc, index| {
let (x, y) = index_to_coords(index, v_dim_x);
range_cart_prod
.clone()
.map(|(y_offset, x_offset)| (y as i32 + y_offset, x as i32 + x_offset))
.enumerate()
// remove coordinates outside of graph
.filter(|(_, offsets)| is_inside(*offsets, (v_dim_x, v_dim_y)))
.for_each(|(direction, (y_offset, x_offset))| {
let other_index = coords_to_index(x_offset as usize, y_offset as usize, v_dim_x);
acc
.entry(index as u32)
.and_modify(|v| v.push((other_index as u32, direction as u16)))
.or_insert(vec![(other_index as u32, direction as u16)]);
});
acc
});
Graph::new(vertices, edges, *all_labels)
}
fn propagate_overlaps(mut graph: Graph, rules: &Rules, label: usize) -> Graph {
let central_vertex = (graph.vertices.len() - 1) / 2;
graph.vertices.index_mut(central_vertex).choose(label);
collapse::collapse(rules, &graph, None, Some(1))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::utils::hash_map;
use image::ImageBuffer;
#[test]
fn test_alias_pixels() {
let pixels = vec![255, 255, 255, 0, 0, 0, 122, 122, 122, 96, 96, 96];
let img = ImageBuffer::from_vec(2, 2, pixels).unwrap();
let pixel_aliases = alias_pixels(&img);
assert_eq!(pixel_aliases.len(), 4);
}
#[test]
fn test_chunk_image() | {
let img = image::open("resources/test/chunk_image_test.png").unwrap().to_rgb8();
let mut pixel_aliases: PixelKeys = BiMap::new();
pixel_aliases.insert(0, Rgb::from([255, 255, 255]));
pixel_aliases.insert(1, Rgb::from([0, 0, 0]));
let chunk_map = chunk_image(img, 2, &pixel_aliases, true, false, false, false);
let mut expected_map: IndexMap<Chunk, u16> = IndexMap::new();
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 0, 0, 0]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 0, 0]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 0, 1, 0]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 0, 0, 1]), 1);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![0, 1, 1, 1]), 2);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 0, 1, 1]), 2);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 1, 0, 1]), 2);
expected_map.insert(DMatrix::from_row_slice(2, 2, &vec![1, 1, 1, 0]), 2);
assert_eq!(chunk_map.len(), 8);
expected_map | identifier_body | |
wasm.rs | v: InternalValue::from(v),
}
}
}
impl From<f32> for Value {
fn from(v: f32) -> Self {
Self {
t: PrimitiveType::from(v),
v: InternalValue::from(v),
}
}
}
impl From<f64> for Value {
fn from(v: f64) -> Self {
Self {
t: PrimitiveType::from(v),
v: InternalValue::from(v),
}
}
}
impl TryFrom<Value> for u32 {
type Error = Error;
fn try_from(x: Value) -> Result<u32, Error> {
match x.t {
PrimitiveType::I32 => Ok(unsafe { x.v.i32 as u32 }),
_ => Err(Error::Misc("Cannot extract as u32 from incorrect type")),
}
}
}
impl From<&PrimitiveType> for Value {
fn from(x: &PrimitiveType) -> Value {
match x {
PrimitiveType::I32 => Value::new(0_i32),
PrimitiveType::I64 => Value::new(0_i64),
PrimitiveType::F32 => Value::new(0_f32),
PrimitiveType::F64 => Value::new(0_f64),
}
}
}
impl std::fmt::Display for Value {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
unsafe {
match self.t {
PrimitiveType::I32 => {
write!(f, "(i32:{})", self.v.i32)
}
PrimitiveType::I64 => {
write!(f, "(i64:{})", self.v.i64)
}
PrimitiveType::F32 => {
write!(f, "(f32:{})", self.v.f32)
}
PrimitiveType::F64 => {
write!(f, "(f64:{})", self.v.f64)
}
}
}
}
}
/// Represents expected runtime errors, i.e. problems with the program, not the interpreter
pub enum Trap {
MemoryOutOfBounds,
UndefinedDivision,
}
pub enum ControlInfo {
Branch(u32),
Return,
Trap(Trap),
None,
}
/// Representation of a wasm stack.
/// All functions use a new stack when called.
#[derive(Default)]
pub struct Stack {
values: Vec<Value>,
}
impl Stack {
fn new() -> Self {
Self::default()
}
fn push_value(&mut self, v: Value) {
log::debug!("Pushing {}", v);
self.values.push(v);
}
pub fn pop_value(&mut self) -> Result<Value, Error> {
log::debug!("Current stack len {}", self.values.len());
if self.values.is_empty() {
Err(Error::StackViolation)
} else {
unsafe { Ok(self.values.pop().unwrap_unchecked()) }
}
}
/// Return the 0-indexed offset'th value from the stack (such that 0 is the most recently pushed value)
pub fn fetch_value(&self, offset: usize) -> Result<&Value, Error> {
let stack_size = self.values.len();
let offset_to_fetch = stack_size - 1 - offset;
match self.values.get(offset_to_fetch) {
Some(n) => Ok(n),
None => {
log::debug!("Try to read {} stack size {}", offset_to_fetch, stack_size);
Err(Error::StackViolation)
}
}
}
pub fn assert_empty(&self) -> Result<(), Error> {
if self.values.is_empty() {
Ok(())
} else {
Err(Error::StackViolation)
}
}
}
impl std::fmt::Display for Stack {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
write!(f, "Current stack:\n[")?;
for v in self.values.iter() {
writeln!(f, " {}", v)?;
}
write!(f, "]\n\n")?;
Ok(())
}
}
pub trait Instruction {
/// A wasm instruction may modify any state of the program
fn execute(
&self,
stack: &mut Stack,
memory: &mut Memory,
locals: &mut Vec<Value>,
functions: &Vec<Function>,
) -> Result<ControlInfo, Error>;
}
pub mod inst;
#[derive(Default)]
struct Table {
functions: Vec<usize>,
}
pub struct Function {
r#type: FunctionType,
local_types: Vec<PrimitiveType>,
instructions: Vec<Box<dyn Instruction>>,
}
impl Function {
pub fn new(r#type: FunctionType) -> Self {
Self {
r#type,
local_types: Vec::new(),
instructions: Vec::new(),
}
}
pub fn push_inst(&mut self, i: Box<dyn Instruction>) {
self.instructions.push(i);
}
pub fn num_params(&self) -> usize {
self.r#type.num_params()
}
pub fn num_locals(&self) -> usize {
self.local_types.len()
}
pub fn new_locals(&mut self, count: usize, t: PrimitiveType) {
self.local_types.reserve(count);
for _ in 0..count {
self.local_types.push(t);
}
}
fn do_return(mut stack: Stack) -> Result<Value, Error> {
let ret = stack.pop_value();
stack.assert_empty()?;
ret
}
pub fn call(
&self,
functions: &Vec<Function>,
memory: &mut Memory,
args: Vec<Value>,
) -> Result<Value, Error> {
let mut stack = Stack::new();
let mut locals = Vec::with_capacity(self.num_params() + self.num_locals());
for arg in args {
locals.push(arg);
}
for t in &self.local_types {
locals.push(Value::from(t));
}
for instruction in &self.instructions {
match instruction.execute(&mut stack, memory, &mut locals, functions)? {
ControlInfo::Return => {
return Self::do_return(stack);
}
ControlInfo::Trap(Trap::MemoryOutOfBounds) => panic!(), //TODO: don't panic, handle traps gracefully
ControlInfo::Trap(Trap::UndefinedDivision) => panic!(),
_ => (),
};
}
Self::do_return(stack)
}
}
#[derive(Default)]
pub struct Memory {
bytes: Vec<u8>,
virtual_size_pages: u32,
upper_limit_pages: u32,
}
const PAGE_SIZE: u64 = 0x10000;
impl Memory {
pub fn new(min: u32, max: u32) -> Self {
let mut s = Self {
bytes: Vec::with_capacity((PAGE_SIZE * min as u64) as usize),
virtual_size_pages: min,
upper_limit_pages: max,
};
s.write(PAGE_SIZE * min as u64, 32, 4); // It looks like
s
}
pub fn write(&mut self, mut value: u64, bitwidth: u8, address: u64) -> Option<()> {
log::debug!(
"Write to address 0x{:x} with bitwidth {} and value 0x{:x}",
address,
bitwidth,
value
);
if bitwidth % 8 != 0 {
// Probably don't even need to implement this
panic!();
}
let bytes_to_write = bitwidth / 8;
let last_write_address = address + bytes_to_write as u64;
// Check for out of bounds access
if last_write_address > PAGE_SIZE * self.virtual_size_pages as u64 {
return None;
}
// Resize internal vector if needed
if self.bytes.is_empty() || last_write_address > (self.bytes.len() - 1) as u64 {
self.bytes.resize((last_write_address + 1) as usize, 0);
}
for i in (address..(address + bytes_to_write as u64)).rev() {
self.bytes[i as usize] = (value & 0xFF) as u8;
value >>= 8;
}
Some(())
}
pub fn read(
&mut self,
result_type: PrimitiveType,
bitwidth: u8,
address: u64,
) -> Option<Value> {
let bytes_to_read = (bitwidth / 8) as u64;
let mut result = 0_u64;
for i in address..(address + bytes_to_read) {
result <<= 8;
result += self.bytes[i as usize] as u64;
}
log::debug!(
"Read from address 0x{:x} with bitwidth {} and value 0x{:x}",
address,
bitwidth,
result
);
Some(Value::from_explicit_type(result_type, result))
}
}
#[derive(Default, Clone)]
pub struct FunctionType {
pub params: Vec<PrimitiveType>,
pub returns: Vec<PrimitiveType>,
}
impl FunctionType {
pub fn new(params: Vec<PrimitiveType>, returns: Vec<PrimitiveType>) -> Self {
Self { params, returns }
}
pub fn | num_params | identifier_name | |
wasm.rs | }
}
}
impl std::fmt::Display for Value {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
unsafe {
match self.t {
PrimitiveType::I32 => {
write!(f, "(i32:{})", self.v.i32)
}
PrimitiveType::I64 => {
write!(f, "(i64:{})", self.v.i64)
}
PrimitiveType::F32 => {
write!(f, "(f32:{})", self.v.f32)
}
PrimitiveType::F64 => {
write!(f, "(f64:{})", self.v.f64)
}
}
}
}
}
/// Represents expected runtime errors, i.e. problems with the program, not the interpreter
pub enum Trap {
MemoryOutOfBounds,
UndefinedDivision,
}
pub enum ControlInfo {
Branch(u32),
Return,
Trap(Trap),
None,
}
/// Representation of a wasm stack.
/// All functions use a new stack when called.
#[derive(Default)]
pub struct Stack {
values: Vec<Value>,
}
impl Stack {
fn new() -> Self {
Self::default()
}
fn push_value(&mut self, v: Value) {
log::debug!("Pushing {}", v);
self.values.push(v);
}
pub fn pop_value(&mut self) -> Result<Value, Error> {
log::debug!("Current stack len {}", self.values.len());
if self.values.is_empty() {
Err(Error::StackViolation)
} else {
unsafe { Ok(self.values.pop().unwrap_unchecked()) }
}
}
/// Return the 0-indexed offset'th value from the stack (such that 0 is the most recently pushed value)
pub fn fetch_value(&self, offset: usize) -> Result<&Value, Error> {
let stack_size = self.values.len();
let offset_to_fetch = stack_size - 1 - offset;
match self.values.get(offset_to_fetch) {
Some(n) => Ok(n),
None => {
log::debug!("Try to read {} stack size {}", offset_to_fetch, stack_size);
Err(Error::StackViolation)
}
}
}
pub fn assert_empty(&self) -> Result<(), Error> {
if self.values.is_empty() {
Ok(())
} else {
Err(Error::StackViolation)
}
}
}
impl std::fmt::Display for Stack {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
write!(f, "Current stack:\n[")?;
for v in self.values.iter() {
writeln!(f, " {}", v)?;
}
write!(f, "]\n\n")?;
Ok(())
}
}
pub trait Instruction {
/// A wasm instruction may modify any state of the program
fn execute(
&self,
stack: &mut Stack,
memory: &mut Memory,
locals: &mut Vec<Value>,
functions: &Vec<Function>,
) -> Result<ControlInfo, Error>;
}
pub mod inst;
#[derive(Default)]
struct Table {
functions: Vec<usize>,
}
pub struct Function {
r#type: FunctionType,
local_types: Vec<PrimitiveType>,
instructions: Vec<Box<dyn Instruction>>,
}
impl Function {
pub fn new(r#type: FunctionType) -> Self {
Self {
r#type,
local_types: Vec::new(),
instructions: Vec::new(),
}
}
pub fn push_inst(&mut self, i: Box<dyn Instruction>) {
self.instructions.push(i);
}
pub fn num_params(&self) -> usize {
self.r#type.num_params()
}
pub fn num_locals(&self) -> usize {
self.local_types.len()
}
pub fn new_locals(&mut self, count: usize, t: PrimitiveType) {
self.local_types.reserve(count);
for _ in 0..count {
self.local_types.push(t);
}
}
fn do_return(mut stack: Stack) -> Result<Value, Error> {
let ret = stack.pop_value();
stack.assert_empty()?;
ret
}
pub fn call(
&self,
functions: &Vec<Function>,
memory: &mut Memory,
args: Vec<Value>,
) -> Result<Value, Error> {
let mut stack = Stack::new();
let mut locals = Vec::with_capacity(self.num_params() + self.num_locals());
for arg in args {
locals.push(arg);
}
for t in &self.local_types {
locals.push(Value::from(t));
}
for instruction in &self.instructions {
match instruction.execute(&mut stack, memory, &mut locals, functions)? {
ControlInfo::Return => {
return Self::do_return(stack);
}
ControlInfo::Trap(Trap::MemoryOutOfBounds) => panic!(), //TODO: don't panic, handle traps gracefully
ControlInfo::Trap(Trap::UndefinedDivision) => panic!(),
_ => (),
};
}
Self::do_return(stack)
}
}
#[derive(Default)]
pub struct Memory {
bytes: Vec<u8>,
virtual_size_pages: u32,
upper_limit_pages: u32,
}
const PAGE_SIZE: u64 = 0x10000;
impl Memory {
pub fn new(min: u32, max: u32) -> Self {
let mut s = Self {
bytes: Vec::with_capacity((PAGE_SIZE * min as u64) as usize),
virtual_size_pages: min,
upper_limit_pages: max,
};
s.write(PAGE_SIZE * min as u64, 32, 4); // It looks like
s
}
pub fn write(&mut self, mut value: u64, bitwidth: u8, address: u64) -> Option<()> {
log::debug!(
"Write to address 0x{:x} with bitwidth {} and value 0x{:x}",
address,
bitwidth,
value
);
if bitwidth % 8 != 0 {
// Probably don't even need to implement this
panic!();
}
let bytes_to_write = bitwidth / 8;
let last_write_address = address + bytes_to_write as u64;
// Check for out of bounds access
if last_write_address > PAGE_SIZE * self.virtual_size_pages as u64 {
return None;
}
// Resize internal vector if needed
if self.bytes.is_empty() || last_write_address > (self.bytes.len() - 1) as u64 {
self.bytes.resize((last_write_address + 1) as usize, 0);
}
for i in (address..(address + bytes_to_write as u64)).rev() {
self.bytes[i as usize] = (value & 0xFF) as u8;
value >>= 8;
}
Some(())
}
pub fn read(
&mut self,
result_type: PrimitiveType,
bitwidth: u8,
address: u64,
) -> Option<Value> {
let bytes_to_read = (bitwidth / 8) as u64;
let mut result = 0_u64;
for i in address..(address + bytes_to_read) {
result <<= 8;
result += self.bytes[i as usize] as u64;
}
log::debug!(
"Read from address 0x{:x} with bitwidth {} and value 0x{:x}",
address,
bitwidth,
result
);
Some(Value::from_explicit_type(result_type, result))
}
}
#[derive(Default, Clone)]
pub struct FunctionType {
pub params: Vec<PrimitiveType>,
pub returns: Vec<PrimitiveType>,
}
impl FunctionType {
pub fn new(params: Vec<PrimitiveType>, returns: Vec<PrimitiveType>) -> Self {
Self { params, returns }
}
pub fn num_params(&self) -> usize {
self.params.len()
}
pub fn params_iter(&self) -> std::slice::Iter<PrimitiveType> {
self.params.iter()
}
}
pub enum Export {
Function(usize),
Table(usize),
Memory(usize),
Global(usize),
}
#[derive(Default)]
pub struct Module {
function_types: Vec<FunctionType>,
functions: Vec<Function>,
exports: HashMap<String, Export>,
table: Table,
memory: Memory,
globals: Vec<Value>,
}
impl Module {
pub fn new() -> Self {
Self::default()
}
pub fn call(&mut self, function_name: &str, args: Vec<Value>) -> Result<Value, Error> {
let function_index = match self.exports.get(function_name) {
Some(Export::Function(n)) => *n,
_ => return Err(Error::Misc("On module call, given name is not a function")),
};
let function = match self.functions.get(function_index) {
Some(n) => n,
None => {
return Err(Error::Misc(
"Function index given by export section is not valid",
))
}
};
function.call(&self.functions, &mut self.memory, args)
} |
pub fn add_function_type(&mut self, ft: FunctionType) { | random_line_split | |
wasm.rs | InternalValue { f32: x }
}
}
impl From<f64> for InternalValue {
fn from(x: f64) -> InternalValue {
InternalValue { f64: x }
}
}
/// Representation of all wasm values
#[derive(Copy, Clone)]
pub struct Value {
t: PrimitiveType,
v: InternalValue,
}
impl Value {
pub fn new<T: Into<InternalValue> + Into<PrimitiveType> + Copy>(x: T) -> Self {
Self {
t: x.into(),
v: x.into(),
}
}
pub fn from_explicit_type(t: PrimitiveType, v: u64) -> Value {
Self {
t,
v: InternalValue { i64: v as i64 },
}
}
#[inline]
pub fn as_i32_unchecked(&self) -> i32 |
#[inline]
pub fn as_i64_unchecked(&self) -> i64 {
unsafe { self.v.i64 }
}
#[inline]
pub fn as_f32_unchecked(&self) -> f32 {
unsafe { self.v.f32 }
}
#[inline]
pub fn as_f64_unchecked(&self) -> f64 {
unsafe { self.v.f64 }
}
}
impl From<i32> for Value {
fn from(v: i32) -> Self {
Self {
t: PrimitiveType::from(v),
v: InternalValue::from(v),
}
}
}
impl From<i64> for Value {
fn from(v: i64) -> Self {
Self {
t: PrimitiveType::from(v),
v: InternalValue::from(v),
}
}
}
impl From<f32> for Value {
fn from(v: f32) -> Self {
Self {
t: PrimitiveType::from(v),
v: InternalValue::from(v),
}
}
}
impl From<f64> for Value {
fn from(v: f64) -> Self {
Self {
t: PrimitiveType::from(v),
v: InternalValue::from(v),
}
}
}
impl TryFrom<Value> for u32 {
type Error = Error;
fn try_from(x: Value) -> Result<u32, Error> {
match x.t {
PrimitiveType::I32 => Ok(unsafe { x.v.i32 as u32 }),
_ => Err(Error::Misc("Cannot extract as u32 from incorrect type")),
}
}
}
impl From<&PrimitiveType> for Value {
fn from(x: &PrimitiveType) -> Value {
match x {
PrimitiveType::I32 => Value::new(0_i32),
PrimitiveType::I64 => Value::new(0_i64),
PrimitiveType::F32 => Value::new(0_f32),
PrimitiveType::F64 => Value::new(0_f64),
}
}
}
impl std::fmt::Display for Value {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
unsafe {
match self.t {
PrimitiveType::I32 => {
write!(f, "(i32:{})", self.v.i32)
}
PrimitiveType::I64 => {
write!(f, "(i64:{})", self.v.i64)
}
PrimitiveType::F32 => {
write!(f, "(f32:{})", self.v.f32)
}
PrimitiveType::F64 => {
write!(f, "(f64:{})", self.v.f64)
}
}
}
}
}
/// Represents expected runtime errors, i.e. problems with the program, not the interpreter
pub enum Trap {
MemoryOutOfBounds,
UndefinedDivision,
}
pub enum ControlInfo {
Branch(u32),
Return,
Trap(Trap),
None,
}
/// Representation of a wasm stack.
/// All functions use a new stack when called.
#[derive(Default)]
pub struct Stack {
values: Vec<Value>,
}
impl Stack {
fn new() -> Self {
Self::default()
}
fn push_value(&mut self, v: Value) {
log::debug!("Pushing {}", v);
self.values.push(v);
}
pub fn pop_value(&mut self) -> Result<Value, Error> {
log::debug!("Current stack len {}", self.values.len());
if self.values.is_empty() {
Err(Error::StackViolation)
} else {
unsafe { Ok(self.values.pop().unwrap_unchecked()) }
}
}
/// Return the 0-indexed offset'th value from the stack (such that 0 is the most recently pushed value)
pub fn fetch_value(&self, offset: usize) -> Result<&Value, Error> {
let stack_size = self.values.len();
let offset_to_fetch = stack_size - 1 - offset;
match self.values.get(offset_to_fetch) {
Some(n) => Ok(n),
None => {
log::debug!("Try to read {} stack size {}", offset_to_fetch, stack_size);
Err(Error::StackViolation)
}
}
}
pub fn assert_empty(&self) -> Result<(), Error> {
if self.values.is_empty() {
Ok(())
} else {
Err(Error::StackViolation)
}
}
}
impl std::fmt::Display for Stack {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
write!(f, "Current stack:\n[")?;
for v in self.values.iter() {
writeln!(f, " {}", v)?;
}
write!(f, "]\n\n")?;
Ok(())
}
}
pub trait Instruction {
/// A wasm instruction may modify any state of the program
fn execute(
&self,
stack: &mut Stack,
memory: &mut Memory,
locals: &mut Vec<Value>,
functions: &Vec<Function>,
) -> Result<ControlInfo, Error>;
}
pub mod inst;
#[derive(Default)]
struct Table {
functions: Vec<usize>,
}
pub struct Function {
r#type: FunctionType,
local_types: Vec<PrimitiveType>,
instructions: Vec<Box<dyn Instruction>>,
}
impl Function {
pub fn new(r#type: FunctionType) -> Self {
Self {
r#type,
local_types: Vec::new(),
instructions: Vec::new(),
}
}
pub fn push_inst(&mut self, i: Box<dyn Instruction>) {
self.instructions.push(i);
}
pub fn num_params(&self) -> usize {
self.r#type.num_params()
}
pub fn num_locals(&self) -> usize {
self.local_types.len()
}
pub fn new_locals(&mut self, count: usize, t: PrimitiveType) {
self.local_types.reserve(count);
for _ in 0..count {
self.local_types.push(t);
}
}
fn do_return(mut stack: Stack) -> Result<Value, Error> {
let ret = stack.pop_value();
stack.assert_empty()?;
ret
}
pub fn call(
&self,
functions: &Vec<Function>,
memory: &mut Memory,
args: Vec<Value>,
) -> Result<Value, Error> {
let mut stack = Stack::new();
let mut locals = Vec::with_capacity(self.num_params() + self.num_locals());
for arg in args {
locals.push(arg);
}
for t in &self.local_types {
locals.push(Value::from(t));
}
for instruction in &self.instructions {
match instruction.execute(&mut stack, memory, &mut locals, functions)? {
ControlInfo::Return => {
return Self::do_return(stack);
}
ControlInfo::Trap(Trap::MemoryOutOfBounds) => panic!(), //TODO: don't panic, handle traps gracefully
ControlInfo::Trap(Trap::UndefinedDivision) => panic!(),
_ => (),
};
}
Self::do_return(stack)
}
}
#[derive(Default)]
pub struct Memory {
bytes: Vec<u8>,
virtual_size_pages: u32,
upper_limit_pages: u32,
}
const PAGE_SIZE: u64 = 0x10000;
impl Memory {
pub fn new(min: u32, max: u32) -> Self {
let mut s = Self {
bytes: Vec::with_capacity((PAGE_SIZE * min as u64) as usize),
virtual_size_pages: min,
upper_limit_pages: max,
};
s.write(PAGE_SIZE * min as u64, 32, 4); // It looks like
s
}
pub fn write(&mut self, mut value: u64, bitwidth: u8, address: u64) -> Option<()> {
log::debug!(
"Write to address 0x{:x} with bitwidth {} and value 0x{:x}",
address,
bitwidth,
value
);
if bitwidth % 8 != 0 {
// Probably don't even need to implement this
panic!();
}
let bytes_to_write = bitwidth / 8 | {
unsafe { self.v.i32 }
} | identifier_body |
main.go | ())
if len(m) < 2 {
continue
}
acc = m[1]
if len(acc) == 0 {
continue
}
p.accounts = append(p.accounts, acc)
assignForAccount(acc)
}
}
func (p *parser) generateClasses() {
p.classes = make([]bayesian.Class, 0, 10)
tomap := make(map[string]bool)
for _, t := range p.txns {
if t.skipClassification {
continue
}
tomap[t.To] = true
}
for _, a := range p.accounts {
tomap[a] = true
}
// remove this account as it would appear in many relevant transactions
delete(tomap, *account)
for to := range tomap {
p.classes = append(p.classes, bayesian.Class(to))
}
assertf(len(p.classes) > 1, "Expected some categories. Found none.")
if *tfidf {
p.cl = bayesian.NewClassifierTfIdf(p.classes...)
} else {
p.cl = bayesian.NewClassifier(p.classes...)
}
assertf(p.cl != nil, "Expected a valid classifier. Found nil.")
for _, t := range p.txns {
if _, has := tomap[t.To]; !has {
continue
}
p.cl.Learn(t.getTerms(), bayesian.Class(t.To))
}
if *tfidf {
p.cl.ConvertTermsFreqToTfIdf()
}
}
| score float64
pos int
}
type byScore []pair
func (b byScore) Len() int {
return len(b)
}
func (b byScore) Less(i int, j int) bool {
return b[i].score > b[j].score
}
func (b byScore) Swap(i int, j int) {
b[i], b[j] = b[j], b[i]
}
var trimWhitespace = regexp.MustCompile(`^[\s]+|[\s}]+$`)
var dedupWhitespace = regexp.MustCompile(`[\s]{2,}`)
func (t *txn) isFromJournal() bool {
return t.Key == nil
}
func (t *txn) getTerms() []string {
desc := strings.ToUpper(t.Desc)
desc = trimWhitespace.ReplaceAllString(desc, "")
desc = dedupWhitespace.ReplaceAllString(desc, " ")
terms := strings.Split(desc, " ")
terms = append(terms, "FullDesc: "+desc)
var cur float64
if t.isFromJournal() {
cur = t.Cur
} else {
cur = -t.Cur // we are looking for the opposite
}
var kind string
if cur >= 0 {
kind = "credit"
} else {
kind = "debit"
}
terms = append(terms, "Kind: "+kind)
terms = append(terms, "AmountClassFine: "+strconv.Itoa(getAmountClassFine(cur)))
terms = append(terms, "AmountClassCoarse: "+strconv.Itoa(getAmountClassCoarse(cur)))
if *debug {
fmt.Printf("getTerms(%s, %.2f) = %v\n", t.Desc, t.Cur, terms)
}
return terms
}
func getAmountClassFine(amount float64) int {
if amount == 0 {
return 0
}
log := math.Round(math.Log10(math.Abs(amount)) * 4)
class := int(math.Round(math.Pow(10, log/4)))
return class
}
func getAmountClassCoarse(amount float64) int {
if amount == 0 {
return 0
}
log := int(math.Ceil(math.Log10(math.Abs(amount))))
class := int(math.Round(math.Pow10(log)))
return class
}
func (p *parser) topHits(t *txn) []bayesian.Class {
terms := t.getTerms()
scores, _, _ := p.cl.LogScores(terms)
pairs := make([]pair, 0, len(scores))
var mean, stddev float64
for pos, score := range scores {
pairs = append(pairs, pair{score, pos})
mean += score
}
mean /= float64(len(scores))
for _, score := range scores {
stddev += math.Pow(score-mean, 2)
}
stddev /= float64(len(scores) - 1)
stddev = math.Sqrt(stddev)
if *debug {
fmt.Printf("stddev=%f\n", stddev)
}
sort.Sort(byScore(pairs))
result := make([]bayesian.Class, 0, 5)
last := pairs[0].score
for i := 0; i < mathex.Min(10, len(pairs)); i++ {
pr := pairs[i]
if math.Abs(pr.score-last) > stddev {
break
}
if *debug {
fmt.Printf("i=%d s=%.3g Class=%v\n", i, pr.score, p.classes[pr.pos])
}
result = append(result, p.classes[pr.pos])
last = pr.score
}
return result
}
func includeAll(dir string, data []byte) []byte {
final := make([]byte, len(data))
copy(final, data)
b := bytes.NewBuffer(data)
s := bufio.NewScanner(b)
for s.Scan() {
line := s.Text()
if !strings.HasPrefix(line, "include ") {
continue
}
fname := strings.Trim(line[8:], " \n")
include, err := ioutil.ReadFile(path.Join(dir, fname))
checkf(err, "Unable to read file: %v", fname)
final = append(final, include...)
}
return final
}
func parseDate(col string) (time.Time, bool) {
tm, err := time.Parse(*dateFormat, col)
if err == nil {
return tm, true
}
return time.Time{}, false
}
func parseCurrency(col string) (float64, bool) {
f, err := strconv.ParseFloat(col, 64)
return f, err == nil
}
func parseDescription(col string) (string, bool) {
return strings.Map(func(r rune) rune {
if r == '"' {
return -1
}
return r
}, col), true
}
func (p *parser) parseTransactionsFromCSV(in []byte) []txn {
ignored := make(map[int]bool)
if len(*ignore) > 0 {
for _, i := range strings.Split(*ignore, ",") {
pos, err := strconv.Atoi(i)
checkf(err, "Unable to convert to integer: %v", i)
ignored[pos] = true
}
}
result := make([]txn, 0, 100)
r := csv.NewReader(bytes.NewReader(in))
var t txn
var skipped int
for {
t = txn{}
cols, err := r.Read()
if err == io.EOF {
break
}
checkf(err, "Unable to read line: %v", strings.Join(cols, ", "))
if *skip > skipped {
skipped++
continue
}
var picked []string
for i, col := range cols {
if ignored[i] {
continue
}
picked = append(picked, col)
if date, ok := parseDate(col); ok {
t.Date = date
} else if f, ok := parseCurrency(col); ok {
if *inverseSign {
f = -f
}
t.Cur = f
} else if d, ok := parseDescription(col); ok {
t.Desc = d
}
}
if len(t.Desc) != 0 && !t.Date.IsZero() && t.Cur != 0.0 {
y, m, d := t.Date.Year(), t.Date.Month(), t.Date.Day()
t.Date = time.Date(y, m, d, 0, 0, 0, 0, time.UTC)
// Have a unique key for each transaction in CSV, so we can uniquely identify and
// persist them as we modify their category.
hash := sha256.New()
fmt.Fprintf(hash, "%s\t%s\t%.2f", t.Date.Format(stamp), t.Desc, t.Cur)
t.Key = hash.Sum(nil)
// check if it was reconciled before (in case we are restarted after a crash)
p.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket(bucketName)
v := b.Get(t.Key)
if v != nil {
dec := gob.NewDecoder(bytes.NewBuffer(v))
var td txn
if err := dec.Decode(&td); err == nil {
if t.Cur < 0 {
t.To = td.To
} else {
t.From = td.From
}
t.Done = true
}
}
return nil
})
result = append(result, t)
} else {
fmt.Println()
fmt.Printf("ERROR : Unable to parse transaction from the selected columns in CSV.\n")
fmt.Printf("Selected CSV : %v\n", strings.Join(picked, ", "))
fmt.Printf("Parsed Date : %v\n", t.Date)
fmt.Printf("Parsed Desc : %v\n", t.Desc)
fmt.Printf("Parsed Currency : %v\n", t.Cur)
log.Fatalln("Please ensure that the above CSV contains | type pair struct { | random_line_split |
main.go | () int {
return len(b)
}
func (b byScore) Less(i int, j int) bool {
return b[i].score > b[j].score
}
func (b byScore) Swap(i int, j int) {
b[i], b[j] = b[j], b[i]
}
var trimWhitespace = regexp.MustCompile(`^[\s]+|[\s}]+$`)
var dedupWhitespace = regexp.MustCompile(`[\s]{2,}`)
func (t *txn) isFromJournal() bool {
return t.Key == nil
}
func (t *txn) getTerms() []string {
desc := strings.ToUpper(t.Desc)
desc = trimWhitespace.ReplaceAllString(desc, "")
desc = dedupWhitespace.ReplaceAllString(desc, " ")
terms := strings.Split(desc, " ")
terms = append(terms, "FullDesc: "+desc)
var cur float64
if t.isFromJournal() {
cur = t.Cur
} else {
cur = -t.Cur // we are looking for the opposite
}
var kind string
if cur >= 0 {
kind = "credit"
} else {
kind = "debit"
}
terms = append(terms, "Kind: "+kind)
terms = append(terms, "AmountClassFine: "+strconv.Itoa(getAmountClassFine(cur)))
terms = append(terms, "AmountClassCoarse: "+strconv.Itoa(getAmountClassCoarse(cur)))
if *debug {
fmt.Printf("getTerms(%s, %.2f) = %v\n", t.Desc, t.Cur, terms)
}
return terms
}
func getAmountClassFine(amount float64) int {
if amount == 0 {
return 0
}
log := math.Round(math.Log10(math.Abs(amount)) * 4)
class := int(math.Round(math.Pow(10, log/4)))
return class
}
func getAmountClassCoarse(amount float64) int {
if amount == 0 {
return 0
}
log := int(math.Ceil(math.Log10(math.Abs(amount))))
class := int(math.Round(math.Pow10(log)))
return class
}
func (p *parser) topHits(t *txn) []bayesian.Class {
terms := t.getTerms()
scores, _, _ := p.cl.LogScores(terms)
pairs := make([]pair, 0, len(scores))
var mean, stddev float64
for pos, score := range scores {
pairs = append(pairs, pair{score, pos})
mean += score
}
mean /= float64(len(scores))
for _, score := range scores {
stddev += math.Pow(score-mean, 2)
}
stddev /= float64(len(scores) - 1)
stddev = math.Sqrt(stddev)
if *debug {
fmt.Printf("stddev=%f\n", stddev)
}
sort.Sort(byScore(pairs))
result := make([]bayesian.Class, 0, 5)
last := pairs[0].score
for i := 0; i < mathex.Min(10, len(pairs)); i++ {
pr := pairs[i]
if math.Abs(pr.score-last) > stddev {
break
}
if *debug {
fmt.Printf("i=%d s=%.3g Class=%v\n", i, pr.score, p.classes[pr.pos])
}
result = append(result, p.classes[pr.pos])
last = pr.score
}
return result
}
func includeAll(dir string, data []byte) []byte {
final := make([]byte, len(data))
copy(final, data)
b := bytes.NewBuffer(data)
s := bufio.NewScanner(b)
for s.Scan() {
line := s.Text()
if !strings.HasPrefix(line, "include ") {
continue
}
fname := strings.Trim(line[8:], " \n")
include, err := ioutil.ReadFile(path.Join(dir, fname))
checkf(err, "Unable to read file: %v", fname)
final = append(final, include...)
}
return final
}
func parseDate(col string) (time.Time, bool) {
tm, err := time.Parse(*dateFormat, col)
if err == nil {
return tm, true
}
return time.Time{}, false
}
func parseCurrency(col string) (float64, bool) {
f, err := strconv.ParseFloat(col, 64)
return f, err == nil
}
func parseDescription(col string) (string, bool) {
return strings.Map(func(r rune) rune {
if r == '"' {
return -1
}
return r
}, col), true
}
func (p *parser) parseTransactionsFromCSV(in []byte) []txn {
ignored := make(map[int]bool)
if len(*ignore) > 0 {
for _, i := range strings.Split(*ignore, ",") {
pos, err := strconv.Atoi(i)
checkf(err, "Unable to convert to integer: %v", i)
ignored[pos] = true
}
}
result := make([]txn, 0, 100)
r := csv.NewReader(bytes.NewReader(in))
var t txn
var skipped int
for {
t = txn{}
cols, err := r.Read()
if err == io.EOF {
break
}
checkf(err, "Unable to read line: %v", strings.Join(cols, ", "))
if *skip > skipped {
skipped++
continue
}
var picked []string
for i, col := range cols {
if ignored[i] {
continue
}
picked = append(picked, col)
if date, ok := parseDate(col); ok {
t.Date = date
} else if f, ok := parseCurrency(col); ok {
if *inverseSign {
f = -f
}
t.Cur = f
} else if d, ok := parseDescription(col); ok {
t.Desc = d
}
}
if len(t.Desc) != 0 && !t.Date.IsZero() && t.Cur != 0.0 {
y, m, d := t.Date.Year(), t.Date.Month(), t.Date.Day()
t.Date = time.Date(y, m, d, 0, 0, 0, 0, time.UTC)
// Have a unique key for each transaction in CSV, so we can uniquely identify and
// persist them as we modify their category.
hash := sha256.New()
fmt.Fprintf(hash, "%s\t%s\t%.2f", t.Date.Format(stamp), t.Desc, t.Cur)
t.Key = hash.Sum(nil)
// check if it was reconciled before (in case we are restarted after a crash)
p.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket(bucketName)
v := b.Get(t.Key)
if v != nil {
dec := gob.NewDecoder(bytes.NewBuffer(v))
var td txn
if err := dec.Decode(&td); err == nil {
if t.Cur < 0 {
t.To = td.To
} else {
t.From = td.From
}
t.Done = true
}
}
return nil
})
result = append(result, t)
} else {
fmt.Println()
fmt.Printf("ERROR : Unable to parse transaction from the selected columns in CSV.\n")
fmt.Printf("Selected CSV : %v\n", strings.Join(picked, ", "))
fmt.Printf("Parsed Date : %v\n", t.Date)
fmt.Printf("Parsed Desc : %v\n", t.Desc)
fmt.Printf("Parsed Currency : %v\n", t.Cur)
log.Fatalln("Please ensure that the above CSV contains ALL the 3 required fields.")
}
}
return result
}
func assignFor(opt string, cl bayesian.Class, keys map[rune]string) bool {
for i := 0; i < len(opt); i++ {
ch := rune(opt[i])
if _, has := keys[ch]; !has {
keys[ch] = string(cl)
return true
}
}
return false
}
func setDefaultMappings(ks *keys.Shortcuts) {
ks.BestEffortAssign('b', ".back", "default")
ks.BestEffortAssign('q', ".quit", "default")
ks.BestEffortAssign('a', ".show all", "default")
ks.BestEffortAssign('s', ".skip", "default")
}
type kv struct {
key rune
val string
}
type byVal []kv
func (b byVal) Len() int {
return len(b)
}
func (b byVal) Less(i int, j int) bool {
return b[i].val < b[j].val
}
func (b byVal) Swap(i int, j int) {
b[i], b[j] = b[j], b[i]
}
func singleCharMode() {
// disable input buffering
exec.Command("stty", "-F", "/dev/tty", "cbreak", "min", "1").Run()
// do not display entered characters on the screen
exec.Command("stty", "-F", "/dev/tty", "-echo").Run()
}
func saneMode() {
exec.Command("stty", "-F", "/dev/tty", "sane").Run()
}
func | getCategory | identifier_name | |
main.go |
return currentUser.HomeDir
}
var (
debug = flag.Bool("debug", false, "Additional debug information if set.")
journal = flag.String("j", "", "Existing journal to learn from.")
output = flag.String("o", "out.ldg", "Journal file to write to.")
csvFile = flag.String("csv", "", "File path of CSV file containing new transactions.")
account = flag.String("a", "", "Name of bank account transactions belong to.")
currency = flag.String("c", "", "Set currency if any.")
ignore = flag.String("ic", "", "Comma separated list of columns to ignore in CSV.")
dateFormat = flag.String("d", "01/02/2006", "Express your date format in numeric form w.r.t. Jan 02, 2006, separated by slashes (/). See: https://golang.org/pkg/time/")
skip = flag.Int("s", 0, "Number of header lines in CSV to skip")
configDir = flag.String("conf", homeDir()+"/.into-ledger", "Config directory to store various into-ledger configs in.")
shortcuts = flag.String("short", "shortcuts.yaml", "Name of shortcuts file.")
inverseSign = flag.Bool("inverseSign", false, "Inverse sign of transaction amounts in CSV.")
reverseCSV = flag.Bool("reverseCSV", false, "Reverse order of transactions in CSV")
allowDups = flag.Bool("allowDups", false, "Don't filter out duplicate transactions")
tfidf = flag.Bool("tfidf", false, "Use TF-IDF classification algorithm instead of Bayesian")
rtxn = regexp.MustCompile(`(\d{4}/\d{2}/\d{2})[\W]*(\w.*)`)
rto = regexp.MustCompile(`\W*([:\w]+)(.*)`)
rfrom = regexp.MustCompile(`\W*([:\w]+).*`)
rcur = regexp.MustCompile(`(\d+\.\d+|\d+)`)
racc = regexp.MustCompile(`^account[\W]+(.*)`)
ralias = regexp.MustCompile(`\balias\s(.*)`)
stamp = "2006/01/02"
bucketName = []byte("txns")
descLength = 40
catLength = 20
short *keys.Shortcuts
)
type accountFlags struct {
flags map[string]string
}
type configs struct {
Accounts map[string]map[string]string // account and the corresponding config.
}
type txn struct {
Date time.Time
Desc string
To string
From string
Cur float64
CurName string
Key []byte
skipClassification bool
Done bool
}
type byTime []txn
func (b byTime) Len() int { return len(b) }
func (b byTime) Less(i int, j int) bool { return b[i].Date.Before(b[j].Date) }
func (b byTime) Swap(i int, j int) { b[i], b[j] = b[j], b[i] }
func checkf(err error, format string, args ...interface{}) {
if err != nil {
log.Printf(format, args...)
log.Println()
log.Fatalf("%+v", errors.WithStack(err))
}
}
func assertf(ok bool, format string, args ...interface{}) {
if !ok {
log.Printf(format, args...)
log.Println()
log.Fatalf("%+v", errors.Errorf("Should be true, but is false"))
}
}
func assignForAccount(account string) {
tree := strings.Split(account, ":")
assertf(len(tree) > 0, "Expected at least one result. Found none for: %v", account)
short.AutoAssign(tree[0], "default")
prev := tree[0]
for _, c := range tree[1:] {
if len(c) == 0 {
continue
}
short.AutoAssign(c, prev)
prev = c
}
}
type parser struct {
db *bolt.DB
data []byte
txns []txn
classes []bayesian.Class
cl *bayesian.Classifier
accounts []string
}
func (p *parser) parseTransactions() {
out, err := exec.Command("ledger", "-f", *journal, "csv").Output()
checkf(err, "Unable to convert journal to csv. Possibly an issue with your ledger installation.")
r := csv.NewReader(newConverter(bytes.NewReader(out)))
var t txn
for {
cols, err := r.Read()
if err == io.EOF {
break
}
checkf(err, "Unable to read a csv line.")
t = txn{}
t.Date, err = time.Parse(stamp, cols[0])
checkf(err, "Unable to parse time: %v", cols[0])
t.Desc = strings.Trim(cols[2], " \n\t")
t.To = cols[3]
assertf(len(t.To) > 0, "Expected TO, found empty.")
if strings.HasPrefix(t.To, "Equity:") {
// Don't pick up Equity.
t.skipClassification = true
}
t.CurName = cols[4]
t.Cur, err = strconv.ParseFloat(cols[5], 64)
checkf(err, "Unable to parse amount.")
p.txns = append(p.txns, t)
assignForAccount(t.To)
}
}
func (p *parser) parseAccounts() {
s := bufio.NewScanner(bytes.NewReader(p.data))
var acc string
for s.Scan() {
m := racc.FindStringSubmatch(s.Text())
if len(m) < 2 {
continue
}
acc = m[1]
if len(acc) == 0 {
continue
}
p.accounts = append(p.accounts, acc)
assignForAccount(acc)
}
}
func (p *parser) generateClasses() {
p.classes = make([]bayesian.Class, 0, 10)
tomap := make(map[string]bool)
for _, t := range p.txns {
if t.skipClassification {
continue
}
tomap[t.To] = true
}
for _, a := range p.accounts {
tomap[a] = true
}
// remove this account as it would appear in many relevant transactions
delete(tomap, *account)
for to := range tomap {
p.classes = append(p.classes, bayesian.Class(to))
}
assertf(len(p.classes) > 1, "Expected some categories. Found none.")
if *tfidf {
p.cl = bayesian.NewClassifierTfIdf(p.classes...)
} else {
p.cl = bayesian.NewClassifier(p.classes...)
}
assertf(p.cl != nil, "Expected a valid classifier. Found nil.")
for _, t := range p.txns {
if _, has := tomap[t.To]; !has {
continue
}
p.cl.Learn(t.getTerms(), bayesian.Class(t.To))
}
if *tfidf {
p.cl.ConvertTermsFreqToTfIdf()
}
}
type pair struct {
score float64
pos int
}
type byScore []pair
func (b byScore) Len() int {
return len(b)
}
func (b byScore) Less(i int, j int) bool {
return b[i].score > b[j].score
}
func (b byScore) Swap(i int, j int) {
b[i], b[j] = b[j], b[i]
}
var trimWhitespace = regexp.MustCompile(`^[\s]+|[\s}]+$`)
var dedupWhitespace = regexp.MustCompile(`[\s]{2,}`)
func (t *txn) isFromJournal() bool {
return t.Key == nil
}
func (t *txn) getTerms() []string {
desc := strings.ToUpper(t.Desc)
desc = trimWhitespace.ReplaceAllString(desc, "")
desc = dedupWhitespace.ReplaceAllString(desc, " ")
terms := strings.Split(desc, " ")
terms = append(terms, "FullDesc: "+desc)
var cur float64
if t.isFromJournal() {
cur = t.Cur
} else {
cur = -t.Cur // we are looking for the opposite
}
var kind string
if cur >= 0 {
kind = "credit"
} else {
kind = "debit"
}
terms = append(terms, "Kind: "+kind)
terms = append(terms, "AmountClassFine: "+strconv.Itoa(getAmountClassFine(cur)))
terms = append(terms, "AmountClassCoarse: "+strconv.Itoa(getAmountClassCoarse(cur)))
if *debug {
fmt.Printf("getTerms(%s, %.2f) = %v\n", t.Desc, t.Cur, terms)
}
return terms
}
func getAmountClassFine(amount float64) int {
if amount == 0 {
return 0
}
log := math.Round(math.Log10(math.Abs(amount)) * 4)
class := int(math.Round(math.Pow(10, log/4)))
return class
}
func getAmountClassCoarse(amount float64) int {
if amount == 0 {
return 0
}
log := int(math.Ceil(math.Log10(math.Abs(amount))))
class := int(math.Round(math.Pow10(log)))
return class
}
func (p *parser) top | {
return ""
} | conditional_block | |
main.go | categories. Found none.")
if *tfidf {
p.cl = bayesian.NewClassifierTfIdf(p.classes...)
} else {
p.cl = bayesian.NewClassifier(p.classes...)
}
assertf(p.cl != nil, "Expected a valid classifier. Found nil.")
for _, t := range p.txns {
if _, has := tomap[t.To]; !has {
continue
}
p.cl.Learn(t.getTerms(), bayesian.Class(t.To))
}
if *tfidf {
p.cl.ConvertTermsFreqToTfIdf()
}
}
type pair struct {
score float64
pos int
}
type byScore []pair
func (b byScore) Len() int {
return len(b)
}
func (b byScore) Less(i int, j int) bool {
return b[i].score > b[j].score
}
func (b byScore) Swap(i int, j int) {
b[i], b[j] = b[j], b[i]
}
var trimWhitespace = regexp.MustCompile(`^[\s]+|[\s}]+$`)
var dedupWhitespace = regexp.MustCompile(`[\s]{2,}`)
func (t *txn) isFromJournal() bool {
return t.Key == nil
}
func (t *txn) getTerms() []string {
desc := strings.ToUpper(t.Desc)
desc = trimWhitespace.ReplaceAllString(desc, "")
desc = dedupWhitespace.ReplaceAllString(desc, " ")
terms := strings.Split(desc, " ")
terms = append(terms, "FullDesc: "+desc)
var cur float64
if t.isFromJournal() {
cur = t.Cur
} else {
cur = -t.Cur // we are looking for the opposite
}
var kind string
if cur >= 0 {
kind = "credit"
} else {
kind = "debit"
}
terms = append(terms, "Kind: "+kind)
terms = append(terms, "AmountClassFine: "+strconv.Itoa(getAmountClassFine(cur)))
terms = append(terms, "AmountClassCoarse: "+strconv.Itoa(getAmountClassCoarse(cur)))
if *debug {
fmt.Printf("getTerms(%s, %.2f) = %v\n", t.Desc, t.Cur, terms)
}
return terms
}
func getAmountClassFine(amount float64) int {
if amount == 0 {
return 0
}
log := math.Round(math.Log10(math.Abs(amount)) * 4)
class := int(math.Round(math.Pow(10, log/4)))
return class
}
func getAmountClassCoarse(amount float64) int {
if amount == 0 {
return 0
}
log := int(math.Ceil(math.Log10(math.Abs(amount))))
class := int(math.Round(math.Pow10(log)))
return class
}
func (p *parser) topHits(t *txn) []bayesian.Class {
terms := t.getTerms()
scores, _, _ := p.cl.LogScores(terms)
pairs := make([]pair, 0, len(scores))
var mean, stddev float64
for pos, score := range scores {
pairs = append(pairs, pair{score, pos})
mean += score
}
mean /= float64(len(scores))
for _, score := range scores {
stddev += math.Pow(score-mean, 2)
}
stddev /= float64(len(scores) - 1)
stddev = math.Sqrt(stddev)
if *debug {
fmt.Printf("stddev=%f\n", stddev)
}
sort.Sort(byScore(pairs))
result := make([]bayesian.Class, 0, 5)
last := pairs[0].score
for i := 0; i < mathex.Min(10, len(pairs)); i++ {
pr := pairs[i]
if math.Abs(pr.score-last) > stddev {
break
}
if *debug {
fmt.Printf("i=%d s=%.3g Class=%v\n", i, pr.score, p.classes[pr.pos])
}
result = append(result, p.classes[pr.pos])
last = pr.score
}
return result
}
func includeAll(dir string, data []byte) []byte {
final := make([]byte, len(data))
copy(final, data)
b := bytes.NewBuffer(data)
s := bufio.NewScanner(b)
for s.Scan() {
line := s.Text()
if !strings.HasPrefix(line, "include ") {
continue
}
fname := strings.Trim(line[8:], " \n")
include, err := ioutil.ReadFile(path.Join(dir, fname))
checkf(err, "Unable to read file: %v", fname)
final = append(final, include...)
}
return final
}
func parseDate(col string) (time.Time, bool) {
tm, err := time.Parse(*dateFormat, col)
if err == nil {
return tm, true
}
return time.Time{}, false
}
func parseCurrency(col string) (float64, bool) {
f, err := strconv.ParseFloat(col, 64)
return f, err == nil
}
func parseDescription(col string) (string, bool) {
return strings.Map(func(r rune) rune {
if r == '"' {
return -1
}
return r
}, col), true
}
func (p *parser) parseTransactionsFromCSV(in []byte) []txn {
ignored := make(map[int]bool)
if len(*ignore) > 0 {
for _, i := range strings.Split(*ignore, ",") {
pos, err := strconv.Atoi(i)
checkf(err, "Unable to convert to integer: %v", i)
ignored[pos] = true
}
}
result := make([]txn, 0, 100)
r := csv.NewReader(bytes.NewReader(in))
var t txn
var skipped int
for {
t = txn{}
cols, err := r.Read()
if err == io.EOF {
break
}
checkf(err, "Unable to read line: %v", strings.Join(cols, ", "))
if *skip > skipped {
skipped++
continue
}
var picked []string
for i, col := range cols {
if ignored[i] {
continue
}
picked = append(picked, col)
if date, ok := parseDate(col); ok {
t.Date = date
} else if f, ok := parseCurrency(col); ok {
if *inverseSign {
f = -f
}
t.Cur = f
} else if d, ok := parseDescription(col); ok {
t.Desc = d
}
}
if len(t.Desc) != 0 && !t.Date.IsZero() && t.Cur != 0.0 {
y, m, d := t.Date.Year(), t.Date.Month(), t.Date.Day()
t.Date = time.Date(y, m, d, 0, 0, 0, 0, time.UTC)
// Have a unique key for each transaction in CSV, so we can uniquely identify and
// persist them as we modify their category.
hash := sha256.New()
fmt.Fprintf(hash, "%s\t%s\t%.2f", t.Date.Format(stamp), t.Desc, t.Cur)
t.Key = hash.Sum(nil)
// check if it was reconciled before (in case we are restarted after a crash)
p.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket(bucketName)
v := b.Get(t.Key)
if v != nil {
dec := gob.NewDecoder(bytes.NewBuffer(v))
var td txn
if err := dec.Decode(&td); err == nil {
if t.Cur < 0 {
t.To = td.To
} else {
t.From = td.From
}
t.Done = true
}
}
return nil
})
result = append(result, t)
} else {
fmt.Println()
fmt.Printf("ERROR : Unable to parse transaction from the selected columns in CSV.\n")
fmt.Printf("Selected CSV : %v\n", strings.Join(picked, ", "))
fmt.Printf("Parsed Date : %v\n", t.Date)
fmt.Printf("Parsed Desc : %v\n", t.Desc)
fmt.Printf("Parsed Currency : %v\n", t.Cur)
log.Fatalln("Please ensure that the above CSV contains ALL the 3 required fields.")
}
}
return result
}
func assignFor(opt string, cl bayesian.Class, keys map[rune]string) bool {
for i := 0; i < len(opt); i++ {
ch := rune(opt[i])
if _, has := keys[ch]; !has {
keys[ch] = string(cl)
return true
}
}
return false
}
func setDefaultMappings(ks *keys.Shortcuts) {
ks.BestEffortAssign('b', ".back", "default")
ks.BestEffortAssign('q', ".quit", "default")
ks.BestEffortAssign('a', ".show all", "default")
ks.BestEffortAssign('s', ".skip", "default")
}
type kv struct {
key rune
val string
}
type byVal []kv
func (b byVal) Len() int | {
return len(b)
} | identifier_body | |
cargo_test.rs | , CargoTestError, Config, Test};
use cargo_util::{ProcessBuilder, ProcessError};
use std::ffi::OsString;
use std::path::{Path, PathBuf};
pub struct TestOptions {
pub compile_opts: ops::CompileOptions,
pub no_run: bool,
pub no_fail_fast: bool,
}
pub fn run_tests(
ws: &Workspace<'_>,
options: &TestOptions,
test_args: &[&str],
) -> CargoResult<Option<CargoTestError>> {
let compilation = compile_tests(ws, options)?;
if options.no_run {
if !options.compile_opts.build_config.emit_json() {
display_no_run_information(ws, test_args, &compilation, "unittests")?;
}
return Ok(None);
}
let (test, mut errors) = run_unit_tests(ws.config(), options, test_args, &compilation)?;
// If we have an error and want to fail fast, then return.
if !errors.is_empty() && !options.no_fail_fast {
return Ok(Some(CargoTestError::new(test, errors)));
}
let (doctest, docerrors) = run_doc_tests(ws, options, test_args, &compilation)?;
let test = if docerrors.is_empty() | else { doctest };
errors.extend(docerrors);
if errors.is_empty() {
Ok(None)
} else {
Ok(Some(CargoTestError::new(test, errors)))
}
}
pub fn run_benches(
ws: &Workspace<'_>,
options: &TestOptions,
args: &[&str],
) -> CargoResult<Option<CargoTestError>> {
let compilation = compile_tests(ws, options)?;
if options.no_run {
if !options.compile_opts.build_config.emit_json() {
display_no_run_information(ws, args, &compilation, "benches")?;
}
return Ok(None);
}
let mut args = args.to_vec();
args.push("--bench");
let (test, errors) = run_unit_tests(ws.config(), options, &args, &compilation)?;
match errors.len() {
0 => Ok(None),
_ => Ok(Some(CargoTestError::new(test, errors))),
}
}
fn compile_tests<'a>(ws: &Workspace<'a>, options: &TestOptions) -> CargoResult<Compilation<'a>> {
let mut compilation = ops::compile(ws, &options.compile_opts)?;
compilation.tests.sort();
Ok(compilation)
}
/// Runs the unit and integration tests of a package.
fn run_unit_tests(
config: &Config,
options: &TestOptions,
test_args: &[&str],
compilation: &Compilation<'_>,
) -> CargoResult<(Test, Vec<ProcessError>)> {
let cwd = config.cwd();
let mut errors = Vec::new();
for UnitOutput {
unit,
path,
script_meta,
} in compilation.tests.iter()
{
let (exe_display, cmd) = cmd_builds(
config,
cwd,
unit,
path,
script_meta,
test_args,
compilation,
"unittests",
)?;
config
.shell()
.concise(|shell| shell.status("Running", &exe_display))?;
config
.shell()
.verbose(|shell| shell.status("Running", &cmd))?;
let result = cmd.exec();
if let Err(e) = result {
let e = e.downcast::<ProcessError>()?;
errors.push((
unit.target.kind().clone(),
unit.target.name().to_string(),
unit.pkg.name().to_string(),
e,
));
if !options.no_fail_fast {
break;
}
}
}
if errors.len() == 1 {
let (kind, name, pkg_name, e) = errors.pop().unwrap();
Ok((
Test::UnitTest {
kind,
name,
pkg_name,
},
vec![e],
))
} else {
Ok((
Test::Multiple,
errors.into_iter().map(|(_, _, _, e)| e).collect(),
))
}
}
fn run_doc_tests(
ws: &Workspace<'_>,
options: &TestOptions,
test_args: &[&str],
compilation: &Compilation<'_>,
) -> CargoResult<(Test, Vec<ProcessError>)> {
let config = ws.config();
let mut errors = Vec::new();
let doctest_xcompile = config.cli_unstable().doctest_xcompile;
let doctest_in_workspace = config.cli_unstable().doctest_in_workspace;
for doctest_info in &compilation.to_doc_test {
let Doctest {
args,
unstable_opts,
unit,
linker,
script_meta,
env,
} = doctest_info;
if !doctest_xcompile {
match unit.kind {
CompileKind::Host => {}
CompileKind::Target(target) => {
if target.short_name() != compilation.host {
// Skip doctests, -Zdoctest-xcompile not enabled.
config.shell().verbose(|shell| {
shell.note(format!(
"skipping doctests for {} ({}), \
cross-compilation doctests are not yet supported\n\
See https://doc.rust-lang.org/nightly/cargo/reference/unstable.html#doctest-xcompile \
for more information.",
unit.pkg,
unit.target.description_named()
))
})?;
continue;
}
}
}
}
config.shell().status("Doc-tests", unit.target.name())?;
let mut p = compilation.rustdoc_process(unit, *script_meta)?;
for (var, value) in env {
p.env(var, value);
}
p.arg("--crate-name").arg(&unit.target.crate_name());
p.arg("--test");
if doctest_in_workspace {
add_path_args(ws, unit, &mut p);
// FIXME(swatinem): remove the `unstable-options` once rustdoc stabilizes the `test-run-directory` option
p.arg("-Z").arg("unstable-options");
p.arg("--test-run-directory")
.arg(unit.pkg.root().to_path_buf());
} else {
p.arg(unit.target.src_path().path().unwrap());
}
if let CompileKind::Target(target) = unit.kind {
// use `rustc_target()` to properly handle JSON target paths
p.arg("--target").arg(target.rustc_target());
}
if doctest_xcompile {
p.arg("-Zunstable-options");
p.arg("--enable-per-target-ignores");
if let Some((runtool, runtool_args)) = compilation.target_runner(unit.kind) {
p.arg("--runtool").arg(runtool);
for arg in runtool_args {
p.arg("--runtool-arg").arg(arg);
}
}
if let Some(linker) = linker {
let mut joined = OsString::from("linker=");
joined.push(linker);
p.arg("-C").arg(joined);
}
}
for &rust_dep in &[
&compilation.deps_output[&unit.kind],
&compilation.deps_output[&CompileKind::Host],
] {
let mut arg = OsString::from("dependency=");
arg.push(rust_dep);
p.arg("-L").arg(arg);
}
for native_dep in compilation.native_dirs.iter() {
p.arg("-L").arg(native_dep);
}
for arg in test_args {
p.arg("--test-args").arg(arg);
}
if config.shell().verbosity() == Verbosity::Quiet {
p.arg("--test-args").arg("--quiet");
}
p.args(args);
if *unstable_opts {
p.arg("-Zunstable-options");
}
config
.shell()
.verbose(|shell| shell.status("Running", p.to_string()))?;
if let Err(e) = p.exec() {
let e = e.downcast::<ProcessError>()?;
errors.push(e);
if !options.no_fail_fast {
return Ok((Test::Doc, errors));
}
}
}
Ok((Test::Doc, errors))
}
fn display_no_run_information(
ws: &Workspace<'_>,
test_args: &[&str],
compilation: &Compilation<'_>,
exec_type: &str,
) -> CargoResult<()> {
let config = ws.config();
let cwd = config.cwd();
for UnitOutput {
unit,
path,
script_meta,
} in compilation.tests.iter()
{
let (exe_display, cmd) = cmd_builds(
config,
cwd,
unit,
path,
script_meta,
test_args,
compilation,
exec_type,
)?;
config
.shell()
.concise(|shell| shell.status("Executable", &exe_display))?;
config
.shell()
.verbose(|shell| shell.status("Executable", &cmd))?;
}
return Ok(());
}
fn cmd_builds(
config: &Config,
cwd: &Path,
unit: &Unit,
path: &PathBuf,
script_meta: &Option<Metadata>,
test_args: &[&str],
compilation: &Compilation<'_>,
exec_type: &str,
) -> CargoResult<(String, ProcessBuilder)> {
let test_path = unit.target.src_path().path().unwrap();
let short_test_path = test_path
.strip_prefix(unit.pkg.root())
.unwrap_or(test_path)
| { test } | conditional_block |
cargo_test.rs | , CargoTestError, Config, Test};
use cargo_util::{ProcessBuilder, ProcessError};
use std::ffi::OsString;
use std::path::{Path, PathBuf};
pub struct TestOptions {
pub compile_opts: ops::CompileOptions,
pub no_run: bool,
pub no_fail_fast: bool,
}
pub fn run_tests(
ws: &Workspace<'_>,
options: &TestOptions,
test_args: &[&str],
) -> CargoResult<Option<CargoTestError>> {
let compilation = compile_tests(ws, options)?;
if options.no_run {
if !options.compile_opts.build_config.emit_json() {
display_no_run_information(ws, test_args, &compilation, "unittests")?;
}
return Ok(None);
}
let (test, mut errors) = run_unit_tests(ws.config(), options, test_args, &compilation)?;
// If we have an error and want to fail fast, then return.
if !errors.is_empty() && !options.no_fail_fast {
return Ok(Some(CargoTestError::new(test, errors)));
}
let (doctest, docerrors) = run_doc_tests(ws, options, test_args, &compilation)?;
let test = if docerrors.is_empty() { test } else { doctest };
errors.extend(docerrors);
if errors.is_empty() {
Ok(None)
} else {
Ok(Some(CargoTestError::new(test, errors)))
}
}
pub fn run_benches(
ws: &Workspace<'_>,
options: &TestOptions,
args: &[&str],
) -> CargoResult<Option<CargoTestError>> {
let compilation = compile_tests(ws, options)?;
if options.no_run {
if !options.compile_opts.build_config.emit_json() {
display_no_run_information(ws, args, &compilation, "benches")?;
}
return Ok(None);
}
let mut args = args.to_vec();
args.push("--bench");
let (test, errors) = run_unit_tests(ws.config(), options, &args, &compilation)?;
match errors.len() { | _ => Ok(Some(CargoTestError::new(test, errors))),
}
}
fn compile_tests<'a>(ws: &Workspace<'a>, options: &TestOptions) -> CargoResult<Compilation<'a>> {
let mut compilation = ops::compile(ws, &options.compile_opts)?;
compilation.tests.sort();
Ok(compilation)
}
/// Runs the unit and integration tests of a package.
fn run_unit_tests(
config: &Config,
options: &TestOptions,
test_args: &[&str],
compilation: &Compilation<'_>,
) -> CargoResult<(Test, Vec<ProcessError>)> {
let cwd = config.cwd();
let mut errors = Vec::new();
for UnitOutput {
unit,
path,
script_meta,
} in compilation.tests.iter()
{
let (exe_display, cmd) = cmd_builds(
config,
cwd,
unit,
path,
script_meta,
test_args,
compilation,
"unittests",
)?;
config
.shell()
.concise(|shell| shell.status("Running", &exe_display))?;
config
.shell()
.verbose(|shell| shell.status("Running", &cmd))?;
let result = cmd.exec();
if let Err(e) = result {
let e = e.downcast::<ProcessError>()?;
errors.push((
unit.target.kind().clone(),
unit.target.name().to_string(),
unit.pkg.name().to_string(),
e,
));
if !options.no_fail_fast {
break;
}
}
}
if errors.len() == 1 {
let (kind, name, pkg_name, e) = errors.pop().unwrap();
Ok((
Test::UnitTest {
kind,
name,
pkg_name,
},
vec![e],
))
} else {
Ok((
Test::Multiple,
errors.into_iter().map(|(_, _, _, e)| e).collect(),
))
}
}
fn run_doc_tests(
ws: &Workspace<'_>,
options: &TestOptions,
test_args: &[&str],
compilation: &Compilation<'_>,
) -> CargoResult<(Test, Vec<ProcessError>)> {
let config = ws.config();
let mut errors = Vec::new();
let doctest_xcompile = config.cli_unstable().doctest_xcompile;
let doctest_in_workspace = config.cli_unstable().doctest_in_workspace;
for doctest_info in &compilation.to_doc_test {
let Doctest {
args,
unstable_opts,
unit,
linker,
script_meta,
env,
} = doctest_info;
if !doctest_xcompile {
match unit.kind {
CompileKind::Host => {}
CompileKind::Target(target) => {
if target.short_name() != compilation.host {
// Skip doctests, -Zdoctest-xcompile not enabled.
config.shell().verbose(|shell| {
shell.note(format!(
"skipping doctests for {} ({}), \
cross-compilation doctests are not yet supported\n\
See https://doc.rust-lang.org/nightly/cargo/reference/unstable.html#doctest-xcompile \
for more information.",
unit.pkg,
unit.target.description_named()
))
})?;
continue;
}
}
}
}
config.shell().status("Doc-tests", unit.target.name())?;
let mut p = compilation.rustdoc_process(unit, *script_meta)?;
for (var, value) in env {
p.env(var, value);
}
p.arg("--crate-name").arg(&unit.target.crate_name());
p.arg("--test");
if doctest_in_workspace {
add_path_args(ws, unit, &mut p);
// FIXME(swatinem): remove the `unstable-options` once rustdoc stabilizes the `test-run-directory` option
p.arg("-Z").arg("unstable-options");
p.arg("--test-run-directory")
.arg(unit.pkg.root().to_path_buf());
} else {
p.arg(unit.target.src_path().path().unwrap());
}
if let CompileKind::Target(target) = unit.kind {
// use `rustc_target()` to properly handle JSON target paths
p.arg("--target").arg(target.rustc_target());
}
if doctest_xcompile {
p.arg("-Zunstable-options");
p.arg("--enable-per-target-ignores");
if let Some((runtool, runtool_args)) = compilation.target_runner(unit.kind) {
p.arg("--runtool").arg(runtool);
for arg in runtool_args {
p.arg("--runtool-arg").arg(arg);
}
}
if let Some(linker) = linker {
let mut joined = OsString::from("linker=");
joined.push(linker);
p.arg("-C").arg(joined);
}
}
for &rust_dep in &[
&compilation.deps_output[&unit.kind],
&compilation.deps_output[&CompileKind::Host],
] {
let mut arg = OsString::from("dependency=");
arg.push(rust_dep);
p.arg("-L").arg(arg);
}
for native_dep in compilation.native_dirs.iter() {
p.arg("-L").arg(native_dep);
}
for arg in test_args {
p.arg("--test-args").arg(arg);
}
if config.shell().verbosity() == Verbosity::Quiet {
p.arg("--test-args").arg("--quiet");
}
p.args(args);
if *unstable_opts {
p.arg("-Zunstable-options");
}
config
.shell()
.verbose(|shell| shell.status("Running", p.to_string()))?;
if let Err(e) = p.exec() {
let e = e.downcast::<ProcessError>()?;
errors.push(e);
if !options.no_fail_fast {
return Ok((Test::Doc, errors));
}
}
}
Ok((Test::Doc, errors))
}
fn display_no_run_information(
ws: &Workspace<'_>,
test_args: &[&str],
compilation: &Compilation<'_>,
exec_type: &str,
) -> CargoResult<()> {
let config = ws.config();
let cwd = config.cwd();
for UnitOutput {
unit,
path,
script_meta,
} in compilation.tests.iter()
{
let (exe_display, cmd) = cmd_builds(
config,
cwd,
unit,
path,
script_meta,
test_args,
compilation,
exec_type,
)?;
config
.shell()
.concise(|shell| shell.status("Executable", &exe_display))?;
config
.shell()
.verbose(|shell| shell.status("Executable", &cmd))?;
}
return Ok(());
}
fn cmd_builds(
config: &Config,
cwd: &Path,
unit: &Unit,
path: &PathBuf,
script_meta: &Option<Metadata>,
test_args: &[&str],
compilation: &Compilation<'_>,
exec_type: &str,
) -> CargoResult<(String, ProcessBuilder)> {
let test_path = unit.target.src_path().path().unwrap();
let short_test_path = test_path
.strip_prefix(unit.pkg.root())
.unwrap_or(test_path)
. | 0 => Ok(None), | random_line_split |
cargo_test.rs | , CargoTestError, Config, Test};
use cargo_util::{ProcessBuilder, ProcessError};
use std::ffi::OsString;
use std::path::{Path, PathBuf};
pub struct TestOptions {
pub compile_opts: ops::CompileOptions,
pub no_run: bool,
pub no_fail_fast: bool,
}
pub fn run_tests(
ws: &Workspace<'_>,
options: &TestOptions,
test_args: &[&str],
) -> CargoResult<Option<CargoTestError>> {
let compilation = compile_tests(ws, options)?;
if options.no_run {
if !options.compile_opts.build_config.emit_json() {
display_no_run_information(ws, test_args, &compilation, "unittests")?;
}
return Ok(None);
}
let (test, mut errors) = run_unit_tests(ws.config(), options, test_args, &compilation)?;
// If we have an error and want to fail fast, then return.
if !errors.is_empty() && !options.no_fail_fast {
return Ok(Some(CargoTestError::new(test, errors)));
}
let (doctest, docerrors) = run_doc_tests(ws, options, test_args, &compilation)?;
let test = if docerrors.is_empty() { test } else { doctest };
errors.extend(docerrors);
if errors.is_empty() {
Ok(None)
} else {
Ok(Some(CargoTestError::new(test, errors)))
}
}
pub fn run_benches(
ws: &Workspace<'_>,
options: &TestOptions,
args: &[&str],
) -> CargoResult<Option<CargoTestError>> {
let compilation = compile_tests(ws, options)?;
if options.no_run {
if !options.compile_opts.build_config.emit_json() {
display_no_run_information(ws, args, &compilation, "benches")?;
}
return Ok(None);
}
let mut args = args.to_vec();
args.push("--bench");
let (test, errors) = run_unit_tests(ws.config(), options, &args, &compilation)?;
match errors.len() {
0 => Ok(None),
_ => Ok(Some(CargoTestError::new(test, errors))),
}
}
fn compile_tests<'a>(ws: &Workspace<'a>, options: &TestOptions) -> CargoResult<Compilation<'a>> {
let mut compilation = ops::compile(ws, &options.compile_opts)?;
compilation.tests.sort();
Ok(compilation)
}
/// Runs the unit and integration tests of a package.
fn run_unit_tests(
config: &Config,
options: &TestOptions,
test_args: &[&str],
compilation: &Compilation<'_>,
) -> CargoResult<(Test, Vec<ProcessError>)> {
let cwd = config.cwd();
let mut errors = Vec::new();
for UnitOutput {
unit,
path,
script_meta,
} in compilation.tests.iter()
{
let (exe_display, cmd) = cmd_builds(
config,
cwd,
unit,
path,
script_meta,
test_args,
compilation,
"unittests",
)?;
config
.shell()
.concise(|shell| shell.status("Running", &exe_display))?;
config
.shell()
.verbose(|shell| shell.status("Running", &cmd))?;
let result = cmd.exec();
if let Err(e) = result {
let e = e.downcast::<ProcessError>()?;
errors.push((
unit.target.kind().clone(),
unit.target.name().to_string(),
unit.pkg.name().to_string(),
e,
));
if !options.no_fail_fast {
break;
}
}
}
if errors.len() == 1 {
let (kind, name, pkg_name, e) = errors.pop().unwrap();
Ok((
Test::UnitTest {
kind,
name,
pkg_name,
},
vec![e],
))
} else {
Ok((
Test::Multiple,
errors.into_iter().map(|(_, _, _, e)| e).collect(),
))
}
}
fn | (
ws: &Workspace<'_>,
options: &TestOptions,
test_args: &[&str],
compilation: &Compilation<'_>,
) -> CargoResult<(Test, Vec<ProcessError>)> {
let config = ws.config();
let mut errors = Vec::new();
let doctest_xcompile = config.cli_unstable().doctest_xcompile;
let doctest_in_workspace = config.cli_unstable().doctest_in_workspace;
for doctest_info in &compilation.to_doc_test {
let Doctest {
args,
unstable_opts,
unit,
linker,
script_meta,
env,
} = doctest_info;
if !doctest_xcompile {
match unit.kind {
CompileKind::Host => {}
CompileKind::Target(target) => {
if target.short_name() != compilation.host {
// Skip doctests, -Zdoctest-xcompile not enabled.
config.shell().verbose(|shell| {
shell.note(format!(
"skipping doctests for {} ({}), \
cross-compilation doctests are not yet supported\n\
See https://doc.rust-lang.org/nightly/cargo/reference/unstable.html#doctest-xcompile \
for more information.",
unit.pkg,
unit.target.description_named()
))
})?;
continue;
}
}
}
}
config.shell().status("Doc-tests", unit.target.name())?;
let mut p = compilation.rustdoc_process(unit, *script_meta)?;
for (var, value) in env {
p.env(var, value);
}
p.arg("--crate-name").arg(&unit.target.crate_name());
p.arg("--test");
if doctest_in_workspace {
add_path_args(ws, unit, &mut p);
// FIXME(swatinem): remove the `unstable-options` once rustdoc stabilizes the `test-run-directory` option
p.arg("-Z").arg("unstable-options");
p.arg("--test-run-directory")
.arg(unit.pkg.root().to_path_buf());
} else {
p.arg(unit.target.src_path().path().unwrap());
}
if let CompileKind::Target(target) = unit.kind {
// use `rustc_target()` to properly handle JSON target paths
p.arg("--target").arg(target.rustc_target());
}
if doctest_xcompile {
p.arg("-Zunstable-options");
p.arg("--enable-per-target-ignores");
if let Some((runtool, runtool_args)) = compilation.target_runner(unit.kind) {
p.arg("--runtool").arg(runtool);
for arg in runtool_args {
p.arg("--runtool-arg").arg(arg);
}
}
if let Some(linker) = linker {
let mut joined = OsString::from("linker=");
joined.push(linker);
p.arg("-C").arg(joined);
}
}
for &rust_dep in &[
&compilation.deps_output[&unit.kind],
&compilation.deps_output[&CompileKind::Host],
] {
let mut arg = OsString::from("dependency=");
arg.push(rust_dep);
p.arg("-L").arg(arg);
}
for native_dep in compilation.native_dirs.iter() {
p.arg("-L").arg(native_dep);
}
for arg in test_args {
p.arg("--test-args").arg(arg);
}
if config.shell().verbosity() == Verbosity::Quiet {
p.arg("--test-args").arg("--quiet");
}
p.args(args);
if *unstable_opts {
p.arg("-Zunstable-options");
}
config
.shell()
.verbose(|shell| shell.status("Running", p.to_string()))?;
if let Err(e) = p.exec() {
let e = e.downcast::<ProcessError>()?;
errors.push(e);
if !options.no_fail_fast {
return Ok((Test::Doc, errors));
}
}
}
Ok((Test::Doc, errors))
}
fn display_no_run_information(
ws: &Workspace<'_>,
test_args: &[&str],
compilation: &Compilation<'_>,
exec_type: &str,
) -> CargoResult<()> {
let config = ws.config();
let cwd = config.cwd();
for UnitOutput {
unit,
path,
script_meta,
} in compilation.tests.iter()
{
let (exe_display, cmd) = cmd_builds(
config,
cwd,
unit,
path,
script_meta,
test_args,
compilation,
exec_type,
)?;
config
.shell()
.concise(|shell| shell.status("Executable", &exe_display))?;
config
.shell()
.verbose(|shell| shell.status("Executable", &cmd))?;
}
return Ok(());
}
fn cmd_builds(
config: &Config,
cwd: &Path,
unit: &Unit,
path: &PathBuf,
script_meta: &Option<Metadata>,
test_args: &[&str],
compilation: &Compilation<'_>,
exec_type: &str,
) -> CargoResult<(String, ProcessBuilder)> {
let test_path = unit.target.src_path().path().unwrap();
let short_test_path = test_path
.strip_prefix(unit.pkg.root())
.unwrap_or(test_path)
| run_doc_tests | identifier_name |
lib.rs | where R: Read, F: Facade
{
// building the freetype library
// FIXME: call FT_Done_Library
let library = unsafe {
// taken from https://github.com/PistonDevelopers/freetype-rs/blob/master/src/library.rs
extern "C" fn alloc_library(_memory: freetype::FT_Memory, size: libc::c_long) -> *mut libc::c_void {
unsafe {
libc::malloc(size as libc::size_t)
}
}
extern "C" fn free_library(_memory: freetype::FT_Memory, block: *mut libc::c_void) {
unsafe {
libc::free(block)
}
}
extern "C" fn realloc_library(_memory: freetype::FT_Memory,
_cur_size: libc::c_long,
new_size: libc::c_long,
block: *mut libc::c_void) -> *mut libc::c_void {
unsafe {
libc::realloc(block, new_size as libc::size_t)
}
}
static mut MEMORY: freetype::FT_MemoryRec = freetype::FT_MemoryRec {
user: 0 as *mut libc::c_void,
alloc: alloc_library,
free: free_library,
realloc: realloc_library,
};
let mut raw = ::std::ptr::null_mut();
if freetype::FT_New_Library(&mut MEMORY, &mut raw) != freetype::FT_Err_Ok {
return Err(());
}
freetype::FT_Add_Default_Modules(raw);
raw
};
// building the freetype face object
let font: Vec<u8> = font.bytes().map(|c| c.unwrap()).collect();
let face: freetype::FT_Face = unsafe {
let mut face = ::std::ptr::null_mut();
let err = freetype::FT_New_Memory_Face(library, font.as_ptr(),
font.len() as freetype::FT_Long, 0, &mut face);
if err == freetype::FT_Err_Ok {
face
} else {
return Err(());
}
};
// computing the list of characters in the font
let characters_list = unsafe {
// TODO: unresolved symbol
/*if freetype::FT_Select_CharMap(face, freetype::FT_ENCODING_UNICODE) != 0 {
return Err(());
}*/
let mut result = Vec::new();
let mut g: freetype::FT_UInt = std::mem::uninitialized();
let mut c = freetype::FT_Get_First_Char(face, &mut g);
while g != 0 {
result.push(std::mem::transmute(c as u32)); // TODO: better solution?
c = freetype::FT_Get_Next_Char(face, c, &mut g);
}
result
};
// building the infos
let (texture_data, chr_infos, em_pixels) = unsafe {
build_font_image(face, characters_list, font_size)
};
// we load the texture in the display
let texture = glium::texture::Texture2d::new(facade, &texture_data).unwrap(); | texture: texture,
character_infos: chr_infos,
em_pixels: em_pixels,
})
}
/// Return the size of an em-unit for the generated font texture.
/// This is needed for a pixel-perfect display: the text geometry is scaled so that
/// 1em == 1 unit. We must scale the geometry up by em_pixels to match the screen pixels.
pub fn em_pixels(&self) -> u32 {
self.em_pixels
}
}
/*impl glium::uniforms::AsUniformValue for FontTexture {
fn as_uniform_value(&self) -> glium::uniforms::UniformValue {
glium::uniforms::AsUniformValue::as_uniform_value(&self.texture)
}
}*/
impl TextSystem {
/// Builds a new text system that must be used to build `TextDisplay` objects.
pub fn new<F>(facade: &F) -> TextSystem where F: Facade {
TextSystem {
context: facade.get_context().clone(),
program: program!(facade,
140 => {
vertex: "
#version 140
uniform mat4 matrix;
in vec2 position;
in vec2 tex_coords;
out vec2 v_tex_coords;
void main() {
gl_Position = matrix * vec4(position, 0.0, 1.0);
v_tex_coords = tex_coords;
}
",
fragment: "
#version 140
in vec2 v_tex_coords;
out vec4 f_color;
uniform vec4 color;
uniform sampler2D tex;
void main() {
vec4 c = vec4(color.rgb, color.a * texture(tex, v_tex_coords));
if (c.a <= 0.01) {
discard;
} else {
f_color = c;
}
}
"
},
110 => {
vertex: "
#version 110
attribute vec2 position;
attribute vec2 tex_coords;
varying vec2 v_tex_coords;
uniform mat4 matrix;
void main() {
gl_Position = matrix * vec4(position.x, position.y, 0.0, 1.0);
v_tex_coords = tex_coords;
}
",
fragment: "
#version 110
varying vec2 v_tex_coords;
uniform vec4 color;
uniform sampler2D tex;
void main() {
gl_FragColor = vec4(color.rgb, color.a * texture2D(tex, v_tex_coords));
if (gl_FragColor.a <= 0.01) {
discard;
}
}
"
},
).unwrap()
}
}
}
impl<F> TextDisplay<F> where F: Deref<Target=FontTexture> {
/// Builds a new text display that allows you to draw text.
pub fn new(system: &TextSystem, texture: F, text: &str) -> TextDisplay<F> {
let mut text_display = TextDisplay {
context: system.context.clone(),
texture: texture,
vertex_buffer: None,
index_buffer: None,
char_pos_x: vec![],
is_empty: true,
};
text_display.set_text(text);
text_display
}
/// Return the x-positions (in em-units) of the breaks between characters.
/// When a character starts at n-th byte, then get_char_pos_x()[n] is the x-pos of the character.
/// The last value of the array is the x-pos of the end of the string
pub fn get_char_pos_x(&self) -> &[f32] {
&self.char_pos_x
}
/// Modifies the text on this display.
pub fn set_text(&mut self, text: &str) {
self.is_empty = true;
self.char_pos_x = vec![0.];
self.vertex_buffer = None;
self.index_buffer = None;
// returning if no text
if text.len() == 0 {
return;
}
// these arrays will contain the vertex buffer and index buffer data
let mut vertex_buffer_data = Vec::with_capacity(text.len() * 4 * 4);
let mut index_buffer_data = Vec::with_capacity(text.len() * 6);
// iterating over the characters of the string
let mut pos_x = 0.;
for character in text.chars() { // FIXME: wrong, but only thing stable
let infos = match self.texture.character_infos
.iter().find(|&&(chr, _)| chr == character)
{
Some(infos) => infos,
None => continue // character not found in the font, ignoring it
};
let infos = infos.1;
self.is_empty = false;
// adding the quad in the index buffer
{
let first_vertex_offset = vertex_buffer_data.len() as u16;
index_buffer_data.push(first_vertex_offset);
index_buffer_data.push(first_vertex_offset + 1);
index_buffer_data.push(first_vertex_offset + 2);
index_buffer_data.push(first_vertex_offset + 2);
index_buffer_data.push(first_vertex_offset + 1);
index_buffer_data.push(first_vertex_offset + 3);
}
//
pos_x += infos.left_padding;
// calculating coords
let left_coord = pos_x;
let right_coord = left_coord + infos.size.0;
let top_coord = infos.height_over_line;
let bottom_coord = infos.height_over_line - infos.size.1;
// top-left vertex
vertex_buffer_data.push(VertexFormat {
position: [left_coord, top_coord],
tex_coords: [infos.tex_coords.0, infos.tex_coords.1],
});
// top-right vertex
vertex_buffer_data.push(VertexFormat {
position: [right_coord, top_coord],
tex_coords: [infos.tex_coords.0 + infos.tex_size.0, infos.tex_coords.1],
});
// bottom-left vertex
vertex_buffer_data.push(VertexFormat {
position: [left_coord, bottom_coord],
tex_coords: [infos.tex_coords.0, infos.tex_coords.1 + infos.tex_size.1],
});
// bottom-right vertex
vertex_buffer_data.push(VertexFormat {
|
Ok(FontTexture { | random_line_split |
lib.rs | where R: Read, F: Facade
{
// building the freetype library
// FIXME: call FT_Done_Library
let library = unsafe {
// taken from https://github.com/PistonDevelopers/freetype-rs/blob/master/src/library.rs
extern "C" fn alloc_library(_memory: freetype::FT_Memory, size: libc::c_long) -> *mut libc::c_void {
unsafe {
libc::malloc(size as libc::size_t)
}
}
extern "C" fn free_library(_memory: freetype::FT_Memory, block: *mut libc::c_void) {
unsafe {
libc::free(block)
}
}
extern "C" fn realloc_library(_memory: freetype::FT_Memory,
_cur_size: libc::c_long,
new_size: libc::c_long,
block: *mut libc::c_void) -> *mut libc::c_void {
unsafe {
libc::realloc(block, new_size as libc::size_t)
}
}
static mut MEMORY: freetype::FT_MemoryRec = freetype::FT_MemoryRec {
user: 0 as *mut libc::c_void,
alloc: alloc_library,
free: free_library,
realloc: realloc_library,
};
let mut raw = ::std::ptr::null_mut();
if freetype::FT_New_Library(&mut MEMORY, &mut raw) != freetype::FT_Err_Ok {
return Err(());
}
freetype::FT_Add_Default_Modules(raw);
raw
};
// building the freetype face object
let font: Vec<u8> = font.bytes().map(|c| c.unwrap()).collect();
let face: freetype::FT_Face = unsafe {
let mut face = ::std::ptr::null_mut();
let err = freetype::FT_New_Memory_Face(library, font.as_ptr(),
font.len() as freetype::FT_Long, 0, &mut face);
if err == freetype::FT_Err_Ok {
face
} else {
return Err(());
}
};
// computing the list of characters in the font
let characters_list = unsafe {
// TODO: unresolved symbol
/*if freetype::FT_Select_CharMap(face, freetype::FT_ENCODING_UNICODE) != 0 {
return Err(());
}*/
let mut result = Vec::new();
let mut g: freetype::FT_UInt = std::mem::uninitialized();
let mut c = freetype::FT_Get_First_Char(face, &mut g);
while g != 0 {
result.push(std::mem::transmute(c as u32)); // TODO: better solution?
c = freetype::FT_Get_Next_Char(face, c, &mut g);
}
result
};
// building the infos
let (texture_data, chr_infos, em_pixels) = unsafe {
build_font_image(face, characters_list, font_size)
};
// we load the texture in the display
let texture = glium::texture::Texture2d::new(facade, &texture_data).unwrap();
Ok(FontTexture {
texture: texture,
character_infos: chr_infos,
em_pixels: em_pixels,
})
}
/// Return the size of an em-unit for the generated font texture.
/// This is needed for a pixel-perfect display: the text geometry is scaled so that
/// 1em == 1 unit. We must scale the geometry up by em_pixels to match the screen pixels.
pub fn em_pixels(&self) -> u32 {
self.em_pixels
}
}
/*impl glium::uniforms::AsUniformValue for FontTexture {
fn as_uniform_value(&self) -> glium::uniforms::UniformValue {
glium::uniforms::AsUniformValue::as_uniform_value(&self.texture)
}
}*/
impl TextSystem {
/// Builds a new text system that must be used to build `TextDisplay` objects.
pub fn new<F>(facade: &F) -> TextSystem where F: Facade {
TextSystem {
context: facade.get_context().clone(),
program: program!(facade,
140 => {
vertex: "
#version 140
uniform mat4 matrix;
in vec2 position;
in vec2 tex_coords;
out vec2 v_tex_coords;
void main() {
gl_Position = matrix * vec4(position, 0.0, 1.0);
v_tex_coords = tex_coords;
}
",
fragment: "
#version 140
in vec2 v_tex_coords;
out vec4 f_color;
uniform vec4 color;
uniform sampler2D tex;
void main() {
vec4 c = vec4(color.rgb, color.a * texture(tex, v_tex_coords));
if (c.a <= 0.01) {
discard;
} else {
f_color = c;
}
}
"
},
110 => {
vertex: "
#version 110
attribute vec2 position;
attribute vec2 tex_coords;
varying vec2 v_tex_coords;
uniform mat4 matrix;
void main() {
gl_Position = matrix * vec4(position.x, position.y, 0.0, 1.0);
v_tex_coords = tex_coords;
}
",
fragment: "
#version 110
varying vec2 v_tex_coords;
uniform vec4 color;
uniform sampler2D tex;
void main() {
gl_FragColor = vec4(color.rgb, color.a * texture2D(tex, v_tex_coords));
if (gl_FragColor.a <= 0.01) {
discard;
}
}
"
},
).unwrap()
}
}
}
impl<F> TextDisplay<F> where F: Deref<Target=FontTexture> {
/// Builds a new text display that allows you to draw text.
pub fn new(system: &TextSystem, texture: F, text: &str) -> TextDisplay<F> |
/// Return the x-positions (in em-units) of the breaks between characters.
/// When a character starts at n-th byte, then get_char_pos_x()[n] is the x-pos of the character.
/// The last value of the array is the x-pos of the end of the string
pub fn get_char_pos_x(&self) -> &[f32] {
&self.char_pos_x
}
/// Modifies the text on this display.
pub fn set_text(&mut self, text: &str) {
self.is_empty = true;
self.char_pos_x = vec![0.];
self.vertex_buffer = None;
self.index_buffer = None;
// returning if no text
if text.len() == 0 {
return;
}
// these arrays will contain the vertex buffer and index buffer data
let mut vertex_buffer_data = Vec::with_capacity(text.len() * 4 * 4);
let mut index_buffer_data = Vec::with_capacity(text.len() * 6);
// iterating over the characters of the string
let mut pos_x = 0.;
for character in text.chars() { // FIXME: wrong, but only thing stable
let infos = match self.texture.character_infos
.iter().find(|&&(chr, _)| chr == character)
{
Some(infos) => infos,
None => continue // character not found in the font, ignoring it
};
let infos = infos.1;
self.is_empty = false;
// adding the quad in the index buffer
{
let first_vertex_offset = vertex_buffer_data.len() as u16;
index_buffer_data.push(first_vertex_offset);
index_buffer_data.push(first_vertex_offset + 1);
index_buffer_data.push(first_vertex_offset + 2);
index_buffer_data.push(first_vertex_offset + 2);
index_buffer_data.push(first_vertex_offset + 1);
index_buffer_data.push(first_vertex_offset + 3);
}
//
pos_x += infos.left_padding;
// calculating coords
let left_coord = pos_x;
let right_coord = left_coord + infos.size.0;
let top_coord = infos.height_over_line;
let bottom_coord = infos.height_over_line - infos.size.1;
// top-left vertex
vertex_buffer_data.push(VertexFormat {
position: [left_coord, top_coord],
tex_coords: [infos.tex_coords.0, infos.tex_coords.1],
});
// top-right vertex
vertex_buffer_data.push(VertexFormat {
position: [right_coord, top_coord],
tex_coords: [infos.tex_coords.0 + infos.tex_size.0, infos.tex_coords.1],
});
// bottom-left vertex
vertex_buffer_data.push(VertexFormat {
position: [left_coord, bottom_coord],
tex_coords: [infos.tex_coords.0, infos.tex_coords.1 + infos.tex_size.1],
});
// bottom-right vertex
vertex_buffer_data.push(VertexFormat | {
let mut text_display = TextDisplay {
context: system.context.clone(),
texture: texture,
vertex_buffer: None,
index_buffer: None,
char_pos_x: vec![],
is_empty: true,
};
text_display.set_text(text);
text_display
} | identifier_body |
lib.rs | realloc: realloc_library,
};
let mut raw = ::std::ptr::null_mut();
if freetype::FT_New_Library(&mut MEMORY, &mut raw) != freetype::FT_Err_Ok {
return Err(());
}
freetype::FT_Add_Default_Modules(raw);
raw
};
// building the freetype face object
let font: Vec<u8> = font.bytes().map(|c| c.unwrap()).collect();
let face: freetype::FT_Face = unsafe {
let mut face = ::std::ptr::null_mut();
let err = freetype::FT_New_Memory_Face(library, font.as_ptr(),
font.len() as freetype::FT_Long, 0, &mut face);
if err == freetype::FT_Err_Ok {
face
} else {
return Err(());
}
};
// computing the list of characters in the font
let characters_list = unsafe {
// TODO: unresolved symbol
/*if freetype::FT_Select_CharMap(face, freetype::FT_ENCODING_UNICODE) != 0 {
return Err(());
}*/
let mut result = Vec::new();
let mut g: freetype::FT_UInt = std::mem::uninitialized();
let mut c = freetype::FT_Get_First_Char(face, &mut g);
while g != 0 {
result.push(std::mem::transmute(c as u32)); // TODO: better solution?
c = freetype::FT_Get_Next_Char(face, c, &mut g);
}
result
};
// building the infos
let (texture_data, chr_infos, em_pixels) = unsafe {
build_font_image(face, characters_list, font_size)
};
// we load the texture in the display
let texture = glium::texture::Texture2d::new(facade, &texture_data).unwrap();
Ok(FontTexture {
texture: texture,
character_infos: chr_infos,
em_pixels: em_pixels,
})
}
/// Return the size of an em-unit for the generated font texture.
/// This is needed for a pixel-perfect display: the text geometry is scaled so that
/// 1em == 1 unit. We must scale the geometry up by em_pixels to match the screen pixels.
pub fn em_pixels(&self) -> u32 {
self.em_pixels
}
}
/*impl glium::uniforms::AsUniformValue for FontTexture {
fn as_uniform_value(&self) -> glium::uniforms::UniformValue {
glium::uniforms::AsUniformValue::as_uniform_value(&self.texture)
}
}*/
impl TextSystem {
/// Builds a new text system that must be used to build `TextDisplay` objects.
pub fn new<F>(facade: &F) -> TextSystem where F: Facade {
TextSystem {
context: facade.get_context().clone(),
program: program!(facade,
140 => {
vertex: "
#version 140
uniform mat4 matrix;
in vec2 position;
in vec2 tex_coords;
out vec2 v_tex_coords;
void main() {
gl_Position = matrix * vec4(position, 0.0, 1.0);
v_tex_coords = tex_coords;
}
",
fragment: "
#version 140
in vec2 v_tex_coords;
out vec4 f_color;
uniform vec4 color;
uniform sampler2D tex;
void main() {
vec4 c = vec4(color.rgb, color.a * texture(tex, v_tex_coords));
if (c.a <= 0.01) {
discard;
} else {
f_color = c;
}
}
"
},
110 => {
vertex: "
#version 110
attribute vec2 position;
attribute vec2 tex_coords;
varying vec2 v_tex_coords;
uniform mat4 matrix;
void main() {
gl_Position = matrix * vec4(position.x, position.y, 0.0, 1.0);
v_tex_coords = tex_coords;
}
",
fragment: "
#version 110
varying vec2 v_tex_coords;
uniform vec4 color;
uniform sampler2D tex;
void main() {
gl_FragColor = vec4(color.rgb, color.a * texture2D(tex, v_tex_coords));
if (gl_FragColor.a <= 0.01) {
discard;
}
}
"
},
).unwrap()
}
}
}
impl<F> TextDisplay<F> where F: Deref<Target=FontTexture> {
/// Builds a new text display that allows you to draw text.
pub fn new(system: &TextSystem, texture: F, text: &str) -> TextDisplay<F> {
let mut text_display = TextDisplay {
context: system.context.clone(),
texture: texture,
vertex_buffer: None,
index_buffer: None,
char_pos_x: vec![],
is_empty: true,
};
text_display.set_text(text);
text_display
}
/// Return the x-positions (in em-units) of the breaks between characters.
/// When a character starts at n-th byte, then get_char_pos_x()[n] is the x-pos of the character.
/// The last value of the array is the x-pos of the end of the string
pub fn get_char_pos_x(&self) -> &[f32] {
&self.char_pos_x
}
/// Modifies the text on this display.
pub fn set_text(&mut self, text: &str) {
self.is_empty = true;
self.char_pos_x = vec![0.];
self.vertex_buffer = None;
self.index_buffer = None;
// returning if no text
if text.len() == 0 {
return;
}
// these arrays will contain the vertex buffer and index buffer data
let mut vertex_buffer_data = Vec::with_capacity(text.len() * 4 * 4);
let mut index_buffer_data = Vec::with_capacity(text.len() * 6);
// iterating over the characters of the string
let mut pos_x = 0.;
for character in text.chars() { // FIXME: wrong, but only thing stable
let infos = match self.texture.character_infos
.iter().find(|&&(chr, _)| chr == character)
{
Some(infos) => infos,
None => continue // character not found in the font, ignoring it
};
let infos = infos.1;
self.is_empty = false;
// adding the quad in the index buffer
{
let first_vertex_offset = vertex_buffer_data.len() as u16;
index_buffer_data.push(first_vertex_offset);
index_buffer_data.push(first_vertex_offset + 1);
index_buffer_data.push(first_vertex_offset + 2);
index_buffer_data.push(first_vertex_offset + 2);
index_buffer_data.push(first_vertex_offset + 1);
index_buffer_data.push(first_vertex_offset + 3);
}
//
pos_x += infos.left_padding;
// calculating coords
let left_coord = pos_x;
let right_coord = left_coord + infos.size.0;
let top_coord = infos.height_over_line;
let bottom_coord = infos.height_over_line - infos.size.1;
// top-left vertex
vertex_buffer_data.push(VertexFormat {
position: [left_coord, top_coord],
tex_coords: [infos.tex_coords.0, infos.tex_coords.1],
});
// top-right vertex
vertex_buffer_data.push(VertexFormat {
position: [right_coord, top_coord],
tex_coords: [infos.tex_coords.0 + infos.tex_size.0, infos.tex_coords.1],
});
// bottom-left vertex
vertex_buffer_data.push(VertexFormat {
position: [left_coord, bottom_coord],
tex_coords: [infos.tex_coords.0, infos.tex_coords.1 + infos.tex_size.1],
});
// bottom-right vertex
vertex_buffer_data.push(VertexFormat {
position: [right_coord, bottom_coord],
tex_coords: [
infos.tex_coords.0 + infos.tex_size.0,
infos.tex_coords.1 + infos.tex_size.1
],
});
// going to next char
pos_x = right_coord + infos.right_padding;
for _ in 0..character.len_utf8() {
self.char_pos_x.push(pos_x);
}
}
if !vertex_buffer_data.len() != 0 {
// building the vertex buffer
self.vertex_buffer = Some(glium::VertexBuffer::new(&self.context,
&vertex_buffer_data).unwrap());
// building the index buffer
self.index_buffer = Some(glium::IndexBuffer::new(&self.context,
glium::index::PrimitiveType::TrianglesList,
&index_buffer_data).unwrap());
}
}
}
///
/// ## About the matrix
///
/// The matrix must be column-major post-muliplying (which is the usual way to do in OpenGL).
///
/// One unit in height corresponds to a line of text, but the text can go above or under.
/// The bottom of the line is at `0.0`, the top is at `1.0`.
/// You need to adapt your matrix by taking these into consideration.
pub fn | draw | identifier_name | |
lib.rs | new_size: libc::c_long,
block: *mut libc::c_void) -> *mut libc::c_void {
unsafe {
libc::realloc(block, new_size as libc::size_t)
}
}
static mut MEMORY: freetype::FT_MemoryRec = freetype::FT_MemoryRec {
user: 0 as *mut libc::c_void,
alloc: alloc_library,
free: free_library,
realloc: realloc_library,
};
let mut raw = ::std::ptr::null_mut();
if freetype::FT_New_Library(&mut MEMORY, &mut raw) != freetype::FT_Err_Ok {
return Err(());
}
freetype::FT_Add_Default_Modules(raw);
raw
};
// building the freetype face object
let font: Vec<u8> = font.bytes().map(|c| c.unwrap()).collect();
let face: freetype::FT_Face = unsafe {
let mut face = ::std::ptr::null_mut();
let err = freetype::FT_New_Memory_Face(library, font.as_ptr(),
font.len() as freetype::FT_Long, 0, &mut face);
if err == freetype::FT_Err_Ok {
face
} else {
return Err(());
}
};
// computing the list of characters in the font
let characters_list = unsafe {
// TODO: unresolved symbol
/*if freetype::FT_Select_CharMap(face, freetype::FT_ENCODING_UNICODE) != 0 {
return Err(());
}*/
let mut result = Vec::new();
let mut g: freetype::FT_UInt = std::mem::uninitialized();
let mut c = freetype::FT_Get_First_Char(face, &mut g);
while g != 0 {
result.push(std::mem::transmute(c as u32)); // TODO: better solution?
c = freetype::FT_Get_Next_Char(face, c, &mut g);
}
result
};
// building the infos
let (texture_data, chr_infos, em_pixels) = unsafe {
build_font_image(face, characters_list, font_size)
};
// we load the texture in the display
let texture = glium::texture::Texture2d::new(facade, &texture_data).unwrap();
Ok(FontTexture {
texture: texture,
character_infos: chr_infos,
em_pixels: em_pixels,
})
}
/// Return the size of an em-unit for the generated font texture.
/// This is needed for a pixel-perfect display: the text geometry is scaled so that
/// 1em == 1 unit. We must scale the geometry up by em_pixels to match the screen pixels.
pub fn em_pixels(&self) -> u32 {
self.em_pixels
}
}
/*impl glium::uniforms::AsUniformValue for FontTexture {
fn as_uniform_value(&self) -> glium::uniforms::UniformValue {
glium::uniforms::AsUniformValue::as_uniform_value(&self.texture)
}
}*/
impl TextSystem {
/// Builds a new text system that must be used to build `TextDisplay` objects.
pub fn new<F>(facade: &F) -> TextSystem where F: Facade {
TextSystem {
context: facade.get_context().clone(),
program: program!(facade,
140 => {
vertex: "
#version 140
uniform mat4 matrix;
in vec2 position;
in vec2 tex_coords;
out vec2 v_tex_coords;
void main() {
gl_Position = matrix * vec4(position, 0.0, 1.0);
v_tex_coords = tex_coords;
}
",
fragment: "
#version 140
in vec2 v_tex_coords;
out vec4 f_color;
uniform vec4 color;
uniform sampler2D tex;
void main() {
vec4 c = vec4(color.rgb, color.a * texture(tex, v_tex_coords));
if (c.a <= 0.01) {
discard;
} else {
f_color = c;
}
}
"
},
110 => {
vertex: "
#version 110
attribute vec2 position;
attribute vec2 tex_coords;
varying vec2 v_tex_coords;
uniform mat4 matrix;
void main() {
gl_Position = matrix * vec4(position.x, position.y, 0.0, 1.0);
v_tex_coords = tex_coords;
}
",
fragment: "
#version 110
varying vec2 v_tex_coords;
uniform vec4 color;
uniform sampler2D tex;
void main() {
gl_FragColor = vec4(color.rgb, color.a * texture2D(tex, v_tex_coords));
if (gl_FragColor.a <= 0.01) {
discard;
}
}
"
},
).unwrap()
}
}
}
impl<F> TextDisplay<F> where F: Deref<Target=FontTexture> {
/// Builds a new text display that allows you to draw text.
pub fn new(system: &TextSystem, texture: F, text: &str) -> TextDisplay<F> {
let mut text_display = TextDisplay {
context: system.context.clone(),
texture: texture,
vertex_buffer: None,
index_buffer: None,
char_pos_x: vec![],
is_empty: true,
};
text_display.set_text(text);
text_display
}
/// Return the x-positions (in em-units) of the breaks between characters.
/// When a character starts at n-th byte, then get_char_pos_x()[n] is the x-pos of the character.
/// The last value of the array is the x-pos of the end of the string
pub fn get_char_pos_x(&self) -> &[f32] {
&self.char_pos_x
}
/// Modifies the text on this display.
pub fn set_text(&mut self, text: &str) {
self.is_empty = true;
self.char_pos_x = vec![0.];
self.vertex_buffer = None;
self.index_buffer = None;
// returning if no text
if text.len() == 0 {
return;
}
// these arrays will contain the vertex buffer and index buffer data
let mut vertex_buffer_data = Vec::with_capacity(text.len() * 4 * 4);
let mut index_buffer_data = Vec::with_capacity(text.len() * 6);
// iterating over the characters of the string
let mut pos_x = 0.;
for character in text.chars() { // FIXME: wrong, but only thing stable
let infos = match self.texture.character_infos
.iter().find(|&&(chr, _)| chr == character)
{
Some(infos) => infos,
None => continue // character not found in the font, ignoring it
};
let infos = infos.1;
self.is_empty = false;
// adding the quad in the index buffer
{
let first_vertex_offset = vertex_buffer_data.len() as u16;
index_buffer_data.push(first_vertex_offset);
index_buffer_data.push(first_vertex_offset + 1);
index_buffer_data.push(first_vertex_offset + 2);
index_buffer_data.push(first_vertex_offset + 2);
index_buffer_data.push(first_vertex_offset + 1);
index_buffer_data.push(first_vertex_offset + 3);
}
//
pos_x += infos.left_padding;
// calculating coords
let left_coord = pos_x;
let right_coord = left_coord + infos.size.0;
let top_coord = infos.height_over_line;
let bottom_coord = infos.height_over_line - infos.size.1;
// top-left vertex
vertex_buffer_data.push(VertexFormat {
position: [left_coord, top_coord],
tex_coords: [infos.tex_coords.0, infos.tex_coords.1],
});
// top-right vertex
vertex_buffer_data.push(VertexFormat {
position: [right_coord, top_coord],
tex_coords: [infos.tex_coords.0 + infos.tex_size.0, infos.tex_coords.1],
});
// bottom-left vertex
vertex_buffer_data.push(VertexFormat {
position: [left_coord, bottom_coord],
tex_coords: [infos.tex_coords.0, infos.tex_coords.1 + infos.tex_size.1],
});
// bottom-right vertex
vertex_buffer_data.push(VertexFormat {
position: [right_coord, bottom_coord],
tex_coords: [
infos.tex_coords.0 + infos.tex_size.0,
infos.tex_coords.1 + infos.tex_size.1
],
});
// going to next char
pos_x = right_coord + infos.right_padding;
for _ in 0..character.len_utf8() {
self.char_pos_x.push(pos_x);
}
}
if !vertex_buffer_data.len() != 0 | {
// building the vertex buffer
self.vertex_buffer = Some(glium::VertexBuffer::new(&self.context,
&vertex_buffer_data).unwrap());
// building the index buffer
self.index_buffer = Some(glium::IndexBuffer::new(&self.context,
glium::index::PrimitiveType::TrianglesList,
&index_buffer_data).unwrap());
} | conditional_block | |
hf2.ts | uint32_t data[flash_page_size];
};
*/
// no result
export const HF2_CMD_CHKSUM_PAGES = 0x0007 | /*
struct HF2_CHKSUM_PAGES_Command {
uint32_t target_addr;
uint32_t num_pages;
};
struct HF2_CHKSUM_PAGES_Result {
uint16_t chksums[num_pages];
};
*/
export const HF2_CMD_READ_WORDS = 0x0008
/*
struct HF2_READ_WORDS_Command {
uint32_t target_addr;
uint32_t num_words;
};
struct HF2_READ_WORDS_Result {
uint32_t words[num_words];
};
*/
export const HF2_CMD_WRITE_WORDS = 0x0009
/*
struct HF2_WRITE_WORDS_Command {
uint32_t target_addr;
uint32_t num_words;
uint32_t words[num_words];
};
*/
// no result
export const HF2_CMD_DMESG = 0x0010
// no arguments
// results is utf8 character array
export const HF2_FLAG_SERIAL_OUT = 0x80
export const HF2_FLAG_SERIAL_ERR = 0xC0
export const HF2_FLAG_CMDPKT_LAST = 0x40
export const HF2_FLAG_CMDPKT_BODY = 0x00
export const HF2_FLAG_MASK = 0xC0
export const HF2_SIZE_MASK = 63
export const HF2_STATUS_OK = 0x00
export const HF2_STATUS_INVALID_CMD = 0x01
export const HF2_STATUS_EXEC_ERR = 0x02
export const HF2_STATUS_EVENT = 0x80
// the eventId is overlayed on the tag+status; the mask corresponds
// to the HF2_STATUS_EVENT above
export const HF2_EV_MASK = 0x800000
export const HF2_CMD_JDS_CONFIG = 0x0020
export const HF2_CMD_JDS_SEND = 0x0021
export const HF2_EV_JDS_PACKET = 0x800020
export class Transport {
dev: USBDevice;
iface: USBInterface;
altIface: USBAlternateInterface;
epIn: USBEndpoint;
epOut: USBEndpoint;
readLoopStarted = false;
ready = false;
onData = (v: Uint8Array) => { };
onError = (e: Error) => {
console.error("HF2 error: " + (e ? e.stack : e))
};
log(msg: string, v?: any) {
if (v != undefined)
console.log("HF2: " + msg, v)
else
console.log("HF2: " + msg)
}
private clearDev() {
if (this.dev) {
this.dev = null
this.epIn = null
this.epOut = null
}
}
disconnectAsync() {
this.ready = false
if (!this.dev) return Promise.resolve()
this.log("close device")
return this.dev.close()
.catch(e => {
// just ignore errors closing, most likely device just disconnected
})
.then(() => {
this.clearDev()
return U.delay(500)
})
}
private recvPacketAsync(): Promise<Uint8Array> {
let final = (res: USBInTransferResult) => {
if (res.status != "ok")
this.error("USB IN transfer failed")
let arr = new Uint8Array(res.data.buffer)
if (arr.length == 0)
return this.recvPacketAsync()
return arr
}
if (!this.dev)
return Promise.reject(new Error("Disconnected"))
if (!this.epIn) {
return this.dev.controlTransferIn({
requestType: "class",
recipient: "interface",
request: controlTransferGetReport,
value: controlTransferInReport,
index: this.iface.interfaceNumber
}, 64).then(final)
}
return this.dev.transferIn(this.epIn.endpointNumber, 64)
.then(final)
}
error(msg: string) {
throw new Error(`USB error on device ${this.dev ? this.dev.productName : "n/a"} (${msg})`)
}
private async readLoop() {
if (this.readLoopStarted)
return
this.readLoopStarted = true
this.log("start read loop")
while (true) {
if (!this.ready) {
break
//await U.delay(300)
//continue
}
try {
const buf = await this.recvPacketAsync()
if (buf[0]) {
// we've got data; retry reading immedietly after processing it
this.onData(buf)
} else {
// throttle down if no data coming
await U.delay(5)
}
} catch (err) {
if (this.dev)
this.onError(err)
await U.delay(300)
}
}
}
sendPacketAsync(pkt: Uint8Array) {
if (!this.dev)
return Promise.reject(new Error("Disconnected"))
U.assert(pkt.length <= 64)
if (!this.epOut) {
return this.dev.controlTransferOut({
requestType: "class",
recipient: "interface",
request: controlTransferSetReport,
value: controlTransferOutReport,
index: this.iface.interfaceNumber
}, pkt).then(res => {
if (res.status != "ok")
this.error("USB CTRL OUT transfer failed")
})
}
return this.dev.transferOut(this.epOut.endpointNumber, pkt)
.then(res => {
if (res.status != "ok")
this.error("USB OUT transfer failed")
})
}
async init() {
const usb = new webusb.USB({
devicesFound: async devices => {
for (const device of devices) {
if (device.deviceVersionMajor == 42) {
for (const iface of device.configuration.interfaces) {
const alt = iface.alternates[0]
if (alt.interfaceClass == 0xff && alt.interfaceSubclass == 42) {
this.dev = device
this.iface = iface
this.altIface = alt
return device
}
}
}
}
return undefined
}
})
this.dev = await usb.requestDevice({ filters: [{}] })
this.log("connect device: " + this.dev.manufacturerName + " " + this.dev.productName)
await this.dev.open()
await this.dev.selectConfiguration(1)
if (this.altIface.endpoints.length) {
this.epIn = this.altIface.endpoints.filter(e => e.direction == "in")[0]
this.epOut = this.altIface.endpoints.filter(e => e.direction == "out")[0]
U.assert(this.epIn.packetSize == 64);
U.assert(this.epOut.packetSize == 64);
}
this.log("claim interface")
await this.dev.claimInterface(this.iface.interfaceNumber)
this.log("all connected")
this.ready = true
this.readLoop()
}
}
export class Proto {
eventHandlers: U.SMap<(buf: Uint8Array) => void> = {}
msgs = new U.PromiseBuffer<Uint8Array>()
cmdSeq = (Math.random() * 0xffff) | 0;
private lock = new U.PromiseQueue();
constructor(public io: Transport) {
let frames: Uint8Array[] = []
io.onData = buf => {
let tp = buf[0] & HF2_FLAG_MASK
let len = buf[0] & 63
//console.log(`msg tp=${tp} len=${len}`)
let frame = new Uint8Array(len)
U.memcpy(frame, 0, buf, 1, len)
if (tp & HF2_FLAG_SERIAL_OUT) {
this.onSerial(frame, tp == HF2_FLAG_SERIAL_ERR)
return
}
frames.push(frame)
if (tp == HF2_FLAG_CMDPKT_BODY) {
return
} else {
U.assert(tp == HF2_FLAG_CMDPKT_LAST)
let total = 0
for (let f of frames) total += f.length
let r = new Uint8Array(total)
let ptr = 0
for (let f of frames) {
U.memcpy(r, ptr, f)
ptr += f.length
}
frames = []
if (r[2] & HF2_STATUS_EVENT) {
// asynchronous event
this.handleEvent(r)
} else {
this.msgs.push(r)
}
}
}
}
error(m: string) {
return this.io.error(m)
}
talkAsync(cmd: number, data?: Uint8Array) {
let len = 8
if (data) len += data.length
let pkt = new Uint8Array(len)
let seq = ++this.cmdSeq & 0xffff
U.write32(pkt, 0, cmd);
U.write16(pkt, 4, seq);
U.write16(pkt, 6, 0);
if (data)
U.memcpy(pkt, 8, data, 0, data.length)
let numSkipped = 0
let handleReturnAsync = (): Promise<Uint8Array> | random_line_split | |
hf2.ts | 32_t data[flash_page_size];
};
*/
// no result
export const HF2_CMD_CHKSUM_PAGES = 0x0007
/*
struct HF2_CHKSUM_PAGES_Command {
uint32_t target_addr;
uint32_t num_pages;
};
struct HF2_CHKSUM_PAGES_Result {
uint16_t chksums[num_pages];
};
*/
export const HF2_CMD_READ_WORDS = 0x0008
/*
struct HF2_READ_WORDS_Command {
uint32_t target_addr;
uint32_t num_words;
};
struct HF2_READ_WORDS_Result {
uint32_t words[num_words];
};
*/
export const HF2_CMD_WRITE_WORDS = 0x0009
/*
struct HF2_WRITE_WORDS_Command {
uint32_t target_addr;
uint32_t num_words;
uint32_t words[num_words];
};
*/
// no result
export const HF2_CMD_DMESG = 0x0010
// no arguments
// results is utf8 character array
export const HF2_FLAG_SERIAL_OUT = 0x80
export const HF2_FLAG_SERIAL_ERR = 0xC0
export const HF2_FLAG_CMDPKT_LAST = 0x40
export const HF2_FLAG_CMDPKT_BODY = 0x00
export const HF2_FLAG_MASK = 0xC0
export const HF2_SIZE_MASK = 63
export const HF2_STATUS_OK = 0x00
export const HF2_STATUS_INVALID_CMD = 0x01
export const HF2_STATUS_EXEC_ERR = 0x02
export const HF2_STATUS_EVENT = 0x80
// the eventId is overlayed on the tag+status; the mask corresponds
// to the HF2_STATUS_EVENT above
export const HF2_EV_MASK = 0x800000
export const HF2_CMD_JDS_CONFIG = 0x0020
export const HF2_CMD_JDS_SEND = 0x0021
export const HF2_EV_JDS_PACKET = 0x800020
export class Transport {
dev: USBDevice;
iface: USBInterface;
altIface: USBAlternateInterface;
epIn: USBEndpoint;
epOut: USBEndpoint;
readLoopStarted = false;
ready = false;
onData = (v: Uint8Array) => { };
onError = (e: Error) => {
console.error("HF2 error: " + (e ? e.stack : e))
};
log(msg: string, v?: any) {
if (v != undefined)
console.log("HF2: " + msg, v)
else
console.log("HF2: " + msg)
}
private clearDev() {
if (this.dev) {
this.dev = null
this.epIn = null
this.epOut = null
}
}
disconnectAsync() |
private recvPacketAsync(): Promise<Uint8Array> {
let final = (res: USBInTransferResult) => {
if (res.status != "ok")
this.error("USB IN transfer failed")
let arr = new Uint8Array(res.data.buffer)
if (arr.length == 0)
return this.recvPacketAsync()
return arr
}
if (!this.dev)
return Promise.reject(new Error("Disconnected"))
if (!this.epIn) {
return this.dev.controlTransferIn({
requestType: "class",
recipient: "interface",
request: controlTransferGetReport,
value: controlTransferInReport,
index: this.iface.interfaceNumber
}, 64).then(final)
}
return this.dev.transferIn(this.epIn.endpointNumber, 64)
.then(final)
}
error(msg: string) {
throw new Error(`USB error on device ${this.dev ? this.dev.productName : "n/a"} (${msg})`)
}
private async readLoop() {
if (this.readLoopStarted)
return
this.readLoopStarted = true
this.log("start read loop")
while (true) {
if (!this.ready) {
break
//await U.delay(300)
//continue
}
try {
const buf = await this.recvPacketAsync()
if (buf[0]) {
// we've got data; retry reading immedietly after processing it
this.onData(buf)
} else {
// throttle down if no data coming
await U.delay(5)
}
} catch (err) {
if (this.dev)
this.onError(err)
await U.delay(300)
}
}
}
sendPacketAsync(pkt: Uint8Array) {
if (!this.dev)
return Promise.reject(new Error("Disconnected"))
U.assert(pkt.length <= 64)
if (!this.epOut) {
return this.dev.controlTransferOut({
requestType: "class",
recipient: "interface",
request: controlTransferSetReport,
value: controlTransferOutReport,
index: this.iface.interfaceNumber
}, pkt).then(res => {
if (res.status != "ok")
this.error("USB CTRL OUT transfer failed")
})
}
return this.dev.transferOut(this.epOut.endpointNumber, pkt)
.then(res => {
if (res.status != "ok")
this.error("USB OUT transfer failed")
})
}
async init() {
const usb = new webusb.USB({
devicesFound: async devices => {
for (const device of devices) {
if (device.deviceVersionMajor == 42) {
for (const iface of device.configuration.interfaces) {
const alt = iface.alternates[0]
if (alt.interfaceClass == 0xff && alt.interfaceSubclass == 42) {
this.dev = device
this.iface = iface
this.altIface = alt
return device
}
}
}
}
return undefined
}
})
this.dev = await usb.requestDevice({ filters: [{}] })
this.log("connect device: " + this.dev.manufacturerName + " " + this.dev.productName)
await this.dev.open()
await this.dev.selectConfiguration(1)
if (this.altIface.endpoints.length) {
this.epIn = this.altIface.endpoints.filter(e => e.direction == "in")[0]
this.epOut = this.altIface.endpoints.filter(e => e.direction == "out")[0]
U.assert(this.epIn.packetSize == 64);
U.assert(this.epOut.packetSize == 64);
}
this.log("claim interface")
await this.dev.claimInterface(this.iface.interfaceNumber)
this.log("all connected")
this.ready = true
this.readLoop()
}
}
export class Proto {
eventHandlers: U.SMap<(buf: Uint8Array) => void> = {}
msgs = new U.PromiseBuffer<Uint8Array>()
cmdSeq = (Math.random() * 0xffff) | 0;
private lock = new U.PromiseQueue();
constructor(public io: Transport) {
let frames: Uint8Array[] = []
io.onData = buf => {
let tp = buf[0] & HF2_FLAG_MASK
let len = buf[0] & 63
//console.log(`msg tp=${tp} len=${len}`)
let frame = new Uint8Array(len)
U.memcpy(frame, 0, buf, 1, len)
if (tp & HF2_FLAG_SERIAL_OUT) {
this.onSerial(frame, tp == HF2_FLAG_SERIAL_ERR)
return
}
frames.push(frame)
if (tp == HF2_FLAG_CMDPKT_BODY) {
return
} else {
U.assert(tp == HF2_FLAG_CMDPKT_LAST)
let total = 0
for (let f of frames) total += f.length
let r = new Uint8Array(total)
let ptr = 0
for (let f of frames) {
U.memcpy(r, ptr, f)
ptr += f.length
}
frames = []
if (r[2] & HF2_STATUS_EVENT) {
// asynchronous event
this.handleEvent(r)
} else {
this.msgs.push(r)
}
}
}
}
error(m: string) {
return this.io.error(m)
}
talkAsync(cmd: number, data?: Uint8Array) {
let len = 8
if (data) len += data.length
let pkt = new Uint8Array(len)
let seq = ++this.cmdSeq & 0xffff
U.write32(pkt, 0, cmd);
U.write16(pkt, 4, seq);
U.write16(pkt, 6, 0);
if (data)
U.memcpy(pkt, 8, data, 0, data.length)
let numSkipped = 0
let handleReturnAsync = (): Promise<Uint8 | {
this.ready = false
if (!this.dev) return Promise.resolve()
this.log("close device")
return this.dev.close()
.catch(e => {
// just ignore errors closing, most likely device just disconnected
})
.then(() => {
this.clearDev()
return U.delay(500)
})
} | identifier_body |
hf2.ts | 32_t data[flash_page_size];
};
*/
// no result
export const HF2_CMD_CHKSUM_PAGES = 0x0007
/*
struct HF2_CHKSUM_PAGES_Command {
uint32_t target_addr;
uint32_t num_pages;
};
struct HF2_CHKSUM_PAGES_Result {
uint16_t chksums[num_pages];
};
*/
export const HF2_CMD_READ_WORDS = 0x0008
/*
struct HF2_READ_WORDS_Command {
uint32_t target_addr;
uint32_t num_words;
};
struct HF2_READ_WORDS_Result {
uint32_t words[num_words];
};
*/
export const HF2_CMD_WRITE_WORDS = 0x0009
/*
struct HF2_WRITE_WORDS_Command {
uint32_t target_addr;
uint32_t num_words;
uint32_t words[num_words];
};
*/
// no result
export const HF2_CMD_DMESG = 0x0010
// no arguments
// results is utf8 character array
export const HF2_FLAG_SERIAL_OUT = 0x80
export const HF2_FLAG_SERIAL_ERR = 0xC0
export const HF2_FLAG_CMDPKT_LAST = 0x40
export const HF2_FLAG_CMDPKT_BODY = 0x00
export const HF2_FLAG_MASK = 0xC0
export const HF2_SIZE_MASK = 63
export const HF2_STATUS_OK = 0x00
export const HF2_STATUS_INVALID_CMD = 0x01
export const HF2_STATUS_EXEC_ERR = 0x02
export const HF2_STATUS_EVENT = 0x80
// the eventId is overlayed on the tag+status; the mask corresponds
// to the HF2_STATUS_EVENT above
export const HF2_EV_MASK = 0x800000
export const HF2_CMD_JDS_CONFIG = 0x0020
export const HF2_CMD_JDS_SEND = 0x0021
export const HF2_EV_JDS_PACKET = 0x800020
export class Transport {
dev: USBDevice;
iface: USBInterface;
altIface: USBAlternateInterface;
epIn: USBEndpoint;
epOut: USBEndpoint;
readLoopStarted = false;
ready = false;
onData = (v: Uint8Array) => { };
onError = (e: Error) => {
console.error("HF2 error: " + (e ? e.stack : e))
};
log(msg: string, v?: any) {
if (v != undefined)
console.log("HF2: " + msg, v)
else
console.log("HF2: " + msg)
}
private clearDev() {
if (this.dev) {
this.dev = null
this.epIn = null
this.epOut = null
}
}
disconnectAsync() {
this.ready = false
if (!this.dev) return Promise.resolve()
this.log("close device")
return this.dev.close()
.catch(e => {
// just ignore errors closing, most likely device just disconnected
})
.then(() => {
this.clearDev()
return U.delay(500)
})
}
private recvPacketAsync(): Promise<Uint8Array> {
let final = (res: USBInTransferResult) => {
if (res.status != "ok")
this.error("USB IN transfer failed")
let arr = new Uint8Array(res.data.buffer)
if (arr.length == 0)
return this.recvPacketAsync()
return arr
}
if (!this.dev)
return Promise.reject(new Error("Disconnected"))
if (!this.epIn) {
return this.dev.controlTransferIn({
requestType: "class",
recipient: "interface",
request: controlTransferGetReport,
value: controlTransferInReport,
index: this.iface.interfaceNumber
}, 64).then(final)
}
return this.dev.transferIn(this.epIn.endpointNumber, 64)
.then(final)
}
error(msg: string) {
throw new Error(`USB error on device ${this.dev ? this.dev.productName : "n/a"} (${msg})`)
}
private async readLoop() {
if (this.readLoopStarted)
return
this.readLoopStarted = true
this.log("start read loop")
while (true) {
if (!this.ready) {
break
//await U.delay(300)
//continue
}
try {
const buf = await this.recvPacketAsync()
if (buf[0]) {
// we've got data; retry reading immedietly after processing it
this.onData(buf)
} else {
// throttle down if no data coming
await U.delay(5)
}
} catch (err) {
if (this.dev)
this.onError(err)
await U.delay(300)
}
}
}
sendPacketAsync(pkt: Uint8Array) {
if (!this.dev)
return Promise.reject(new Error("Disconnected"))
U.assert(pkt.length <= 64)
if (!this.epOut) {
return this.dev.controlTransferOut({
requestType: "class",
recipient: "interface",
request: controlTransferSetReport,
value: controlTransferOutReport,
index: this.iface.interfaceNumber
}, pkt).then(res => {
if (res.status != "ok")
this.error("USB CTRL OUT transfer failed")
})
}
return this.dev.transferOut(this.epOut.endpointNumber, pkt)
.then(res => {
if (res.status != "ok")
this.error("USB OUT transfer failed")
})
}
async init() {
const usb = new webusb.USB({
devicesFound: async devices => {
for (const device of devices) {
if (device.deviceVersionMajor == 42) {
for (const iface of device.configuration.interfaces) {
const alt = iface.alternates[0]
if (alt.interfaceClass == 0xff && alt.interfaceSubclass == 42) {
this.dev = device
this.iface = iface
this.altIface = alt
return device
}
}
}
}
return undefined
}
})
this.dev = await usb.requestDevice({ filters: [{}] })
this.log("connect device: " + this.dev.manufacturerName + " " + this.dev.productName)
await this.dev.open()
await this.dev.selectConfiguration(1)
if (this.altIface.endpoints.length) {
this.epIn = this.altIface.endpoints.filter(e => e.direction == "in")[0]
this.epOut = this.altIface.endpoints.filter(e => e.direction == "out")[0]
U.assert(this.epIn.packetSize == 64);
U.assert(this.epOut.packetSize == 64);
}
this.log("claim interface")
await this.dev.claimInterface(this.iface.interfaceNumber)
this.log("all connected")
this.ready = true
this.readLoop()
}
}
export class Proto {
eventHandlers: U.SMap<(buf: Uint8Array) => void> = {}
msgs = new U.PromiseBuffer<Uint8Array>()
cmdSeq = (Math.random() * 0xffff) | 0;
private lock = new U.PromiseQueue();
constructor(public io: Transport) {
let frames: Uint8Array[] = []
io.onData = buf => {
let tp = buf[0] & HF2_FLAG_MASK
let len = buf[0] & 63
//console.log(`msg tp=${tp} len=${len}`)
let frame = new Uint8Array(len)
U.memcpy(frame, 0, buf, 1, len)
if (tp & HF2_FLAG_SERIAL_OUT) {
this.onSerial(frame, tp == HF2_FLAG_SERIAL_ERR)
return
}
frames.push(frame)
if (tp == HF2_FLAG_CMDPKT_BODY) {
return
} else {
U.assert(tp == HF2_FLAG_CMDPKT_LAST)
let total = 0
for (let f of frames) total += f.length
let r = new Uint8Array(total)
let ptr = 0
for (let f of frames) {
U.memcpy(r, ptr, f)
ptr += f.length
}
frames = []
if (r[2] & HF2_STATUS_EVENT) {
// asynchronous event
this.handleEvent(r)
} else |
}
}
}
error(m: string) {
return this.io.error(m)
}
talkAsync(cmd: number, data?: Uint8Array) {
let len = 8
if (data) len += data.length
let pkt = new Uint8Array(len)
let seq = ++this.cmdSeq & 0xffff
U.write32(pkt, 0, cmd);
U.write16(pkt, 4, seq);
U.write16(pkt, 6, 0);
if (data)
U.memcpy(pkt, 8, data, 0, data.length)
let numSkipped = 0
let handleReturnAsync = (): Promise<Uint8 | {
this.msgs.push(r)
} | conditional_block |
hf2.ts | 32_t data[flash_page_size];
};
*/
// no result
export const HF2_CMD_CHKSUM_PAGES = 0x0007
/*
struct HF2_CHKSUM_PAGES_Command {
uint32_t target_addr;
uint32_t num_pages;
};
struct HF2_CHKSUM_PAGES_Result {
uint16_t chksums[num_pages];
};
*/
export const HF2_CMD_READ_WORDS = 0x0008
/*
struct HF2_READ_WORDS_Command {
uint32_t target_addr;
uint32_t num_words;
};
struct HF2_READ_WORDS_Result {
uint32_t words[num_words];
};
*/
export const HF2_CMD_WRITE_WORDS = 0x0009
/*
struct HF2_WRITE_WORDS_Command {
uint32_t target_addr;
uint32_t num_words;
uint32_t words[num_words];
};
*/
// no result
export const HF2_CMD_DMESG = 0x0010
// no arguments
// results is utf8 character array
export const HF2_FLAG_SERIAL_OUT = 0x80
export const HF2_FLAG_SERIAL_ERR = 0xC0
export const HF2_FLAG_CMDPKT_LAST = 0x40
export const HF2_FLAG_CMDPKT_BODY = 0x00
export const HF2_FLAG_MASK = 0xC0
export const HF2_SIZE_MASK = 63
export const HF2_STATUS_OK = 0x00
export const HF2_STATUS_INVALID_CMD = 0x01
export const HF2_STATUS_EXEC_ERR = 0x02
export const HF2_STATUS_EVENT = 0x80
// the eventId is overlayed on the tag+status; the mask corresponds
// to the HF2_STATUS_EVENT above
export const HF2_EV_MASK = 0x800000
export const HF2_CMD_JDS_CONFIG = 0x0020
export const HF2_CMD_JDS_SEND = 0x0021
export const HF2_EV_JDS_PACKET = 0x800020
export class Transport {
dev: USBDevice;
iface: USBInterface;
altIface: USBAlternateInterface;
epIn: USBEndpoint;
epOut: USBEndpoint;
readLoopStarted = false;
ready = false;
onData = (v: Uint8Array) => { };
onError = (e: Error) => {
console.error("HF2 error: " + (e ? e.stack : e))
};
log(msg: string, v?: any) {
if (v != undefined)
console.log("HF2: " + msg, v)
else
console.log("HF2: " + msg)
}
private clearDev() {
if (this.dev) {
this.dev = null
this.epIn = null
this.epOut = null
}
}
disconnectAsync() {
this.ready = false
if (!this.dev) return Promise.resolve()
this.log("close device")
return this.dev.close()
.catch(e => {
// just ignore errors closing, most likely device just disconnected
})
.then(() => {
this.clearDev()
return U.delay(500)
})
}
private recvPacketAsync(): Promise<Uint8Array> {
let final = (res: USBInTransferResult) => {
if (res.status != "ok")
this.error("USB IN transfer failed")
let arr = new Uint8Array(res.data.buffer)
if (arr.length == 0)
return this.recvPacketAsync()
return arr
}
if (!this.dev)
return Promise.reject(new Error("Disconnected"))
if (!this.epIn) {
return this.dev.controlTransferIn({
requestType: "class",
recipient: "interface",
request: controlTransferGetReport,
value: controlTransferInReport,
index: this.iface.interfaceNumber
}, 64).then(final)
}
return this.dev.transferIn(this.epIn.endpointNumber, 64)
.then(final)
}
| (msg: string) {
throw new Error(`USB error on device ${this.dev ? this.dev.productName : "n/a"} (${msg})`)
}
private async readLoop() {
if (this.readLoopStarted)
return
this.readLoopStarted = true
this.log("start read loop")
while (true) {
if (!this.ready) {
break
//await U.delay(300)
//continue
}
try {
const buf = await this.recvPacketAsync()
if (buf[0]) {
// we've got data; retry reading immedietly after processing it
this.onData(buf)
} else {
// throttle down if no data coming
await U.delay(5)
}
} catch (err) {
if (this.dev)
this.onError(err)
await U.delay(300)
}
}
}
sendPacketAsync(pkt: Uint8Array) {
if (!this.dev)
return Promise.reject(new Error("Disconnected"))
U.assert(pkt.length <= 64)
if (!this.epOut) {
return this.dev.controlTransferOut({
requestType: "class",
recipient: "interface",
request: controlTransferSetReport,
value: controlTransferOutReport,
index: this.iface.interfaceNumber
}, pkt).then(res => {
if (res.status != "ok")
this.error("USB CTRL OUT transfer failed")
})
}
return this.dev.transferOut(this.epOut.endpointNumber, pkt)
.then(res => {
if (res.status != "ok")
this.error("USB OUT transfer failed")
})
}
async init() {
const usb = new webusb.USB({
devicesFound: async devices => {
for (const device of devices) {
if (device.deviceVersionMajor == 42) {
for (const iface of device.configuration.interfaces) {
const alt = iface.alternates[0]
if (alt.interfaceClass == 0xff && alt.interfaceSubclass == 42) {
this.dev = device
this.iface = iface
this.altIface = alt
return device
}
}
}
}
return undefined
}
})
this.dev = await usb.requestDevice({ filters: [{}] })
this.log("connect device: " + this.dev.manufacturerName + " " + this.dev.productName)
await this.dev.open()
await this.dev.selectConfiguration(1)
if (this.altIface.endpoints.length) {
this.epIn = this.altIface.endpoints.filter(e => e.direction == "in")[0]
this.epOut = this.altIface.endpoints.filter(e => e.direction == "out")[0]
U.assert(this.epIn.packetSize == 64);
U.assert(this.epOut.packetSize == 64);
}
this.log("claim interface")
await this.dev.claimInterface(this.iface.interfaceNumber)
this.log("all connected")
this.ready = true
this.readLoop()
}
}
export class Proto {
eventHandlers: U.SMap<(buf: Uint8Array) => void> = {}
msgs = new U.PromiseBuffer<Uint8Array>()
cmdSeq = (Math.random() * 0xffff) | 0;
private lock = new U.PromiseQueue();
constructor(public io: Transport) {
let frames: Uint8Array[] = []
io.onData = buf => {
let tp = buf[0] & HF2_FLAG_MASK
let len = buf[0] & 63
//console.log(`msg tp=${tp} len=${len}`)
let frame = new Uint8Array(len)
U.memcpy(frame, 0, buf, 1, len)
if (tp & HF2_FLAG_SERIAL_OUT) {
this.onSerial(frame, tp == HF2_FLAG_SERIAL_ERR)
return
}
frames.push(frame)
if (tp == HF2_FLAG_CMDPKT_BODY) {
return
} else {
U.assert(tp == HF2_FLAG_CMDPKT_LAST)
let total = 0
for (let f of frames) total += f.length
let r = new Uint8Array(total)
let ptr = 0
for (let f of frames) {
U.memcpy(r, ptr, f)
ptr += f.length
}
frames = []
if (r[2] & HF2_STATUS_EVENT) {
// asynchronous event
this.handleEvent(r)
} else {
this.msgs.push(r)
}
}
}
}
error(m: string) {
return this.io.error(m)
}
talkAsync(cmd: number, data?: Uint8Array) {
let len = 8
if (data) len += data.length
let pkt = new Uint8Array(len)
let seq = ++this.cmdSeq & 0xffff
U.write32(pkt, 0, cmd);
U.write16(pkt, 4, seq);
U.write16(pkt, 6, 0);
if (data)
U.memcpy(pkt, 8, data, 0, data.length)
let numSkipped = 0
let handleReturnAsync = (): Promise<Uint8Array> | error | identifier_name |
lib.rs | _TLS` | enable secure connection using AMQPS (default: `false`, enable with `true` or `1` or `TRUE` or `True`) |
//! | `AMQP_USERNAME` | Username used to connect to AMQP server (default: `guest`) |
//! | `AMQP_PASSWORD` | Password used to connect to AMQP server (default: `guest`) |
//! | `AMQP_VHOST` | AMQP virtual host (default: `/`) |
//! | `AMQP_QUEUE` | AMQP queue name used to receive job orders (default: `job_undefined`) |
//!
//! ### Vault connection
//!
//! | Variable | Description |
//! |--------------------|-------------|
//! | `BACKEND_HOSTNAME` | URL used to connect to backend server (default: `http://127.0.0.1:4000/api`) |
//! | `BACKEND_USERNAME` | Username used to connect to backend server |
//! | `BACKEND_PASSWORD` | Password used to connect to backend server |
//!
//! ## Start worker locally
//!
//! MCAI Worker SDK can be launched locally - without RabbitMQ.
//! It can process some message for different purpose (functional tests, message order examples, etc.).
//!
//! To start worker in this mode, setup the environment variable `SOURCE_ORDERS` with path(s) to json orders.
//! It can take multiple orders, joined with `:` on unix platform, `;` on windows os.
//!
//! ### Examples:
//!
//! ```bash
//! RUST_LOG=info SOURCE_ORDERS=./examples/success_order.json:./examples/error_order.json cargo run --example worker
//! ```
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate serde_json;
#[cfg(feature = "media")]
#[macro_use]
extern crate yaserde_derive;
mod channels;
mod config;
mod error;
pub mod job;
pub mod message;
pub mod parameter;
pub mod worker;
/// Re-export from lapin Channel
pub use lapin::Channel;
pub use log::{debug, error, info, trace, warn};
pub use schemars::JsonSchema;
/// Re-export from semver:
pub use semver::Version;
pub use error::{MessageError, Result};
#[cfg(feature = "media")]
pub use message::media::{
audio::AudioFormat,
ebu_ttml_live::{
Body, Div, EbuTtmlLive, Frames, Head, Paragraph, Span, Styling, TimeExpression, TimeUnit, Title,
},
filters::{AudioFilter, GenericFilter, VideoFilter},
video::{RegionOfInterest, Scaling, VideoFormat},
StreamDescriptor,
};
pub use message::publish_job_progression;
pub use parameter::container::ParametersContainer;
pub use parameter::{Parameter, ParameterValue, Requirement};
#[cfg(feature = "media")]
pub use stainless_ffmpeg::{format_context::FormatContext, frame::Frame};
use crate::worker::docker;
use chrono::prelude::*;
use config::*;
use env_logger::Builder;
use futures_executor::LocalPool;
use futures_util::{future::FutureExt, stream::StreamExt, task::LocalSpawnExt};
use job::JobResult;
use lapin::{options::*, types::FieldTable, Connection, ConnectionProperties};
use serde::de::DeserializeOwned;
#[cfg(feature = "media")]
use serde::Serialize;
use std::str::FromStr;
#[cfg(feature = "media")]
use std::sync::{mpsc::Sender, Mutex};
use std::{cell::RefCell, fs, io::Write, rc::Rc, sync::Arc, thread, time};
#[cfg(feature = "media")]
use yaserde::YaSerialize;
/// Exposed Channel type
pub type McaiChannel = Arc<Channel>;
#[cfg(feature = "media")]
#[derive(Debug)]
pub struct ProcessResult {
end_of_process: bool,
json_content: Option<String>,
xml_content: Option<String>,
}
#[cfg(feature = "media")]
impl ProcessResult {
pub fn empty() -> Self {
ProcessResult {
end_of_process: false,
json_content: None,
xml_content: None,
}
}
pub fn end_of_process() -> Self {
ProcessResult {
end_of_process: true,
json_content: None,
xml_content: None,
}
}
pub fn new_json<S: Serialize>(content: S) -> Self {
let content = serde_json::to_string(&content).unwrap();
ProcessResult {
end_of_process: false,
json_content: Some(content),
xml_content: None,
}
}
pub fn new_xml<Y: YaSerialize>(content: Y) -> Self {
let content = yaserde::ser::to_string(&content).unwrap();
ProcessResult {
end_of_process: false,
json_content: None,
xml_content: Some(content),
}
}
}
#[cfg(feature = "media")]
pub enum ProcessFrame {
AudioVideo(Frame),
EbuTtmlLive(Box<EbuTtmlLive>),
Data(Vec<u8>),
}
#[cfg(feature = "media")]
impl ProcessFrame {
pub fn get_pts(&self) -> i64 {
match self {
ProcessFrame::AudioVideo(frame) => frame.get_pts(),
ProcessFrame::EbuTtmlLive(_) | ProcessFrame::Data(_) => {
// improvement: support pts to terminate
0
}
}
}
}
/// # Trait to describe a worker
/// Implement this trait to implement a worker
pub trait MessageEvent<P: DeserializeOwned + JsonSchema> {
fn get_name(&self) -> String;
fn get_short_description(&self) -> String;
fn get_description(&self) -> String;
fn get_version(&self) -> semver::Version;
fn init(&mut self) -> Result<()> {
Ok(())
}
#[cfg(feature = "media")]
fn init_process(
&mut self,
_parameters: P,
_format_context: Arc<Mutex<FormatContext>>,
_response_sender: Arc<Mutex<Sender<ProcessResult>>>,
) -> Result<Vec<StreamDescriptor>> {
Ok(vec![])
}
#[cfg(feature = "media")]
fn process_frame(
&mut self,
_job_result: JobResult,
_stream_index: usize,
_frame: ProcessFrame,
) -> Result<ProcessResult> {
Err(MessageError::NotImplemented())
}
#[cfg(feature = "media")]
fn ending_process(&mut self) -> Result<()> {
Ok(())
}
/// Not called when the "media" feature is enabled
fn process(
&self,
_channel: Option<McaiChannel>,
_parameters: P,
_job_result: JobResult,
) -> Result<JobResult>
where
Self: std::marker::Sized,
{
Err(MessageError::NotImplemented())
}
}
/// Function to start a worker
pub fn start_worker<P: DeserializeOwned + JsonSchema, ME: MessageEvent<P>>(mut message_event: ME)
where
ME: std::marker::Sync,
{
let mut builder = Builder::from_default_env();
let amqp_queue = get_amqp_queue();
let instance_id = docker::get_instance_id("/proc/self/cgroup");
let container_id = instance_id.clone();
builder
.format(move |stream, record| {
writeln!(
stream,
"{} - {} - {} - {} - {} - {}",
Utc::now(),
&container_id,
get_amqp_queue(),
record.target().parse::<i64>().unwrap_or(-1),
record.level(),
record.args(),
)
})
.init();
let worker_configuration =
worker::WorkerConfiguration::new(&amqp_queue, &message_event, &instance_id);
if let Err(configuration_error) = worker_configuration {
error!("{:?}", configuration_error);
return;
}
let worker_configuration = worker_configuration.unwrap();
info!(
"Worker: {}, version: {} (MCAI Worker SDK {})",
worker_configuration.get_worker_name(),
worker_configuration.get_worker_version(),
worker_configuration.get_sdk_version(),
);
if let Ok(enabled) = std::env::var("DESCRIBE") {
if enabled == "1" || bool::from_str(&enabled.to_lowercase()).unwrap_or(false) {
match serde_json::to_string_pretty(&worker_configuration) {
Ok(serialized_configuration) => {
println!("{}", serialized_configuration);
return;
}
Err(error) => error!("Could not serialize worker configuration: {:?}", error),
}
}
}
if let Err(message) = message_event.init() {
error!("{:?}", message);
return;
}
let message_event_ref = Rc::new(RefCell::new(message_event));
info!("Worker initialized, ready to receive jobs");
if let Some(source_orders) = get_source_orders() {
warn!("Worker will process source orders");
for source_order in &source_orders {
info!("Start to process order: {:?}", source_order);
let count = None;
let channel = None;
let message_data = fs::read_to_string(source_order).unwrap();
let result = message::parse_and_process_message(
message_event_ref.clone(),
&message_data,
count,
channel,
message::publish_job_progression,
);
match result {
Ok(mut job_result) => | {
job_result.update_execution_duration();
info!(target: &job_result.get_job_id().to_string(), "Process succeeded: {:?}", job_result)
} | conditional_block | |
lib.rs |
//! // fn main() { | //! // }
//! ```
//!
//! ## Runtime configuration
//!
//! ### AMQP connection
//!
//! | Variable | Description |
//! |-----------------|-------------|
//! | `AMQP_HOSTNAME` | IP or host of AMQP server (default: `localhost`) |
//! | `AMQP_PORT` | AMQP server port (default: `5672`) |
//! | `AMQP_TLS` | enable secure connection using AMQPS (default: `false`, enable with `true` or `1` or `TRUE` or `True`) |
//! | `AMQP_USERNAME` | Username used to connect to AMQP server (default: `guest`) |
//! | `AMQP_PASSWORD` | Password used to connect to AMQP server (default: `guest`) |
//! | `AMQP_VHOST` | AMQP virtual host (default: `/`) |
//! | `AMQP_QUEUE` | AMQP queue name used to receive job orders (default: `job_undefined`) |
//!
//! ### Vault connection
//!
//! | Variable | Description |
//! |--------------------|-------------|
//! | `BACKEND_HOSTNAME` | URL used to connect to backend server (default: `http://127.0.0.1:4000/api`) |
//! | `BACKEND_USERNAME` | Username used to connect to backend server |
//! | `BACKEND_PASSWORD` | Password used to connect to backend server |
//!
//! ## Start worker locally
//!
//! MCAI Worker SDK can be launched locally - without RabbitMQ.
//! It can process some message for different purpose (functional tests, message order examples, etc.).
//!
//! To start worker in this mode, setup the environment variable `SOURCE_ORDERS` with path(s) to json orders.
//! It can take multiple orders, joined with `:` on unix platform, `;` on windows os.
//!
//! ### Examples:
//!
//! ```bash
//! RUST_LOG=info SOURCE_ORDERS=./examples/success_order.json:./examples/error_order.json cargo run --example worker
//! ```
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate serde_json;
#[cfg(feature = "media")]
#[macro_use]
extern crate yaserde_derive;
mod channels;
mod config;
mod error;
pub mod job;
pub mod message;
pub mod parameter;
pub mod worker;
/// Re-export from lapin Channel
pub use lapin::Channel;
pub use log::{debug, error, info, trace, warn};
pub use schemars::JsonSchema;
/// Re-export from semver:
pub use semver::Version;
pub use error::{MessageError, Result};
#[cfg(feature = "media")]
pub use message::media::{
audio::AudioFormat,
ebu_ttml_live::{
Body, Div, EbuTtmlLive, Frames, Head, Paragraph, Span, Styling, TimeExpression, TimeUnit, Title,
},
filters::{AudioFilter, GenericFilter, VideoFilter},
video::{RegionOfInterest, Scaling, VideoFormat},
StreamDescriptor,
};
pub use message::publish_job_progression;
pub use parameter::container::ParametersContainer;
pub use parameter::{Parameter, ParameterValue, Requirement};
#[cfg(feature = "media")]
pub use stainless_ffmpeg::{format_context::FormatContext, frame::Frame};
use crate::worker::docker;
use chrono::prelude::*;
use config::*;
use env_logger::Builder;
use futures_executor::LocalPool;
use futures_util::{future::FutureExt, stream::StreamExt, task::LocalSpawnExt};
use job::JobResult;
use lapin::{options::*, types::FieldTable, Connection, ConnectionProperties};
use serde::de::DeserializeOwned;
#[cfg(feature = "media")]
use serde::Serialize;
use std::str::FromStr;
#[cfg(feature = "media")]
use std::sync::{mpsc::Sender, Mutex};
use std::{cell::RefCell, fs, io::Write, rc::Rc, sync::Arc, thread, time};
#[cfg(feature = "media")]
use yaserde::YaSerialize;
/// Exposed Channel type
pub type McaiChannel = Arc<Channel>;
#[cfg(feature = "media")]
#[derive(Debug)]
pub struct ProcessResult {
end_of_process: bool,
json_content: Option<String>,
xml_content: Option<String>,
}
#[cfg(feature = "media")]
impl ProcessResult {
pub fn empty() -> Self {
ProcessResult {
end_of_process: false,
json_content: None,
xml_content: None,
}
}
pub fn end_of_process() -> Self {
ProcessResult {
end_of_process: true,
json_content: None,
xml_content: None,
}
}
pub fn new_json<S: Serialize>(content: S) -> Self {
let content = serde_json::to_string(&content).unwrap();
ProcessResult {
end_of_process: false,
json_content: Some(content),
xml_content: None,
}
}
pub fn new_xml<Y: YaSerialize>(content: Y) -> Self {
let content = yaserde::ser::to_string(&content).unwrap();
ProcessResult {
end_of_process: false,
json_content: None,
xml_content: Some(content),
}
}
}
#[cfg(feature = "media")]
pub enum ProcessFrame {
AudioVideo(Frame),
EbuTtmlLive(Box<EbuTtmlLive>),
Data(Vec<u8>),
}
#[cfg(feature = "media")]
impl ProcessFrame {
pub fn get_pts(&self) -> i64 {
match self {
ProcessFrame::AudioVideo(frame) => frame.get_pts(),
ProcessFrame::EbuTtmlLive(_) | ProcessFrame::Data(_) => {
// improvement: support pts to terminate
0
}
}
}
}
/// # Trait to describe a worker
/// Implement this trait to implement a worker
pub trait MessageEvent<P: DeserializeOwned + JsonSchema> {
fn get_name(&self) -> String;
fn get_short_description(&self) -> String;
fn get_description(&self) -> String;
fn get_version(&self) -> semver::Version;
fn init(&mut self) -> Result<()> {
Ok(())
}
#[cfg(feature = "media")]
fn init_process(
&mut self,
_parameters: P,
_format_context: Arc<Mutex<FormatContext>>,
_response_sender: Arc<Mutex<Sender<ProcessResult>>>,
) -> Result<Vec<StreamDescriptor>> {
Ok(vec![])
}
#[cfg(feature = "media")]
fn process_frame(
&mut self,
_job_result: JobResult,
_stream_index: usize,
_frame: ProcessFrame,
) -> Result<ProcessResult> {
Err(MessageError::NotImplemented())
}
#[cfg(feature = "media")]
fn ending_process(&mut self) -> Result<()> {
Ok(())
}
/// Not called when the "media" feature is enabled
fn process(
&self,
_channel: Option<McaiChannel>,
_parameters: P,
_job_result: JobResult,
) -> Result<JobResult>
where
Self: std::marker::Sized,
{
Err(MessageError::NotImplemented())
}
}
/// Function to start a worker
pub fn start_worker<P: DeserializeOwned + JsonSchema, ME: MessageEvent<P>>(mut message_event: ME)
where
ME: std::marker::Sync,
{
let mut builder = Builder::from_default_env();
let amqp_queue = get_amqp_queue();
let instance_id = docker::get_instance_id("/proc/self/cgroup");
let container_id = instance_id.clone();
builder
.format(move |stream, record| {
writeln!(
stream,
"{} - {} - {} - {} - {} - {}",
Utc::now(),
&container_id,
get_amqp_queue(),
record.target().parse::<i64>().unwrap_or(-1),
record.level(),
record.args(),
)
})
.init();
let worker_configuration =
worker::WorkerConfiguration::new(&amqp_queue, &message_event, &instance_id);
if let Err(configuration_error) = worker_configuration {
error!("{:?}", configuration_error);
return;
}
let worker_configuration = worker_configuration.unwrap();
info!(
"Worker: {}, version: {} (MCAI Worker SDK {})",
worker_configuration.get_worker_name(),
worker_configuration.get_worker_version(),
worker_configuration.get_sdk_version(),
);
if let Ok(enabled) = std::env::var("DESCRIBE") {
if enabled == "1" || bool::from_str(&enabled.to_lowercase()).unwrap_or(false) {
match serde_json::to_string_pretty(&worker_configuration) {
Ok(serialized_configuration) => {
println!("{}", serialized_configuration);
return;
}
Err(error) => error!("Could not serialize worker configuration: {:?}", error),
}
}
}
if let Err(message) = message_event.init() {
error!("{:?}", message);
return;
}
let message_event_ref = Rc::new(RefCell::new(message_event));
info!("Worker initialized, ready to receive jobs");
if let Some(source_orders) = get_source_orders() {
warn!("Worker will process source orders");
for source_order in &source_orders {
info!("Start to process order: {:?}", source_order);
let | //! // mcai_worker_sdk::start_worker(&WORKER_NAME_EVENT); | random_line_split |
lib.rs |
//! // fn main() {
//! // mcai_worker_sdk::start_worker(&WORKER_NAME_EVENT);
//! // }
//! ```
//!
//! ## Runtime configuration
//!
//! ### AMQP connection
//!
//! | Variable | Description |
//! |-----------------|-------------|
//! | `AMQP_HOSTNAME` | IP or host of AMQP server (default: `localhost`) |
//! | `AMQP_PORT` | AMQP server port (default: `5672`) |
//! | `AMQP_TLS` | enable secure connection using AMQPS (default: `false`, enable with `true` or `1` or `TRUE` or `True`) |
//! | `AMQP_USERNAME` | Username used to connect to AMQP server (default: `guest`) |
//! | `AMQP_PASSWORD` | Password used to connect to AMQP server (default: `guest`) |
//! | `AMQP_VHOST` | AMQP virtual host (default: `/`) |
//! | `AMQP_QUEUE` | AMQP queue name used to receive job orders (default: `job_undefined`) |
//!
//! ### Vault connection
//!
//! | Variable | Description |
//! |--------------------|-------------|
//! | `BACKEND_HOSTNAME` | URL used to connect to backend server (default: `http://127.0.0.1:4000/api`) |
//! | `BACKEND_USERNAME` | Username used to connect to backend server |
//! | `BACKEND_PASSWORD` | Password used to connect to backend server |
//!
//! ## Start worker locally
//!
//! MCAI Worker SDK can be launched locally - without RabbitMQ.
//! It can process some message for different purpose (functional tests, message order examples, etc.).
//!
//! To start worker in this mode, setup the environment variable `SOURCE_ORDERS` with path(s) to json orders.
//! It can take multiple orders, joined with `:` on unix platform, `;` on windows os.
//!
//! ### Examples:
//!
//! ```bash
//! RUST_LOG=info SOURCE_ORDERS=./examples/success_order.json:./examples/error_order.json cargo run --example worker
//! ```
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate serde_json;
#[cfg(feature = "media")]
#[macro_use]
extern crate yaserde_derive;
mod channels;
mod config;
mod error;
pub mod job;
pub mod message;
pub mod parameter;
pub mod worker;
/// Re-export from lapin Channel
pub use lapin::Channel;
pub use log::{debug, error, info, trace, warn};
pub use schemars::JsonSchema;
/// Re-export from semver:
pub use semver::Version;
pub use error::{MessageError, Result};
#[cfg(feature = "media")]
pub use message::media::{
audio::AudioFormat,
ebu_ttml_live::{
Body, Div, EbuTtmlLive, Frames, Head, Paragraph, Span, Styling, TimeExpression, TimeUnit, Title,
},
filters::{AudioFilter, GenericFilter, VideoFilter},
video::{RegionOfInterest, Scaling, VideoFormat},
StreamDescriptor,
};
pub use message::publish_job_progression;
pub use parameter::container::ParametersContainer;
pub use parameter::{Parameter, ParameterValue, Requirement};
#[cfg(feature = "media")]
pub use stainless_ffmpeg::{format_context::FormatContext, frame::Frame};
use crate::worker::docker;
use chrono::prelude::*;
use config::*;
use env_logger::Builder;
use futures_executor::LocalPool;
use futures_util::{future::FutureExt, stream::StreamExt, task::LocalSpawnExt};
use job::JobResult;
use lapin::{options::*, types::FieldTable, Connection, ConnectionProperties};
use serde::de::DeserializeOwned;
#[cfg(feature = "media")]
use serde::Serialize;
use std::str::FromStr;
#[cfg(feature = "media")]
use std::sync::{mpsc::Sender, Mutex};
use std::{cell::RefCell, fs, io::Write, rc::Rc, sync::Arc, thread, time};
#[cfg(feature = "media")]
use yaserde::YaSerialize;
/// Exposed Channel type
pub type McaiChannel = Arc<Channel>;
#[cfg(feature = "media")]
#[derive(Debug)]
pub struct ProcessResult {
end_of_process: bool,
json_content: Option<String>,
xml_content: Option<String>,
}
#[cfg(feature = "media")]
impl ProcessResult {
pub fn empty() -> Self {
ProcessResult {
end_of_process: false,
json_content: None,
xml_content: None,
}
}
pub fn end_of_process() -> Self {
ProcessResult {
end_of_process: true,
json_content: None,
xml_content: None,
}
}
pub fn new_json<S: Serialize>(content: S) -> Self {
let content = serde_json::to_string(&content).unwrap();
ProcessResult {
end_of_process: false,
json_content: Some(content),
xml_content: None,
}
}
pub fn new_xml<Y: YaSerialize>(content: Y) -> Self {
let content = yaserde::ser::to_string(&content).unwrap();
ProcessResult {
end_of_process: false,
json_content: None,
xml_content: Some(content),
}
}
}
#[cfg(feature = "media")]
pub enum ProcessFrame {
AudioVideo(Frame),
EbuTtmlLive(Box<EbuTtmlLive>),
Data(Vec<u8>),
}
#[cfg(feature = "media")]
impl ProcessFrame {
pub fn get_pts(&self) -> i64 {
match self {
ProcessFrame::AudioVideo(frame) => frame.get_pts(),
ProcessFrame::EbuTtmlLive(_) | ProcessFrame::Data(_) => {
// improvement: support pts to terminate
0
}
}
}
}
/// # Trait to describe a worker
/// Implement this trait to implement a worker
pub trait MessageEvent<P: DeserializeOwned + JsonSchema> {
fn get_name(&self) -> String;
fn get_short_description(&self) -> String;
fn get_description(&self) -> String;
fn get_version(&self) -> semver::Version;
fn init(&mut self) -> Result<()> {
Ok(())
}
#[cfg(feature = "media")]
fn init_process(
&mut self,
_parameters: P,
_format_context: Arc<Mutex<FormatContext>>,
_response_sender: Arc<Mutex<Sender<ProcessResult>>>,
) -> Result<Vec<StreamDescriptor>> {
Ok(vec![])
}
#[cfg(feature = "media")]
fn | (
&mut self,
_job_result: JobResult,
_stream_index: usize,
_frame: ProcessFrame,
) -> Result<ProcessResult> {
Err(MessageError::NotImplemented())
}
#[cfg(feature = "media")]
fn ending_process(&mut self) -> Result<()> {
Ok(())
}
/// Not called when the "media" feature is enabled
fn process(
&self,
_channel: Option<McaiChannel>,
_parameters: P,
_job_result: JobResult,
) -> Result<JobResult>
where
Self: std::marker::Sized,
{
Err(MessageError::NotImplemented())
}
}
/// Function to start a worker
pub fn start_worker<P: DeserializeOwned + JsonSchema, ME: MessageEvent<P>>(mut message_event: ME)
where
ME: std::marker::Sync,
{
let mut builder = Builder::from_default_env();
let amqp_queue = get_amqp_queue();
let instance_id = docker::get_instance_id("/proc/self/cgroup");
let container_id = instance_id.clone();
builder
.format(move |stream, record| {
writeln!(
stream,
"{} - {} - {} - {} - {} - {}",
Utc::now(),
&container_id,
get_amqp_queue(),
record.target().parse::<i64>().unwrap_or(-1),
record.level(),
record.args(),
)
})
.init();
let worker_configuration =
worker::WorkerConfiguration::new(&amqp_queue, &message_event, &instance_id);
if let Err(configuration_error) = worker_configuration {
error!("{:?}", configuration_error);
return;
}
let worker_configuration = worker_configuration.unwrap();
info!(
"Worker: {}, version: {} (MCAI Worker SDK {})",
worker_configuration.get_worker_name(),
worker_configuration.get_worker_version(),
worker_configuration.get_sdk_version(),
);
if let Ok(enabled) = std::env::var("DESCRIBE") {
if enabled == "1" || bool::from_str(&enabled.to_lowercase()).unwrap_or(false) {
match serde_json::to_string_pretty(&worker_configuration) {
Ok(serialized_configuration) => {
println!("{}", serialized_configuration);
return;
}
Err(error) => error!("Could not serialize worker configuration: {:?}", error),
}
}
}
if let Err(message) = message_event.init() {
error!("{:?}", message);
return;
}
let message_event_ref = Rc::new(RefCell::new(message_event));
info!("Worker initialized, ready to receive jobs");
if let Some(source_orders) = get_source_orders() {
warn!("Worker will process source orders");
for source_order in &source_orders {
info!("Start to process order: {:?}", source_order);
| process_frame | identifier_name |
lib.rs | message::publish_job_progression;
pub use parameter::container::ParametersContainer;
pub use parameter::{Parameter, ParameterValue, Requirement};
#[cfg(feature = "media")]
pub use stainless_ffmpeg::{format_context::FormatContext, frame::Frame};
use crate::worker::docker;
use chrono::prelude::*;
use config::*;
use env_logger::Builder;
use futures_executor::LocalPool;
use futures_util::{future::FutureExt, stream::StreamExt, task::LocalSpawnExt};
use job::JobResult;
use lapin::{options::*, types::FieldTable, Connection, ConnectionProperties};
use serde::de::DeserializeOwned;
#[cfg(feature = "media")]
use serde::Serialize;
use std::str::FromStr;
#[cfg(feature = "media")]
use std::sync::{mpsc::Sender, Mutex};
use std::{cell::RefCell, fs, io::Write, rc::Rc, sync::Arc, thread, time};
#[cfg(feature = "media")]
use yaserde::YaSerialize;
/// Exposed Channel type
pub type McaiChannel = Arc<Channel>;
#[cfg(feature = "media")]
#[derive(Debug)]
pub struct ProcessResult {
end_of_process: bool,
json_content: Option<String>,
xml_content: Option<String>,
}
#[cfg(feature = "media")]
impl ProcessResult {
pub fn empty() -> Self {
ProcessResult {
end_of_process: false,
json_content: None,
xml_content: None,
}
}
pub fn end_of_process() -> Self {
ProcessResult {
end_of_process: true,
json_content: None,
xml_content: None,
}
}
pub fn new_json<S: Serialize>(content: S) -> Self {
let content = serde_json::to_string(&content).unwrap();
ProcessResult {
end_of_process: false,
json_content: Some(content),
xml_content: None,
}
}
pub fn new_xml<Y: YaSerialize>(content: Y) -> Self {
let content = yaserde::ser::to_string(&content).unwrap();
ProcessResult {
end_of_process: false,
json_content: None,
xml_content: Some(content),
}
}
}
#[cfg(feature = "media")]
pub enum ProcessFrame {
AudioVideo(Frame),
EbuTtmlLive(Box<EbuTtmlLive>),
Data(Vec<u8>),
}
#[cfg(feature = "media")]
impl ProcessFrame {
pub fn get_pts(&self) -> i64 {
match self {
ProcessFrame::AudioVideo(frame) => frame.get_pts(),
ProcessFrame::EbuTtmlLive(_) | ProcessFrame::Data(_) => {
// improvement: support pts to terminate
0
}
}
}
}
/// # Trait to describe a worker
/// Implement this trait to implement a worker
pub trait MessageEvent<P: DeserializeOwned + JsonSchema> {
fn get_name(&self) -> String;
fn get_short_description(&self) -> String;
fn get_description(&self) -> String;
fn get_version(&self) -> semver::Version;
fn init(&mut self) -> Result<()> {
Ok(())
}
#[cfg(feature = "media")]
fn init_process(
&mut self,
_parameters: P,
_format_context: Arc<Mutex<FormatContext>>,
_response_sender: Arc<Mutex<Sender<ProcessResult>>>,
) -> Result<Vec<StreamDescriptor>> {
Ok(vec![])
}
#[cfg(feature = "media")]
fn process_frame(
&mut self,
_job_result: JobResult,
_stream_index: usize,
_frame: ProcessFrame,
) -> Result<ProcessResult> {
Err(MessageError::NotImplemented())
}
#[cfg(feature = "media")]
fn ending_process(&mut self) -> Result<()> {
Ok(())
}
/// Not called when the "media" feature is enabled
fn process(
&self,
_channel: Option<McaiChannel>,
_parameters: P,
_job_result: JobResult,
) -> Result<JobResult>
where
Self: std::marker::Sized,
{
Err(MessageError::NotImplemented())
}
}
/// Function to start a worker
pub fn start_worker<P: DeserializeOwned + JsonSchema, ME: MessageEvent<P>>(mut message_event: ME)
where
ME: std::marker::Sync,
{
let mut builder = Builder::from_default_env();
let amqp_queue = get_amqp_queue();
let instance_id = docker::get_instance_id("/proc/self/cgroup");
let container_id = instance_id.clone();
builder
.format(move |stream, record| {
writeln!(
stream,
"{} - {} - {} - {} - {} - {}",
Utc::now(),
&container_id,
get_amqp_queue(),
record.target().parse::<i64>().unwrap_or(-1),
record.level(),
record.args(),
)
})
.init();
let worker_configuration =
worker::WorkerConfiguration::new(&amqp_queue, &message_event, &instance_id);
if let Err(configuration_error) = worker_configuration {
error!("{:?}", configuration_error);
return;
}
let worker_configuration = worker_configuration.unwrap();
info!(
"Worker: {}, version: {} (MCAI Worker SDK {})",
worker_configuration.get_worker_name(),
worker_configuration.get_worker_version(),
worker_configuration.get_sdk_version(),
);
if let Ok(enabled) = std::env::var("DESCRIBE") {
if enabled == "1" || bool::from_str(&enabled.to_lowercase()).unwrap_or(false) {
match serde_json::to_string_pretty(&worker_configuration) {
Ok(serialized_configuration) => {
println!("{}", serialized_configuration);
return;
}
Err(error) => error!("Could not serialize worker configuration: {:?}", error),
}
}
}
if let Err(message) = message_event.init() {
error!("{:?}", message);
return;
}
let message_event_ref = Rc::new(RefCell::new(message_event));
info!("Worker initialized, ready to receive jobs");
if let Some(source_orders) = get_source_orders() {
warn!("Worker will process source orders");
for source_order in &source_orders {
info!("Start to process order: {:?}", source_order);
let count = None;
let channel = None;
let message_data = fs::read_to_string(source_order).unwrap();
let result = message::parse_and_process_message(
message_event_ref.clone(),
&message_data,
count,
channel,
message::publish_job_progression,
);
match result {
Ok(mut job_result) => {
job_result.update_execution_duration();
info!(target: &job_result.get_job_id().to_string(), "Process succeeded: {:?}", job_result)
}
Err(message) => {
error!("{:?}", message);
}
}
}
return;
}
loop {
let amqp_uri = get_amqp_uri();
let mut executor = LocalPool::new();
let spawner = executor.spawner();
executor.run_until(async {
let conn = Connection::connect_uri(
amqp_uri,
ConnectionProperties::default().with_default_executor(8),
)
.wait()
.unwrap();
info!("Connected");
let channel = Arc::new(channels::declare_consumer_channel(
&conn,
&worker_configuration,
));
let consumer = channel
.clone()
.basic_consume(
&amqp_queue,
"amqp_worker",
BasicConsumeOptions::default(),
FieldTable::default(),
)
.await
.unwrap();
let status_consumer = channel
.clone()
.basic_consume(
&worker_configuration.get_direct_messaging_queue_name(),
"status_amqp_worker",
BasicConsumeOptions::default(),
FieldTable::default(),
)
.await
.unwrap();
let status_response_channel = channel.clone();
let status_worker_configuration = worker_configuration.clone();
let _consumer = spawner.spawn_local(async move {
status_consumer
.for_each(move |delivery| {
let (_channel, delivery) = delivery.expect("error caught in in consumer");
worker::system_information::send_real_time_information(
delivery,
&status_response_channel,
&status_worker_configuration,
)
.map(|_| ())
})
.await
});
info!("Start to consume on queue {:?}", amqp_queue);
let clone_channel = channel.clone();
let message_event = message_event_ref.clone();
consumer
.for_each(move |delivery| {
let (_channel, delivery) = delivery.expect("error caught in in consumer");
message::process_message(message_event.clone(), delivery, clone_channel.clone())
.map(|_| ())
})
.await
});
let sleep_duration = time::Duration::new(1, 0);
thread::sleep(sleep_duration);
info!("Reconnection...");
}
}
#[test]
fn empty_message_event_impl() {
#[derive(Debug)]
struct CustomEvent {}
#[derive(JsonSchema, Deserialize)]
struct CustomParameters {}
impl MessageEvent<CustomParameters> for CustomEvent {
fn get_name(&self) -> String {
"custom".to_string()
}
fn get_short_description(&self) -> String {
"short description".to_string()
}
fn get_description(&self) -> String | {
"long description".to_string()
} | identifier_body | |
esp.py | (len(ridge_list)):
r = ridge_list[i]
L.append(Ridge_Facet(r.E_r, r.ar, r.br, E_0, af, bf))
G = af.T
g = bf
if verbose > 0:
print("\nStarting eq set " + str(E_0) + "\nStarting ridges ")
for rr in L:
print(str(rr.E_r))
E.append(E_0)
while len(L) > 0:
rid_fac1 = L[0]
if verbose > 0:
print("\nLooking for neighbors to " + str(rid_fac1.E_0) +
" and " + str(rid_fac1.E_r) + " ..")
E_adj, a_adj, b_adj = adjacent(C, D, b, rid_fac1, abs_tol=abs_tol)
if verbose > 0:
print("found neighbor " + str(E_adj) +
". \n\nLooking for ridges of neighbor..")
ridge_list = ridge(
C, D, b, E_adj, a_adj, b_adj,
abs_tol=abs_tol, verbose=verbose)
if verbose > 0:
print("found " + str(len(ridge_list)) + " ridges\n")
found_org = False
for i in range(len(ridge_list)):
r = ridge_list[i]
E_r = r.E_r
ar = r.ar
br = r.br
found = False
for j in range(len(L)):
rid_fac2 = L[j]
A_r = rid_fac2.E_r
if len(A_r) != len(E_r):
continue
t1 = np.sort(np.array(A_r))
t2 = np.sort(np.array(E_r))
if np.sum(np.abs(t1 - t2)) < abs_tol:
found = True
break
if found:
if verbose > 0:
print("Ridge " + str(E_r) +
" already visited, removing from L..")
if rid_fac2 == rid_fac1:
found_org = True
L.remove(rid_fac2)
else:
if verbose > 0:
print("Adding ridge-facet " + str(E_adj) +
" " + str(E_r) + "")
L.append(Ridge_Facet(E_r, ar, br, E_adj, a_adj, b_adj))
if not found_org:
print("Expected ridge " + str(rid_fac1.E_r))
print("but got ridges ")
for rid in ridge_list:
print(rid.E_r)
raise Exception(
"esp: ridge did not return neighboring ridge as expected")
G = np.vstack([G, a_adj])
g = np.hstack([g, b_adj])
E.append(E_adj)
# Restore center
if trans:
g = g + np.dot(G, xc0)
# Return zero rows
for Ef in E:
Ef = nonzerorows[Ef]
return G, g, E
def shoot(C, D, b, maxiter=1000, abs_tol=1e-7):
"""Return random equality set of P that projects on a projection facet.
Returns randomly selected equality set E_0 of P such
that the projection of the equality set is a facet of the projection.
@param C: Matrix defining the polytope Cx+Dy <= b
@param D: Matrix defining the polytope Cx+Dy <= b
@param b: Vector defining the polytope Cx+Dy <= b
@return: `E_0,af,bf`: Equality set and affine hull
"""
d = C.shape[1]
k = D.shape[1]
iter = 0
while True:
if iter > maxiter:
raise Exception(
"shoot: could not find starting equality set")
gamma = np.random.rand(d) - 0.5
c = np.zeros(k + 1)
c[0] = -1
G = np.hstack([np.array([np.dot(C, gamma)]).T, D])
sol = solvers.lpsolve(c, G, b, solver='glpk')
opt_sol = np.array(sol['x']).flatten()
opt_dual = np.array(sol['z']).flatten()
r_opt = opt_sol[0]
y_opt = np.array(opt_sol[range(1, len(opt_sol))]).flatten()
x_opt = r_opt * gamma
E_0 = np.nonzero(
np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol)[0]
DE0 = D[E_0, :]
CE0 = C[E_0, :]
b0 = b[E_0]
if rank(np.dot(null_space(DE0.T).T, CE0)) == 1:
break
iter += 1
af, bf = proj_aff(CE0, DE0, b0, abs_tol=abs_tol)
if is_dual_degenerate(c, G, b, None, None, opt_sol,
opt_dual, abs_tol=abs_tol):
E_0 = unique_equalityset(C, D, b, af, bf, abs_tol=abs_tol)
af, bf = proj_aff(C[E_0, :], D[E_0, :], b[E_0])
if len(bf) > 1:
raise Exception("shoot: wrong dimension of affine hull")
return E_0, af.flatten(), bf
def ridge(C, D, b, E, af, bf, abs_tol=1e-7, verbose=0):
"""Compute all ridges of a facet in the projection.
Input:
`C,D,b`: Original polytope data
`E,af,bf`: Equality set and affine hull of a facet in the projection
Output:
`ridge_list`: A list containing all the ridges of
the facet as Ridge objects
"""
d = C.shape[1]
k = D.shape[1]
Er_list = []
q = C.shape[0]
E_c = np.setdiff1d(range(q), E)
# E slices
C_E = C[E, :]
D_E = D[E, :]
b_E = b[E, :]
# E_c slices
C_Ec = C[E_c, :]
D_Ec = D[E_c, :]
b_Ec = b[E_c]
# dots
S = C_Ec - np.dot(np.dot(D_Ec, linalg.pinv(D_E)), C_E)
L = np.dot(D_Ec, null_space(D_E))
t = b_Ec - np.dot(D_Ec, np.dot(linalg.pinv(D_E), b_E))
if rank(np.hstack([C_E, D_E])) < k + 1:
if verbose > 1:
print("Doing recursive ESP call")
u, s, v = linalg.svd(np.array([af]), full_matrices=1)
sigma = s[0]
v = v.T * u[0, 0] # Correct sign
V_hat = v[:, [0]]
V_tilde = v[:, range(1, v.shape[1])]
Cnew = np.dot(S, V_tilde)
Dnew = L
bnew = t - np.dot(S, V_hat).flatten() * bf / sigma
Anew = np.hstack([Cnew, Dnew])
xc2, yc2, cen2 = cheby_center(Cnew, Dnew, bnew)
bnew = bnew - np.dot(Cnew, xc2).flatten() - np.dot(Dnew, yc2).flatten()
Gt, gt, E_t = esp(
Cnew, Dnew, bnew,
centered=True, abs_tol=abs_tol, verbose=0)
if (len(E_t[0]) == 0) or (len(E_t[1]) == 0):
raise Exception(
"ridge: recursive call did not return any equality sets")
for i in range(len(E_t)):
E_f = E_t[i]
er = np.sort(np.hstack([E, E_c[E_f]]))
ar = np.dot(Gt[i, :], V_tilde.T).flatten()
br0 = gt[i].flatten()
# Make orthogonal to facet
ar = ar - af * np.dot(af.flatten(), ar.flatten())
br = br0 - bf * np.dot(af.flatten(), ar.flatten())
# Normalize and make ridge equation point outwards
norm = np.sqrt(np.sum(ar * ar))
ar = ar * np.sign(br) / norm
br = br * np.sign(br) / norm
# Restore center
br = br + np.dot(Gt[i, :], xc2) / norm
if len(ar) > d:
raise Exception("ridge: wrong length of new ridge!")
Er_list.append(Ridge(er, ar, br))
else:
if verbose > 0:
print("Doing direct calculation of ridges")
X = np.arange(S.shape[0])
while len(X) > 0:
i = X[0]
X = np.setdiff1d(X, i)
if np.linalg.norm(S[i, :]) < abs_tol:
| continue | conditional_block | |
esp.py |
class Ridge_Facet(object):
"""A ridge facet.
Attributes:
- `E_r`: Equality set of a ridge
- `ar,br`: Affine hull of the ridge s.t.
P_{E_f} intersection {x | ar x = br}
defines the ridge, where E_f is the
equality set of the facet.
- `E_0`: Equality set of a facet
- `af,bf`: Affine hull of the facet.
"""
def __init__(self, E_r, ar, br, E_0, af, bf):
self.E_r = E_r
self.ar = ar
self.br = br
self.E_0 = E_0
self.af = af
self.bf = bf
def esp(CC, DD, bb, centered=False, abs_tol=1e-10, verbose=0):
"""Project polytope [C D] x <= b onto C coordinates.
Projects the polytope [C D] x <= b onto the
coordinates that correspond to C. The projection of the polytope
P = {[C D]x <= b} where C is M x D and D is M x K is
defined as proj(P) = {x in R^d | exist y in R^k s.t Cx + Dy < b}
"""
if 'glpk' not in solvers.installed_solvers:
raise Exception(
"projection_esp error:"
" Equality set projection requires `cvxopt.glpk` to run.")
# Remove zero columns and rows
nonzerorows = np.nonzero(
np.sum(np.abs(np.hstack([CC, DD])), axis=1) > abs_tol)[0]
nonzeroxcols = np.nonzero(np.sum(np.abs(CC), axis=0) > abs_tol)[0]
nonzeroycols = np.nonzero(np.sum(np.abs(DD), axis=0) > abs_tol)[0]
C = CC[nonzerorows, :].copy()
D = DD[nonzerorows, :].copy()
C = C[:, nonzeroxcols]
D = D[:, nonzeroycols]
b = bb[nonzerorows].copy()
# Make sure origo is inside polytope
if not centered:
xc0, yc0, trans = cheby_center(C, D, b)
if trans:
b = b - np.dot(C, xc0).flatten() - np.dot(D, yc0).flatten()
else:
b = b
else:
trans = False
d = C.shape[1]
k = D.shape[1]
if verbose > 0:
print("Projecting from dim " + str(d + k) + " to " + str(d))
if k == 0:
# Not projecting
return C, bb, []
if d == 1:
# Projection to 1D
c = np.zeros(d + k)
c[0] = 1
G = np.hstack([C, D])
sol = solvers.lpsolve(c, G, b, solver='glpk')
if sol['status'] != "optimal":
raise Exception(
"esp: projection to 1D is not full-dimensional, "
"LP returned status " + str(sol['status']))
min_sol = np.array(sol['x']).flatten()
min_dual_sol = np.array(sol['z']).flatten()
sol = solvers.lpsolve(-c, G, b, solver='glpk')
if sol['status'] != "optimal":
raise Exception(
"esp: projection to 1D is not full-dimensional, " +
"LP returned status " + str(sol['status']))
max_sol = np.array(sol['x']).flatten()
max_dual_sol = np.array(sol['z']).flatten()
# min, max
x_min = min_sol[0]
x_max = max_sol[0]
y_min = min_sol[range(1, k + 1)]
y_max = max_sol[range(1, k + 1)]
if is_dual_degenerate(c, G, b, None, None, min_sol, min_dual_sol):
# Min case, relax constraint a little to avoid infeasibility
E_min = unique_equalityset(
C, D, b, np.array([1.]), x_min + abs_tol / 3, abs_tol=abs_tol)
else:
E_min = np.nonzero(np.abs(np.dot(G, min_sol) - b) < abs_tol)[0]
if is_dual_degenerate(c, G, b, None, None, max_sol, max_dual_sol):
# Max case, relax constraint a little to avoid infeasibility
E_max = unique_equalityset(
C, D, b, np.array([1.]), x_max - abs_tol / 3, abs_tol=abs_tol)
else:
E_max = np.nonzero(np.abs(np.dot(G, max_sol) - b) < abs_tol)[0]
G = np.array([[1.], [-1.]])
g = np.array([x_max, -x_min])
# Relocate
if trans:
g = g + np.dot(G, xc0)
# Return zero cols/rows
E_max = nonzerorows[E_max]
E_min = nonzerorows[E_min]
if verbose > 0:
print(
"Returning projection from dim " +
str(d + k) + " to dim 1 \n")
return G, g, [E_max, E_min]
E = []
L = []
E_0, af, bf = shoot(C, D, b, abs_tol=abs_tol)
ridge_list = ridge(C, D, b, E_0, af, bf, abs_tol=abs_tol, verbose=verbose)
for i in range(len(ridge_list)):
r = ridge_list[i]
L.append(Ridge_Facet(r.E_r, r.ar, r.br, E_0, af, bf))
G = af.T
g = bf
if verbose > 0:
print("\nStarting eq set " + str(E_0) + "\nStarting ridges ")
for rr in L:
print(str(rr.E_r))
E.append(E_0)
while len(L) > 0:
rid_fac1 = L[0]
if verbose > 0:
print("\nLooking for neighbors to " + str(rid_fac1.E_0) +
" and " + str(rid_fac1.E_r) + " ..")
E_adj, a_adj, b_adj = adjacent(C, D, b, rid_fac1, abs_tol=abs_tol)
if verbose > 0:
print("found neighbor " + str(E_adj) +
". \n\nLooking for ridges of neighbor..")
ridge_list = ridge(
C, D, b, E_adj, a_adj, b_adj,
abs_tol=abs_tol, verbose=verbose)
if verbose > 0:
print("found " + str(len(ridge_list)) + " ridges\n")
found_org = False
for i in range(len(ridge_list)):
r = ridge_list[i]
E_r = r.E_r
ar = r.ar
br = r.br
found = False
for j in range(len(L)):
rid_fac2 = L[j]
A_r = rid_fac2.E_r
if len(A_r) != len(E_r):
continue
t1 = np.sort(np.array(A_r))
t2 = np.sort(np.array(E_r))
if np.sum(np.abs(t1 - t2)) < abs_tol:
found = True
break
if found:
if verbose > 0:
print("Ridge " + str(E_r) +
" already visited, removing from L..")
if rid_fac2 == rid_fac1:
found_org = True
L.remove(rid_fac2)
else:
if verbose > 0:
print("Adding ridge-facet " + str(E_adj) +
" " + str(E_r) + "")
L.append(Ridge_Facet(E_r, ar, br, E_adj, a_adj, b_adj))
if not found_org:
print("Expected ridge " + str(rid_fac1.E_r))
print("but got ridges ")
for rid in ridge_list:
print(rid.E_r)
raise Exception(
"esp: ridge did not return neighboring ridge as expected")
G = np.vstack([G, a_adj])
g = np.hstack([g, b_adj])
E.append(E_adj)
# Restore center
if trans:
g = g + np.dot(G, xc0)
# Return zero rows
for Ef in E:
Ef = nonzerorows[Ef]
return G, g, E
def shoot(C, D, b, maxiter=1000, abs_tol=1e-7):
"""Return random equality set of P that projects on a projection facet.
Returns randomly selected equality set E_0 of P such
that the projection of the equality set is a facet of the projection.
@ | self.E_r = E
self.ar = a
self.br = b | identifier_body | |
esp.py | abs_tol)
return E_adj, af_adj, bf_adj
def proj_aff(Ce, De, be, expected_dim=None, abs_tol=1e-7):
"""Affine projection.
Compute the set aff = {x | Ce x + De y = be} on the form
aff = ({x | a x = b} intersection {Ce x + De y < be}).
Input: Polytope parameters Ce, De and be
Output: Constants a and b
"""
# Remove zero columns
ind = np.nonzero(np.sum(np.abs(De), axis=0) > abs_tol)[0]
D = De[:, ind]
if D.shape[1] == 0:
a = Ce
b = be
a_n, b_n = normalize(a, b)
if expected_dim is not None:
if expected_dim != b_n.size:
raise Exception(
"proj_aff: wrong dimension calculated in 1")
return a_n.flatten(), b_n
sh = np.shape(D.T)
m = sh[0]
n = sh[1]
nDe = null_space(D.T)
a = np.dot(nDe.T, Ce)
b = np.dot(nDe.T, be)
a_n, b_n = normalize(a, b)
if expected_dim is not None:
if expected_dim != b_n.size:
raise Exception("proj_aff: wrong dimension calculated in 2")
return a_n, b_n
def is_dual_degenerate(c, G, h, A, b, x_opt, z_opt, abs_tol=1e-7):
"""Return `True` if pair of dual problems is dual degenerate.
Checks if the pair of dual problems::
(P): min c'x (D): max h'z + b'y
s.t Gx <= h s.t G'z + A'y = c
Ax = b z <= 0
is dual degenerate, i.e. if (P) has several optimal solutions.
Optimal solutions x* and z* are required.
Input:
`G,h,A,b`: Parameters of (P)
`x_opt`: One optimal solution to (P)
`z_opt`: The optimal solution to (D) corresponding to
_inequality constraints_ in (P)
Output:
`dual`: Boolean indicating whether (P) has many optimal solutions.
"""
D = - G
d = - h.flatten()
mu = - z_opt.flatten() # mu >= 0
# Active constraints
I = np.nonzero(np.abs(np.dot(D, x_opt).flatten() - d) < abs_tol)[0]
# Positive elements in dual opt
J = np.nonzero(mu > abs_tol)[0]
# i, j
i = mu < abs_tol # Zero elements in dual opt
i = i.astype(int)
j = np.zeros(len(mu), dtype=int)
j[I] = 1 # 1 if active
# Indices where active constraints have 0 dual opt
L = np.nonzero(i + j == 2)[0]
# sizes
nI = len(I)
nJ = len(J)
nL = len(L)
# constraints
DI = D[I, :] # Active constraints
DJ = D[J, :] # Constraints with positive lagrange mult
DL = D[L, :] # Active constraints with zero dual opt
dual = 0
if A is None:
test = DI
else:
test = np.vstack([DI, A])
if rank(test) < np.amin(DI.shape):
return True
else:
if len(L) > 0:
if A is None:
Ae = DJ
else:
Ae = np.vstack([DJ, A])
be = np.zeros(Ae.shape[0])
Ai = - DL
bi = np.zeros(nL)
sol = solvers._solve_lp_using_cvxopt(
c= - np.sum(DL, axis=0), G=Ai,
h=bi, A=Ae, b=be)
if sol['status'] == "dual infeasible":
# Dual infeasible -> primal unbounded -> value>epsilon
return True
if sol['primal objective'] > abs_tol:
return True
return False
def unique_equalityset(C, D, b, af, bf, abs_tol=1e-7, verbose=0):
"""Return equality set E with the following property:
P_E = {x | af x = bf} intersection P
where P is the polytope C x + D y < b
The inequalities have to be satisfied with equality everywhere on
the face defined by af and bf.
"""
if D is not None:
A = np.hstack([C, D])
a = np.hstack([af, np.zeros(D.shape[1])])
else:
A = C
a = af
E = []
for i in range(A.shape[0]):
A_i = np.array(A[i, :])
b_i = b[i]
sol = solvers._solve_lp_using_cvxopt(
c=A_i, G=A, h=b,
A=a.T, b=bf)
if sol['status'] != "optimal":
raise Exception(
"unique_equalityset: LP returned status " +
str(sol['status']))
if np.abs(sol['primal objective'] - b_i) < abs_tol:
# Constraint is active everywhere
E.append(i)
if len(E) == 0:
raise Exception("unique_equalityset: empty E")
return np.array(E)
def unique_equalityset2(C, D, b, opt_sol, abs_tol=1e-7):
A = np.hstack([C, D])
E0 = np.nonzero(np.abs(np.dot(A, opt_sol) - b) < abs_tol)[0]
af, bf = proj_aff(C[E0, :], D[E0, :], b[E0], expected_dim=1)
# stack
ineq = np.hstack([af, np.zeros(D.shape[1])])
G = np.vstack([A, np.vstack([ineq, -ineq])])
h = np.hstack([b, np.hstack([bf, -bf])])
# shape
m = G.shape[0]
n = G.shape[1]
# ht
e = 1e-3
v = np.vstack([np.zeros([1, n]), np.eye(n)]).T
v = v - np.array([np.mean(v, axis=1)]).T
v = v * e
ht = h + np.amin(-np.dot(G, v), axis=1)
# stack
H1 = np.hstack([G, -np.eye(m)])
H2 = np.hstack([G, np.zeros([m, m])])
H3 = np.hstack([np.zeros([m, n]), -np.eye(m)])
H = np.vstack([H1, np.vstack([H2, H3])])
h = np.hstack([ht, np.hstack([h, np.zeros(m)])])
c = np.hstack([np.zeros(n), np.ones(m)])
sol = solvers.lpsolve(c, H, h, solver='glpk')
if not sol['status'] == "optimal":
raise Exception(
"unique_equalityset: LP returned status " +
str(sol['status']))
opt_sol2 = np.array(sol['x']).flatten()
x = opt_sol2[range(n)]
s = opt_sol2[range(n, len(opt_sol2))]
E = np.nonzero(s > abs_tol)[0]
print(E)
E = np.sort(E[np.nonzero(E < C.shape[0])])
# Check that they define the same projection
at, bt = proj_aff(C[E, :], D[E, :], b[E])
if bt.size != 1 or np.sum(np.abs(at - af)) + np.abs(bt - bf) > abs_tol:
raise Exception("unique_equalityset2: affine hulls not the same")
return E
def cheby_center(C, D, b):
"""Calculate Chebyshev center for the polytope `C x + D y <= b`.
Input:
`C, D, b`: Polytope parameters
Output:
`x_0, y_0`: The chebyshev centra
`boolean`: True if a point could be found, False otherwise.
"""
d = C.shape[1]
k = D.shape[1]
A = np.hstack([C, D])
dim = np.shape(A)[1]
c = - np.r_[np.zeros(dim), 1]
norm2 = np.sqrt(np.sum(A * A, axis=1))
G = np.c_[A, norm2]
sol = solvers.lpsolve(c, G, h=b, solver='glpk')
if sol['status'] == "optimal":
opt = np.array(sol['x'][0:-1]).flatten()
return opt[range(d)], opt[range(d, d + k)], True
else:
return np.zeros(d), np.zeros(k), False
def | normalize | identifier_name | |
esp.py | _tol=abs_tol)
if verbose > 0:
print("found neighbor " + str(E_adj) +
". \n\nLooking for ridges of neighbor..")
ridge_list = ridge(
C, D, b, E_adj, a_adj, b_adj,
abs_tol=abs_tol, verbose=verbose)
if verbose > 0:
print("found " + str(len(ridge_list)) + " ridges\n")
found_org = False
for i in range(len(ridge_list)):
r = ridge_list[i]
E_r = r.E_r
ar = r.ar
br = r.br
found = False
for j in range(len(L)):
rid_fac2 = L[j]
A_r = rid_fac2.E_r
if len(A_r) != len(E_r):
continue
t1 = np.sort(np.array(A_r))
t2 = np.sort(np.array(E_r))
if np.sum(np.abs(t1 - t2)) < abs_tol:
found = True
break
if found:
if verbose > 0:
print("Ridge " + str(E_r) +
" already visited, removing from L..")
if rid_fac2 == rid_fac1:
found_org = True
L.remove(rid_fac2)
else:
if verbose > 0:
print("Adding ridge-facet " + str(E_adj) +
" " + str(E_r) + "")
L.append(Ridge_Facet(E_r, ar, br, E_adj, a_adj, b_adj))
if not found_org:
print("Expected ridge " + str(rid_fac1.E_r))
print("but got ridges ")
for rid in ridge_list:
print(rid.E_r)
raise Exception(
"esp: ridge did not return neighboring ridge as expected")
G = np.vstack([G, a_adj])
g = np.hstack([g, b_adj])
E.append(E_adj)
# Restore center
if trans:
g = g + np.dot(G, xc0)
# Return zero rows
for Ef in E:
Ef = nonzerorows[Ef]
return G, g, E
def shoot(C, D, b, maxiter=1000, abs_tol=1e-7):
"""Return random equality set of P that projects on a projection facet.
Returns randomly selected equality set E_0 of P such
that the projection of the equality set is a facet of the projection.
@param C: Matrix defining the polytope Cx+Dy <= b
@param D: Matrix defining the polytope Cx+Dy <= b
@param b: Vector defining the polytope Cx+Dy <= b
@return: `E_0,af,bf`: Equality set and affine hull
"""
d = C.shape[1]
k = D.shape[1]
iter = 0
while True:
if iter > maxiter:
raise Exception(
"shoot: could not find starting equality set")
gamma = np.random.rand(d) - 0.5
c = np.zeros(k + 1)
c[0] = -1
G = np.hstack([np.array([np.dot(C, gamma)]).T, D])
sol = solvers.lpsolve(c, G, b, solver='glpk')
opt_sol = np.array(sol['x']).flatten()
opt_dual = np.array(sol['z']).flatten()
r_opt = opt_sol[0]
y_opt = np.array(opt_sol[range(1, len(opt_sol))]).flatten()
x_opt = r_opt * gamma
E_0 = np.nonzero(
np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol)[0]
DE0 = D[E_0, :]
CE0 = C[E_0, :]
b0 = b[E_0]
if rank(np.dot(null_space(DE0.T).T, CE0)) == 1:
break
iter += 1
af, bf = proj_aff(CE0, DE0, b0, abs_tol=abs_tol)
if is_dual_degenerate(c, G, b, None, None, opt_sol,
opt_dual, abs_tol=abs_tol):
E_0 = unique_equalityset(C, D, b, af, bf, abs_tol=abs_tol)
af, bf = proj_aff(C[E_0, :], D[E_0, :], b[E_0])
if len(bf) > 1:
raise Exception("shoot: wrong dimension of affine hull")
return E_0, af.flatten(), bf
def ridge(C, D, b, E, af, bf, abs_tol=1e-7, verbose=0):
"""Compute all ridges of a facet in the projection.
Input:
`C,D,b`: Original polytope data
`E,af,bf`: Equality set and affine hull of a facet in the projection
Output:
`ridge_list`: A list containing all the ridges of
the facet as Ridge objects
"""
d = C.shape[1]
k = D.shape[1]
Er_list = []
q = C.shape[0]
E_c = np.setdiff1d(range(q), E)
# E slices
C_E = C[E, :]
D_E = D[E, :]
b_E = b[E, :]
# E_c slices
C_Ec = C[E_c, :]
D_Ec = D[E_c, :]
b_Ec = b[E_c]
# dots
S = C_Ec - np.dot(np.dot(D_Ec, linalg.pinv(D_E)), C_E)
L = np.dot(D_Ec, null_space(D_E))
t = b_Ec - np.dot(D_Ec, np.dot(linalg.pinv(D_E), b_E))
if rank(np.hstack([C_E, D_E])) < k + 1:
if verbose > 1:
print("Doing recursive ESP call")
u, s, v = linalg.svd(np.array([af]), full_matrices=1)
sigma = s[0]
v = v.T * u[0, 0] # Correct sign
V_hat = v[:, [0]]
V_tilde = v[:, range(1, v.shape[1])]
Cnew = np.dot(S, V_tilde)
Dnew = L
bnew = t - np.dot(S, V_hat).flatten() * bf / sigma
Anew = np.hstack([Cnew, Dnew])
xc2, yc2, cen2 = cheby_center(Cnew, Dnew, bnew)
bnew = bnew - np.dot(Cnew, xc2).flatten() - np.dot(Dnew, yc2).flatten()
Gt, gt, E_t = esp(
Cnew, Dnew, bnew,
centered=True, abs_tol=abs_tol, verbose=0)
if (len(E_t[0]) == 0) or (len(E_t[1]) == 0):
raise Exception(
"ridge: recursive call did not return any equality sets")
for i in range(len(E_t)):
E_f = E_t[i]
er = np.sort(np.hstack([E, E_c[E_f]]))
ar = np.dot(Gt[i, :], V_tilde.T).flatten()
br0 = gt[i].flatten()
# Make orthogonal to facet
ar = ar - af * np.dot(af.flatten(), ar.flatten())
br = br0 - bf * np.dot(af.flatten(), ar.flatten())
# Normalize and make ridge equation point outwards
norm = np.sqrt(np.sum(ar * ar))
ar = ar * np.sign(br) / norm
br = br * np.sign(br) / norm
# Restore center
br = br + np.dot(Gt[i, :], xc2) / norm
if len(ar) > d:
raise Exception("ridge: wrong length of new ridge!")
Er_list.append(Ridge(er, ar, br))
else:
if verbose > 0:
print("Doing direct calculation of ridges")
X = np.arange(S.shape[0])
while len(X) > 0:
i = X[0]
X = np.setdiff1d(X, i)
if np.linalg.norm(S[i, :]) < abs_tol:
continue
Si = S[i, :]
Si = Si / np.linalg.norm(Si)
if np.linalg.norm(af - np.dot(Si, af) * Si) > abs_tol:
test1 = null_space(
np.vstack([
np.hstack([af, bf]),
np.hstack([S[i, :], t[i]])]),
nonempty=True)
test2 = np.hstack([S, np.array([t]).T])
test = np.dot(test1.T, test2.T)
test = np.sum(np.abs(test), 0)
Q_i = np.nonzero(test > abs_tol)[0] | Q = np.nonzero(test < abs_tol)[0]
X = np.setdiff1d(X, Q)
# Have Q_i
Sq = S[Q_i, :]
tq = t[Q_i] | random_line_split | |
learnPython.py | 数据量。flag提供有关消息的其他信息,通常可以忽略。
# s.send() 发送TCP数据,将string中的数据发送到连接的套接字。返回值是要发送的字节数量,该数量可能小于string的字节大小。
# s.sendall() 完整发送TCP数据,完整发送TCP数据。将string中的数据发送到连接的套接字,但在返回之前会尝试发送所有数据。成功返回None,失败则抛出异常。
# s.recvfrom() 接收UDP数据,与recv()类似,但返回值是(data,address)。其中data是包含接收数据的字符串,address是发送数据的套接字地址。
# s.sendto() 发送UDP数据,将数据发送到套接字,address是形式为(ipaddr,port)的元组,指定远程地址。返回值是发送的字节数。
# s.close() 关闭套接字
# s.fileno() 返回套接字的文件描述符。
# s.setblocking(flag) 如果flag为0,则将套接字设为非阻塞模式,否则将套接字设为阻塞模式(默认值)。非阻塞模式下,如果调用recv()没有发现任何数据,或send()调用无法立即发送数据,那么将引起socket.error异常。
# 注:send、recv发送的是bytes,用户可以看清的是str
# encode(): 将str编码为指定的bytes
# decode(): 如果我们从网络或磁盘上读取了字节流,那么读到的数据就是bytes -> bytes变为str
# 示例:
# n = s.send(send_data.encode())
# recv_data = s.recv(1024).decode()
# https://www.cnblogs.com/nulige/p/6235531.html?utm_source=itdadao&utm_medium=referral 使用到的模块解析
# 注:socketserver详解
# SocketServer框架式一个基本的socket服务器框架,使用了threading来处理多个客户端的连接,使用seletor模块来处理高并发访问
# SocketServer内部使用IO多路复用以及"多进程"和"多线程",从而实现并发处理客户端请求
# SocketServer提供5个基本服务类:
# -请求处理类
# - BaseServer 基类,不直接对外服务
# - TCPServer:派生类,针对TCP套接字流
# - UnixStreamServer针对UNIX域套接字,不常用
# - UDPServer:派生类,针对UDP数据报套接字
# - UnixDatagramServer针对UNIX域套接字,不常用
# 请求处理类有三种方法:
# - setup() 也就是在handle()之前被调用,主要的作用就是执行处理请求之前的初始化相关的各种工作。默认不会做任何事
# - handle() 做那些所有与处理请求相关的工作。默认也不会做任何事。他有数个实例参数:self.request self.client_address self.server
# - finish() 在handle()方法之后会被调用,他的作用就是执行当处理完请求后的清理工作,默认不会做任何事
#
# 用socketserver创建一个服务的步骤:
# 1、创建一个request handler class(请求处理类),合理选择StreamRequestHandler和DatagramRequestHandler之中的一个作为父类(当然,使用BaseRequestHandler作为父类也可),并重写它的handle()方法。
# 2、实例化一个server class对象,并将服务的地址和之前创建的request handler class传递给它。
# 3、调用server class对象的handle_request() 或 serve_forever()方法来开始处理请求。
#
# 十一、多线程
# Python使用线程有两种方式:函数 或者 用类来包装线程对象
# (1) thread模块的start_new_thread()函数
# 语法:start_new_thread(function,args[,kwargs])
# - function: 线程函数
# - args: 传递给线程函数的参数,必须是tuple类型(元组类型)
# - kwargs: 可选参数
# (2)threading 模块除了包含 _thread 模块中的所有方法外,还提供的其他方法:
# threading.currentThread(): 返回当前的线程变量。
# threading.enumerate(): 返回一个包含正在运行的线程的list。正在运行指线程启动后、结束前,不包括启动前和终止后的线程。
# threading.activeCount(): 返回正在运行的线程数量,与len(threading.enumerate())有相同的结果。
#
# 除了使用方法外,线程模块同样提供了Thread类来处理线程,Thread类提供了以下方法:
# run(): 用以表示线程活动的方法。
# start():启动线程活动。
# join([time]): 等待至线程中止。这阻塞调用线程直至线程的join() 方法被调用中止-正常退出或者抛出未处理的异常-或者是可选的超时发生。
# isAlive(): 返回线程是否活动的。
# getName(): 返回线程名。
# setName(): 设置线程名。
# 十二、Gevent
# Gevent是一个基于greenlet的Python的并发框架,以微线程greenlet为核心,使用了epoll事件监听机制以及诸多其他优化而变得高效。
# gevent每次遇到io操作,需要耗时等待时,会自动跳到下一个协程继续执行
# gevent是第三方库,通过greenlet实现协程的基本思想是:
# - 当一个greenlet遇到IO操作时,比如访问网络,就自动切换到其他的greenlet,等待IO操作完成,再在适当的时候切换回来继续执行。
# - 由于IO操作非常耗时,经常使程序处于等待状态,有了gevent为我们自动切换协程,就保证总有greenlet在运行,而不是等待IO
# - 在gevent里面,上下文切换是通过yielding(退位)来完成 -> 通过调用gevent.sleep(***),让它们yield向对方
# (1) 协程,又称微线程,纤程
# - 协程的特点在于是一个线程执行
# - 最大的优势就是协程极高的执行效率。因为子程序切换不是线程切换,而是由程序自身控制,因此,没有线程切换的开销,和多线程比,线程数量越多,协程的性能优势就越明显。
# - 第二大优势就是不需要多线程的锁机制,因为只有一个线程,也不存在同时写变量冲突,在协程中控制共享资源不加锁,只需要判断状态就好了,所以执行效率比多线程高很多。
# - 因为协程是一个线程执行,那怎么利用多核CPU呢?最简单的方法是多进程+协程,既充分利用多核,又充分发挥协程的高效率,可获得极高的性能
# (2) Greenlets
# 在gevent中用到的主要模式是Greenlet,它是以C扩展模块形式接入Python的轻量级协程
# - 创建Greenlets
# import gevent
# from gevent import Greenlet
#
# def foo(message, n):
# """
# Each thread will be passed the message, and n arguments
# in its initialization.
# """
# gevent.sleep(n)
# print(message)
#
#
# thread1 = Greenlet.spawn(foo, "Hello", 1)
# thread2 = gevent.spawn(foo, "I live!", 2)
# thread3 = gevent.spawn(lambda x: (x+1), 2)
#
# threads = [thread1, thread2, thread3]
#
# # Block until all threads complete.
# gevent.joinall(threads)
# 十三、python调用shell命令
# (1) os模块的system方法
# system方法:会创建子进程执行外部程序
# 示例: os.system("ls")
# (2) os模块popen方法
# popen方法:可以的搭配shell命令的返回值 os.popen(cmd)后,需要在调用read()或者readlines()这两个命令,输出结果
# 示例:os.popen("ls").read()
# (3) commands模块
# 使用commands模块的getoutput方法,这样的方法同popend的差别在于popen返回的是一个文件句柄,而本方法将外部程序的输出结果当作字符串返回。非常多情况下用起来要更方便些。
# 主要方法:
# commands.getstatusoutput(cmd) 返回(status, output)
# commands.getoutput(cmd) 仅仅返回输出结果
# commands.getstatus(file) 返回ls -ld file的运行结果字符串,调用了getoutput。不建议使用此方法
#
# (4) subprocess模块
# 使用subprocess模块能够创建新的进程。能够与新建进程的输入/输出/错误管道连通。并能够获得新建进程运行的返回状态。使用subprocess模块的目的是替代os.system()、os.popen*()、commands.*等旧的函数或模块。
# subprocess.call(["some_command","some_argument","another_argument_or_path"]) | random_line_split | ||
main.rs | 127.0.0.1:8080
> User-Agent: curl/7.64.1
> Accept: * / *
>
< HTTP/1.1 302 Found
< content-length: 51
< location: https://linkedin.com/in/tsauvajon
< date: Wed, 19 May 2021 17:36:49 GMT
<
* Connection #0 to host 127.0.0.1 left intact
redirecting to https://linkedin.com/in/tsauvajon...* Closing connection 0
```
*/
use actix_web::{error, get, post, web, App, HttpResponse, HttpServer, Responder};
use futures::StreamExt;
use std::collections::HashMap;
use std::sync::RwLock;
use url::Url;
const MAX_SIZE: usize = 1_024; // max payload size is 1k
const RANDOM_URL_SIZE: usize = 5; // ramdomly generated URLs are 5 characters long
type Db = web::Data<RwLock<HashMap<String, String>>>;
#[get("/{id}")]
async fn browse(db: web::Data<Db>, web::Path(id): web::Path<String>) -> impl Responder {
match db.read() {
Ok(db) => match db.get(&id) {
None => Err(error::ErrorNotFound("not found")),
Some(url) => Ok(HttpResponse::Found()
.header("Location", url.clone())
.body(format!("redirecting to {}...", url))),
},
Err(err) => {
println!("accessing the db: {}", err);
Err(error::ErrorInternalServerError(err.to_string()))
}
}
}
fn hash(input: &str) -> String {
blake3::hash(input.as_bytes()).to_hex()[..RANDOM_URL_SIZE].to_string()
}
async fn read_target(mut payload: web::Payload) -> Result<String, String> {
let mut body = web::BytesMut::new();
while let Some(chunk) = payload.next().await {
let chunk = chunk.or_else(|err| Err(err.to_string()))?;
// limit max size of in-memory payload
if (body.len() + chunk.len()) > MAX_SIZE {
return Err("overflow".to_string());
}
body.extend_from_slice(&chunk);
}
String::from_utf8(body[..].to_vec())
.or_else(|err| Err(format!("invalid request body: {}", err)))
}
fn create_short_url(
db: web::Data<Db>,
target: String,
id: Option<String>,
) -> Result<String, String> |
#[post("/{id}")]
async fn create_with_id(
db: web::Data<Db>,
payload: web::Payload,
web::Path(id): web::Path<String>,
) -> impl Responder {
let target = match read_target(payload).await {
Ok(target) => target,
Err(err) => return Err(error::ErrorBadRequest(err)),
};
create_short_url(db, target, Some(id)).or_else(|err| Err(error::ErrorBadRequest(err)))
}
#[post("/")]
async fn create_random(db: web::Data<Db>, payload: web::Payload) -> impl Responder {
let target = match read_target(payload).await {
Ok(target) => target,
Err(err) => return Err(error::ErrorBadRequest(err)),
};
create_short_url(db, target, None).or_else(|err| Err(error::ErrorBadRequest(err)))
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
HttpServer::new(move || {
App::new()
.data(db.clone())
.service(browse)
.service(create_random)
.service(create_with_id)
})
.bind("127.0.0.1:8080")?
.run()
.await
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_hash() {
assert_eq!("4cca4", hash("something"));
assert_eq!("284a1", hash("something else"));
}
#[test]
fn test_create_short_malformed_url() {
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let target = "this is not a valid URL".to_string();
let id = Some("hello".to_string());
assert_eq!(
Err("malformed URL: relative URL without a base".to_string()),
create_short_url(web::Data::new(db), target, id)
);
}
#[test]
fn test_create_short_url() {
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let target = "https://google.com".to_string();
let id = "hello".to_string();
create_short_url(web::Data::new(db.clone()), target.clone(), Some(id.clone())).unwrap();
let db = db.read().unwrap();
let got = db.get(&id).unwrap();
assert_eq!(&target, got);
}
#[test]
fn test_create_short_url_hashed_id() {
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let target = "https://google.com";
create_short_url(web::Data::new(db.clone()), target.to_string(), None).unwrap();
let id = hash(target);
let db = db.read().unwrap();
let got = db.get(&id).unwrap();
assert_eq!(&target, got);
}
#[test]
fn test_create_short_url_already_exists() {
let id = "hello".to_string();
let mut db: HashMap<String, String> = HashMap::new();
db.insert(id.clone(), "some existing value".to_string());
let db: Db = web::Data::new(RwLock::new(db));
let target = "https://google.com".to_string();
assert_eq!(
Err("already registered".to_string()),
create_short_url(web::Data::new(db), target, Some(id))
);
}
}
#[cfg(test)]
mod integration_tests {
use super::*;
use actix_web::{
body::Body,
http::{HeaderValue, StatusCode},
test,
};
// create a new custom shorturl
#[actix_rt::test]
async fn integration_test_create_custom_shortened_url() {
let req = test::TestRequest::post()
.uri("/hello")
.set_payload("https://hello.world")
.to_request();
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let mut app = test::init_service(App::new().data(db.clone()).service(create_with_id)).await;
let resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), StatusCode::OK);
let db = db.read().unwrap();
assert_eq!(db.get("hello"), Some(&"https://hello.world".to_string()));
assert_eq!(db.get("wwerwewrew"), None);
}
// create a new random shorturl
#[actix_rt::test]
async fn integration_test_create_random_shortened_url() {
let req = test::TestRequest::post()
.uri("/")
.set_payload("https://hello.world")
.to_request();
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let mut app = test::init_service(App::new().data(db.clone()).service(create_random)).await;
let resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), StatusCode::OK);
let db = db.read().unwrap();
assert_eq!(
db.get(&hash("https://hello.world")),
Some(&"https://hello.world".to_string())
);
assert_eq!(db.get("wwerwewrew"), None);
}
// follow an existing shorturl
#[actix_rt::test]
async fn integration_test_use_shortened_url() {
let req = test::TestRequest::get().uri("/hi").to_request();
let mut db: HashMap<String, String> = HashMap::new();
db.insert("hi".into(), "https://linkedin.com/in/tsauvajon".into());
let mut app = test::init_service(
App::new()
.data(web::Data::new(RwLock::new(db)))
.service(browse),
)
.await;
let mut resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), StatusCode::FOUND);
let body = resp.take_body();
let body = body.as_ref().unwrap();
assert_eq!(
&Body::from("redirecting to https://linkedin.com/in/tsauvaj | {
if let Err(err) = Url::parse(&target) {
return Err(format!("malformed URL: {}", err));
};
let id = match id {
Some(id) => id,
None => hash(&target),
};
let mut db = db.write().unwrap();
if db.contains_key(&id) {
Err("already registered".to_string())
} else {
db.insert(id.clone(), target.clone());
Ok(format!("/{} now redirects to {}", id, target))
}
} | identifier_body |
main.rs | Host: 127.0.0.1:8080
> User-Agent: curl/7.64.1
> Accept: * / *
>
< HTTP/1.1 302 Found
< content-length: 51
< location: https://linkedin.com/in/tsauvajon
< date: Wed, 19 May 2021 17:36:49 GMT
<
* Connection #0 to host 127.0.0.1 left intact | */
use actix_web::{error, get, post, web, App, HttpResponse, HttpServer, Responder};
use futures::StreamExt;
use std::collections::HashMap;
use std::sync::RwLock;
use url::Url;
const MAX_SIZE: usize = 1_024; // max payload size is 1k
const RANDOM_URL_SIZE: usize = 5; // ramdomly generated URLs are 5 characters long
type Db = web::Data<RwLock<HashMap<String, String>>>;
#[get("/{id}")]
async fn browse(db: web::Data<Db>, web::Path(id): web::Path<String>) -> impl Responder {
match db.read() {
Ok(db) => match db.get(&id) {
None => Err(error::ErrorNotFound("not found")),
Some(url) => Ok(HttpResponse::Found()
.header("Location", url.clone())
.body(format!("redirecting to {}...", url))),
},
Err(err) => {
println!("accessing the db: {}", err);
Err(error::ErrorInternalServerError(err.to_string()))
}
}
}
fn hash(input: &str) -> String {
blake3::hash(input.as_bytes()).to_hex()[..RANDOM_URL_SIZE].to_string()
}
async fn read_target(mut payload: web::Payload) -> Result<String, String> {
let mut body = web::BytesMut::new();
while let Some(chunk) = payload.next().await {
let chunk = chunk.or_else(|err| Err(err.to_string()))?;
// limit max size of in-memory payload
if (body.len() + chunk.len()) > MAX_SIZE {
return Err("overflow".to_string());
}
body.extend_from_slice(&chunk);
}
String::from_utf8(body[..].to_vec())
.or_else(|err| Err(format!("invalid request body: {}", err)))
}
fn create_short_url(
db: web::Data<Db>,
target: String,
id: Option<String>,
) -> Result<String, String> {
if let Err(err) = Url::parse(&target) {
return Err(format!("malformed URL: {}", err));
};
let id = match id {
Some(id) => id,
None => hash(&target),
};
let mut db = db.write().unwrap();
if db.contains_key(&id) {
Err("already registered".to_string())
} else {
db.insert(id.clone(), target.clone());
Ok(format!("/{} now redirects to {}", id, target))
}
}
#[post("/{id}")]
async fn create_with_id(
db: web::Data<Db>,
payload: web::Payload,
web::Path(id): web::Path<String>,
) -> impl Responder {
let target = match read_target(payload).await {
Ok(target) => target,
Err(err) => return Err(error::ErrorBadRequest(err)),
};
create_short_url(db, target, Some(id)).or_else(|err| Err(error::ErrorBadRequest(err)))
}
#[post("/")]
async fn create_random(db: web::Data<Db>, payload: web::Payload) -> impl Responder {
let target = match read_target(payload).await {
Ok(target) => target,
Err(err) => return Err(error::ErrorBadRequest(err)),
};
create_short_url(db, target, None).or_else(|err| Err(error::ErrorBadRequest(err)))
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
HttpServer::new(move || {
App::new()
.data(db.clone())
.service(browse)
.service(create_random)
.service(create_with_id)
})
.bind("127.0.0.1:8080")?
.run()
.await
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_hash() {
assert_eq!("4cca4", hash("something"));
assert_eq!("284a1", hash("something else"));
}
#[test]
fn test_create_short_malformed_url() {
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let target = "this is not a valid URL".to_string();
let id = Some("hello".to_string());
assert_eq!(
Err("malformed URL: relative URL without a base".to_string()),
create_short_url(web::Data::new(db), target, id)
);
}
#[test]
fn test_create_short_url() {
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let target = "https://google.com".to_string();
let id = "hello".to_string();
create_short_url(web::Data::new(db.clone()), target.clone(), Some(id.clone())).unwrap();
let db = db.read().unwrap();
let got = db.get(&id).unwrap();
assert_eq!(&target, got);
}
#[test]
fn test_create_short_url_hashed_id() {
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let target = "https://google.com";
create_short_url(web::Data::new(db.clone()), target.to_string(), None).unwrap();
let id = hash(target);
let db = db.read().unwrap();
let got = db.get(&id).unwrap();
assert_eq!(&target, got);
}
#[test]
fn test_create_short_url_already_exists() {
let id = "hello".to_string();
let mut db: HashMap<String, String> = HashMap::new();
db.insert(id.clone(), "some existing value".to_string());
let db: Db = web::Data::new(RwLock::new(db));
let target = "https://google.com".to_string();
assert_eq!(
Err("already registered".to_string()),
create_short_url(web::Data::new(db), target, Some(id))
);
}
}
#[cfg(test)]
mod integration_tests {
use super::*;
use actix_web::{
body::Body,
http::{HeaderValue, StatusCode},
test,
};
// create a new custom shorturl
#[actix_rt::test]
async fn integration_test_create_custom_shortened_url() {
let req = test::TestRequest::post()
.uri("/hello")
.set_payload("https://hello.world")
.to_request();
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let mut app = test::init_service(App::new().data(db.clone()).service(create_with_id)).await;
let resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), StatusCode::OK);
let db = db.read().unwrap();
assert_eq!(db.get("hello"), Some(&"https://hello.world".to_string()));
assert_eq!(db.get("wwerwewrew"), None);
}
// create a new random shorturl
#[actix_rt::test]
async fn integration_test_create_random_shortened_url() {
let req = test::TestRequest::post()
.uri("/")
.set_payload("https://hello.world")
.to_request();
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let mut app = test::init_service(App::new().data(db.clone()).service(create_random)).await;
let resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), StatusCode::OK);
let db = db.read().unwrap();
assert_eq!(
db.get(&hash("https://hello.world")),
Some(&"https://hello.world".to_string())
);
assert_eq!(db.get("wwerwewrew"), None);
}
// follow an existing shorturl
#[actix_rt::test]
async fn integration_test_use_shortened_url() {
let req = test::TestRequest::get().uri("/hi").to_request();
let mut db: HashMap<String, String> = HashMap::new();
db.insert("hi".into(), "https://linkedin.com/in/tsauvajon".into());
let mut app = test::init_service(
App::new()
.data(web::Data::new(RwLock::new(db)))
.service(browse),
)
.await;
let mut resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), StatusCode::FOUND);
let body = resp.take_body();
let body = body.as_ref().unwrap();
assert_eq!(
&Body::from("redirecting to https://linkedin.com/in/tsauvajon... | redirecting to https://linkedin.com/in/tsauvajon...* Closing connection 0
``` | random_line_split |
main.rs | : 127.0.0.1:8080
> User-Agent: curl/7.64.1
> Accept: * / *
>
< HTTP/1.1 302 Found
< content-length: 51
< location: https://linkedin.com/in/tsauvajon
< date: Wed, 19 May 2021 17:36:49 GMT
<
* Connection #0 to host 127.0.0.1 left intact
redirecting to https://linkedin.com/in/tsauvajon...* Closing connection 0
```
*/
use actix_web::{error, get, post, web, App, HttpResponse, HttpServer, Responder};
use futures::StreamExt;
use std::collections::HashMap;
use std::sync::RwLock;
use url::Url;
const MAX_SIZE: usize = 1_024; // max payload size is 1k
const RANDOM_URL_SIZE: usize = 5; // ramdomly generated URLs are 5 characters long
type Db = web::Data<RwLock<HashMap<String, String>>>;
#[get("/{id}")]
async fn | (db: web::Data<Db>, web::Path(id): web::Path<String>) -> impl Responder {
match db.read() {
Ok(db) => match db.get(&id) {
None => Err(error::ErrorNotFound("not found")),
Some(url) => Ok(HttpResponse::Found()
.header("Location", url.clone())
.body(format!("redirecting to {}...", url))),
},
Err(err) => {
println!("accessing the db: {}", err);
Err(error::ErrorInternalServerError(err.to_string()))
}
}
}
fn hash(input: &str) -> String {
blake3::hash(input.as_bytes()).to_hex()[..RANDOM_URL_SIZE].to_string()
}
async fn read_target(mut payload: web::Payload) -> Result<String, String> {
let mut body = web::BytesMut::new();
while let Some(chunk) = payload.next().await {
let chunk = chunk.or_else(|err| Err(err.to_string()))?;
// limit max size of in-memory payload
if (body.len() + chunk.len()) > MAX_SIZE {
return Err("overflow".to_string());
}
body.extend_from_slice(&chunk);
}
String::from_utf8(body[..].to_vec())
.or_else(|err| Err(format!("invalid request body: {}", err)))
}
fn create_short_url(
db: web::Data<Db>,
target: String,
id: Option<String>,
) -> Result<String, String> {
if let Err(err) = Url::parse(&target) {
return Err(format!("malformed URL: {}", err));
};
let id = match id {
Some(id) => id,
None => hash(&target),
};
let mut db = db.write().unwrap();
if db.contains_key(&id) {
Err("already registered".to_string())
} else {
db.insert(id.clone(), target.clone());
Ok(format!("/{} now redirects to {}", id, target))
}
}
#[post("/{id}")]
async fn create_with_id(
db: web::Data<Db>,
payload: web::Payload,
web::Path(id): web::Path<String>,
) -> impl Responder {
let target = match read_target(payload).await {
Ok(target) => target,
Err(err) => return Err(error::ErrorBadRequest(err)),
};
create_short_url(db, target, Some(id)).or_else(|err| Err(error::ErrorBadRequest(err)))
}
#[post("/")]
async fn create_random(db: web::Data<Db>, payload: web::Payload) -> impl Responder {
let target = match read_target(payload).await {
Ok(target) => target,
Err(err) => return Err(error::ErrorBadRequest(err)),
};
create_short_url(db, target, None).or_else(|err| Err(error::ErrorBadRequest(err)))
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
HttpServer::new(move || {
App::new()
.data(db.clone())
.service(browse)
.service(create_random)
.service(create_with_id)
})
.bind("127.0.0.1:8080")?
.run()
.await
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_hash() {
assert_eq!("4cca4", hash("something"));
assert_eq!("284a1", hash("something else"));
}
#[test]
fn test_create_short_malformed_url() {
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let target = "this is not a valid URL".to_string();
let id = Some("hello".to_string());
assert_eq!(
Err("malformed URL: relative URL without a base".to_string()),
create_short_url(web::Data::new(db), target, id)
);
}
#[test]
fn test_create_short_url() {
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let target = "https://google.com".to_string();
let id = "hello".to_string();
create_short_url(web::Data::new(db.clone()), target.clone(), Some(id.clone())).unwrap();
let db = db.read().unwrap();
let got = db.get(&id).unwrap();
assert_eq!(&target, got);
}
#[test]
fn test_create_short_url_hashed_id() {
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let target = "https://google.com";
create_short_url(web::Data::new(db.clone()), target.to_string(), None).unwrap();
let id = hash(target);
let db = db.read().unwrap();
let got = db.get(&id).unwrap();
assert_eq!(&target, got);
}
#[test]
fn test_create_short_url_already_exists() {
let id = "hello".to_string();
let mut db: HashMap<String, String> = HashMap::new();
db.insert(id.clone(), "some existing value".to_string());
let db: Db = web::Data::new(RwLock::new(db));
let target = "https://google.com".to_string();
assert_eq!(
Err("already registered".to_string()),
create_short_url(web::Data::new(db), target, Some(id))
);
}
}
#[cfg(test)]
mod integration_tests {
use super::*;
use actix_web::{
body::Body,
http::{HeaderValue, StatusCode},
test,
};
// create a new custom shorturl
#[actix_rt::test]
async fn integration_test_create_custom_shortened_url() {
let req = test::TestRequest::post()
.uri("/hello")
.set_payload("https://hello.world")
.to_request();
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let mut app = test::init_service(App::new().data(db.clone()).service(create_with_id)).await;
let resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), StatusCode::OK);
let db = db.read().unwrap();
assert_eq!(db.get("hello"), Some(&"https://hello.world".to_string()));
assert_eq!(db.get("wwerwewrew"), None);
}
// create a new random shorturl
#[actix_rt::test]
async fn integration_test_create_random_shortened_url() {
let req = test::TestRequest::post()
.uri("/")
.set_payload("https://hello.world")
.to_request();
let db: Db = web::Data::new(RwLock::new(HashMap::new()));
let mut app = test::init_service(App::new().data(db.clone()).service(create_random)).await;
let resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), StatusCode::OK);
let db = db.read().unwrap();
assert_eq!(
db.get(&hash("https://hello.world")),
Some(&"https://hello.world".to_string())
);
assert_eq!(db.get("wwerwewrew"), None);
}
// follow an existing shorturl
#[actix_rt::test]
async fn integration_test_use_shortened_url() {
let req = test::TestRequest::get().uri("/hi").to_request();
let mut db: HashMap<String, String> = HashMap::new();
db.insert("hi".into(), "https://linkedin.com/in/tsauvajon".into());
let mut app = test::init_service(
App::new()
.data(web::Data::new(RwLock::new(db)))
.service(browse),
)
.await;
let mut resp = test::call_service(&mut app, req).await;
assert_eq!(resp.status(), StatusCode::FOUND);
let body = resp.take_body();
let body = body.as_ref().unwrap();
assert_eq!(
&Body::from("redirecting to https://linkedin.com/in/tsauvaj | browse | identifier_name |
gateway.go | ListenerProtocol: listenerType,
Env: env,
Node: node,
ProxyInstances: workloadInstances,
Push: push,
ServiceInstance: si,
Port: &model.Port{
Name: servers[0].Port.Name,
Port: int(portNumber),
Protocol: protocol,
},
}
if err = p.OnOutboundListener(params, mutable); err != nil {
log.Warna("buildGatewayListeners: failed to build listener for gateway: ", err.Error())
}
}
// Filters are serialized one time into an opaque struct once we have the complete list.
if err = marshalFilters(mutable.Listener, opts, mutable.FilterChains); err != nil {
errs = multierror.Append(errs, fmt.Errorf("gateway omitting listener %q due to: %v", mutable.Listener.Name, err.Error()))
continue
}
if err = mutable.Listener.Validate(); err != nil {
errs = multierror.Append(errs, fmt.Errorf("gateway listener %s validation failed: %v", mutable.Listener.Name, err.Error()))
continue
}
if log.DebugEnabled() {
log.Debugf("buildGatewayListeners: constructed listener with %d filter chains:\n%v",
len(mutable.Listener.FilterChains), mutable.Listener) | listeners = append(listeners, mutable.Listener)
}
// We'll try to return any listeners we successfully marshaled; if we have none, we'll emit the error we built up
err = errs.ErrorOrNil()
if err != nil {
// we have some listeners to return, but we also have some errors; log them
log.Info(err.Error())
}
if len(listeners) == 0 {
log.Error("buildGatewayListeners: Have zero listeners")
return []*xdsapi.Listener{}, nil
}
validatedListeners := make([]*xdsapi.Listener, 0, len(mergedGateway.Servers))
for _, l := range listeners {
if err := l.Validate(); err != nil {
log.Warnf("buildGatewayListeners: error validating listener %s: %v.. Skipping.", l.Name, err)
continue
}
validatedListeners = append(validatedListeners, l)
}
return validatedListeners, nil
}
func (configgen *ConfigGeneratorImpl) buildGatewayHTTPRouteConfig(env *model.Environment, node *model.Proxy, push *model.PushContext,
proxyInstances []*model.ServiceInstance, services []*model.Service, routeName string) (*xdsapi.RouteConfiguration, error) {
// collect workload labels
var workloadLabels model.LabelsCollection
for _, w := range proxyInstances {
workloadLabels = append(workloadLabels, w.Labels)
}
gateways := env.Gateways(workloadLabels)
if len(gateways) == 0 {
log.Debuga("buildGatewayRoutes: no gateways for router", node.ID)
return nil, nil
}
merged := model.MergeGateways(gateways...)
log.Debugf("buildGatewayRoutes: gateways after merging: %v", merged)
// make sure that there is some server listening on this port
if _, ok := merged.RDSRouteConfigNames[routeName]; !ok {
log.Errorf("buildGatewayRoutes: could not find server for routeName %s, have %v", routeName, merged.RDSRouteConfigNames)
return nil, fmt.Errorf("buildGatewayRoutes: could not find server for routeName %s, have %v", routeName, merged.RDSRouteConfigNames)
}
servers := merged.RDSRouteConfigNames[routeName]
nameToServiceMap := make(map[model.Hostname]*model.Service, len(services))
for _, svc := range services {
nameToServiceMap[svc.Hostname] = svc
}
gatewayHosts := make(map[model.Hostname]bool)
tlsRedirect := make(map[model.Hostname]bool)
for _, server := range servers {
for _, host := range server.Hosts {
gatewayHosts[model.Hostname(host)] = true
if server.Tls != nil && server.Tls.HttpsRedirect {
tlsRedirect[model.Hostname(host)] = true
}
}
}
port := int(servers[0].Port.Number)
// NOTE: WE DO NOT SUPPORT two gateways on same workload binding to same virtual service
virtualServices := push.VirtualServices(merged.Names)
vHostDedupMap := make(map[string]*route.VirtualHost)
for _, v := range virtualServices {
vs := v.Spec.(*networking.VirtualService)
matchingHosts := pickMatchingGatewayHosts(gatewayHosts, vs.Hosts)
if len(matchingHosts) == 0 {
log.Debugf("%s omitting virtual service %q because its hosts don't match gateways %v server %d", node.ID, v.Name, gateways, port)
continue
}
routes, err := istio_route.BuildHTTPRoutesForVirtualService(node, push, v, nameToServiceMap, port, nil, merged.Names)
if err != nil {
log.Debugf("%s omitting routes for service %v due to error: %v", node.ID, v, err)
continue
}
for vsvcHost, gatewayHost := range matchingHosts {
if currentVhost, exists := vHostDedupMap[vsvcHost]; exists {
currentVhost.Routes = istio_route.CombineVHostRoutes(currentVhost.Routes, routes)
} else {
newVhost := &route.VirtualHost{
Name: fmt.Sprintf("%s:%d", vsvcHost, port),
Domains: []string{vsvcHost, fmt.Sprintf("%s:%d", vsvcHost, port)},
Routes: routes,
}
if tlsRedirect[gatewayHost] {
newVhost.RequireTls = route.VirtualHost_ALL
}
vHostDedupMap[vsvcHost] = newVhost
}
}
}
virtualHosts := make([]route.VirtualHost, 0, len(virtualServices))
if len(vHostDedupMap) == 0 {
log.Warnf("constructed http route config for port %d with no vhosts; Setting up a default 404 vhost", port)
virtualHosts = append(virtualHosts, route.VirtualHost{
Name: fmt.Sprintf("blackhole:%d", port),
Domains: []string{"*"},
Routes: []route.Route{
{
Match: route.RouteMatch{
PathSpecifier: &route.RouteMatch_Prefix{Prefix: "/"},
},
Action: &route.Route_DirectResponse{
DirectResponse: &route.DirectResponseAction{
Status: 404,
},
},
},
},
})
} else {
for _, v := range vHostDedupMap {
virtualHosts = append(virtualHosts, *v)
}
}
util.SortVirtualHosts(virtualHosts)
routeCfg := &xdsapi.RouteConfiguration{
Name: routeName,
VirtualHosts: virtualHosts,
ValidateClusters: boolFalse,
}
// call plugins
for _, p := range configgen.Plugins {
in := &plugin.InputParams{
ListenerProtocol: plugin.ListenerProtocolHTTP,
Env: env,
Node: node,
Push: push,
}
p.OnOutboundRouteConfiguration(in, routeCfg)
}
return routeCfg, nil
}
// to process HTTP and HTTPS servers along with virtualService.HTTP rules
func (configgen *ConfigGeneratorImpl) createGatewayHTTPFilterChainOpts(
node *model.Proxy, env *model.Environment, push *model.PushContext, servers []*networking.Server, gatewaysForWorkload map[string]bool) []*filterChainOpts {
httpListeners := make([]*filterChainOpts, 0, len(servers))
// Are we processing plaintext servers or HTTPS servers?
// If plain text, we have to combine all servers into a single listener
if model.ParseProtocol(servers[0].Port.Protocol).IsHTTP() {
rdsName := model.GatewayRDSRouteName(servers[0])
o := &filterChainOpts{
// This works because we validate that only HTTPS servers can have same port but still different port names
// and that no two non-HTTPS servers can be on same port or share port names.
// Validation is done per gateway and also during merging
sniHosts: nil,
tlsContext: nil,
httpOpts: &httpListenerOpts{
rds: rdsName,
useRemoteAddress: true,
direction: http_conn.EGRESS, // viewed as from gateway to internal
connectionManager: &http_conn.HttpConnectionManager{
// Forward client cert if connection is mTLS
ForwardClientCertDetails: http_conn.SANITIZE_SET,
SetCurrentClientCertDetails: &http_conn.HttpConnectionManager_SetCurrentClientCertDetails{
Subject: &types.BoolValue{Value: true},
Uri: true,
Dns: true,
},
},
},
}
httpListeners = append(httpListeners, o)
} else {
// Build a filter chain for each HTTPS server
// We know that this is a HTTPS server because this function is called only | } | random_line_split |
gateway.go | Protocol: listenerType,
Env: env,
Node: node,
ProxyInstances: workloadInstances,
Push: push,
ServiceInstance: si,
Port: &model.Port{
Name: servers[0].Port.Name,
Port: int(portNumber),
Protocol: protocol,
},
}
if err = p.OnOutboundListener(params, mutable); err != nil {
log.Warna("buildGatewayListeners: failed to build listener for gateway: ", err.Error())
}
}
// Filters are serialized one time into an opaque struct once we have the complete list.
if err = marshalFilters(mutable.Listener, opts, mutable.FilterChains); err != nil {
errs = multierror.Append(errs, fmt.Errorf("gateway omitting listener %q due to: %v", mutable.Listener.Name, err.Error()))
continue
}
if err = mutable.Listener.Validate(); err != nil {
errs = multierror.Append(errs, fmt.Errorf("gateway listener %s validation failed: %v", mutable.Listener.Name, err.Error()))
continue
}
if log.DebugEnabled() {
log.Debugf("buildGatewayListeners: constructed listener with %d filter chains:\n%v",
len(mutable.Listener.FilterChains), mutable.Listener)
}
listeners = append(listeners, mutable.Listener)
}
// We'll try to return any listeners we successfully marshaled; if we have none, we'll emit the error we built up
err = errs.ErrorOrNil()
if err != nil {
// we have some listeners to return, but we also have some errors; log them
log.Info(err.Error())
}
if len(listeners) == 0 {
log.Error("buildGatewayListeners: Have zero listeners")
return []*xdsapi.Listener{}, nil
}
validatedListeners := make([]*xdsapi.Listener, 0, len(mergedGateway.Servers))
for _, l := range listeners {
if err := l.Validate(); err != nil {
log.Warnf("buildGatewayListeners: error validating listener %s: %v.. Skipping.", l.Name, err)
continue
}
validatedListeners = append(validatedListeners, l)
}
return validatedListeners, nil
}
func (configgen *ConfigGeneratorImpl) buildGatewayHTTPRouteConfig(env *model.Environment, node *model.Proxy, push *model.PushContext,
proxyInstances []*model.ServiceInstance, services []*model.Service, routeName string) (*xdsapi.RouteConfiguration, error) {
// collect workload labels
var workloadLabels model.LabelsCollection
for _, w := range proxyInstances {
workloadLabels = append(workloadLabels, w.Labels)
}
gateways := env.Gateways(workloadLabels)
if len(gateways) == 0 {
log.Debuga("buildGatewayRoutes: no gateways for router", node.ID)
return nil, nil
}
merged := model.MergeGateways(gateways...)
log.Debugf("buildGatewayRoutes: gateways after merging: %v", merged)
// make sure that there is some server listening on this port
if _, ok := merged.RDSRouteConfigNames[routeName]; !ok {
log.Errorf("buildGatewayRoutes: could not find server for routeName %s, have %v", routeName, merged.RDSRouteConfigNames)
return nil, fmt.Errorf("buildGatewayRoutes: could not find server for routeName %s, have %v", routeName, merged.RDSRouteConfigNames)
}
servers := merged.RDSRouteConfigNames[routeName]
nameToServiceMap := make(map[model.Hostname]*model.Service, len(services))
for _, svc := range services {
nameToServiceMap[svc.Hostname] = svc
}
gatewayHosts := make(map[model.Hostname]bool)
tlsRedirect := make(map[model.Hostname]bool)
for _, server := range servers {
for _, host := range server.Hosts {
gatewayHosts[model.Hostname(host)] = true
if server.Tls != nil && server.Tls.HttpsRedirect {
tlsRedirect[model.Hostname(host)] = true
}
}
}
port := int(servers[0].Port.Number)
// NOTE: WE DO NOT SUPPORT two gateways on same workload binding to same virtual service
virtualServices := push.VirtualServices(merged.Names)
vHostDedupMap := make(map[string]*route.VirtualHost)
for _, v := range virtualServices {
vs := v.Spec.(*networking.VirtualService)
matchingHosts := pickMatchingGatewayHosts(gatewayHosts, vs.Hosts)
if len(matchingHosts) == 0 {
log.Debugf("%s omitting virtual service %q because its hosts don't match gateways %v server %d", node.ID, v.Name, gateways, port)
continue
}
routes, err := istio_route.BuildHTTPRoutesForVirtualService(node, push, v, nameToServiceMap, port, nil, merged.Names)
if err != nil {
log.Debugf("%s omitting routes for service %v due to error: %v", node.ID, v, err)
continue
}
for vsvcHost, gatewayHost := range matchingHosts {
if currentVhost, exists := vHostDedupMap[vsvcHost]; exists {
currentVhost.Routes = istio_route.CombineVHostRoutes(currentVhost.Routes, routes)
} else {
newVhost := &route.VirtualHost{
Name: fmt.Sprintf("%s:%d", vsvcHost, port),
Domains: []string{vsvcHost, fmt.Sprintf("%s:%d", vsvcHost, port)},
Routes: routes,
}
if tlsRedirect[gatewayHost] {
newVhost.RequireTls = route.VirtualHost_ALL
}
vHostDedupMap[vsvcHost] = newVhost
}
}
}
virtualHosts := make([]route.VirtualHost, 0, len(virtualServices))
if len(vHostDedupMap) == 0 {
log.Warnf("constructed http route config for port %d with no vhosts; Setting up a default 404 vhost", port)
virtualHosts = append(virtualHosts, route.VirtualHost{
Name: fmt.Sprintf("blackhole:%d", port),
Domains: []string{"*"},
Routes: []route.Route{
{
Match: route.RouteMatch{
PathSpecifier: &route.RouteMatch_Prefix{Prefix: "/"},
},
Action: &route.Route_DirectResponse{
DirectResponse: &route.DirectResponseAction{
Status: 404,
},
},
},
},
})
} else {
for _, v := range vHostDedupMap {
virtualHosts = append(virtualHosts, *v)
}
}
util.SortVirtualHosts(virtualHosts)
routeCfg := &xdsapi.RouteConfiguration{
Name: routeName,
VirtualHosts: virtualHosts,
ValidateClusters: boolFalse,
}
// call plugins
for _, p := range configgen.Plugins |
return routeCfg, nil
}
// to process HTTP and HTTPS servers along with virtualService.HTTP rules
func (configgen *ConfigGeneratorImpl) createGatewayHTTPFilterChainOpts(
node *model.Proxy, env *model.Environment, push *model.PushContext, servers []*networking.Server, gatewaysForWorkload map[string]bool) []*filterChainOpts {
httpListeners := make([]*filterChainOpts, 0, len(servers))
// Are we processing plaintext servers or HTTPS servers?
// If plain text, we have to combine all servers into a single listener
if model.ParseProtocol(servers[0].Port.Protocol).IsHTTP() {
rdsName := model.GatewayRDSRouteName(servers[0])
o := &filterChainOpts{
// This works because we validate that only HTTPS servers can have same port but still different port names
// and that no two non-HTTPS servers can be on same port or share port names.
// Validation is done per gateway and also during merging
sniHosts: nil,
tlsContext: nil,
httpOpts: &httpListenerOpts{
rds: rdsName,
useRemoteAddress: true,
direction: http_conn.EGRESS, // viewed as from gateway to internal
connectionManager: &http_conn.HttpConnectionManager{
// Forward client cert if connection is mTLS
ForwardClientCertDetails: http_conn.SANITIZE_SET,
SetCurrentClientCertDetails: &http_conn.HttpConnectionManager_SetCurrentClientCertDetails{
Subject: &types.BoolValue{Value: true},
Uri: true,
Dns: true,
},
},
},
}
httpListeners = append(httpListeners, o)
} else {
// Build a filter chain for each HTTPS server
// We know that this is a HTTPS server because this function is called | {
in := &plugin.InputParams{
ListenerProtocol: plugin.ListenerProtocolHTTP,
Env: env,
Node: node,
Push: push,
}
p.OnOutboundRouteConfiguration(in, routeCfg)
} | conditional_block |
gateway.go | if len(server.Tls.CaCertificates) != 0 {
trustedCa = &core.DataSource{
Specifier: &core.DataSource_Filename{
Filename: server.Tls.CaCertificates,
},
}
}
if trustedCa != nil || len(server.Tls.SubjectAltNames) > 0 {
certValidationContext = &auth.CertificateValidationContext{
TrustedCa: trustedCa,
VerifySubjectAltName: server.Tls.SubjectAltNames,
}
}
requireClientCert := server.Tls.Mode == networking.Server_TLSOptions_MUTUAL
// Set TLS parameters if they are non-default
var tlsParams *auth.TlsParameters
if len(server.Tls.CipherSuites) > 0 ||
server.Tls.MinProtocolVersion != networking.Server_TLSOptions_TLS_AUTO ||
server.Tls.MaxProtocolVersion != networking.Server_TLSOptions_TLS_AUTO {
tlsParams = &auth.TlsParameters{
TlsMinimumProtocolVersion: convertTLSProtocol(server.Tls.MinProtocolVersion),
TlsMaximumProtocolVersion: convertTLSProtocol(server.Tls.MaxProtocolVersion),
CipherSuites: server.Tls.CipherSuites,
}
}
return &auth.DownstreamTlsContext{
CommonTlsContext: &auth.CommonTlsContext{
TlsCertificates: []*auth.TlsCertificate{
{
CertificateChain: &core.DataSource{
Specifier: &core.DataSource_Filename{
Filename: server.Tls.ServerCertificate,
},
},
PrivateKey: &core.DataSource{
Specifier: &core.DataSource_Filename{
Filename: server.Tls.PrivateKey,
},
},
},
},
ValidationContextType: &auth.CommonTlsContext_ValidationContext{
ValidationContext: certValidationContext,
},
AlpnProtocols: ListenersALPNProtocols,
TlsParams: tlsParams,
},
RequireClientCertificate: &types.BoolValue{
Value: requireClientCert,
},
}
}
func convertTLSProtocol(in networking.Server_TLSOptions_TLSProtocol) auth.TlsParameters_TlsProtocol {
out := auth.TlsParameters_TlsProtocol(in) // There should be a one-to-one enum mapping
if out < auth.TlsParameters_TLS_AUTO || out > auth.TlsParameters_TLSv1_3 {
log.Warnf("was not able to map TLS protocol to Envoy TLS protocol")
return auth.TlsParameters_TLS_AUTO
}
return out
}
func (configgen *ConfigGeneratorImpl) createGatewayTCPFilterChainOpts(
node *model.Proxy, env *model.Environment, push *model.PushContext, servers []*networking.Server,
gatewaysForWorkload map[string]bool) []*filterChainOpts {
opts := make([]*filterChainOpts, 0, len(servers))
for _, server := range servers {
// We have a TCP/TLS server. This could be TLS termination (user specifies server.TLS with simple/mutual)
// or opaque TCP (server.TLS is nil). or it could be a TLS passthrough with SNI based routing.
// Handle the TLS termination or opaque TCP first.
// This is opaque TCP server. Find matching virtual services with TCP blocks and forward
if server.Tls == nil {
if filters := buildGatewayNetworkFiltersFromTCPRoutes(node, env,
push, server, gatewaysForWorkload); len(filters) > 0 {
opts = append(opts, &filterChainOpts{
sniHosts: nil,
tlsContext: nil,
networkFilters: filters,
})
}
} else if server.Tls.Mode != networking.Server_TLSOptions_PASSTHROUGH {
// TCP with TLS termination and forwarding. Setup TLS context to terminate, find matching services with TCP blocks
// and forward to backend
// Validation ensures that non-passthrough servers will have certs
if filters := buildGatewayNetworkFiltersFromTCPRoutes(node, env,
push, server, gatewaysForWorkload); len(filters) > 0 {
opts = append(opts, &filterChainOpts{
sniHosts: getSNIHostsForServer(server),
tlsContext: buildGatewayListenerTLSContext(server),
networkFilters: filters,
})
}
} else {
// Passthrough server.
opts = append(opts, buildGatewayNetworkFiltersFromTLSRoutes(node, env, push, server, gatewaysForWorkload)...)
}
}
return opts
}
// buildGatewayNetworkFiltersFromTCPRoutes builds tcp proxy routes for all VirtualServices with TCP blocks.
// It first obtains all virtual services bound to the set of Gateways for this workload, filters them by this
// server's port and hostnames, and produces network filters for each destination from the filtered services.
func buildGatewayNetworkFiltersFromTCPRoutes(node *model.Proxy, env *model.Environment, push *model.PushContext, server *networking.Server,
gatewaysForWorkload map[string]bool) []listener.Filter {
port := &model.Port{
Name: server.Port.Name,
Port: int(server.Port.Number),
Protocol: model.ParseProtocol(server.Port.Protocol),
}
gatewayServerHosts := make(map[model.Hostname]bool, len(server.Hosts))
for _, host := range server.Hosts {
gatewayServerHosts[model.Hostname(host)] = true
}
virtualServices := push.VirtualServices(gatewaysForWorkload)
for _, spec := range virtualServices {
vsvc := spec.Spec.(*networking.VirtualService)
matchingHosts := pickMatchingGatewayHosts(gatewayServerHosts, vsvc.Hosts)
if len(matchingHosts) == 0 {
// the VirtualService's hosts don't include hosts advertised by server
continue
}
// ensure we satisfy the rule's l4 match conditions, if any exist
// For the moment, there can be only one match that succeeds
// based on the match port/server port and the gateway name
for _, tcp := range vsvc.Tcp {
if l4MultiMatch(tcp.Match, server, gatewaysForWorkload) {
return buildOutboundNetworkFilters(env, node, tcp.Route, push, port, spec.ConfigMeta)
}
}
}
return nil
}
// buildGatewayNetworkFiltersFromTLSRoutes builds tcp proxy routes for all VirtualServices with TLS blocks.
// It first obtains all virtual services bound to the set of Gateways for this workload, filters them by this
// server's port and hostnames, and produces network filters for each destination from the filtered services
func buildGatewayNetworkFiltersFromTLSRoutes(node *model.Proxy, env *model.Environment, push *model.PushContext, server *networking.Server,
gatewaysForWorkload map[string]bool) []*filterChainOpts {
port := &model.Port{
Name: server.Port.Name,
Port: int(server.Port.Number),
Protocol: model.ParseProtocol(server.Port.Protocol),
}
gatewayServerHosts := make(map[model.Hostname]bool, len(server.Hosts))
for _, host := range server.Hosts {
gatewayServerHosts[model.Hostname(host)] = true
}
virtualServices := push.VirtualServices(gatewaysForWorkload)
filterChains := make([]*filterChainOpts, 0)
for _, spec := range virtualServices {
vsvc := spec.Spec.(*networking.VirtualService)
matchingHosts := pickMatchingGatewayHosts(gatewayServerHosts, vsvc.Hosts)
if len(matchingHosts) == 0 {
// the VirtualService's hosts don't include hosts advertised by server
continue
}
// For every matching TLS block, generate a filter chain with sni match
for _, tls := range vsvc.Tls {
for _, match := range tls.Match {
if l4SingleMatch(convertTLSMatchToL4Match(match), server, gatewaysForWorkload) {
// the sni hosts in the match will become part of a filter chain match
filterChains = append(filterChains, &filterChainOpts{
sniHosts: match.SniHosts,
tlsContext: nil, // NO TLS context because this is passthrough
networkFilters: buildOutboundNetworkFilters(env, node, tls.Route, push, port, spec.ConfigMeta),
})
}
}
}
}
return filterChains
}
func pickMatchingGatewayHosts(gatewayServerHosts map[model.Hostname]bool, virtualServiceHosts []string) map[string]model.Hostname {
matchingHosts := make(map[string]model.Hostname, 0)
for _, vsvcHost := range virtualServiceHosts {
for gatewayHost := range gatewayServerHosts {
if gatewayHost.Matches(model.Hostname(vsvcHost)) {
matchingHosts[vsvcHost] = gatewayHost
}
}
}
return matchingHosts
}
func convertTLSMatchToL4Match(tlsMatch *networking.TLSMatchAttributes) *networking.L4MatchAttributes | {
return &networking.L4MatchAttributes{
DestinationSubnets: tlsMatch.DestinationSubnets,
Port: tlsMatch.Port,
SourceSubnet: tlsMatch.SourceSubnet,
SourceLabels: tlsMatch.SourceLabels,
Gateways: tlsMatch.Gateways,
}
} | identifier_body | |
gateway.go | name]bool)
for _, server := range servers {
for _, host := range server.Hosts {
gatewayHosts[model.Hostname(host)] = true
if server.Tls != nil && server.Tls.HttpsRedirect {
tlsRedirect[model.Hostname(host)] = true
}
}
}
port := int(servers[0].Port.Number)
// NOTE: WE DO NOT SUPPORT two gateways on same workload binding to same virtual service
virtualServices := push.VirtualServices(merged.Names)
vHostDedupMap := make(map[string]*route.VirtualHost)
for _, v := range virtualServices {
vs := v.Spec.(*networking.VirtualService)
matchingHosts := pickMatchingGatewayHosts(gatewayHosts, vs.Hosts)
if len(matchingHosts) == 0 {
log.Debugf("%s omitting virtual service %q because its hosts don't match gateways %v server %d", node.ID, v.Name, gateways, port)
continue
}
routes, err := istio_route.BuildHTTPRoutesForVirtualService(node, push, v, nameToServiceMap, port, nil, merged.Names)
if err != nil {
log.Debugf("%s omitting routes for service %v due to error: %v", node.ID, v, err)
continue
}
for vsvcHost, gatewayHost := range matchingHosts {
if currentVhost, exists := vHostDedupMap[vsvcHost]; exists {
currentVhost.Routes = istio_route.CombineVHostRoutes(currentVhost.Routes, routes)
} else {
newVhost := &route.VirtualHost{
Name: fmt.Sprintf("%s:%d", vsvcHost, port),
Domains: []string{vsvcHost, fmt.Sprintf("%s:%d", vsvcHost, port)},
Routes: routes,
}
if tlsRedirect[gatewayHost] {
newVhost.RequireTls = route.VirtualHost_ALL
}
vHostDedupMap[vsvcHost] = newVhost
}
}
}
virtualHosts := make([]route.VirtualHost, 0, len(virtualServices))
if len(vHostDedupMap) == 0 {
log.Warnf("constructed http route config for port %d with no vhosts; Setting up a default 404 vhost", port)
virtualHosts = append(virtualHosts, route.VirtualHost{
Name: fmt.Sprintf("blackhole:%d", port),
Domains: []string{"*"},
Routes: []route.Route{
{
Match: route.RouteMatch{
PathSpecifier: &route.RouteMatch_Prefix{Prefix: "/"},
},
Action: &route.Route_DirectResponse{
DirectResponse: &route.DirectResponseAction{
Status: 404,
},
},
},
},
})
} else {
for _, v := range vHostDedupMap {
virtualHosts = append(virtualHosts, *v)
}
}
util.SortVirtualHosts(virtualHosts)
routeCfg := &xdsapi.RouteConfiguration{
Name: routeName,
VirtualHosts: virtualHosts,
ValidateClusters: boolFalse,
}
// call plugins
for _, p := range configgen.Plugins {
in := &plugin.InputParams{
ListenerProtocol: plugin.ListenerProtocolHTTP,
Env: env,
Node: node,
Push: push,
}
p.OnOutboundRouteConfiguration(in, routeCfg)
}
return routeCfg, nil
}
// to process HTTP and HTTPS servers along with virtualService.HTTP rules
func (configgen *ConfigGeneratorImpl) createGatewayHTTPFilterChainOpts(
node *model.Proxy, env *model.Environment, push *model.PushContext, servers []*networking.Server, gatewaysForWorkload map[string]bool) []*filterChainOpts {
httpListeners := make([]*filterChainOpts, 0, len(servers))
// Are we processing plaintext servers or HTTPS servers?
// If plain text, we have to combine all servers into a single listener
if model.ParseProtocol(servers[0].Port.Protocol).IsHTTP() {
rdsName := model.GatewayRDSRouteName(servers[0])
o := &filterChainOpts{
// This works because we validate that only HTTPS servers can have same port but still different port names
// and that no two non-HTTPS servers can be on same port or share port names.
// Validation is done per gateway and also during merging
sniHosts: nil,
tlsContext: nil,
httpOpts: &httpListenerOpts{
rds: rdsName,
useRemoteAddress: true,
direction: http_conn.EGRESS, // viewed as from gateway to internal
connectionManager: &http_conn.HttpConnectionManager{
// Forward client cert if connection is mTLS
ForwardClientCertDetails: http_conn.SANITIZE_SET,
SetCurrentClientCertDetails: &http_conn.HttpConnectionManager_SetCurrentClientCertDetails{
Subject: &types.BoolValue{Value: true},
Uri: true,
Dns: true,
},
},
},
}
httpListeners = append(httpListeners, o)
} else {
// Build a filter chain for each HTTPS server
// We know that this is a HTTPS server because this function is called only for ports of type HTTP/HTTPS
// where HTTPS server's TLS mode is not passthrough and not nil
for _, server := range servers {
o := &filterChainOpts{
// This works because we validate that only HTTPS servers can have same port but still different port names
// and that no two non-HTTPS servers can be on same port or share port names.
// Validation is done per gateway and also during merging
sniHosts: getSNIHostsForServer(server),
tlsContext: buildGatewayListenerTLSContext(server),
httpOpts: &httpListenerOpts{
rds: model.GatewayRDSRouteName(server),
useRemoteAddress: true,
direction: http_conn.EGRESS, // viewed as from gateway to internal
connectionManager: &http_conn.HttpConnectionManager{
// Forward client cert if connection is mTLS
ForwardClientCertDetails: http_conn.SANITIZE_SET,
SetCurrentClientCertDetails: &http_conn.HttpConnectionManager_SetCurrentClientCertDetails{
Subject: &types.BoolValue{Value: true},
Uri: true,
Dns: true,
},
},
},
}
httpListeners = append(httpListeners, o)
}
}
return httpListeners
}
func buildGatewayListenerTLSContext(server *networking.Server) *auth.DownstreamTlsContext {
// Server.TLS cannot be nil or passthrough. But as a safety guard, return nil
if server.Tls == nil || server.Tls.Mode == networking.Server_TLSOptions_PASSTHROUGH {
return nil // We don't need to setup TLS context for passthrough mode
}
var certValidationContext *auth.CertificateValidationContext
var trustedCa *core.DataSource
if len(server.Tls.CaCertificates) != 0 {
trustedCa = &core.DataSource{
Specifier: &core.DataSource_Filename{
Filename: server.Tls.CaCertificates,
},
}
}
if trustedCa != nil || len(server.Tls.SubjectAltNames) > 0 {
certValidationContext = &auth.CertificateValidationContext{
TrustedCa: trustedCa,
VerifySubjectAltName: server.Tls.SubjectAltNames,
}
}
requireClientCert := server.Tls.Mode == networking.Server_TLSOptions_MUTUAL
// Set TLS parameters if they are non-default
var tlsParams *auth.TlsParameters
if len(server.Tls.CipherSuites) > 0 ||
server.Tls.MinProtocolVersion != networking.Server_TLSOptions_TLS_AUTO ||
server.Tls.MaxProtocolVersion != networking.Server_TLSOptions_TLS_AUTO {
tlsParams = &auth.TlsParameters{
TlsMinimumProtocolVersion: convertTLSProtocol(server.Tls.MinProtocolVersion),
TlsMaximumProtocolVersion: convertTLSProtocol(server.Tls.MaxProtocolVersion),
CipherSuites: server.Tls.CipherSuites,
}
}
return &auth.DownstreamTlsContext{
CommonTlsContext: &auth.CommonTlsContext{
TlsCertificates: []*auth.TlsCertificate{
{
CertificateChain: &core.DataSource{
Specifier: &core.DataSource_Filename{
Filename: server.Tls.ServerCertificate,
},
},
PrivateKey: &core.DataSource{
Specifier: &core.DataSource_Filename{
Filename: server.Tls.PrivateKey,
},
},
},
},
ValidationContextType: &auth.CommonTlsContext_ValidationContext{
ValidationContext: certValidationContext,
},
AlpnProtocols: ListenersALPNProtocols,
TlsParams: tlsParams,
},
RequireClientCertificate: &types.BoolValue{
Value: requireClientCert,
},
}
}
func | convertTLSProtocol | identifier_name | |
mod.rs | empty, which the scoring library doesn't handle
println!();
println!("An empty password puts your wallet at risk against an attacker with access to this device.");
println!("Use this only if you are sure that your device is safe from prying eyes!");
println!();
true
} else if let Some(feedback) = get_password_feedback(passphrase) {
// The scoring library provided feedback
println!();
println!(
"The password you chose is weak; a determined attacker with access to your device may be able to guess it."
);
println!("You may want to consider changing it to a stronger one.");
println!("Here are some suggestions:");
for suggestion in feedback {
println!("- {}", suggestion);
}
println!();
true
} else {
// The Force is strong with this one
false
}
}
/// Gets the password provided by command line argument or environment variable if available.
/// Otherwise prompts for the password to be typed in.
pub fn get_or_prompt_password(
arg_password: Option<SafePassword>,
config_password: Option<SafePassword>,
) -> Result<SafePassword, ExitError> {
if let Some(passphrase) = arg_password {
return Ok(passphrase);
}
let env = std::env::var_os(TARI_WALLET_PASSWORD);
if let Some(p) = env {
let env_password = p
.into_string()
.map_err(|_| ExitError::new(ExitCode::IOError, "Failed to convert OsString into String"))?;
return Ok(env_password.into());
}
if let Some(passphrase) = config_password {
return Ok(passphrase);
}
let password = prompt_password("Wallet password: ")?;
Ok(password)
}
fn prompt_password(prompt: &str) -> Result<SafePassword, ExitError> {
let password = prompt_password_stdout(prompt).map_err(|e| ExitError::new(ExitCode::IOError, e))?;
Ok(SafePassword::from(password))
}
/// Allows the user to change the password of the wallet.
pub async fn change_password(
config: &ApplicationConfig,
existing: SafePassword,
shutdown_signal: ShutdownSignal,
non_interactive_mode: bool,
) -> Result<(), ExitError> {
let mut wallet = init_wallet(
config,
existing.clone(),
None,
None,
shutdown_signal,
non_interactive_mode,
)
.await?;
// Get a new passphrase
let new = get_new_passphrase("New wallet passphrase: ", "Confirm new passphrase: ")?;
// Use the existing and new passphrases to attempt to change the wallet passphrase
wallet.db.change_passphrase(&existing, &new).map_err(|e| match e {
WalletStorageError::InvalidPassphrase => {
ExitError::new(ExitCode::IncorrectOrEmptyPassword, "Your password was not changed.")
},
_ => ExitError::new(ExitCode::DatabaseError, "Your password was not changed."),
})
}
/// Populates the PeerConfig struct from:
/// 1. The custom peer in the wallet config if it exists
/// 2. The custom peer in the wallet db if it exists
/// 3. The detected local base node if any
/// 4. The service peers defined in config they exist
/// 5. The peer seeds defined in config
pub async fn get_base_node_peer_config(
config: &ApplicationConfig,
wallet: &mut WalletSqlite,
non_interactive_mode: bool,
) -> Result<PeerConfig, ExitError> {
let mut use_custom_base_node_peer = false;
let mut selected_base_node = match config.wallet.custom_base_node {
Some(ref custom) => SeedPeer::from_str(custom)
.map(|node| Some(Peer::from(node)))
.map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed custom base node: {}", err)))?,
None => {
if let Some(custom_base_node_peer) = get_custom_base_node_peer_from_db(wallet) {
use_custom_base_node_peer = true;
Some(custom_base_node_peer)
} else {
None
}
},
};
// If the user has not explicitly set a base node in the config, we try detect one
if !non_interactive_mode && config.wallet.custom_base_node.is_none() && !use_custom_base_node_peer {
if let Some(detected_node) = detect_local_base_node(config.wallet.network).await {
match selected_base_node {
Some(ref base_node) if base_node.public_key == detected_node.public_key => {
// Skip asking because it's already set
},
Some(_) | None => {
println!(
"Local Base Node detected with public key {} and address {}",
detected_node.public_key,
detected_node
.addresses
.iter()
.map(ToString::to_string)
.collect::<Vec<_>>()
.join(", ")
);
if prompt(
"Would you like to use this base node? IF YOU DID NOT START THIS BASE NODE YOU SHOULD SELECT \
NO (Y/n)",
) {
let address = detected_node.addresses.first().ok_or_else(|| {
ExitError::new(ExitCode::ConfigError, "No address found for detected base node")
})?;
set_custom_base_node_peer_in_db(wallet, &detected_node.public_key, address)?;
selected_base_node = Some(detected_node.into());
}
},
}
}
}
// config
let base_node_peers = config
.wallet
.base_node_service_peers
.iter()
.map(|s| SeedPeer::from_str(s))
.map(|r| r.map(Peer::from))
.collect::<Result<Vec<_>, _>>()
.map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed base node peer: {}", err)))?;
// peer seeds
let peer_seeds = config
.peer_seeds
.peer_seeds
.iter()
.map(|s| SeedPeer::from_str(s))
.map(|r| r.map(Peer::from))
.collect::<Result<Vec<_>, _>>()
.map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed seed peer: {}", err)))?;
let peer_config = PeerConfig::new(selected_base_node, base_node_peers, peer_seeds);
debug!(target: LOG_TARGET, "base node peer config: {:?}", peer_config);
Ok(peer_config)
}
/// Determines which mode the wallet should run in.
pub(crate) fn wallet_mode(cli: &Cli, boot_mode: WalletBoot) -> WalletMode {
// Recovery mode
if matches!(boot_mode, WalletBoot::Recovery) {
if cli.non_interactive_mode {
return WalletMode::RecoveryDaemon;
} else {
return WalletMode::RecoveryTui;
}
}
match (cli.non_interactive_mode, cli.input_file.clone(), cli.command2.clone()) {
// TUI mode
(false, None, None) => WalletMode::Tui,
// GRPC mode
(true, None, None) => WalletMode::Grpc,
// Script mode
(_, Some(path), None) => WalletMode::Script(path),
// Command mode
(_, None, Some(command)) => WalletMode::Command(Box::new(command)), // WalletMode::Command(command),
// Invalid combinations
_ => WalletMode::Invalid,
}
}
/// Set up the app environment and state for use by the UI
#[allow(clippy::too_many_lines)]
pub async fn init_wallet(
config: &ApplicationConfig,
arg_password: SafePassword,
seed_words_file_name: Option<PathBuf>,
recovery_seed: Option<CipherSeed>,
shutdown_signal: ShutdownSignal,
non_interactive_mode: bool,
) -> Result<WalletSqlite, ExitError> {
fs::create_dir_all(
config
.wallet
.db_file
.parent()
.expect("console_wallet_db_file cannot be set to a root directory"),
)
.map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error creating Wallet folder. {}", e)))?;
fs::create_dir_all(&config.wallet.p2p.datastore_path)
.map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error creating peer db folder. {}", e)))?;
debug!(target: LOG_TARGET, "Running Wallet database migrations");
let db_path = &config.wallet.db_file;
// wallet should be encrypted from the beginning, so we must require a password to be provided by the user
let (wallet_backend, transaction_backend, output_manager_backend, contacts_backend, key_manager_backend) =
initialize_sqlite_database_backends(db_path, arg_password, config.wallet.db_connection_pool_size)?;
let wallet_db = WalletDatabase::new(wallet_backend);
let output_db = OutputManagerDatabase::new(output_manager_backend.clone());
debug!(target: LOG_TARGET, "Databases Initialized. Wallet is encrypted.",);
let node_addresses = if config.wallet.p2p.public_addresses.is_empty() {
match wallet_db.get_node_address()? {
Some(addr) => MultiaddrList::from(vec![addr]),
None => MultiaddrList::default(),
}
} else {
config.wallet.p2p.public_addresses.clone()
}; | let master_seed = read_or_create_master_seed(recovery_seed.clone(), | random_line_split | |
mod.rs |
let (wallet_backend, transaction_backend, output_manager_backend, contacts_backend, key_manager_backend) =
initialize_sqlite_database_backends(db_path, arg_password, config.wallet.db_connection_pool_size)?;
let wallet_db = WalletDatabase::new(wallet_backend);
let output_db = OutputManagerDatabase::new(output_manager_backend.clone());
debug!(target: LOG_TARGET, "Databases Initialized. Wallet is encrypted.",);
let node_addresses = if config.wallet.p2p.public_addresses.is_empty() {
match wallet_db.get_node_address()? {
Some(addr) => MultiaddrList::from(vec![addr]),
None => MultiaddrList::default(),
}
} else {
config.wallet.p2p.public_addresses.clone()
};
let master_seed = read_or_create_master_seed(recovery_seed.clone(), &wallet_db)?;
let node_identity = match config.wallet.identity_file.as_ref() {
Some(identity_file) => {
warn!(
target: LOG_TARGET,
"Node identity overridden by file {}",
identity_file.to_string_lossy()
);
setup_node_identity(
identity_file,
node_addresses.to_vec(),
true,
PeerFeatures::COMMUNICATION_CLIENT,
)?
},
None => setup_identity_from_db(&wallet_db, &master_seed, node_addresses.to_vec())?,
};
let mut wallet_config = config.wallet.clone();
if let TransportType::Tor = config.wallet.p2p.transport.transport_type {
wallet_config.p2p.transport.tor.identity = wallet_db.get_tor_id()?;
}
let consensus_manager = ConsensusManager::builder(config.wallet.network)
.build()
.map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error consensus manager. {}", e)))?;
let factories = CryptoFactories::default();
let mut wallet = Wallet::start(
wallet_config,
config.peer_seeds.clone(),
config.auto_update.clone(),
node_identity,
consensus_manager,
factories,
wallet_db,
output_db,
transaction_backend,
output_manager_backend,
contacts_backend,
key_manager_backend,
shutdown_signal,
master_seed,
)
.await
.map_err(|e| match e {
WalletError::CommsInitializationError(cie) => cie.to_exit_error(),
e => ExitError::new(ExitCode::WalletError, format!("Error creating Wallet Container: {}", e)),
})?;
if let Some(hs) = wallet.comms.hidden_service() {
wallet
.db
.set_tor_identity(hs.tor_identity().clone())
.map_err(|e| ExitError::new(ExitCode::WalletError, format!("Problem writing tor identity. {}", e)))?;
}
if let Some(file_name) = seed_words_file_name {
let seed_words = wallet.get_seed_words(&MnemonicLanguage::English)?.join(" ");
let _result = fs::write(file_name, seed_words.reveal()).map_err(|e| {
ExitError::new(
ExitCode::WalletError,
format!("Problem writing seed words to file: {}", e),
)
});
};
Ok(wallet)
}
async fn detect_local_base_node(network: Network) -> Option<SeedPeer> {
use minotari_app_grpc::tari_rpc::{base_node_client::BaseNodeClient, Empty};
let addr = format!(
"http://127.0.0.1:{}",
grpc_default_port(ApplicationType::BaseNode, network)
);
debug!(target: LOG_TARGET, "Checking for local base node at {}", addr);
let mut node_conn = match BaseNodeClient::connect(addr).await.ok() {
Some(conn) => conn,
None => {
debug!(target: LOG_TARGET, "No local base node detected");
return None;
},
};
let resp = node_conn.identify(Empty {}).await.ok()?;
let identity = resp.get_ref();
let public_key = CommsPublicKey::from_bytes(&identity.public_key).ok()?;
let addresses = identity
.public_addresses
.iter()
.filter_map(|s| Multiaddr::from_str(s).ok())
.collect::<Vec<_>>();
debug!(
target: LOG_TARGET,
"Local base node found with pk={} and addresses={}",
public_key.to_hex(),
addresses.iter().map(|a| a.to_string()).collect::<Vec<_>>().join(",")
);
Some(SeedPeer::new(public_key, addresses))
}
fn setup_identity_from_db<D: WalletBackend + 'static>(
wallet_db: &WalletDatabase<D>,
master_seed: &CipherSeed,
node_addresses: Vec<Multiaddr>,
) -> Result<Arc<NodeIdentity>, ExitError> {
let node_features = wallet_db
.get_node_features()?
.unwrap_or(PeerFeatures::COMMUNICATION_CLIENT);
let identity_sig = wallet_db.get_comms_identity_signature()?;
let comms_secret_key = derive_comms_secret_key(master_seed)?;
// This checks if anything has changed by validating the previous signature and if invalid, setting identity_sig
// to None
let identity_sig = identity_sig.filter(|sig| {
let comms_public_key = CommsPublicKey::from_secret_key(&comms_secret_key);
sig.is_valid(&comms_public_key, node_features, &node_addresses)
});
// SAFETY: we are manually checking the validity of this signature before adding Some(..)
let node_identity = Arc::new(NodeIdentity::with_signature_unchecked(
comms_secret_key,
node_addresses,
node_features,
identity_sig,
));
if !node_identity.is_signed() {
node_identity.sign();
// unreachable panic: signed above
let sig = node_identity
.identity_signature_read()
.as_ref()
.expect("unreachable panic")
.clone();
wallet_db.set_comms_identity_signature(sig)?;
}
Ok(node_identity)
}
/// Starts the wallet by setting the base node peer, and restarting the transaction and broadcast protocols.
pub async fn start_wallet(
wallet: &mut WalletSqlite,
base_node: &Peer,
wallet_mode: &WalletMode,
) -> Result<(), ExitError> {
debug!(target: LOG_TARGET, "Setting base node peer");
let net_address = base_node
.addresses
.best()
.ok_or_else(|| ExitError::new(ExitCode::ConfigError, "Configured base node has no address!"))?;
wallet
.set_base_node_peer(base_node.public_key.clone(), net_address.address().clone())
.await
.map_err(|e| {
ExitError::new(
ExitCode::WalletError,
format!("Error setting wallet base node peer. {}", e),
)
})?;
// Restart transaction protocols if not running in script or command modes
if !matches!(wallet_mode, WalletMode::Command(_)) && !matches!(wallet_mode, WalletMode::Script(_)) {
// NOTE: https://github.com/tari-project/tari/issues/5227
debug!("revalidating all transactions");
if let Err(e) = wallet.transaction_service.revalidate_all_transactions().await {
error!(target: LOG_TARGET, "Failed to revalidate all transactions: {}", e);
}
debug!("restarting transaction protocols");
if let Err(e) = wallet.transaction_service.restart_transaction_protocols().await {
error!(target: LOG_TARGET, "Problem restarting transaction protocols: {}", e);
}
debug!("validating transactions");
if let Err(e) = wallet.transaction_service.validate_transactions().await {
error!(
target: LOG_TARGET,
"Problem validating and restarting transaction protocols: {}", e
);
}
// validate transaction outputs
validate_txos(wallet).await?;
}
Ok(())
}
async fn validate_txos(wallet: &mut WalletSqlite) -> Result<(), ExitError> {
debug!(target: LOG_TARGET, "Starting TXO validations.");
wallet.output_manager_service.validate_txos().await.map_err(|e| {
error!(target: LOG_TARGET, "Error validating Unspent TXOs: {}", e);
ExitError::new(ExitCode::WalletError, e)
})?;
debug!(target: LOG_TARGET, "TXO validations started.");
Ok(())
}
pub(crate) fn confirm_seed_words(wallet: &mut WalletSqlite) -> Result<(), ExitError> {
let seed_words = wallet.get_seed_words(&MnemonicLanguage::English)?;
println!();
println!("=========================");
println!(" IMPORTANT! ");
println!("=========================");
println!("These are your wallet seed words.");
println!("They can be used to recover your wallet and funds.");
println!("WRITE THEM DOWN OR COPY THEM NOW. THIS IS YOUR ONLY CHANCE TO DO SO.");
println!();
println!("=========================");
println!("{}", seed_words.join(" ").reveal());
println!("=========================");
println!("\x07"); // beep!
let mut rl = Editor::<()>::new();
loop {
println!("I confirm that I will never see these seed words again.");
println!(r#"Type the word "confirm" to continue."#);
let readline = rl.readline(">> ");
match readline {
Ok(line) => match line.to_lowercase().as_ref() {
"confirm" => return Ok(()),
_ => continue,
},
Err(e) => {
return Err(ExitError::new(ExitCode::IOError, e));
},
}
}
}
/// Clear the terminal and print the Tari splash
pub fn | tari_splash_screen | identifier_name | |
mod.rs | .map(|r| r.map(Peer::from))
.collect::<Result<Vec<_>, _>>()
.map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed seed peer: {}", err)))?;
let peer_config = PeerConfig::new(selected_base_node, base_node_peers, peer_seeds);
debug!(target: LOG_TARGET, "base node peer config: {:?}", peer_config);
Ok(peer_config)
}
/// Determines which mode the wallet should run in.
pub(crate) fn wallet_mode(cli: &Cli, boot_mode: WalletBoot) -> WalletMode {
// Recovery mode
if matches!(boot_mode, WalletBoot::Recovery) {
if cli.non_interactive_mode {
return WalletMode::RecoveryDaemon;
} else {
return WalletMode::RecoveryTui;
}
}
match (cli.non_interactive_mode, cli.input_file.clone(), cli.command2.clone()) {
// TUI mode
(false, None, None) => WalletMode::Tui,
// GRPC mode
(true, None, None) => WalletMode::Grpc,
// Script mode
(_, Some(path), None) => WalletMode::Script(path),
// Command mode
(_, None, Some(command)) => WalletMode::Command(Box::new(command)), // WalletMode::Command(command),
// Invalid combinations
_ => WalletMode::Invalid,
}
}
/// Set up the app environment and state for use by the UI
#[allow(clippy::too_many_lines)]
pub async fn init_wallet(
config: &ApplicationConfig,
arg_password: SafePassword,
seed_words_file_name: Option<PathBuf>,
recovery_seed: Option<CipherSeed>,
shutdown_signal: ShutdownSignal,
non_interactive_mode: bool,
) -> Result<WalletSqlite, ExitError> {
fs::create_dir_all(
config
.wallet
.db_file
.parent()
.expect("console_wallet_db_file cannot be set to a root directory"),
)
.map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error creating Wallet folder. {}", e)))?;
fs::create_dir_all(&config.wallet.p2p.datastore_path)
.map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error creating peer db folder. {}", e)))?;
debug!(target: LOG_TARGET, "Running Wallet database migrations");
let db_path = &config.wallet.db_file;
// wallet should be encrypted from the beginning, so we must require a password to be provided by the user
let (wallet_backend, transaction_backend, output_manager_backend, contacts_backend, key_manager_backend) =
initialize_sqlite_database_backends(db_path, arg_password, config.wallet.db_connection_pool_size)?;
let wallet_db = WalletDatabase::new(wallet_backend);
let output_db = OutputManagerDatabase::new(output_manager_backend.clone());
debug!(target: LOG_TARGET, "Databases Initialized. Wallet is encrypted.",);
let node_addresses = if config.wallet.p2p.public_addresses.is_empty() {
match wallet_db.get_node_address()? {
Some(addr) => MultiaddrList::from(vec![addr]),
None => MultiaddrList::default(),
}
} else {
config.wallet.p2p.public_addresses.clone()
};
let master_seed = read_or_create_master_seed(recovery_seed.clone(), &wallet_db)?;
let node_identity = match config.wallet.identity_file.as_ref() {
Some(identity_file) => {
warn!(
target: LOG_TARGET,
"Node identity overridden by file {}",
identity_file.to_string_lossy()
);
setup_node_identity(
identity_file,
node_addresses.to_vec(),
true,
PeerFeatures::COMMUNICATION_CLIENT,
)?
},
None => setup_identity_from_db(&wallet_db, &master_seed, node_addresses.to_vec())?,
};
let mut wallet_config = config.wallet.clone();
if let TransportType::Tor = config.wallet.p2p.transport.transport_type {
wallet_config.p2p.transport.tor.identity = wallet_db.get_tor_id()?;
}
let consensus_manager = ConsensusManager::builder(config.wallet.network)
.build()
.map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error consensus manager. {}", e)))?;
let factories = CryptoFactories::default();
let mut wallet = Wallet::start(
wallet_config,
config.peer_seeds.clone(),
config.auto_update.clone(),
node_identity,
consensus_manager,
factories,
wallet_db,
output_db,
transaction_backend,
output_manager_backend,
contacts_backend,
key_manager_backend,
shutdown_signal,
master_seed,
)
.await
.map_err(|e| match e {
WalletError::CommsInitializationError(cie) => cie.to_exit_error(),
e => ExitError::new(ExitCode::WalletError, format!("Error creating Wallet Container: {}", e)),
})?;
if let Some(hs) = wallet.comms.hidden_service() {
wallet
.db
.set_tor_identity(hs.tor_identity().clone())
.map_err(|e| ExitError::new(ExitCode::WalletError, format!("Problem writing tor identity. {}", e)))?;
}
if let Some(file_name) = seed_words_file_name {
let seed_words = wallet.get_seed_words(&MnemonicLanguage::English)?.join(" ");
let _result = fs::write(file_name, seed_words.reveal()).map_err(|e| {
ExitError::new(
ExitCode::WalletError,
format!("Problem writing seed words to file: {}", e),
)
});
};
Ok(wallet)
}
async fn detect_local_base_node(network: Network) -> Option<SeedPeer> {
use minotari_app_grpc::tari_rpc::{base_node_client::BaseNodeClient, Empty};
let addr = format!(
"http://127.0.0.1:{}",
grpc_default_port(ApplicationType::BaseNode, network)
);
debug!(target: LOG_TARGET, "Checking for local base node at {}", addr);
let mut node_conn = match BaseNodeClient::connect(addr).await.ok() {
Some(conn) => conn,
None => {
debug!(target: LOG_TARGET, "No local base node detected");
return None;
},
};
let resp = node_conn.identify(Empty {}).await.ok()?;
let identity = resp.get_ref();
let public_key = CommsPublicKey::from_bytes(&identity.public_key).ok()?;
let addresses = identity
.public_addresses
.iter()
.filter_map(|s| Multiaddr::from_str(s).ok())
.collect::<Vec<_>>();
debug!(
target: LOG_TARGET,
"Local base node found with pk={} and addresses={}",
public_key.to_hex(),
addresses.iter().map(|a| a.to_string()).collect::<Vec<_>>().join(",")
);
Some(SeedPeer::new(public_key, addresses))
}
fn setup_identity_from_db<D: WalletBackend + 'static>(
wallet_db: &WalletDatabase<D>,
master_seed: &CipherSeed,
node_addresses: Vec<Multiaddr>,
) -> Result<Arc<NodeIdentity>, ExitError> {
let node_features = wallet_db
.get_node_features()?
.unwrap_or(PeerFeatures::COMMUNICATION_CLIENT);
let identity_sig = wallet_db.get_comms_identity_signature()?;
let comms_secret_key = derive_comms_secret_key(master_seed)?;
// This checks if anything has changed by validating the previous signature and if invalid, setting identity_sig
// to None
let identity_sig = identity_sig.filter(|sig| {
let comms_public_key = CommsPublicKey::from_secret_key(&comms_secret_key);
sig.is_valid(&comms_public_key, node_features, &node_addresses)
});
// SAFETY: we are manually checking the validity of this signature before adding Some(..)
let node_identity = Arc::new(NodeIdentity::with_signature_unchecked(
comms_secret_key,
node_addresses,
node_features,
identity_sig,
));
if !node_identity.is_signed() {
node_identity.sign();
// unreachable panic: signed above
let sig = node_identity
.identity_signature_read()
.as_ref()
.expect("unreachable panic")
.clone();
wallet_db.set_comms_identity_signature(sig)?;
}
Ok(node_identity)
}
/// Starts the wallet by setting the base node peer, and restarting the transaction and broadcast protocols.
pub async fn start_wallet(
wallet: &mut WalletSqlite,
base_node: &Peer,
wallet_mode: &WalletMode,
) -> Result<(), ExitError> | {
debug!(target: LOG_TARGET, "Setting base node peer");
let net_address = base_node
.addresses
.best()
.ok_or_else(|| ExitError::new(ExitCode::ConfigError, "Configured base node has no address!"))?;
wallet
.set_base_node_peer(base_node.public_key.clone(), net_address.address().clone())
.await
.map_err(|e| {
ExitError::new(
ExitCode::WalletError,
format!("Error setting wallet base node peer. {}", e),
)
})?;
// Restart transaction protocols if not running in script or command modes
if !matches!(wallet_mode, WalletMode::Command(_)) && !matches!(wallet_mode, WalletMode::Script(_)) { | identifier_body | |
mod.rs | , which the scoring library doesn't handle
println!();
println!("An empty password puts your wallet at risk against an attacker with access to this device.");
println!("Use this only if you are sure that your device is safe from prying eyes!");
println!();
true
} else if let Some(feedback) = get_password_feedback(passphrase) {
// The scoring library provided feedback
println!();
println!(
"The password you chose is weak; a determined attacker with access to your device may be able to guess it."
);
println!("You may want to consider changing it to a stronger one.");
println!("Here are some suggestions:");
for suggestion in feedback {
println!("- {}", suggestion);
}
println!();
true
} else {
// The Force is strong with this one
false
}
}
/// Gets the password provided by command line argument or environment variable if available.
/// Otherwise prompts for the password to be typed in.
pub fn get_or_prompt_password(
arg_password: Option<SafePassword>,
config_password: Option<SafePassword>,
) -> Result<SafePassword, ExitError> {
if let Some(passphrase) = arg_password {
return Ok(passphrase);
}
let env = std::env::var_os(TARI_WALLET_PASSWORD);
if let Some(p) = env {
let env_password = p
.into_string()
.map_err(|_| ExitError::new(ExitCode::IOError, "Failed to convert OsString into String"))?;
return Ok(env_password.into());
}
if let Some(passphrase) = config_password {
return Ok(passphrase);
}
let password = prompt_password("Wallet password: ")?;
Ok(password)
}
fn prompt_password(prompt: &str) -> Result<SafePassword, ExitError> {
let password = prompt_password_stdout(prompt).map_err(|e| ExitError::new(ExitCode::IOError, e))?;
Ok(SafePassword::from(password))
}
/// Allows the user to change the password of the wallet.
pub async fn change_password(
config: &ApplicationConfig,
existing: SafePassword,
shutdown_signal: ShutdownSignal,
non_interactive_mode: bool,
) -> Result<(), ExitError> {
let mut wallet = init_wallet(
config,
existing.clone(),
None,
None,
shutdown_signal,
non_interactive_mode,
)
.await?;
// Get a new passphrase
let new = get_new_passphrase("New wallet passphrase: ", "Confirm new passphrase: ")?;
// Use the existing and new passphrases to attempt to change the wallet passphrase
wallet.db.change_passphrase(&existing, &new).map_err(|e| match e {
WalletStorageError::InvalidPassphrase => | ,
_ => ExitError::new(ExitCode::DatabaseError, "Your password was not changed."),
})
}
/// Populates the PeerConfig struct from:
/// 1. The custom peer in the wallet config if it exists
/// 2. The custom peer in the wallet db if it exists
/// 3. The detected local base node if any
/// 4. The service peers defined in config they exist
/// 5. The peer seeds defined in config
pub async fn get_base_node_peer_config(
config: &ApplicationConfig,
wallet: &mut WalletSqlite,
non_interactive_mode: bool,
) -> Result<PeerConfig, ExitError> {
let mut use_custom_base_node_peer = false;
let mut selected_base_node = match config.wallet.custom_base_node {
Some(ref custom) => SeedPeer::from_str(custom)
.map(|node| Some(Peer::from(node)))
.map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed custom base node: {}", err)))?,
None => {
if let Some(custom_base_node_peer) = get_custom_base_node_peer_from_db(wallet) {
use_custom_base_node_peer = true;
Some(custom_base_node_peer)
} else {
None
}
},
};
// If the user has not explicitly set a base node in the config, we try detect one
if !non_interactive_mode && config.wallet.custom_base_node.is_none() && !use_custom_base_node_peer {
if let Some(detected_node) = detect_local_base_node(config.wallet.network).await {
match selected_base_node {
Some(ref base_node) if base_node.public_key == detected_node.public_key => {
// Skip asking because it's already set
},
Some(_) | None => {
println!(
"Local Base Node detected with public key {} and address {}",
detected_node.public_key,
detected_node
.addresses
.iter()
.map(ToString::to_string)
.collect::<Vec<_>>()
.join(", ")
);
if prompt(
"Would you like to use this base node? IF YOU DID NOT START THIS BASE NODE YOU SHOULD SELECT \
NO (Y/n)",
) {
let address = detected_node.addresses.first().ok_or_else(|| {
ExitError::new(ExitCode::ConfigError, "No address found for detected base node")
})?;
set_custom_base_node_peer_in_db(wallet, &detected_node.public_key, address)?;
selected_base_node = Some(detected_node.into());
}
},
}
}
}
// config
let base_node_peers = config
.wallet
.base_node_service_peers
.iter()
.map(|s| SeedPeer::from_str(s))
.map(|r| r.map(Peer::from))
.collect::<Result<Vec<_>, _>>()
.map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed base node peer: {}", err)))?;
// peer seeds
let peer_seeds = config
.peer_seeds
.peer_seeds
.iter()
.map(|s| SeedPeer::from_str(s))
.map(|r| r.map(Peer::from))
.collect::<Result<Vec<_>, _>>()
.map_err(|err| ExitError::new(ExitCode::ConfigError, format!("Malformed seed peer: {}", err)))?;
let peer_config = PeerConfig::new(selected_base_node, base_node_peers, peer_seeds);
debug!(target: LOG_TARGET, "base node peer config: {:?}", peer_config);
Ok(peer_config)
}
/// Determines which mode the wallet should run in.
pub(crate) fn wallet_mode(cli: &Cli, boot_mode: WalletBoot) -> WalletMode {
// Recovery mode
if matches!(boot_mode, WalletBoot::Recovery) {
if cli.non_interactive_mode {
return WalletMode::RecoveryDaemon;
} else {
return WalletMode::RecoveryTui;
}
}
match (cli.non_interactive_mode, cli.input_file.clone(), cli.command2.clone()) {
// TUI mode
(false, None, None) => WalletMode::Tui,
// GRPC mode
(true, None, None) => WalletMode::Grpc,
// Script mode
(_, Some(path), None) => WalletMode::Script(path),
// Command mode
(_, None, Some(command)) => WalletMode::Command(Box::new(command)), // WalletMode::Command(command),
// Invalid combinations
_ => WalletMode::Invalid,
}
}
/// Set up the app environment and state for use by the UI
#[allow(clippy::too_many_lines)]
pub async fn init_wallet(
config: &ApplicationConfig,
arg_password: SafePassword,
seed_words_file_name: Option<PathBuf>,
recovery_seed: Option<CipherSeed>,
shutdown_signal: ShutdownSignal,
non_interactive_mode: bool,
) -> Result<WalletSqlite, ExitError> {
fs::create_dir_all(
config
.wallet
.db_file
.parent()
.expect("console_wallet_db_file cannot be set to a root directory"),
)
.map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error creating Wallet folder. {}", e)))?;
fs::create_dir_all(&config.wallet.p2p.datastore_path)
.map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error creating peer db folder. {}", e)))?;
debug!(target: LOG_TARGET, "Running Wallet database migrations");
let db_path = &config.wallet.db_file;
// wallet should be encrypted from the beginning, so we must require a password to be provided by the user
let (wallet_backend, transaction_backend, output_manager_backend, contacts_backend, key_manager_backend) =
initialize_sqlite_database_backends(db_path, arg_password, config.wallet.db_connection_pool_size)?;
let wallet_db = WalletDatabase::new(wallet_backend);
let output_db = OutputManagerDatabase::new(output_manager_backend.clone());
debug!(target: LOG_TARGET, "Databases Initialized. Wallet is encrypted.",);
let node_addresses = if config.wallet.p2p.public_addresses.is_empty() {
match wallet_db.get_node_address()? {
Some(addr) => MultiaddrList::from(vec![addr]),
None => MultiaddrList::default(),
}
} else {
config.wallet.p2p.public_addresses.clone()
};
let master_seed = read_or_create_master_seed(recovery_seed.clone | {
ExitError::new(ExitCode::IncorrectOrEmptyPassword, "Your password was not changed.")
} | conditional_block |
ChatEventCtrl.ts | iliang.huang
* @Date: 2019-03-22 13:32:05
* @Last Modified by: jiangping
* @Last Modified time: 2021-10-13 16:43:36
*/
const { ccclass, property } = cc._decorator;
@ccclass
export default class ChatEventCtrl extends cc.Component {
testFunc(event, param) {
// console.log("testFunc", param)
}
equipClick(event, param) {
let info: icmsg.EquipInfo = JSON.parse(param);
let item: BagItem = {
series: info.equipId,
itemId: info.equipId,
type: BagType.EQUIP,
itemNum: 1,
extInfo: info
};
gdk.panel.setArgs(PanelId.EquipTips, { itemInfo: item, noBtn: true, isOther: true });
gdk.panel.open(PanelId.EquipTips);
}
itemClick(event, param) {
// 找出param参数中的道具id
let id = param.replace(/{(.*)}/, "$1")
id = parseInt(id)
let type = BagUtils.getItemTypeById(id)
let item: BagItem = {
series: id,
itemId: id,
type: type,
itemNum: 1,
extInfo: null
}
GlobalUtil.openItemTips(item, true)
}
heroClick(event, param) {
let id = param.replace(/{(.*)}/, "$1")
id = parseInt(id)
let heroCfg = ConfigManager.getItemById(HeroCfg, id);
gdk.panel.open(PanelId.HeroDetail, (node: cc.Node) => {
let comp = node.getComponent(HeroDetailViewCtrl)
comp.initHeroInfo(heroCfg)
})
}
heroImageClick(event, param) {
let data: icmsg.RoleHeroImageRsp = new icmsg.RoleHeroImageRsp()
data.hero = JSON.parse(param)
data.type = 1
gdk.panel.setArgs(PanelId.MainSetHeroInfoTip, data)
gdk.panel.open(PanelId.MainSetHeroInfoTip);
}
playerClick(event, param) {
// let str: String = param.replace(/{(.*)}/, "$1");
// let arr = str.split(",");
// let newArr: number[] = []
// arr.forEach(e => {
// newArr.push(parseInt(e));
// })
// let id: Uint8Array = new Uint8Array(newArr);
// let msg = new RoleImageReq()
// msg.playerId = parseInt(param);
// NetManager.send(msg)
gdk.panel.setArgs(PanelId.MainSet, parseInt(param))
gdk.panel.open(PanelId.MainSet)
}
joinGuildClick(event, param) {
let roleModel = ModelManager.get(RoleModel)
let joinLv = ConfigManager.getItemById(SystemCfg, 2400).openLv
if (roleModel.level < joinLv) {
gdk.gui.showMessage(`指挥官${joinLv}级才可加入`)
return
}
let guildId = parseInt(param)
let msg = new icmsg.GuildJoinReq()
msg.guildId = guildId
NetManager.send(msg, (data: icmsg.GuildJoinRsp) => {
// //正常加入
// if (data.error == -1) {
// gdk.gui.showMessage("申请成功,等待会长审核")
// } else if (data.error == 0) {
// if (data.guildId && data.camp) {
// gdk.panel.hide(PanelId.Friend)
// gdk.panel.hide(PanelId.Chat)
// gdk.gui.showMessage(`成功加入${data.camp.guild.name}公会`)
// roleModel.guildId = data.guildId
// roleModel.guildName = data.camp.guild.name
// gdk.panel.open(PanelId.GuildMain)
// }
// } else {
// gdk.gui.showMessage(ErrorManager.get(data.error, [data.minLv]))
// }
}, this)
}
/**打开赏金板 */
bountyClick(event, param) {
gdk.panel.hide(PanelId.Chat)
gdk.panel.open(PanelId.BountyList)
}
/**月卡点击 */
monthCardClick(event, param) {
gdk.panel.hide(PanelId.Chat)
let index = parseInt(param)
gdk.panel.setArgs(PanelId.MonthCard, index)
gdk.panel.open(PanelId.MonthCard)
}
vipClick(event, param) {
gdk.panel.hide(PanelId.Chat)
JumpUtils.openRechargeView([2])
}
tqClick(event, param) {
gdk.panel.hide(PanelId.Chat)
JumpUtils.openRechargeView([0])
}
//打开爬塔副本
towerClick() {
if (!JumpUtils.ifSysOpen(705)) {
return;
}
gdk.panel.hide(PanelId.Chat)
gdk.panel.open(PanelId.TowerPanel)
}
dailyRechargeClick() {
if (!JumpUtils.ifSysOpen(2834)) {
return;
}
gdk.panel.hide(PanelId.Chat);
gdk.panel.open(PanelId.DailyFirstRecharge) | gdk.panel.hide(PanelId.HelpTipsPanel);
JumpUtils.openRechargeView([0])
}
scoreSysClick() {
gdk.panel.open(PanelId.ScoreSytemView);
}
adventureClick() {
JumpUtils.openActivityMain([9])
}
shareHeroClick(event, param) {
let msg = new icmsg.ShareInfoReq()
msg.shareId = param
NetManager.send(msg, (data: icmsg.ShareInfoRsp) => {
gdk.panel.open(PanelId.LookHeroView, (node: cc.Node) => {
let model = ModelManager.get(HeroModel)
model.heroImage = data.info
let comp = node.getComponent(LookHeroViewCtrl)
comp.updateHeroInfo()
})
})
}
shareHeroCommentClick(event, param) {
let ids = (param as string).split("@")
gdk.panel.setArgs(PanelId.SubHeroCommentPanel, ids[0], ids[1], ids[2])
gdk.panel.open(PanelId.SubHeroCommentPanel)
}
relicCallGuildATK(event, param) {
if (!JumpUtils.ifSysOpen(2861, true)) {
return;
}
gdk.panel.hide(PanelId.Chat);
let m = ModelManager.get(RelicModel);
m.jumpArgs = param;
gdk.panel.open(PanelId.RelicMainView);
}
relicGoToATK(event, param) {
if (!JumpUtils.ifSysOpen(2861, true)) {
return;
}
gdk.panel.hide(PanelId.Chat);
gdk.panel.hide(PanelId.RelicUnderAtkNoticeView);
let m = ModelManager.get(RelicModel);
m.jumpArgs = param;
gdk.panel.open(PanelId.RelicMainView);
}
joinCooperation(event, param) {
let guildId = parseInt(param)
let footHoldModel = ModelManager.get(FootHoldModel)
let roleModel = ModelManager.get(RoleModel)
if (roleModel.guildId == 0) {
let info: AskInfoType = {
sureCb: () => {
gdk.panel.hide(PanelId.FHCooperationMain)
gdk.panel.setArgs(PanelId.GuildJoin, guildId, false)
gdk.panel.open(PanelId.GuildJoin)
},
closeCb: () => {
gdk.panel.hide(PanelId.FHCooperationMain)
gdk.panel.open(PanelId.GuildList)
},
sureText: "加入该公会",
closeText: "公会列表",
descText: `加入公会后才可参与据点争夺战,推荐先加入公会`,
thisArg: this,
}
GlobalUtil.openAskPanel(info)
return
}
let msg = new icmsg.FootholdCoopApplyAskReq()
msg.guildId = guildId
NetManager.send(msg, (data: icmsg.FootholdCoopApplyAskRsp) => {
if (data.autoJoin) {
footHoldModel.coopGuildId = data.guildId
gdk.gui.showMessage("成功加入协战,请前往据点争夺战战场")
} else {
gdk.gui.showMessage("申请成功,请敬候佳音")
}
}, this)
}
replayBounty(event, param) {
let msg1 = new icmsg.BountyQueryReq()
msg1.missionId = parseInt(param)
NetManager.send(msg1, (data1: icmsg.BountyQueryRsp) => {
let msg2 = new icmsg.BountyFightReplyReq()
msg2.missionId = parseInt(param)
NetManager.send(msg2, (data2: icmsg.BountyFightReplyRsp) => {
gdk.panel.setArgs(PanelId.BountyItemReplay, data1.mission, data2)
gdk.panel.open(PanelId.BountyItemReplay)
})
})
}
goToguildPower(event) {
JumpUtils.openGuildPowerView()
}
| ;
}
goToTQStore() {
| conditional_block |
ChatEventCtrl.ts | weiliang.huang
* @Date: 2019-03-22 13:32:05
* @Last Modified by: jiangping
* @Last Modified time: 2021-10-13 16:43:36
*/
const { ccclass, property } = cc._decorator;
@ccclass
export default class ChatEventCtrl extends cc.Component {
testFunc(event, param) {
// console.log("testFunc", param)
}
equipClick(event, param) {
let info: icmsg.EquipInfo = JSON.parse(param);
let item: BagItem = {
series: info.equipId,
itemId: info.equipId,
type: BagType.EQUIP,
itemNum: 1,
extInfo: info
};
gdk.panel.setArgs(PanelId.EquipTips, { itemInfo: item, noBtn: true, isOther: true });
gdk.panel.open(PanelId.EquipTips);
}
itemClick(event, param) {
// 找出param参数中的道具id
let id = param.replace(/{(.*)}/, "$1")
id = parseInt(id)
let type = BagUtils.getItemTypeById(id)
let item: BagItem = {
series: id,
itemId: id,
type: type,
itemNum: 1,
extInfo: null
}
GlobalUtil.openItemTips(item, true)
}
heroClick(event, param) {
let id = param.replace(/{(.*)}/, "$1")
id = parseInt(id)
let heroCfg = ConfigManager.getItemById(HeroCfg, id);
gdk.panel.open(PanelId.HeroDetail, (node: cc.Node) => {
let comp = node.getComponent(HeroDetailViewCtrl)
comp.initHeroInfo(heroCfg)
})
}
heroImageClick(event, param) {
let data: icmsg.RoleHeroImageRsp = new icmsg.RoleHeroImageRsp()
data.hero = JSON.parse(param)
data.type = 1
gdk.panel.setArgs(PanelId.MainSetHeroInfoTip, data)
gdk.panel.open(PanelId.MainSetHeroInfoTip);
}
playerClick(event, param) {
// let str: String = param.replace(/{(.*)}/, "$1");
// let arr = str.split(",");
// let newArr: number[] = []
// arr.forEach(e => {
// newArr.push(parseInt(e));
// })
// let id: Uint8Array = new Uint8Array(newArr);
// let msg = new RoleImageReq()
// msg.playerId = parseInt(param);
// NetManager.send(msg)
gdk.panel.setArgs(PanelId.MainSet, parseInt(param))
gdk.panel.open(PanelId.MainSet)
}
joinGuildClick(event, param) {
let roleModel = ModelManager.get(RoleModel)
let joinLv = ConfigManager.getItemById(SystemCfg, 2400).openLv
if (roleModel.level < joinLv) {
gdk.gui.showMessage(`指挥官${joinLv}级才可加入`)
return
}
let guildId = parseInt(param)
let msg = new icmsg.GuildJoinReq()
msg.guildId = guildId
NetManager.send(msg, (data: icmsg.GuildJoinRsp) => {
// //正常加入
// if (data.error == -1) {
// gdk.gui.showMessage("申请成功,等待会长审核")
// } else if (data.error == 0) {
// if (data.guildId && data.camp) { | // roleModel.guildId = data.guildId
// roleModel.guildName = data.camp.guild.name
// gdk.panel.open(PanelId.GuildMain)
// }
// } else {
// gdk.gui.showMessage(ErrorManager.get(data.error, [data.minLv]))
// }
}, this)
}
/**打开赏金板 */
bountyClick(event, param) {
gdk.panel.hide(PanelId.Chat)
gdk.panel.open(PanelId.BountyList)
}
/**月卡点击 */
monthCardClick(event, param) {
gdk.panel.hide(PanelId.Chat)
let index = parseInt(param)
gdk.panel.setArgs(PanelId.MonthCard, index)
gdk.panel.open(PanelId.MonthCard)
}
vipClick(event, param) {
gdk.panel.hide(PanelId.Chat)
JumpUtils.openRechargeView([2])
}
tqClick(event, param) {
gdk.panel.hide(PanelId.Chat)
JumpUtils.openRechargeView([0])
}
//打开爬塔副本
towerClick() {
if (!JumpUtils.ifSysOpen(705)) {
return;
}
gdk.panel.hide(PanelId.Chat)
gdk.panel.open(PanelId.TowerPanel)
}
dailyRechargeClick() {
if (!JumpUtils.ifSysOpen(2834)) {
return;
}
gdk.panel.hide(PanelId.Chat);
gdk.panel.open(PanelId.DailyFirstRecharge);
}
goToTQStore() {
gdk.panel.hide(PanelId.HelpTipsPanel);
JumpUtils.openRechargeView([0])
}
scoreSysClick() {
gdk.panel.open(PanelId.ScoreSytemView);
}
adventureClick() {
JumpUtils.openActivityMain([9])
}
shareHeroClick(event, param) {
let msg = new icmsg.ShareInfoReq()
msg.shareId = param
NetManager.send(msg, (data: icmsg.ShareInfoRsp) => {
gdk.panel.open(PanelId.LookHeroView, (node: cc.Node) => {
let model = ModelManager.get(HeroModel)
model.heroImage = data.info
let comp = node.getComponent(LookHeroViewCtrl)
comp.updateHeroInfo()
})
})
}
shareHeroCommentClick(event, param) {
let ids = (param as string).split("@")
gdk.panel.setArgs(PanelId.SubHeroCommentPanel, ids[0], ids[1], ids[2])
gdk.panel.open(PanelId.SubHeroCommentPanel)
}
relicCallGuildATK(event, param) {
if (!JumpUtils.ifSysOpen(2861, true)) {
return;
}
gdk.panel.hide(PanelId.Chat);
let m = ModelManager.get(RelicModel);
m.jumpArgs = param;
gdk.panel.open(PanelId.RelicMainView);
}
relicGoToATK(event, param) {
if (!JumpUtils.ifSysOpen(2861, true)) {
return;
}
gdk.panel.hide(PanelId.Chat);
gdk.panel.hide(PanelId.RelicUnderAtkNoticeView);
let m = ModelManager.get(RelicModel);
m.jumpArgs = param;
gdk.panel.open(PanelId.RelicMainView);
}
joinCooperation(event, param) {
let guildId = parseInt(param)
let footHoldModel = ModelManager.get(FootHoldModel)
let roleModel = ModelManager.get(RoleModel)
if (roleModel.guildId == 0) {
let info: AskInfoType = {
sureCb: () => {
gdk.panel.hide(PanelId.FHCooperationMain)
gdk.panel.setArgs(PanelId.GuildJoin, guildId, false)
gdk.panel.open(PanelId.GuildJoin)
},
closeCb: () => {
gdk.panel.hide(PanelId.FHCooperationMain)
gdk.panel.open(PanelId.GuildList)
},
sureText: "加入该公会",
closeText: "公会列表",
descText: `加入公会后才可参与据点争夺战,推荐先加入公会`,
thisArg: this,
}
GlobalUtil.openAskPanel(info)
return
}
let msg = new icmsg.FootholdCoopApplyAskReq()
msg.guildId = guildId
NetManager.send(msg, (data: icmsg.FootholdCoopApplyAskRsp) => {
if (data.autoJoin) {
footHoldModel.coopGuildId = data.guildId
gdk.gui.showMessage("成功加入协战,请前往据点争夺战战场")
} else {
gdk.gui.showMessage("申请成功,请敬候佳音")
}
}, this)
}
replayBounty(event, param) {
let msg1 = new icmsg.BountyQueryReq()
msg1.missionId = parseInt(param)
NetManager.send(msg1, (data1: icmsg.BountyQueryRsp) => {
let msg2 = new icmsg.BountyFightReplyReq()
msg2.missionId = parseInt(param)
NetManager.send(msg2, (data2: icmsg.BountyFightReplyRsp) => {
gdk.panel.setArgs(PanelId.BountyItemReplay, data1.mission, data2)
gdk.panel.open(PanelId.BountyItemReplay)
})
})
}
goToguildPower(event) {
JumpUtils.openGuildPowerView()
}
goTo | // gdk.panel.hide(PanelId.Friend)
// gdk.panel.hide(PanelId.Chat)
// gdk.gui.showMessage(`成功加入${data.camp.guild.name}公会`) | random_line_split |
ChatEventCtrl.ts | weiliang.huang
* @Date: 2019-03-22 13:32:05
* @Last Modified by: jiangping
* @Last Modified time: 2021-10-13 16:43:36
*/
const { ccclass, property } = cc._decorator;
@ccclass
export default class ChatEventCtrl extends cc.Component {
testFunc(event, param) {
// console.log("testFunc", param)
}
equipClick(event, param) {
let info: icmsg.EquipInfo = JSON.parse(param);
let item: BagItem = {
series: info.equipId,
itemId: info.equipId,
type: BagType.EQUIP,
itemNum: 1,
extInfo: info
};
gdk.panel.setArgs(PanelId.EquipTips, { itemInfo: item, noBtn: true, isOther: true });
gdk.panel.open(PanelId.EquipTips);
}
itemClick(event, param) {
// 找出param参数中的道具id
let id = param.replace(/{(.*)}/, "$1")
id = parseInt(id)
let type = BagUtils.getItemTypeById(id)
let item: BagItem = {
series: id,
itemId: id,
type: type,
itemNum: 1,
extInfo: null
}
GlobalUtil.openItemTips(item, true)
}
heroClick(event, param) {
let id = param.replace(/{(.*)}/, "$1")
id = parseInt(id)
let heroCfg = ConfigManager.getItemById(HeroCfg, id);
gdk.panel.open(PanelId.HeroDetail, (node: cc.Node) => {
let comp = node.getComponent(HeroDetailViewCtrl)
comp.initHeroInfo(heroCfg)
})
}
heroImageClick(event, param) {
let data: icmsg.RoleHeroImageRsp = new icmsg.RoleHeroImageRsp()
data.hero = JSON.parse(param)
data.type = 1
gdk.panel.setArgs(PanelId.MainSetHeroInfoTip, data)
gdk.panel.open(PanelId.MainSetHeroInfoTip);
}
playerClick(event, param) {
// let str: String = param.replace(/{(.*)}/, "$1");
// let arr = str.split(",");
// let newArr: number[] = []
// arr.forEach(e => {
// newArr.push(parseInt(e));
// })
// let id: Uint8Array = new Uint8Array(newArr);
// let msg = new RoleImageReq()
// msg.playerId = parseInt(param);
// NetManager.send(msg)
gdk.panel.setArgs(PanelId.MainSet, parseInt(param))
gdk.panel.open(PanelId.MainSet)
}
joinGuildClick(event, param) {
let roleModel = ModelM | // gdk.panel.open(PanelId.GuildMain)
// }
// } else {
// gdk.gui.showMessage(ErrorManager.get(data.error, [data.minLv]))
// }
}, this)
}
/**打开赏金板 */
bountyClick(event, param) {
gdk.panel.hide(PanelId.Chat)
gdk.panel.open(PanelId.BountyList)
}
/**月卡点击 */
monthCardClick(event, param) {
gdk.panel.hide(PanelId.Chat)
let index = parseInt(param)
gdk.panel.setArgs(PanelId.MonthCard, index)
gdk.panel.open(PanelId.MonthCard)
}
vipClick(event, param) {
gdk.panel.hide(PanelId.Chat)
JumpUtils.openRechargeView([2])
}
tqClick(event, param) {
gdk.panel.hide(PanelId.Chat)
JumpUtils.openRechargeView([0])
}
//打开爬塔副本
towerClick() {
if (!JumpUtils.ifSysOpen(705)) {
return;
}
gdk.panel.hide(PanelId.Chat)
gdk.panel.open(PanelId.TowerPanel)
}
dailyRechargeClick() {
if (!JumpUtils.ifSysOpen(2834)) {
return;
}
gdk.panel.hide(PanelId.Chat);
gdk.panel.open(PanelId.DailyFirstRecharge);
}
goToTQStore() {
gdk.panel.hide(PanelId.HelpTipsPanel);
JumpUtils.openRechargeView([0])
}
scoreSysClick() {
gdk.panel.open(PanelId.ScoreSytemView);
}
adventureClick() {
JumpUtils.openActivityMain([9])
}
shareHeroClick(event, param) {
let msg = new icmsg.ShareInfoReq()
msg.shareId = param
NetManager.send(msg, (data: icmsg.ShareInfoRsp) => {
gdk.panel.open(PanelId.LookHeroView, (node: cc.Node) => {
let model = ModelManager.get(HeroModel)
model.heroImage = data.info
let comp = node.getComponent(LookHeroViewCtrl)
comp.updateHeroInfo()
})
})
}
shareHeroCommentClick(event, param) {
let ids = (param as string).split("@")
gdk.panel.setArgs(PanelId.SubHeroCommentPanel, ids[0], ids[1], ids[2])
gdk.panel.open(PanelId.SubHeroCommentPanel)
}
relicCallGuildATK(event, param) {
if (!JumpUtils.ifSysOpen(2861, true)) {
return;
}
gdk.panel.hide(PanelId.Chat);
let m = ModelManager.get(RelicModel);
m.jumpArgs = param;
gdk.panel.open(PanelId.RelicMainView);
}
relicGoToATK(event, param) {
if (!JumpUtils.ifSysOpen(2861, true)) {
return;
}
gdk.panel.hide(PanelId.Chat);
gdk.panel.hide(PanelId.RelicUnderAtkNoticeView);
let m = ModelManager.get(RelicModel);
m.jumpArgs = param;
gdk.panel.open(PanelId.RelicMainView);
}
joinCooperation(event, param) {
let guildId = parseInt(param)
let footHoldModel = ModelManager.get(FootHoldModel)
let roleModel = ModelManager.get(RoleModel)
if (roleModel.guildId == 0) {
let info: AskInfoType = {
sureCb: () => {
gdk.panel.hide(PanelId.FHCooperationMain)
gdk.panel.setArgs(PanelId.GuildJoin, guildId, false)
gdk.panel.open(PanelId.GuildJoin)
},
closeCb: () => {
gdk.panel.hide(PanelId.FHCooperationMain)
gdk.panel.open(PanelId.GuildList)
},
sureText: "加入该公会",
closeText: "公会列表",
descText: `加入公会后才可参与据点争夺战,推荐先加入公会`,
thisArg: this,
}
GlobalUtil.openAskPanel(info)
return
}
let msg = new icmsg.FootholdCoopApplyAskReq()
msg.guildId = guildId
NetManager.send(msg, (data: icmsg.FootholdCoopApplyAskRsp) => {
if (data.autoJoin) {
footHoldModel.coopGuildId = data.guildId
gdk.gui.showMessage("成功加入协战,请前往据点争夺战战场")
} else {
gdk.gui.showMessage("申请成功,请敬候佳音")
}
}, this)
}
replayBounty(event, param) {
let msg1 = new icmsg.BountyQueryReq()
msg1.missionId = parseInt(param)
NetManager.send(msg1, (data1: icmsg.BountyQueryRsp) => {
let msg2 = new icmsg.BountyFightReplyReq()
msg2.missionId = parseInt(param)
NetManager.send(msg2, (data2: icmsg.BountyFightReplyRsp) => {
gdk.panel.setArgs(PanelId.BountyItemReplay, data1.mission, data2)
gdk.panel.open(PanelId.BountyItemReplay)
})
})
}
goToguildPower(event) {
JumpUtils.openGuildPowerView()
}
| anager.get(RoleModel)
let joinLv = ConfigManager.getItemById(SystemCfg, 2400).openLv
if (roleModel.level < joinLv) {
gdk.gui.showMessage(`指挥官${joinLv}级才可加入`)
return
}
let guildId = parseInt(param)
let msg = new icmsg.GuildJoinReq()
msg.guildId = guildId
NetManager.send(msg, (data: icmsg.GuildJoinRsp) => {
// //正常加入
// if (data.error == -1) {
// gdk.gui.showMessage("申请成功,等待会长审核")
// } else if (data.error == 0) {
// if (data.guildId && data.camp) {
// gdk.panel.hide(PanelId.Friend)
// gdk.panel.hide(PanelId.Chat)
// gdk.gui.showMessage(`成功加入${data.camp.guild.name}公会`)
// roleModel.guildId = data.guildId
// roleModel.guildName = data.camp.guild.name | identifier_body |
ChatEventCtrl.ts | iliang.huang
* @Date: 2019-03-22 13:32:05
* @Last Modified by: jiangping
* @Last Modified time: 2021-10-13 16:43:36
*/
const { ccclass, property } = cc._decorator;
@ccclass
export default class ChatEventCtrl extends cc.Component {
testFunc(event, param) {
// console.log("testFunc", param)
}
equipClick(event, param) {
let info: icmsg.EquipInfo = JSON.parse(param);
let item: BagItem = {
series: info.equipId,
itemId: info.equipId,
type: BagType.EQUIP,
itemNum: 1,
extInfo: info
};
gdk.panel.setArgs(PanelId.EquipTips, { itemInfo: item, noBtn: true, isOther: true });
gdk.panel.open(PanelId.EquipTips);
}
itemClick(event, param) {
// 找出param参数中的道具id
let id = param.replace(/{(.*)}/, "$1")
id = parseInt(id)
let type = BagUtils.getItemTypeById(id)
let item: BagItem = {
series: id,
itemId: id,
type: type,
itemNum: 1,
extInfo: null
}
GlobalUtil.openItemTips(item, true)
}
heroClick(event, param) {
let id = param.replace(/{(.*)}/, "$1")
id = parseInt(id)
let heroCfg = ConfigManager.getItemById(HeroCfg, id);
gdk.panel.open(PanelId.HeroDetail, (node: cc.Node) => {
let comp = node.getComponent(HeroDetailViewCtrl)
comp.initHeroInfo(heroCfg)
})
}
heroImageClick(event, param) {
let data: icmsg.RoleHeroImageRsp = new icmsg.RoleHeroImageRsp()
data.hero = JSON.parse(param)
data.type = 1
gdk.panel.setArgs(PanelId.MainSetHeroInfoTip, data)
gdk.panel.open(PanelId.MainSetHeroInfoTip);
}
playerClick(event, param) {
// let str: String = param.replace(/{(.*)}/, "$1");
// let arr = str.split(",");
// let newArr: number[] = []
// arr.forEach(e => {
// newArr.push(parseInt(e));
// })
// let id: Uint8Array = new Uint8Array(newArr);
// let msg = new RoleImageReq()
// msg.playerId = parseInt(param);
// NetManager.send(msg)
gdk.panel.setArgs(PanelId.MainSet, parseInt(param))
gdk.panel.open(PanelId.MainSet)
}
joinGuildClick(event, param) {
let roleModel = ModelManager.get(RoleModel)
let joinLv = ConfigManager.getItemById(SystemCfg, 2400).openLv
if (roleModel.level < joinLv) {
gdk.gui.showMessage(`指挥官${joinLv}级才可加入`)
return
}
let guildId = parseInt(param)
let msg = new icmsg.GuildJoinReq()
msg.guildId = guildId
NetManager.send(msg, (data: icmsg.GuildJoinRsp) => {
// //正常加入
// if (data.error == -1) {
// gdk.gui.showMessage("申请成功,等待会长审核")
// } else if (data.error == 0) {
// if (data.guildId && data.camp) {
// gdk.panel.hide(PanelId.Friend)
// gdk.panel.hide(PanelId.Chat)
// gdk.gui.showMessage(`成功加入${data.camp.guild.name}公会`)
// roleModel.guildId = data.guildId
// roleModel.guildName = data.camp.guild.name
// gdk.panel.open(PanelId.GuildMain)
// }
// } else {
// gdk.gui.showMessage(ErrorManager.get(data.error, [data.minLv]))
// }
}, this)
}
/**打开赏金板 */
bountyClick(event, param) {
gdk.panel.hide(PanelId.Chat)
gdk.panel.open(PanelId.BountyList)
}
/**月卡点击 */
monthCardClick(event, param) {
gdk.panel.hide(PanelId.Chat)
let index = parseInt(param)
gdk.panel.setArgs(PanelId.MonthCard, index)
gdk.panel.open(PanelId.MonthCard)
}
vipClick(event, param) {
gdk.panel.hide(PanelId.Chat)
JumpUtils.openRechargeView([2])
}
tqClick(event, param) {
gdk.panel.hide(PanelId.Chat)
JumpUtils.openRechargeView([0])
}
//打开爬塔副本
towerClick() {
if (!JumpUtils.ifSysOpen(705)) {
return;
}
gdk.panel.hide(PanelId.Chat)
gdk.panel.open(PanelId.TowerPanel)
}
dailyRechargeClick() {
if (!JumpUtils.ifSysOpen(2834)) {
return;
}
gdk.panel.hide(PanelId.Chat);
gdk.panel.open(PanelId.DailyFirstRecharge);
}
goToTQStore() {
gdk.panel.hide(PanelId.HelpTipsPanel);
JumpUtils.openRechargeView([0])
}
scoreSysClick() {
gdk.panel.open(PanelId.ScoreSytemView);
}
adventureClick() {
JumpUtils.openActivityMain([9])
}
shareHeroClick(event, param) {
let msg = | eInfoReq()
msg.shareId = param
NetManager.send(msg, (data: icmsg.ShareInfoRsp) => {
gdk.panel.open(PanelId.LookHeroView, (node: cc.Node) => {
let model = ModelManager.get(HeroModel)
model.heroImage = data.info
let comp = node.getComponent(LookHeroViewCtrl)
comp.updateHeroInfo()
})
})
}
shareHeroCommentClick(event, param) {
let ids = (param as string).split("@")
gdk.panel.setArgs(PanelId.SubHeroCommentPanel, ids[0], ids[1], ids[2])
gdk.panel.open(PanelId.SubHeroCommentPanel)
}
relicCallGuildATK(event, param) {
if (!JumpUtils.ifSysOpen(2861, true)) {
return;
}
gdk.panel.hide(PanelId.Chat);
let m = ModelManager.get(RelicModel);
m.jumpArgs = param;
gdk.panel.open(PanelId.RelicMainView);
}
relicGoToATK(event, param) {
if (!JumpUtils.ifSysOpen(2861, true)) {
return;
}
gdk.panel.hide(PanelId.Chat);
gdk.panel.hide(PanelId.RelicUnderAtkNoticeView);
let m = ModelManager.get(RelicModel);
m.jumpArgs = param;
gdk.panel.open(PanelId.RelicMainView);
}
joinCooperation(event, param) {
let guildId = parseInt(param)
let footHoldModel = ModelManager.get(FootHoldModel)
let roleModel = ModelManager.get(RoleModel)
if (roleModel.guildId == 0) {
let info: AskInfoType = {
sureCb: () => {
gdk.panel.hide(PanelId.FHCooperationMain)
gdk.panel.setArgs(PanelId.GuildJoin, guildId, false)
gdk.panel.open(PanelId.GuildJoin)
},
closeCb: () => {
gdk.panel.hide(PanelId.FHCooperationMain)
gdk.panel.open(PanelId.GuildList)
},
sureText: "加入该公会",
closeText: "公会列表",
descText: `加入公会后才可参与据点争夺战,推荐先加入公会`,
thisArg: this,
}
GlobalUtil.openAskPanel(info)
return
}
let msg = new icmsg.FootholdCoopApplyAskReq()
msg.guildId = guildId
NetManager.send(msg, (data: icmsg.FootholdCoopApplyAskRsp) => {
if (data.autoJoin) {
footHoldModel.coopGuildId = data.guildId
gdk.gui.showMessage("成功加入协战,请前往据点争夺战战场")
} else {
gdk.gui.showMessage("申请成功,请敬候佳音")
}
}, this)
}
replayBounty(event, param) {
let msg1 = new icmsg.BountyQueryReq()
msg1.missionId = parseInt(param)
NetManager.send(msg1, (data1: icmsg.BountyQueryRsp) => {
let msg2 = new icmsg.BountyFightReplyReq()
msg2.missionId = parseInt(param)
NetManager.send(msg2, (data2: icmsg.BountyFightReplyRsp) => {
gdk.panel.setArgs(PanelId.BountyItemReplay, data1.mission, data2)
gdk.panel.open(PanelId.BountyItemReplay)
})
})
}
goToguildPower(event) {
JumpUtils.openGuildPowerView()
}
| new icmsg.Shar | identifier_name |
goldenFile.go | init() {
// gfc.AddUpdateFlag()
// }
// ...
// gfc.Check(t, "my value test", t.Name(), val)
//
// Then to update the golden files you would invoke the test command as follows:
//
// go test -upd-gf
//
// Similarly with the KeepBadResultsFlag.
//
// Give the -v argument to go test to see what is being updated.
//
// An advantage of using this method (over using the
// testhelper.CheckAgainstGoldenFile function) is that this will show the
// name of the flag to use in order to update the files. You save the hassle
// of scanning the code to find out what you called the flag.
func (gfc GoldenFileCfg) Check(t *testing.T, id, gfName string, val []byte) bool {
t.Helper()
if gfc.UpdFlagName != "" && !gfc.updFlagAdded {
panic(fmt.Errorf(
"the name of the flag to update the golden files has been"+
" given (%q) but the flag has not been added."+
" You should call the AddUpdateFlag() method"+
" (typically in an init() function)",
gfc.UpdFlagName))
}
if gfc.KeepBadResultsFlagName != "" && !gfc.keepBadResultsFlagAdded {
panic(fmt.Errorf(
"the name of the flag to keep bad results has been"+
" given (%q) but the flag has not been added."+
" You should call the AddKeepBadResultsFlag() method"+
" (typically in an init() function)",
gfc.KeepBadResultsFlagName))
}
return gfc.checkFile(t, id, gfc.PathName(gfName), val)
}
// PathName will return the name of a golden file. It applies the directory
// names and any prefix or suffix to the supplied string to give a well-formed
// name using the appropriate filepath separators for the operating system. A
// suggested name to pass to this method might be the name of the current
// test as given by the Name() method on testing.T.
//
// Note that any supplied name is "cleaned" by removing any part prior to an
// embedded filepath.Separator.
func (gfc GoldenFileCfg) PathName(name string) string {
fNameParts := make([]string, 0, 3)
if gfc.Pfx != "" {
fNameParts = append(fNameParts, gfc.Pfx)
}
fNameParts = append(fNameParts, filepath.Base(name))
if gfc.Sfx != "" {
fNameParts = append(fNameParts, gfc.Sfx)
}
fName := strings.Join(fNameParts, ".")
pathParts := make([]string, 0, len(gfc.DirNames)+1)
pathParts = append(pathParts, gfc.DirNames...)
pathParts = append(pathParts, fName)
return filepath.Join(pathParts...)
}
// CheckAgainstGoldenFile confirms that the value given matches the contents
// of the golden file and returns true if it does, false otherwise. It will
// report any errors it finds including any problems reading from or writing
// to the golden file itself. If the updGF flag is set to true then the
// golden file will be updated with the supplied value. You can set this
// value through a command-line parameter to the test and then pass that to
// this function as follows
//
// var upd = flag.Bool("upd-gf", false, "update the golden files")
// gfc := testhelper.GoldenFileCfg{
// DirNames: []string{"testdata"},
// Pfx: "values",
// Sfx: "txt",
// }
// ...
// testhelper.CheckAgainstGoldenFile(t,
// "my value test",
// val,
// gfc.PathName(t.Name()),
// *upd)
//
// Then to update the golden files you would invoke the test command as follows
//
// go test -upd-gf
//
// Give the -v argument to go test to see what is being updated.
//
// Deprecated: use the Check method on the GoldenFileCfg
func CheckAgainstGoldenFile(t *testing.T, testID string, val []byte, gfName string, updGF bool) bool {
t.Helper()
return checkFile(t, testID, gfName, val, updGF)
}
// getExpVal reads the contents of the golden file. If the updGF flag is set
// then if will write the contents of the file before reading it. It returns
// the contents and true if all went well, nil and false otherwise. It will
// report any errors it finds including any problems reading from or writing
// to the golden file itself.
func getExpVal(t *testing.T, id, gfName string, val []byte, updGF bool) ([]byte, bool) {
t.Helper()
if updGF {
if !updateGoldenFile(t, gfName, val) {
return nil, false
}
}
expVal, err := os.ReadFile(gfName) // nolint: gosec
if err != nil {
t.Log(id)
t.Logf("\t: Problem with the golden file: %q", gfName)
t.Errorf("\t: Couldn't read the expected value. Error: %s", err)
return nil, false
}
return expVal, true
}
// checkFile confirms that the value given matches the contents of the golden
// file and returns true if it does, false otherwise. It will report any
// errors it finds including any problems reading from or writing to the
// golden file itself. If the updGF flag is set to true then the golden file
// will be updated with the supplied value.
func checkFile(t *testing.T, id, gfName string, val []byte, updGF bool) bool {
t.Helper()
expVal, ok := getExpVal(t, id, gfName, val, updGF)
if !ok {
t.Errorf("\t: Actual\n" + string(val))
return false
}
return actEqualsExp(t, id, gfName, val, expVal)
}
// checkFile confirms that the value given matches the contents of the golden
// file and returns true if it does, false otherwise. It will report any
// errors it finds including any problems reading from or writing to the
// golden file itself. If the updGF flag is set to true then the golden file
// will be updated with the supplied value.
func (gfc GoldenFileCfg) checkFile(t *testing.T, id, gfName string, val []byte) bool {
t.Helper()
expVal, ok := getExpVal(t, id, gfName, val, gfc.updFlag)
if !ok {
if gfc.UpdFlagName != "" {
t.Errorf("\t: To update the golden file with the new value"+
" pass %q to the go test command", "-"+gfc.UpdFlagName)
}
t.Errorf("\t: Actual\n" + string(val))
return false
}
if actEqualsExp(t, id, gfName, val, expVal) {
return true
}
if gfc.UpdFlagName != "" {
t.Errorf("\t: To update the golden file with the new value"+
" pass %q to the go test command", "-"+gfc.UpdFlagName)
}
if gfc.keepBadResultsFlag {
keepBadResults(t, gfName, val)
} else if gfc.KeepBadResultsFlagName != "" {
t.Errorf("\t: To keep the (bad) Actual results for later"+
" investigation pass %q to the go test command",
"-"+gfc.KeepBadResultsFlagName)
}
return false
}
// actEqualsExp compares the expected value against the actual and reports any
// difference. It will return true if they are equal and false otherwise
func actEqualsExp(t *testing.T, id, gfName string, actVal, expVal []byte) bool {
t.Helper()
if bytes.Equal(actVal, expVal) {
return true
}
t.Log(id)
t.Log("\t: Expected\n" + string(expVal))
t.Log("\t: Actual\n" + string(actVal))
t.Errorf("\t: The value given differs from the golden file value: %q",
gfName)
return false
}
// updateGoldenFile will attempt to update the golden file with the new
// content and return true if it succeeds or false otherwise. If there is an
// existing golden file it will try to preverve the contents so that they can
// be compared with the new file. It reports its progress; if the file hasn't
// changed it does nothing.
func updateGoldenFile(t *testing.T, gfName string, val []byte) bool | {
t.Helper()
origVal, err := os.ReadFile(gfName) // nolint: gosec
if err == nil {
if bytes.Equal(val, origVal) {
return true
}
origFileName := gfName + ".orig"
writeFile(t, origFileName, "original contents", origVal)
} else if !os.IsNotExist(err) {
t.Log("Couldn't preserve the original contents")
t.Logf("\t: Couldn't read the golden file: %q", gfName)
t.Error("\t: ", err)
}
if !writeFile(t, gfName, "golden", val) {
return false
} | identifier_body | |
goldenFile.go | to have a long
// string in the body of a test.
//
// DirNames is a slice of strings holding the parts of the directory path
// to the file
//
// Pfx is an optional prefix - leave it as an empty string to exclude it
//
// Sfx is an optional suffix - as for the prefix
//
// UpdFlagName is the name of a flag that will set a bool used to decide
// whether or not to update the golden file. If it is not set then it is
// ignored. If you have set this then you should also call the AddUpdateFlag
// method (typically in an init() function) and then use the Check method
// to compare with the file
//
// KeepBadResultsFlagName is the name of a flag that will set a bool used
// to decide whether or not to keep bad results. If it is not set then it
// is ignored. If you have set this then you should also call the
// AddKeepBadResultsFlag method (typically in an init() function) and then
// use the Check method to compare with the file
type GoldenFileCfg struct {
DirNames []string
Pfx string
Sfx string
UpdFlagName string
updFlag bool
updFlagAdded bool
KeepBadResultsFlagName string
keepBadResultsFlag bool
keepBadResultsFlagAdded bool
}
// Check confirms that the value given matches the contents of the golden
// file and returns true if it does, false otherwise. It will report any
// errors it finds including any problems reading from or writing to the
// golden file itself.
//
// If UpdFlagName is not empty and the AddUpdateFlag method
// has been called (typically in an init() function) then the corresponding
// flag value will be looked up and if the flag is set to true the golden
// file will be updated with the supplied value. You can set this value
// through a command-line parameter to the test and then pass that to this
// function as follows:
//
// gfc := testhelper.GoldenFileCfg{
// DirNames: []string{"testdata"},
// Pfx: "values",
// Sfx: "txt",
// UpdFlagName: "upd-gf",
// }
//
// func init() {
// gfc.AddUpdateFlag()
// }
// ...
// gfc.Check(t, "my value test", t.Name(), val)
//
// Then to update the golden files you would invoke the test command as follows:
//
// go test -upd-gf
//
// Similarly with the KeepBadResultsFlag.
//
// Give the -v argument to go test to see what is being updated.
//
// An advantage of using this method (over using the
// testhelper.CheckAgainstGoldenFile function) is that this will show the
// name of the flag to use in order to update the files. You save the hassle
// of scanning the code to find out what you called the flag.
func (gfc GoldenFileCfg) Check(t *testing.T, id, gfName string, val []byte) bool {
t.Helper()
if gfc.UpdFlagName != "" && !gfc.updFlagAdded {
panic(fmt.Errorf(
"the name of the flag to update the golden files has been"+
" given (%q) but the flag has not been added."+
" You should call the AddUpdateFlag() method"+
" (typically in an init() function)",
gfc.UpdFlagName))
}
if gfc.KeepBadResultsFlagName != "" && !gfc.keepBadResultsFlagAdded {
panic(fmt.Errorf(
"the name of the flag to keep bad results has been"+
" given (%q) but the flag has not been added."+
" You should call the AddKeepBadResultsFlag() method"+
" (typically in an init() function)",
gfc.KeepBadResultsFlagName))
}
return gfc.checkFile(t, id, gfc.PathName(gfName), val)
}
// PathName will return the name of a golden file. It applies the directory
// names and any prefix or suffix to the supplied string to give a well-formed
// name using the appropriate filepath separators for the operating system. A
// suggested name to pass to this method might be the name of the current
// test as given by the Name() method on testing.T.
//
// Note that any supplied name is "cleaned" by removing any part prior to an
// embedded filepath.Separator.
func (gfc GoldenFileCfg) PathName(name string) string {
fNameParts := make([]string, 0, 3)
if gfc.Pfx != "" {
fNameParts = append(fNameParts, gfc.Pfx)
}
fNameParts = append(fNameParts, filepath.Base(name))
if gfc.Sfx != "" {
fNameParts = append(fNameParts, gfc.Sfx)
}
fName := strings.Join(fNameParts, ".")
pathParts := make([]string, 0, len(gfc.DirNames)+1)
pathParts = append(pathParts, gfc.DirNames...)
pathParts = append(pathParts, fName)
return filepath.Join(pathParts...)
}
// CheckAgainstGoldenFile confirms that the value given matches the contents
// of the golden file and returns true if it does, false otherwise. It will
// report any errors it finds including any problems reading from or writing
// to the golden file itself. If the updGF flag is set to true then the
// golden file will be updated with the supplied value. You can set this
// value through a command-line parameter to the test and then pass that to
// this function as follows
//
// var upd = flag.Bool("upd-gf", false, "update the golden files")
// gfc := testhelper.GoldenFileCfg{
// DirNames: []string{"testdata"},
// Pfx: "values",
// Sfx: "txt",
// }
// ...
// testhelper.CheckAgainstGoldenFile(t,
// "my value test",
// val,
// gfc.PathName(t.Name()),
// *upd)
//
// Then to update the golden files you would invoke the test command as follows
//
// go test -upd-gf
//
// Give the -v argument to go test to see what is being updated.
//
// Deprecated: use the Check method on the GoldenFileCfg
func CheckAgainstGoldenFile(t *testing.T, testID string, val []byte, gfName string, updGF bool) bool {
t.Helper()
return checkFile(t, testID, gfName, val, updGF)
}
// getExpVal reads the contents of the golden file. If the updGF flag is set
// then if will write the contents of the file before reading it. It returns
// the contents and true if all went well, nil and false otherwise. It will
// report any errors it finds including any problems reading from or writing
// to the golden file itself.
func getExpVal(t *testing.T, id, gfName string, val []byte, updGF bool) ([]byte, bool) {
t.Helper()
if updGF {
if !updateGoldenFile(t, gfName, val) {
return nil, false
}
}
expVal, err := os.ReadFile(gfName) // nolint: gosec
if err != nil {
t.Log(id)
t.Logf("\t: Problem with the golden file: %q", gfName)
t.Errorf("\t: Couldn't read the expected value. Error: %s", err)
return nil, false
}
return expVal, true
}
// checkFile confirms that the value given matches the contents of the golden
// file and returns true if it does, false otherwise. It will report any
// errors it finds including any problems reading from or writing to the
// golden file itself. If the updGF flag is set to true then the golden file
// will be updated with the supplied value.
func checkFile(t *testing.T, id, gfName string, val []byte, updGF bool) bool {
t.Helper()
expVal, ok := getExpVal(t, id, gfName, val, updGF)
if !ok {
t.Errorf("\t: Actual\n" + string(val))
return false
}
return actEqualsExp(t, id, gfName, val, expVal)
}
// checkFile confirms that the value given matches the contents of the golden
// file and returns true if it does, false otherwise. It will report any
// errors it finds including any problems reading from or writing to the
// golden file itself. If the updGF flag is set to true then the golden file
// will be updated with the supplied value.
func (gfc GoldenFileCfg) checkFile(t *testing.T, id, gfName string, val []byte) bool {
t.Helper()
expVal, ok := getExpVal(t, id, gfName, val, gfc.updFlag)
if !ok {
if gfc.UpdFlagName != "" {
t.Errorf("\t: To update the golden file with the new value"+
" pass %q to the go test command", "-"+gfc.UpdFlagName)
}
t.Errorf("\t: Actual\n" + string(val))
return false
}
| if actEqualsExp(t, id, gfName, val, expVal) {
return true | random_line_split | |
goldenFile.go | report the flag name to use if any is
// available.
func (gfc *GoldenFileCfg) AddKeepBadResultsFlag() {
if gfc.keepBadResultsFlagAdded {
return
}
gfGlob := gfc.PathName("*")
if gfc.KeepBadResultsFlagName == "" {
panic(errors.New(
"AddKeepBadResultsFlag has been called for files in " + gfGlob +
" but the GoldenFileCfg has no flag name set"))
}
flag.BoolVar(&gfc.keepBadResultsFlag, gfc.KeepBadResultsFlagName, false,
"set this flag to keep bad results in"+gfGlob)
gfc.keepBadResultsFlagAdded = true
}
// GoldenFileCfg holds common configuration details for a collection of
// golden files. It helps with consistent naming of golden files without
// having to repeat common parts throughout the code.
//
// A golden file is a file that holds expected output (typically lengthy)
// that can be compared as part of a test. It avoids the need to have a long
// string in the body of a test.
//
// DirNames is a slice of strings holding the parts of the directory path
// to the file
//
// Pfx is an optional prefix - leave it as an empty string to exclude it
//
// Sfx is an optional suffix - as for the prefix
//
// UpdFlagName is the name of a flag that will set a bool used to decide
// whether or not to update the golden file. If it is not set then it is
// ignored. If you have set this then you should also call the AddUpdateFlag
// method (typically in an init() function) and then use the Check method
// to compare with the file
//
// KeepBadResultsFlagName is the name of a flag that will set a bool used
// to decide whether or not to keep bad results. If it is not set then it
// is ignored. If you have set this then you should also call the
// AddKeepBadResultsFlag method (typically in an init() function) and then
// use the Check method to compare with the file
type GoldenFileCfg struct {
DirNames []string
Pfx string
Sfx string
UpdFlagName string
updFlag bool
updFlagAdded bool
KeepBadResultsFlagName string
keepBadResultsFlag bool
keepBadResultsFlagAdded bool
}
// Check confirms that the value given matches the contents of the golden
// file and returns true if it does, false otherwise. It will report any
// errors it finds including any problems reading from or writing to the
// golden file itself.
//
// If UpdFlagName is not empty and the AddUpdateFlag method
// has been called (typically in an init() function) then the corresponding
// flag value will be looked up and if the flag is set to true the golden
// file will be updated with the supplied value. You can set this value
// through a command-line parameter to the test and then pass that to this
// function as follows:
//
// gfc := testhelper.GoldenFileCfg{
// DirNames: []string{"testdata"},
// Pfx: "values",
// Sfx: "txt",
// UpdFlagName: "upd-gf",
// }
//
// func init() {
// gfc.AddUpdateFlag()
// }
// ...
// gfc.Check(t, "my value test", t.Name(), val)
//
// Then to update the golden files you would invoke the test command as follows:
//
// go test -upd-gf
//
// Similarly with the KeepBadResultsFlag.
//
// Give the -v argument to go test to see what is being updated.
//
// An advantage of using this method (over using the
// testhelper.CheckAgainstGoldenFile function) is that this will show the
// name of the flag to use in order to update the files. You save the hassle
// of scanning the code to find out what you called the flag.
func (gfc GoldenFileCfg) Check(t *testing.T, id, gfName string, val []byte) bool {
t.Helper()
if gfc.UpdFlagName != "" && !gfc.updFlagAdded {
panic(fmt.Errorf(
"the name of the flag to update the golden files has been"+
" given (%q) but the flag has not been added."+
" You should call the AddUpdateFlag() method"+
" (typically in an init() function)",
gfc.UpdFlagName))
}
if gfc.KeepBadResultsFlagName != "" && !gfc.keepBadResultsFlagAdded {
panic(fmt.Errorf(
"the name of the flag to keep bad results has been"+
" given (%q) but the flag has not been added."+
" You should call the AddKeepBadResultsFlag() method"+
" (typically in an init() function)",
gfc.KeepBadResultsFlagName))
}
return gfc.checkFile(t, id, gfc.PathName(gfName), val)
}
// PathName will return the name of a golden file. It applies the directory
// names and any prefix or suffix to the supplied string to give a well-formed
// name using the appropriate filepath separators for the operating system. A
// suggested name to pass to this method might be the name of the current
// test as given by the Name() method on testing.T.
//
// Note that any supplied name is "cleaned" by removing any part prior to an
// embedded filepath.Separator.
func (gfc GoldenFileCfg) PathName(name string) string {
fNameParts := make([]string, 0, 3)
if gfc.Pfx != "" {
fNameParts = append(fNameParts, gfc.Pfx)
}
fNameParts = append(fNameParts, filepath.Base(name))
if gfc.Sfx != "" {
fNameParts = append(fNameParts, gfc.Sfx)
}
fName := strings.Join(fNameParts, ".")
pathParts := make([]string, 0, len(gfc.DirNames)+1)
pathParts = append(pathParts, gfc.DirNames...)
pathParts = append(pathParts, fName)
return filepath.Join(pathParts...)
}
// CheckAgainstGoldenFile confirms that the value given matches the contents
// of the golden file and returns true if it does, false otherwise. It will
// report any errors it finds including any problems reading from or writing
// to the golden file itself. If the updGF flag is set to true then the
// golden file will be updated with the supplied value. You can set this
// value through a command-line parameter to the test and then pass that to
// this function as follows
//
// var upd = flag.Bool("upd-gf", false, "update the golden files")
// gfc := testhelper.GoldenFileCfg{
// DirNames: []string{"testdata"},
// Pfx: "values",
// Sfx: "txt",
// }
// ...
// testhelper.CheckAgainstGoldenFile(t,
// "my value test",
// val,
// gfc.PathName(t.Name()),
// *upd)
//
// Then to update the golden files you would invoke the test command as follows
//
// go test -upd-gf
//
// Give the -v argument to go test to see what is being updated.
//
// Deprecated: use the Check method on the GoldenFileCfg
func | (t *testing.T, testID string, val []byte, gfName string, updGF bool) bool {
t.Helper()
return checkFile(t, testID, gfName, val, updGF)
}
// getExpVal reads the contents of the golden file. If the updGF flag is set
// then if will write the contents of the file before reading it. It returns
// the contents and true if all went well, nil and false otherwise. It will
// report any errors it finds including any problems reading from or writing
// to the golden file itself.
func getExpVal(t *testing.T, id, gfName string, val []byte, updGF bool) ([]byte, bool) {
t.Helper()
if updGF {
if !updateGoldenFile(t, gfName, val) {
return nil, false
}
}
expVal, err := os.ReadFile(gfName) // nolint: gosec
if err != nil {
t.Log(id)
t.Logf("\t: Problem with the golden file: %q", gfName)
t.Errorf("\t: Couldn't read the expected value. Error: %s", err)
return nil, false
}
return expVal, true
}
// checkFile confirms that the value given matches the contents of the golden
// file and returns true if it does, false otherwise. It will report any
// errors it finds including any problems reading from or writing to the
// golden file itself. If the updGF flag is set to true then the golden file
// will be updated with the supplied value.
func checkFile(t *testing.T, id, gfName string, val []byte, updGF bool) bool {
t.Helper()
expVal, ok := getExpVal(t, id, gfName, val, updGF)
if !ok {
t.Errorf("\t: Actual\n" + string(val))
return false
}
return actEqualsExp(t, id, gfName, val, | CheckAgainstGoldenFile | identifier_name |
goldenFile.go | report the flag name to use if any is
// available.
func (gfc *GoldenFileCfg) AddKeepBadResultsFlag() {
if gfc.keepBadResultsFlagAdded {
return
}
gfGlob := gfc.PathName("*")
if gfc.KeepBadResultsFlagName == "" {
panic(errors.New(
"AddKeepBadResultsFlag has been called for files in " + gfGlob +
" but the GoldenFileCfg has no flag name set"))
}
flag.BoolVar(&gfc.keepBadResultsFlag, gfc.KeepBadResultsFlagName, false,
"set this flag to keep bad results in"+gfGlob)
gfc.keepBadResultsFlagAdded = true
}
// GoldenFileCfg holds common configuration details for a collection of
// golden files. It helps with consistent naming of golden files without
// having to repeat common parts throughout the code.
//
// A golden file is a file that holds expected output (typically lengthy)
// that can be compared as part of a test. It avoids the need to have a long
// string in the body of a test.
//
// DirNames is a slice of strings holding the parts of the directory path
// to the file
//
// Pfx is an optional prefix - leave it as an empty string to exclude it
//
// Sfx is an optional suffix - as for the prefix
//
// UpdFlagName is the name of a flag that will set a bool used to decide
// whether or not to update the golden file. If it is not set then it is
// ignored. If you have set this then you should also call the AddUpdateFlag
// method (typically in an init() function) and then use the Check method
// to compare with the file
//
// KeepBadResultsFlagName is the name of a flag that will set a bool used
// to decide whether or not to keep bad results. If it is not set then it
// is ignored. If you have set this then you should also call the
// AddKeepBadResultsFlag method (typically in an init() function) and then
// use the Check method to compare with the file
type GoldenFileCfg struct {
DirNames []string
Pfx string
Sfx string
UpdFlagName string
updFlag bool
updFlagAdded bool
KeepBadResultsFlagName string
keepBadResultsFlag bool
keepBadResultsFlagAdded bool
}
// Check confirms that the value given matches the contents of the golden
// file and returns true if it does, false otherwise. It will report any
// errors it finds including any problems reading from or writing to the
// golden file itself.
//
// If UpdFlagName is not empty and the AddUpdateFlag method
// has been called (typically in an init() function) then the corresponding
// flag value will be looked up and if the flag is set to true the golden
// file will be updated with the supplied value. You can set this value
// through a command-line parameter to the test and then pass that to this
// function as follows:
//
// gfc := testhelper.GoldenFileCfg{
// DirNames: []string{"testdata"},
// Pfx: "values",
// Sfx: "txt",
// UpdFlagName: "upd-gf",
// }
//
// func init() {
// gfc.AddUpdateFlag()
// }
// ...
// gfc.Check(t, "my value test", t.Name(), val)
//
// Then to update the golden files you would invoke the test command as follows:
//
// go test -upd-gf
//
// Similarly with the KeepBadResultsFlag.
//
// Give the -v argument to go test to see what is being updated.
//
// An advantage of using this method (over using the
// testhelper.CheckAgainstGoldenFile function) is that this will show the
// name of the flag to use in order to update the files. You save the hassle
// of scanning the code to find out what you called the flag.
func (gfc GoldenFileCfg) Check(t *testing.T, id, gfName string, val []byte) bool {
t.Helper()
if gfc.UpdFlagName != "" && !gfc.updFlagAdded {
panic(fmt.Errorf(
"the name of the flag to update the golden files has been"+
" given (%q) but the flag has not been added."+
" You should call the AddUpdateFlag() method"+
" (typically in an init() function)",
gfc.UpdFlagName))
}
if gfc.KeepBadResultsFlagName != "" && !gfc.keepBadResultsFlagAdded {
panic(fmt.Errorf(
"the name of the flag to keep bad results has been"+
" given (%q) but the flag has not been added."+
" You should call the AddKeepBadResultsFlag() method"+
" (typically in an init() function)",
gfc.KeepBadResultsFlagName))
}
return gfc.checkFile(t, id, gfc.PathName(gfName), val)
}
// PathName will return the name of a golden file. It applies the directory
// names and any prefix or suffix to the supplied string to give a well-formed
// name using the appropriate filepath separators for the operating system. A
// suggested name to pass to this method might be the name of the current
// test as given by the Name() method on testing.T.
//
// Note that any supplied name is "cleaned" by removing any part prior to an
// embedded filepath.Separator.
func (gfc GoldenFileCfg) PathName(name string) string {
fNameParts := make([]string, 0, 3)
if gfc.Pfx != "" {
fNameParts = append(fNameParts, gfc.Pfx)
}
fNameParts = append(fNameParts, filepath.Base(name))
if gfc.Sfx != "" {
fNameParts = append(fNameParts, gfc.Sfx)
}
fName := strings.Join(fNameParts, ".")
pathParts := make([]string, 0, len(gfc.DirNames)+1)
pathParts = append(pathParts, gfc.DirNames...)
pathParts = append(pathParts, fName)
return filepath.Join(pathParts...)
}
// CheckAgainstGoldenFile confirms that the value given matches the contents
// of the golden file and returns true if it does, false otherwise. It will
// report any errors it finds including any problems reading from or writing
// to the golden file itself. If the updGF flag is set to true then the
// golden file will be updated with the supplied value. You can set this
// value through a command-line parameter to the test and then pass that to
// this function as follows
//
// var upd = flag.Bool("upd-gf", false, "update the golden files")
// gfc := testhelper.GoldenFileCfg{
// DirNames: []string{"testdata"},
// Pfx: "values",
// Sfx: "txt",
// }
// ...
// testhelper.CheckAgainstGoldenFile(t,
// "my value test",
// val,
// gfc.PathName(t.Name()),
// *upd)
//
// Then to update the golden files you would invoke the test command as follows
//
// go test -upd-gf
//
// Give the -v argument to go test to see what is being updated.
//
// Deprecated: use the Check method on the GoldenFileCfg
func CheckAgainstGoldenFile(t *testing.T, testID string, val []byte, gfName string, updGF bool) bool {
t.Helper()
return checkFile(t, testID, gfName, val, updGF)
}
// getExpVal reads the contents of the golden file. If the updGF flag is set
// then if will write the contents of the file before reading it. It returns
// the contents and true if all went well, nil and false otherwise. It will
// report any errors it finds including any problems reading from or writing
// to the golden file itself.
func getExpVal(t *testing.T, id, gfName string, val []byte, updGF bool) ([]byte, bool) {
t.Helper()
if updGF {
if !updateGoldenFile(t, gfName, val) {
return nil, false
}
}
expVal, err := os.ReadFile(gfName) // nolint: gosec
if err != nil |
return expVal, true
}
// checkFile confirms that the value given matches the contents of the golden
// file and returns true if it does, false otherwise. It will report any
// errors it finds including any problems reading from or writing to the
// golden file itself. If the updGF flag is set to true then the golden file
// will be updated with the supplied value.
func checkFile(t *testing.T, id, gfName string, val []byte, updGF bool) bool {
t.Helper()
expVal, ok := getExpVal(t, id, gfName, val, updGF)
if !ok {
t.Errorf("\t: Actual\n" + string(val))
return false
}
return actEqualsExp(t, id, gfName, val | {
t.Log(id)
t.Logf("\t: Problem with the golden file: %q", gfName)
t.Errorf("\t: Couldn't read the expected value. Error: %s", err)
return nil, false
} | conditional_block |
record_io.py | .FLAGS
try:
FLAGS.__delattr__('output_path')
FLAGS.__delattr__('f')
except:
pass
tf.app.flags.DEFINE_string('f', '', 'kernel')
flags.DEFINE_string('output_path', path, '')
FLAGS = flags.FLAGS
print("New record file : {}".format(flags.FLAGS.output_path))
return tf.app.flags.FLAGS.output_path
def create_record_file(image_path, output_path, examples_dict, class_to_index):
output_path = _create_record_file_path(output_path)
writer = tf.python_io.TFRecordWriter(output_path)
for key, val in examples_dict.items():
example = val
example["filename"] = key
tf_example = create_tf_example(example, image_path, class_to_index)
writer.write(tf_example.SerializeToString())
writer.close()
print("Wrote {} examples".format(len(examples_dict)))
def create_tf_example(example, path, class_mapping):
"""
Create a single Tensorflow Example object to be used in creating record
Parameters
----------
example : dict
A single object; the dictionary should contains the keys "filename" referring to the jpg containing
the object, and "box_coords" which gives the location of the object, and "class" the name of the object
path : str
The path to the image files.
Returns
-------
The tf Example object
"""
path = (path + os.sep).encode('ascii')
filename = example['filename'].encode('ascii')
image_format = b'jpg'
image = plt.imread(path +filename, "jpg")
height, width = image.shape[:2]
# Encode the jpg to byte form
with tf.gfile.GFile(path+filename, 'rb') as fid:
encoded_jpg = bytes(fid.read())
# normalize the box coordinates
xmins = [box[0]/width for box in example['box_coords']]
ymins = [box[1]/height for box in example['box_coords']]
xmaxs = [box[2]/width for box in example['box_coords']]
ymaxs = [box[3]/height for box in example['box_coords']]
classes_text = [cls.encode('ascii') for cls in example["class"]]
classes = [class_mapping[cls] for cls in example["class"]]
# create the example
tf_example = tf.train.Example(features=tf.train.Features(feature={
'image/height' : dataset_util.int64_feature(height),
'image/width' : dataset_util.int64_feature(width),
'image/filename' : dataset_util.bytes_feature(filename),
'image/source_id' : dataset_util.bytes_feature(filename),
'image/encoded' : dataset_util.bytes_feature(encoded_jpg),
'image/format' : dataset_util.bytes_feature(image_format),
'image/object/bbox/xmin' : dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax' : dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin' : dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax' : dataset_util.float_list_feature(ymaxs),
'image/object/class/text' : dataset_util.bytes_list_feature(classes_text),
'image/object/class/label' : dataset_util.int64_list_feature(classes),
}))
return tf_example
# ---------------------------------------------------------------------------------------------------------------------
# Diagnostics
# ---------------------------------------------------------------------------------------------------------------------
def peek_in_record(path, plot=True):
| height = result.context.feature['image/height'].int64_list.value[0]
xmins = np.array(result.context.feature['image/object/bbox/xmin'].float_list.value)
ymins = np.array(result.context.feature['image/object/bbox/ymin'].float_list.value)
xmaxs = np.array(result.context.feature['image/object/bbox/xmax'].float_list.value)
ymaxs = np.array(result.context.feature['image/object/bbox/ymax'].float_list.value)
xmins *= width
xmaxs *= width
ymins *= height
ymaxs *= height
img_shapes += [[height, width]]
for xmin, ymin, xmax, ymax, name in zip(xmins, ymins, xmaxs, ymaxs, names):
obj_shapes += [[ymax-ymin, xmax-xmin]]
if (ymin < 0) or (ymax > height) or (xmin < 0) or (xmax > width):
print("WARNING : Object {} outisde of image region".format(name))
total_objects = sum(objects.values())
obj_shapes = np.array(obj_shapes)
img_shapes = np.array(img_shapes)
print("="*100)
print("Total Images : {0}".format(total_images))
print("Total Objects : {0}".format(total_objects))
print("Ave. Objects per Image : {0}".format(total_objects/total_images))
print("Classes : {0}".format(len(objects)))
print("="*100)
if plot:
fig, axes = plt.subplots(2,2,figsize=(12,12))
ax = axes[1,0]
ax.scatter(obj_shapes[:,1], obj_shapes[:,0])
ax.set_ylim([0.9*obj_shapes[:,0].min(), 1.1*obj_shapes[:,0].max()])
ax.set_xlim([0.9*obj_shapes[:,1].min(), 1.1*obj_shapes[:,1].max()])
ax.set_xlabel("Width", fontsize=18)
ax.set_ylabel("Height", fontsize=18)
ax.set_title("Object Shapes", fontsize=18)
ax = axes[1,1]
ax.scatter(img_shapes[:,1], img_shapes[:,0])
ax.set_ylim([0.9*img_shapes[:,0].min(), 1.1*img_shapes[:,0].max()])
ax.set_xlim([0.9*img_shapes[:,1].min(), 1.1*img_shapes[:,1].max()])
ax.set_xlabel("Width", fontsize=18)
ax.set_ylabel("Height", fontsize=18)
ax.set_title("Image Shapes", fontsize=18)
ax = axes[0,1]
ax.hist(obj_per_img, bins = np.arange(0.5,max(obj_per_img)+1.5,1),density=1.0)
ax.set_xlabel("Objects per image", fontsize=18)
ax = axes[0,0]
labels = [x.decode() for x in objects.keys()]
sizes = list(objects.values())
ax.pie(sizes, labels=labels, autopct='%1.1f%%',
shadow=False, startangle=90)
ax.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
plt.show()
# ---------------------------------------------------------------------------------------------------------------------
# Reading Record Files
# ---------------------------------------------------------------------------------------------------------------------
def load_tf_record_file(path):
for example in tf.python_io.tf_record_iterator(path):
yield tf.train.SequenceExample.FromString(example)
def read_record_file(path, index_to_class, return_dict=True, plot=True, **plot_kwargs):
record = dict()
for result in load_tf_record_file(path):
fname = str(result.context.feature['image/filename'].bytes_list.value[0], "utf-8")
width = result.context.feature['image/width'].int64_list.value[0]
height = result.context.feature['image/height'].int64_list.value[0]
data = result.context.feature['image/encoded'].bytes_list.value[0]
img = Image.open(io.BytesIO(data), mode="r")
img = np.asarray(img)
xmins = np.array(result.context.feature['image/object/bbox/xmin'].float_list.value)
ymins = np.array(result.context.feature['image/object/bbox/ymin'].float_list.value)
xmaxs = np.array(result.context.feature['image/object/bbox/xmax'].float_list.value)
ymaxs = np.array(result.context.feature['image/object/bbox/ymax'].float_list.value)
xmins *= width
xmaxs *= width
ymins *= height
ymaxs *= height
labels = np.array(result.context.feature['image/object/class/label'].int64_list.value, dtype=int)
if return_dict:
record[fname] = dict()
record[fname]["width"] = width
record[fname]["height"] = height
record[fname]["image"] = img
record[fname]["xmins"] = xmins
record[fname]["xmaxs"] = xmaxs
record[fname]["ymins"] = ymins
record[fname]["ymaxs"] = ymaxs
if plot:
fig, ax = plt.subplots(1,1,figsize=plot_kwargs.get | objects = dict()
obj_per_img = []
obj_shapes = []
img_shapes = []
total_images = 0
for result in load_tf_record_file(path):
total_images += 1
names = result.context.feature['image/object/class/text'].bytes_list.value
for name in names:
if name not in objects:
objects[name] = 1
else:
objects[name] += 1
obj_per_img += [len(names)]
width = result.context.feature['image/width'].int64_list.value[0] | identifier_body |
record_io.py | .FLAGS
try:
FLAGS.__delattr__('output_path')
FLAGS.__delattr__('f')
except:
pass
tf.app.flags.DEFINE_string('f', '', 'kernel')
flags.DEFINE_string('output_path', path, '')
FLAGS = flags.FLAGS
print("New record file : {}".format(flags.FLAGS.output_path))
return tf.app.flags.FLAGS.output_path
def create_record_file(image_path, output_path, examples_dict, class_to_index):
output_path = _create_record_file_path(output_path)
writer = tf.python_io.TFRecordWriter(output_path)
for key, val in examples_dict.items():
example = val
example["filename"] = key
tf_example = create_tf_example(example, image_path, class_to_index)
writer.write(tf_example.SerializeToString())
writer.close()
print("Wrote {} examples".format(len(examples_dict)))
def create_tf_example(example, path, class_mapping):
"""
Create a single Tensorflow Example object to be used in creating record
Parameters
----------
example : dict
A single object; the dictionary should contains the keys "filename" referring to the jpg containing
the object, and "box_coords" which gives the location of the object, and "class" the name of the object
path : str
The path to the image files.
Returns
-------
The tf Example object
"""
path = (path + os.sep).encode('ascii')
filename = example['filename'].encode('ascii')
image_format = b'jpg'
image = plt.imread(path +filename, "jpg")
height, width = image.shape[:2]
# Encode the jpg to byte form
with tf.gfile.GFile(path+filename, 'rb') as fid:
encoded_jpg = bytes(fid.read())
# normalize the box coordinates
xmins = [box[0]/width for box in example['box_coords']]
ymins = [box[1]/height for box in example['box_coords']]
xmaxs = [box[2]/width for box in example['box_coords']]
ymaxs = [box[3]/height for box in example['box_coords']]
classes_text = [cls.encode('ascii') for cls in example["class"]]
classes = [class_mapping[cls] for cls in example["class"]]
# create the example
tf_example = tf.train.Example(features=tf.train.Features(feature={
'image/height' : dataset_util.int64_feature(height),
'image/width' : dataset_util.int64_feature(width),
'image/filename' : dataset_util.bytes_feature(filename),
'image/source_id' : dataset_util.bytes_feature(filename),
'image/encoded' : dataset_util.bytes_feature(encoded_jpg),
'image/format' : dataset_util.bytes_feature(image_format),
'image/object/bbox/xmin' : dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax' : dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin' : dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax' : dataset_util.float_list_feature(ymaxs),
'image/object/class/text' : dataset_util.bytes_list_feature(classes_text),
'image/object/class/label' : dataset_util.int64_list_feature(classes),
}))
return tf_example
# ---------------------------------------------------------------------------------------------------------------------
# Diagnostics
# ---------------------------------------------------------------------------------------------------------------------
def peek_in_record(path, plot=True):
objects = dict()
obj_per_img = []
obj_shapes = []
img_shapes = []
total_images = 0
for result in load_tf_record_file(path):
total_images += 1
names = result.context.feature['image/object/class/text'].bytes_list.value
for name in names:
if name not in objects:
objects[name] = 1
else:
objects[name] += 1
obj_per_img += [len(names)]
width = result.context.feature['image/width'].int64_list.value[0]
height = result.context.feature['image/height'].int64_list.value[0]
xmins = np.array(result.context.feature['image/object/bbox/xmin'].float_list.value)
ymins = np.array(result.context.feature['image/object/bbox/ymin'].float_list.value)
xmaxs = np.array(result.context.feature['image/object/bbox/xmax'].float_list.value)
ymaxs = np.array(result.context.feature['image/object/bbox/ymax'].float_list.value)
xmins *= width
xmaxs *= width
ymins *= height
ymaxs *= height
img_shapes += [[height, width]]
for xmin, ymin, xmax, ymax, name in zip(xmins, ymins, xmaxs, ymaxs, names):
obj_shapes += [[ymax-ymin, xmax-xmin]]
if (ymin < 0) or (ymax > height) or (xmin < 0) or (xmax > width):
print("WARNING : Object {} outisde of image region".format(name))
total_objects = sum(objects.values())
obj_shapes = np.array(obj_shapes)
img_shapes = np.array(img_shapes)
print("="*100)
print("Total Images : {0}".format(total_images))
print("Total Objects : {0}".format(total_objects))
print("Ave. Objects per Image : {0}".format(total_objects/total_images))
print("Classes : {0}".format(len(objects)))
print("="*100)
if plot:
fig, axes = plt.subplots(2,2,figsize=(12,12))
ax = axes[1,0]
ax.scatter(obj_shapes[:,1], obj_shapes[:,0])
ax.set_ylim([0.9*obj_shapes[:,0].min(), 1.1*obj_shapes[:,0].max()])
ax.set_xlim([0.9*obj_shapes[:,1].min(), 1.1*obj_shapes[:,1].max()])
ax.set_xlabel("Width", fontsize=18)
ax.set_ylabel("Height", fontsize=18)
ax.set_title("Object Shapes", fontsize=18)
ax = axes[1,1]
ax.scatter(img_shapes[:,1], img_shapes[:,0])
ax.set_ylim([0.9*img_shapes[:,0].min(), 1.1*img_shapes[:,0].max()])
ax.set_xlim([0.9*img_shapes[:,1].min(), 1.1*img_shapes[:,1].max()])
ax.set_xlabel("Width", fontsize=18)
ax.set_ylabel("Height", fontsize=18)
ax.set_title("Image Shapes", fontsize=18)
ax = axes[0,1]
ax.hist(obj_per_img, bins = np.arange(0.5,max(obj_per_img)+1.5,1),density=1.0)
ax.set_xlabel("Objects per image", fontsize=18)
ax = axes[0,0]
labels = [x.decode() for x in objects.keys()]
sizes = list(objects.values())
ax.pie(sizes, labels=labels, autopct='%1.1f%%',
shadow=False, startangle=90)
ax.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
plt.show()
# ---------------------------------------------------------------------------------------------------------------------
# Reading Record Files
# ---------------------------------------------------------------------------------------------------------------------
def load_tf_record_file(path):
for example in tf.python_io.tf_record_iterator(path):
yield tf.train.SequenceExample.FromString(example)
def | (path, index_to_class, return_dict=True, plot=True, **plot_kwargs):
record = dict()
for result in load_tf_record_file(path):
fname = str(result.context.feature['image/filename'].bytes_list.value[0], "utf-8")
width = result.context.feature['image/width'].int64_list.value[0]
height = result.context.feature['image/height'].int64_list.value[0]
data = result.context.feature['image/encoded'].bytes_list.value[0]
img = Image.open(io.BytesIO(data), mode="r")
img = np.asarray(img)
xmins = np.array(result.context.feature['image/object/bbox/xmin'].float_list.value)
ymins = np.array(result.context.feature['image/object/bbox/ymin'].float_list.value)
xmaxs = np.array(result.context.feature['image/object/bbox/xmax'].float_list.value)
ymaxs = np.array(result.context.feature['image/object/bbox/ymax'].float_list.value)
xmins *= width
xmaxs *= width
ymins *= height
ymaxs *= height
labels = np.array(result.context.feature['image/object/class/label'].int64_list.value, dtype=int)
if return_dict:
record[fname] = dict()
record[fname]["width"] = width
record[fname]["height"] = height
record[fname]["image"] = img
record[fname]["xmins"] = xmins
record[fname]["xmaxs"] = xmaxs
record[fname]["ymins"] = ymins
record[fname]["ymaxs"] = ymaxs
if plot:
fig, ax = plt.subplots(1,1,figsize=plot_kwargs | read_record_file | identifier_name |
record_io.py | .FLAGS
try:
FLAGS.__delattr__('output_path')
FLAGS.__delattr__('f')
except:
pass
tf.app.flags.DEFINE_string('f', '', 'kernel')
flags.DEFINE_string('output_path', path, '')
FLAGS = flags.FLAGS
print("New record file : {}".format(flags.FLAGS.output_path))
return tf.app.flags.FLAGS.output_path
def create_record_file(image_path, output_path, examples_dict, class_to_index):
output_path = _create_record_file_path(output_path)
writer = tf.python_io.TFRecordWriter(output_path)
for key, val in examples_dict.items():
example = val
example["filename"] = key
tf_example = create_tf_example(example, image_path, class_to_index)
writer.write(tf_example.SerializeToString())
writer.close()
print("Wrote {} examples".format(len(examples_dict)))
def create_tf_example(example, path, class_mapping):
"""
Create a single Tensorflow Example object to be used in creating record
Parameters
----------
example : dict
A single object; the dictionary should contains the keys "filename" referring to the jpg containing
the object, and "box_coords" which gives the location of the object, and "class" the name of the object
path : str
The path to the image files.
Returns
-------
The tf Example object
"""
path = (path + os.sep).encode('ascii')
filename = example['filename'].encode('ascii')
image_format = b'jpg'
image = plt.imread(path +filename, "jpg")
height, width = image.shape[:2]
# Encode the jpg to byte form
with tf.gfile.GFile(path+filename, 'rb') as fid:
encoded_jpg = bytes(fid.read())
# normalize the box coordinates
xmins = [box[0]/width for box in example['box_coords']]
ymins = [box[1]/height for box in example['box_coords']]
xmaxs = [box[2]/width for box in example['box_coords']]
ymaxs = [box[3]/height for box in example['box_coords']]
classes_text = [cls.encode('ascii') for cls in example["class"]]
classes = [class_mapping[cls] for cls in example["class"]]
# create the example
tf_example = tf.train.Example(features=tf.train.Features(feature={
'image/height' : dataset_util.int64_feature(height),
'image/width' : dataset_util.int64_feature(width),
'image/filename' : dataset_util.bytes_feature(filename),
'image/source_id' : dataset_util.bytes_feature(filename),
'image/encoded' : dataset_util.bytes_feature(encoded_jpg),
'image/format' : dataset_util.bytes_feature(image_format),
'image/object/bbox/xmin' : dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax' : dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin' : dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax' : dataset_util.float_list_feature(ymaxs),
'image/object/class/text' : dataset_util.bytes_list_feature(classes_text),
'image/object/class/label' : dataset_util.int64_list_feature(classes),
}))
return tf_example
# ---------------------------------------------------------------------------------------------------------------------
# Diagnostics
# ---------------------------------------------------------------------------------------------------------------------
def peek_in_record(path, plot=True):
objects = dict()
obj_per_img = []
obj_shapes = []
img_shapes = []
total_images = 0
for result in load_tf_record_file(path):
total_images += 1
names = result.context.feature['image/object/class/text'].bytes_list.value
for name in names:
if name not in objects:
objects[name] = 1
else:
objects[name] += 1
obj_per_img += [len(names)]
width = result.context.feature['image/width'].int64_list.value[0]
height = result.context.feature['image/height'].int64_list.value[0]
xmins = np.array(result.context.feature['image/object/bbox/xmin'].float_list.value)
ymins = np.array(result.context.feature['image/object/bbox/ymin'].float_list.value)
xmaxs = np.array(result.context.feature['image/object/bbox/xmax'].float_list.value)
ymaxs = np.array(result.context.feature['image/object/bbox/ymax'].float_list.value)
xmins *= width
xmaxs *= width
ymins *= height
ymaxs *= height
img_shapes += [[height, width]]
for xmin, ymin, xmax, ymax, name in zip(xmins, ymins, xmaxs, ymaxs, names):
obj_shapes += [[ymax-ymin, xmax-xmin]]
if (ymin < 0) or (ymax > height) or (xmin < 0) or (xmax > width):
print("WARNING : Object {} outisde of image region".format(name))
total_objects = sum(objects.values())
obj_shapes = np.array(obj_shapes)
img_shapes = np.array(img_shapes)
print("="*100)
print("Total Images : {0}".format(total_images))
print("Total Objects : {0}".format(total_objects))
print("Ave. Objects per Image : {0}".format(total_objects/total_images))
print("Classes : {0}".format(len(objects)))
print("="*100)
if plot:
fig, axes = plt.subplots(2,2,figsize=(12,12))
ax = axes[1,0]
ax.scatter(obj_shapes[:,1], obj_shapes[:,0])
ax.set_ylim([0.9*obj_shapes[:,0].min(), 1.1*obj_shapes[:,0].max()])
ax.set_xlim([0.9*obj_shapes[:,1].min(), 1.1*obj_shapes[:,1].max()])
ax.set_xlabel("Width", fontsize=18)
ax.set_ylabel("Height", fontsize=18)
ax.set_title("Object Shapes", fontsize=18)
ax = axes[1,1]
ax.scatter(img_shapes[:,1], img_shapes[:,0])
ax.set_ylim([0.9*img_shapes[:,0].min(), 1.1*img_shapes[:,0].max()])
ax.set_xlim([0.9*img_shapes[:,1].min(), 1.1*img_shapes[:,1].max()])
ax.set_xlabel("Width", fontsize=18)
ax.set_ylabel("Height", fontsize=18)
ax.set_title("Image Shapes", fontsize=18)
ax = axes[0,1]
ax.hist(obj_per_img, bins = np.arange(0.5,max(obj_per_img)+1.5,1),density=1.0)
ax.set_xlabel("Objects per image", fontsize=18)
ax = axes[0,0]
labels = [x.decode() for x in objects.keys()]
sizes = list(objects.values())
ax.pie(sizes, labels=labels, autopct='%1.1f%%',
shadow=False, startangle=90)
ax.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
plt.show()
# ---------------------------------------------------------------------------------------------------------------------
# Reading Record Files
# ---------------------------------------------------------------------------------------------------------------------
def load_tf_record_file(path):
for example in tf.python_io.tf_record_iterator(path):
yield tf.train.SequenceExample.FromString(example)
def read_record_file(path, index_to_class, return_dict=True, plot=True, **plot_kwargs):
record = dict()
for result in load_tf_record_file(path):
fname = str(result.context.feature['image/filename'].bytes_list.value[0], "utf-8")
width = result.context.feature['image/width'].int64_list.value[0]
height = result.context.feature['image/height'].int64_list.value[0]
data = result.context.feature['image/encoded'].bytes_list.value[0]
img = Image.open(io.BytesIO(data), mode="r")
img = np.asarray(img)
xmins = np.array(result.context.feature['image/object/bbox/xmin'].float_list.value)
ymins = np.array(result.context.feature['image/object/bbox/ymin'].float_list.value)
xmaxs = np.array(result.context.feature['image/object/bbox/xmax'].float_list.value)
ymaxs = np.array(result.context.feature['image/object/bbox/ymax'].float_list.value)
xmins *= width
xmaxs *= width
ymins *= height
ymaxs *= height
labels = np.array(result.context.feature['image/object/class/label'].int64_list.value, dtype=int)
if return_dict:
|
if plot:
fig, ax = plt.subplots(1,1,figsize=plot_kwargs.get | record[fname] = dict()
record[fname]["width"] = width
record[fname]["height"] = height
record[fname]["image"] = img
record[fname]["xmins"] = xmins
record[fname]["xmaxs"] = xmaxs
record[fname]["ymins"] = ymins
record[fname]["ymaxs"] = ymaxs | conditional_block |
record_io.py | .FLAGS
try:
FLAGS.__delattr__('output_path')
FLAGS.__delattr__('f')
except:
pass
tf.app.flags.DEFINE_string('f', '', 'kernel')
flags.DEFINE_string('output_path', path, '')
FLAGS = flags.FLAGS
print("New record file : {}".format(flags.FLAGS.output_path))
return tf.app.flags.FLAGS.output_path
def create_record_file(image_path, output_path, examples_dict, class_to_index):
output_path = _create_record_file_path(output_path)
writer = tf.python_io.TFRecordWriter(output_path)
for key, val in examples_dict.items():
example = val
example["filename"] = key
tf_example = create_tf_example(example, image_path, class_to_index)
writer.write(tf_example.SerializeToString())
writer.close()
print("Wrote {} examples".format(len(examples_dict)))
def create_tf_example(example, path, class_mapping):
"""
Create a single Tensorflow Example object to be used in creating record
Parameters
----------
example : dict
A single object; the dictionary should contains the keys "filename" referring to the jpg containing
the object, and "box_coords" which gives the location of the object, and "class" the name of the object
path : str
The path to the image files.
Returns
-------
The tf Example object
"""
path = (path + os.sep).encode('ascii')
filename = example['filename'].encode('ascii')
image_format = b'jpg'
image = plt.imread(path +filename, "jpg")
height, width = image.shape[:2]
# Encode the jpg to byte form
with tf.gfile.GFile(path+filename, 'rb') as fid:
encoded_jpg = bytes(fid.read())
# normalize the box coordinates
xmins = [box[0]/width for box in example['box_coords']]
ymins = [box[1]/height for box in example['box_coords']]
xmaxs = [box[2]/width for box in example['box_coords']]
ymaxs = [box[3]/height for box in example['box_coords']]
classes_text = [cls.encode('ascii') for cls in example["class"]]
classes = [class_mapping[cls] for cls in example["class"]]
# create the example
tf_example = tf.train.Example(features=tf.train.Features(feature={
'image/height' : dataset_util.int64_feature(height),
'image/width' : dataset_util.int64_feature(width),
'image/filename' : dataset_util.bytes_feature(filename),
'image/source_id' : dataset_util.bytes_feature(filename),
'image/encoded' : dataset_util.bytes_feature(encoded_jpg),
'image/format' : dataset_util.bytes_feature(image_format),
'image/object/bbox/xmin' : dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax' : dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin' : dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax' : dataset_util.float_list_feature(ymaxs),
'image/object/class/text' : dataset_util.bytes_list_feature(classes_text),
'image/object/class/label' : dataset_util.int64_list_feature(classes),
}))
return tf_example
# ---------------------------------------------------------------------------------------------------------------------
# Diagnostics
# ---------------------------------------------------------------------------------------------------------------------
| def peek_in_record(path, plot=True):
objects = dict()
obj_per_img = []
obj_shapes = []
img_shapes = []
total_images = 0
for result in load_tf_record_file(path):
total_images += 1
names = result.context.feature['image/object/class/text'].bytes_list.value
for name in names:
if name not in objects:
objects[name] = 1
else:
objects[name] += 1
obj_per_img += [len(names)]
width = result.context.feature['image/width'].int64_list.value[0]
height = result.context.feature['image/height'].int64_list.value[0]
xmins = np.array(result.context.feature['image/object/bbox/xmin'].float_list.value)
ymins = np.array(result.context.feature['image/object/bbox/ymin'].float_list.value)
xmaxs = np.array(result.context.feature['image/object/bbox/xmax'].float_list.value)
ymaxs = np.array(result.context.feature['image/object/bbox/ymax'].float_list.value)
xmins *= width
xmaxs *= width
ymins *= height
ymaxs *= height
img_shapes += [[height, width]]
for xmin, ymin, xmax, ymax, name in zip(xmins, ymins, xmaxs, ymaxs, names):
obj_shapes += [[ymax-ymin, xmax-xmin]]
if (ymin < 0) or (ymax > height) or (xmin < 0) or (xmax > width):
print("WARNING : Object {} outisde of image region".format(name))
total_objects = sum(objects.values())
obj_shapes = np.array(obj_shapes)
img_shapes = np.array(img_shapes)
print("="*100)
print("Total Images : {0}".format(total_images))
print("Total Objects : {0}".format(total_objects))
print("Ave. Objects per Image : {0}".format(total_objects/total_images))
print("Classes : {0}".format(len(objects)))
print("="*100)
if plot:
fig, axes = plt.subplots(2,2,figsize=(12,12))
ax = axes[1,0]
ax.scatter(obj_shapes[:,1], obj_shapes[:,0])
ax.set_ylim([0.9*obj_shapes[:,0].min(), 1.1*obj_shapes[:,0].max()])
ax.set_xlim([0.9*obj_shapes[:,1].min(), 1.1*obj_shapes[:,1].max()])
ax.set_xlabel("Width", fontsize=18)
ax.set_ylabel("Height", fontsize=18)
ax.set_title("Object Shapes", fontsize=18)
ax = axes[1,1]
ax.scatter(img_shapes[:,1], img_shapes[:,0])
ax.set_ylim([0.9*img_shapes[:,0].min(), 1.1*img_shapes[:,0].max()])
ax.set_xlim([0.9*img_shapes[:,1].min(), 1.1*img_shapes[:,1].max()])
ax.set_xlabel("Width", fontsize=18)
ax.set_ylabel("Height", fontsize=18)
ax.set_title("Image Shapes", fontsize=18)
ax = axes[0,1]
ax.hist(obj_per_img, bins = np.arange(0.5,max(obj_per_img)+1.5,1),density=1.0)
ax.set_xlabel("Objects per image", fontsize=18)
ax = axes[0,0]
labels = [x.decode() for x in objects.keys()]
sizes = list(objects.values())
ax.pie(sizes, labels=labels, autopct='%1.1f%%',
shadow=False, startangle=90)
ax.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
plt.show()
# ---------------------------------------------------------------------------------------------------------------------
# Reading Record Files
# ---------------------------------------------------------------------------------------------------------------------
def load_tf_record_file(path):
for example in tf.python_io.tf_record_iterator(path):
yield tf.train.SequenceExample.FromString(example)
def read_record_file(path, index_to_class, return_dict=True, plot=True, **plot_kwargs):
record = dict()
for result in load_tf_record_file(path):
fname = str(result.context.feature['image/filename'].bytes_list.value[0], "utf-8")
width = result.context.feature['image/width'].int64_list.value[0]
height = result.context.feature['image/height'].int64_list.value[0]
data = result.context.feature['image/encoded'].bytes_list.value[0]
img = Image.open(io.BytesIO(data), mode="r")
img = np.asarray(img)
xmins = np.array(result.context.feature['image/object/bbox/xmin'].float_list.value)
ymins = np.array(result.context.feature['image/object/bbox/ymin'].float_list.value)
xmaxs = np.array(result.context.feature['image/object/bbox/xmax'].float_list.value)
ymaxs = np.array(result.context.feature['image/object/bbox/ymax'].float_list.value)
xmins *= width
xmaxs *= width
ymins *= height
ymaxs *= height
labels = np.array(result.context.feature['image/object/class/label'].int64_list.value, dtype=int)
if return_dict:
record[fname] = dict()
record[fname]["width"] = width
record[fname]["height"] = height
record[fname]["image"] = img
record[fname]["xmins"] = xmins
record[fname]["xmaxs"] = xmaxs
record[fname]["ymins"] = ymins
record[fname]["ymaxs"] = ymaxs
if plot:
fig, ax = plt.subplots(1,1,figsize=plot_kwargs.get | random_line_split | |
data_tools.py | _gen, y_enf_gen, y_enf_gen_pixl, idx_IN_X_and_y, idx_enf_gen
def get_TRAIN_relevant(TRAIN, words):
# IMPORTANT: we preserve the ORDER of TRAIN (so that we can recover information afterwards)
TRAIN_relevant, rel_ids, OBJ_ctr_sd = {}, [], []
print('Getting *relevant* instances, from a total of: ' + str(len(TRAIN['subj'])))
var_names = [key for key in TRAIN]
# INITIALIZE TRAIN_relavant
for varname in var_names:
TRAIN_relevant[varname] = []
for i in range(len( TRAIN['subj'] )): # Samples loop
we_have_it = True if ((TRAIN['subj'][i] in words) and (TRAIN['rel'][i] in words) and (TRAIN['obj'][i] in words)) else False # if we have the complete triplet
if we_have_it == True:
for varname in var_names:
TRAIN_relevant[varname].append(TRAIN[varname][i])
rel_ids.append(TRAIN['rel_id'][i])
OBJ_ctr_sd.append([TRAIN['img_idx'][i], TRAIN['rel_id'][i], TRAIN['subj'][i], TRAIN['rel'][i],
TRAIN['obj'][i], TRAIN['subj_sd_x'][i], TRAIN['subj_sd_y'][i],
TRAIN['subj_ctr_x'][i], TRAIN['subj_ctr_y'][i], TRAIN['obj_sd_x'][i],
TRAIN['obj_sd_y'][i], TRAIN['obj_ctr_x'][i], TRAIN['obj_ctr_y'][i]])
OBJ_ctr_sd = np.array(OBJ_ctr_sd)
print('We have gotten ' + str(len(TRAIN_relevant['subj'])) + ' RELEVANT instances')
return OBJ_ctr_sd, rel_ids, TRAIN_relevant
def get_random_EMB(actual_EMB):
# Returns embedding matrix of the original shape with random normal vectors (dimension-wise)
mu, sigma, vec_size = np.mean(actual_EMB), np.mean(np.std(actual_EMB, axis=0)), len(actual_EMB[0, :])
rand_EMB = []
for i in range(actual_EMB.shape[0]): # build a dictionary of random vectors
rand_EMB.append(np.random.normal(mu, sigma, vec_size))
rand_EMB = np.array(rand_EMB)
return rand_EMB
def coord2pixel_indiv(obj_sd_x, obj_sd_y, obj_ctr_x, obj_ctr_y, n_side_pixl):
'''
This function works with an individual example (extending it to many examples, where e.g., obj_sd_x is a vector, is easy)
:param obj_sd_x (and the rest): real number (not vectors!)
:param n_side_pixl: number of pixels as output (hyperparameter)
:return y_pixl: matrix of pixels, i.e., a 2D tensor (n_side_pixl, n_side_pixl)
'''
# continuous bounding box corners (prevent problems of predictions outside [0,1])
A_left_x, A_right_x = max((obj_ctr_x - obj_sd_x), 0), min((obj_ctr_x + obj_sd_x), 1)
A_low_y, A_top_y = min((obj_ctr_y + obj_sd_y), 1), max((obj_ctr_y - obj_sd_y), 0)
# translate continuous bounding box corners into indices in a n_side_pixl x n_side_pixl matrix
i_left, i_right = np.rint( (n_side_pixl - 1)*A_left_x).astype(np.int), np.rint((n_side_pixl - 1)*A_right_x).astype(np.int)
j_low, j_top = np.rint((n_side_pixl - 1)*A_low_y).astype(np.int), np.rint((n_side_pixl - 1)*A_top_y).astype(np.int)
pixl_matr = np.zeros( (n_side_pixl, n_side_pixl) )
# add ones inside of the bounding box
i_range = range( i_left, i_right )
i_range = [i_left] if ((i_left == i_right) or (i_range == [])) else i_range # AVOID THE CASE where width is 0 AND i_range=[] (as upper bound < lower bound)
j_range = range( j_top, j_low )
j_range = [j_low] if ((j_low == j_top) or (j_range == [])) else j_range # AVOID THE CASE where height is 0 AND i_range=[] (as upper bound < lower bound)
pixl_matr[ np.array(i_range)[:, None], np.array(j_range)] = 1 # (IMPORTANT: indices must be np.arrays) put a 1 everywhere inside of the bounding box
pixl_matr = pixl_matr.reshape((-1))
return pixl_matr
def pixl_idx2coord_all_examples(y_pixl):
'''
Transforms the whole set of predicted matrices y_pixl into their continuous CENTER coordinates (Obj_ctr)
:param y_pixl: array of MATRICES with predicted heatmaps (pixels). Each matrix = 1 example
:return: PRED_obj_ctr_x, PRED_obj_ctr_y: arrays of length = number of examples
'''
PRED_obj_ctr_x, PRED_obj_ctr_y = [], []
n_side_pixl = y_pixl.shape[1] #get automatically the number of pixels from the pixel matrix side
for i in range( y_pixl.shape[0] ): # loop on number of examples
idx_maximums = get_maximums_idx(y_pixl[i]) # get indices of maximum (allow for multiple of them)
ctr_x, ctr_y = pixl_idx2coord_indiv(idx_maximums, n_side_pixl) # transform pixel indices into continuous coordinates
PRED_obj_ctr_x.append(ctr_x)
PRED_obj_ctr_y.append(ctr_y)
PRED_obj_ctr_x, PRED_obj_ctr_y = np.array(PRED_obj_ctr_x), np.array(PRED_obj_ctr_y)
return PRED_obj_ctr_x, PRED_obj_ctr_y
def get_maximums_idx( heat_matrix ):
# Given a matrix of activations, it outputs the indices corresponding to its maximum values
# INPUT: heat_matrix: matrix of continuous activations (within [0,1]) of size n_side_pixl x n_side_pixl
# OUTPUT: maximums: indices corresponding to where the activations are maximum (accounts for multiple maximums)
#maximums = np.unravel_index(np.argmax(heat_matrix), heat_matrix.shape) # gives the index of the FIRST largest element. Doesn't account for multiple maximums!
maximums = np.where(heat_matrix == heat_matrix.max()) # This one accounts for multiple maximums!
return np.array(maximums)
def pixl_idx2coord_indiv(idx_maximums, n_side_pixl):
'''
This function receives input from get_maximums_indices()
Given discrete pixels indices (i,j) where i,j = 0,...,n_side_pixl (where activations are maximal),
it transforms them to (continuous) coordinates in [0,1]
IMPORTANT: It only computes the CENTER of the Obj (not sd's). So it's useful for measures that only use Obj_ctr
:param idx_maximums: index of maximums from get_maximums_idx()
:param n_side_pixl: side of the activation matrix (necessary to transform indices to coordinates)
:return pred_obj_ctr_x, pred_obj_ctr_y: predicted (continuous) coordinates in [0,1] (Obj_ctr)
'''
coord = np.mean(idx_maximums, axis = 1)
PRED_coord = coord.astype(np.float)/float(n_side_pixl - 1) # Transform pixel indices to (continuous) coordinates
pred_obj_ctr_x, pred_obj_ctr_y = PRED_coord[0], PRED_coord[1]
return pred_obj_ctr_x, pred_obj_ctr_y
def get_folds(n_samples, n_folds):
indices = np.random.permutation(np.arange(n_samples))
n_test = int(np.floor(n_samples / n_folds))
kf = [(np.delete(indices, np.arange(i * n_test, (i + 1) * n_test)), # train
indices[i * n_test:(i + 1) * n_test]) for i in range(n_folds)] # test
return kf
def mirror_x(subj_ctr_x, obj_ctr_x):
# Computes the absolute value of the obj_ctr_x variable (to make it symmetric)
aux_obj_ctr_x = [ (1 - float(obj_ctr_x[i])) if float(obj_ctr_x[i]) <= float(subj_ctr_x[i]) else float(obj_ctr_x[i]) for i in range(len(obj_ctr_x)) ]
aux_subj_ctr_x = [ (1 - float(subj_ctr_x[i])) if float(obj_ctr_x[i]) <= float(subj_ctr_x[i]) else float(subj_ctr_x[i]) for i in range(len(obj_ctr_x)) ]
subj_ctr_x, obj_ctr_x = aux_subj_ctr_x, aux_obj_ctr_x
return subj_ctr_x, obj_ctr_x
def build_emb_dict(words, EMB):
#Input: words= word list, EMB= embeddings in a np.array format
#Output: Dictionary of embeddings
| EMB_dict = {}
for i in range(len(words)):
EMB_dict[words[i]] = EMB[i,:]
return EMB_dict | identifier_body | |
data_tools.py | _enf_gen['subj'].append(subj_list.index(TRAIN_relevant['subj'][i]))
X_enf_gen['pred'].append(pred_list.index(TRAIN_relevant['rel'][i]))
X_enf_gen['obj'].append(obj_list.index(TRAIN_relevant['obj'][i]))
else: # if either the triplet/word is not generalized or we aren't enforcing generalization
X['subj'].append(subj_list.index(TRAIN_relevant['subj'][i]))
X['pred'].append(pred_list.index(TRAIN_relevant['rel'][i]))
X['obj'].append(obj_list.index(TRAIN_relevant['obj'][i]))
# Reshape
X['subj'] = np.array(X['subj']).reshape((-1, 1))
X['pred'] = np.array(X['pred']).reshape((-1, 1))
X['obj'] = np.array(X['obj']).reshape((-1, 1))
# FORMAT: if we have gotten some zero shot instances
if X_enf_gen['subj'] != []:
X_enf_gen['subj'] = np.array(X_enf_gen['subj']).reshape(
(-1, 1)) # get them in the right FORMAT for the merged (SEP) model!
X_enf_gen['pred'] = np.array(X_enf_gen['pred']).reshape((-1, 1))
X_enf_gen['obj'] = np.array(X_enf_gen['obj']).reshape((-1, 1))
else:
X_enf_gen['subj'], X_enf_gen['pred'], X_enf_gen['obj'] = None, None, None
# Get Y (if model_type = PIX we output the regular y besides y_pixl!)
y, y_pixl, y_enf_gen, idx_IN_X_and_y, idx_enf_gen, y_enf_gen_pixl = [], [], [], [], [], []
for i in range(len(TRAIN_relevant['subj'])):
y_new_row = []
for k in range(len(y_vars)):
y_new_row.extend([float(TRAIN_relevant[y_vars[k]][i])]) # IMPORTANT: We assume that the variables are NUMERIC
if model_type == 'PIX':
obj_sd_x, obj_sd_y = float(TRAIN_relevant['obj_sd_x'][i]), float(TRAIN_relevant['obj_sd_y'][i])
obj_ctr_x, obj_ctr_y = float(TRAIN_relevant['obj_ctr_x'][i]), float(TRAIN_relevant['obj_ctr_y'][i])
y_pixl_new_row = coord2pixel_indiv(obj_sd_x, obj_sd_y, obj_ctr_x, obj_ctr_y, n_side_pixl)
# get stuff for the generalzed setting:
triplet = (TRAIN_relevant['subj'][i], TRAIN_relevant['rel'][i], TRAIN_relevant['obj'][i])
if (enforce_gen['eval'] == 'triplets') and (triplet in enforce_gen['triplets']):
y_enf_gen.append(y_new_row)
if model_type == 'PIX':
y_enf_gen_pixl.append(y_pixl_new_row)
idx_enf_gen.append(i)
elif (enforce_gen['eval'] == 'words') and any(word in enforce_gen['words'] for word in triplet):
y_enf_gen.append(y_new_row)
if model_type == 'PIX':
y_enf_gen_pixl.append(y_pixl_new_row)
idx_enf_gen.append(i)
else: # NON GENERALIZED
y.append(y_new_row)
if model_type == 'PIX':
y_pixl.append(y_pixl_new_row)
idx_IN_X_and_y.append(i)
y = np.array(y)
y_enf_gen = np.array(y_enf_gen) if y_enf_gen != [] else None
if model_type == 'PIX':
y_pixl = np.array(y_pixl)
y_enf_gen_pixl = np.array(y_enf_gen_pixl) if y_enf_gen_pixl != [] else None
else:
y_pixl = [[[]]] # necessary because we get the index 0 of y_pixl (if model_type != 'PIX') to save memory in learn_and_evaluate()
print('We have gotten ' + str(len(idx_IN_X_and_y)) + ' instances (for both, train & test)')
# Get X_extra
X_extra, X_extra_enf_gen = [], []
if X_vars != []:
for i in range(len(TRAIN_relevant['subj'])):
X_extra_new_row = []
for k in range(len(X_vars)): # we already ASSUME that we have at least one y-variable
X_extra_new_row.extend(
[float(TRAIN_relevant[X_vars[k]][i])]) # IMPORTANT: We assume that the variables are NUMERIC
# get stuff for the generalized:
triplet = (TRAIN_relevant['subj'][i], TRAIN_relevant['rel'][i], TRAIN_relevant['obj'][i])
if (enforce_gen['eval'] == 'triplets') and (triplet in enforce_gen['triplets']):
X_extra_enf_gen.append(X_extra_new_row)
elif (enforce_gen['eval'] == 'words') and any(word in enforce_gen['words'] for word in triplet):
X_extra_enf_gen.append(X_extra_new_row)
else:
X_extra.append(X_extra_new_row)
X_extra = np.array(X_extra) if X_extra != [] else None # IMPORTANT: we only make it a numpy array if we have something, because we use == [] as condition in models_learn
X_extra_enf_gen = np.array(X_extra_enf_gen) if X_extra_enf_gen != [] else None
return X, X_extra, y, y_pixl, X_extra_enf_gen, X_enf_gen, y_enf_gen, y_enf_gen_pixl, idx_IN_X_and_y, idx_enf_gen
def get_TRAIN_relevant(TRAIN, words):
# IMPORTANT: we preserve the ORDER of TRAIN (so that we can recover information afterwards)
TRAIN_relevant, rel_ids, OBJ_ctr_sd = {}, [], []
print('Getting *relevant* instances, from a total of: ' + str(len(TRAIN['subj'])))
var_names = [key for key in TRAIN]
# INITIALIZE TRAIN_relavant
for varname in var_names:
TRAIN_relevant[varname] = []
for i in range(len( TRAIN['subj'] )): # Samples loop
we_have_it = True if ((TRAIN['subj'][i] in words) and (TRAIN['rel'][i] in words) and (TRAIN['obj'][i] in words)) else False # if we have the complete triplet
if we_have_it == True:
for varname in var_names:
TRAIN_relevant[varname].append(TRAIN[varname][i])
rel_ids.append(TRAIN['rel_id'][i])
OBJ_ctr_sd.append([TRAIN['img_idx'][i], TRAIN['rel_id'][i], TRAIN['subj'][i], TRAIN['rel'][i],
TRAIN['obj'][i], TRAIN['subj_sd_x'][i], TRAIN['subj_sd_y'][i],
TRAIN['subj_ctr_x'][i], TRAIN['subj_ctr_y'][i], TRAIN['obj_sd_x'][i],
TRAIN['obj_sd_y'][i], TRAIN['obj_ctr_x'][i], TRAIN['obj_ctr_y'][i]])
OBJ_ctr_sd = np.array(OBJ_ctr_sd)
print('We have gotten ' + str(len(TRAIN_relevant['subj'])) + ' RELEVANT instances')
return OBJ_ctr_sd, rel_ids, TRAIN_relevant
def get_random_EMB(actual_EMB):
# Returns embedding matrix of the original shape with random normal vectors (dimension-wise)
mu, sigma, vec_size = np.mean(actual_EMB), np.mean(np.std(actual_EMB, axis=0)), len(actual_EMB[0, :])
rand_EMB = []
for i in range(actual_EMB.shape[0]): # build a dictionary of random vectors
rand_EMB.append(np.random.normal(mu, sigma, vec_size))
rand_EMB = np.array(rand_EMB)
return rand_EMB
| :param n_side_pixl: number of pixels as output (hyperparameter)
:return y_pixl: matrix of pixels, i.e., a 2D tensor (n_side_pixl, n_side_pixl)
'''
# continuous bounding box corners (prevent problems of predictions outside [0,1])
A_left_x, A_right_x = max((obj_ctr_x - obj_sd_x), 0), min((obj_ctr_x + obj_sd_x), 1)
A_low_y, A_top_y = min((obj_ctr_y + obj_sd_y), 1), max((obj_ctr_y - obj_sd_y), 0)
# translate continuous bounding box corners into indices in a n_side_pixl x n_side_pixl matrix
i_left, i_right = np.rint( (n_side_pixl - 1)*A_left_x).astype(np.int), np.rint((n_side_pixl - 1)*A_right_x).astype(np.int)
| def coord2pixel_indiv(obj_sd_x, obj_sd_y, obj_ctr_x, obj_ctr_y, n_side_pixl):
'''
This function works with an individual example (extending it to many examples, where e.g., obj_sd_x is a vector, is easy)
:param obj_sd_x (and the rest): real number (not vectors!) | random_line_split |
data_tools.py | f_gen['subj'].append(subj_list.index(TRAIN_relevant['subj'][i]))
X_enf_gen['pred'].append(pred_list.index(TRAIN_relevant['rel'][i]))
X_enf_gen['obj'].append(obj_list.index(TRAIN_relevant['obj'][i]))
else: # if either the triplet/word is not generalized or we aren't enforcing generalization
X['subj'].append(subj_list.index(TRAIN_relevant['subj'][i]))
X['pred'].append(pred_list.index(TRAIN_relevant['rel'][i]))
X['obj'].append(obj_list.index(TRAIN_relevant['obj'][i]))
# Reshape
X['subj'] = np.array(X['subj']).reshape((-1, 1))
X['pred'] = np.array(X['pred']).reshape((-1, 1))
X['obj'] = np.array(X['obj']).reshape((-1, 1))
# FORMAT: if we have gotten some zero shot instances
if X_enf_gen['subj'] != []:
X_enf_gen['subj'] = np.array(X_enf_gen['subj']).reshape(
(-1, 1)) # get them in the right FORMAT for the merged (SEP) model!
X_enf_gen['pred'] = np.array(X_enf_gen['pred']).reshape((-1, 1))
X_enf_gen['obj'] = np.array(X_enf_gen['obj']).reshape((-1, 1))
else:
X_enf_gen['subj'], X_enf_gen['pred'], X_enf_gen['obj'] = None, None, None
# Get Y (if model_type = PIX we output the regular y besides y_pixl!)
y, y_pixl, y_enf_gen, idx_IN_X_and_y, idx_enf_gen, y_enf_gen_pixl = [], [], [], [], [], []
for i in range(len(TRAIN_relevant['subj'])):
y_new_row = []
for k in range(len(y_vars)):
y_new_row.extend([float(TRAIN_relevant[y_vars[k]][i])]) # IMPORTANT: We assume that the variables are NUMERIC
if model_type == 'PIX':
obj_sd_x, obj_sd_y = float(TRAIN_relevant['obj_sd_x'][i]), float(TRAIN_relevant['obj_sd_y'][i])
obj_ctr_x, obj_ctr_y = float(TRAIN_relevant['obj_ctr_x'][i]), float(TRAIN_relevant['obj_ctr_y'][i])
y_pixl_new_row = coord2pixel_indiv(obj_sd_x, obj_sd_y, obj_ctr_x, obj_ctr_y, n_side_pixl)
# get stuff for the generalzed setting:
triplet = (TRAIN_relevant['subj'][i], TRAIN_relevant['rel'][i], TRAIN_relevant['obj'][i])
if (enforce_gen['eval'] == 'triplets') and (triplet in enforce_gen['triplets']):
y_enf_gen.append(y_new_row)
if model_type == 'PIX':
y_enf_gen_pixl.append(y_pixl_new_row)
idx_enf_gen.append(i)
elif (enforce_gen['eval'] == 'words') and any(word in enforce_gen['words'] for word in triplet):
y_enf_gen.append(y_new_row)
if model_type == 'PIX':
y_enf_gen_pixl.append(y_pixl_new_row)
idx_enf_gen.append(i)
else: # NON GENERALIZED
|
y = np.array(y)
y_enf_gen = np.array(y_enf_gen) if y_enf_gen != [] else None
if model_type == 'PIX':
y_pixl = np.array(y_pixl)
y_enf_gen_pixl = np.array(y_enf_gen_pixl) if y_enf_gen_pixl != [] else None
else:
y_pixl = [[[]]] # necessary because we get the index 0 of y_pixl (if model_type != 'PIX') to save memory in learn_and_evaluate()
print('We have gotten ' + str(len(idx_IN_X_and_y)) + ' instances (for both, train & test)')
# Get X_extra
X_extra, X_extra_enf_gen = [], []
if X_vars != []:
for i in range(len(TRAIN_relevant['subj'])):
X_extra_new_row = []
for k in range(len(X_vars)): # we already ASSUME that we have at least one y-variable
X_extra_new_row.extend(
[float(TRAIN_relevant[X_vars[k]][i])]) # IMPORTANT: We assume that the variables are NUMERIC
# get stuff for the generalized:
triplet = (TRAIN_relevant['subj'][i], TRAIN_relevant['rel'][i], TRAIN_relevant['obj'][i])
if (enforce_gen['eval'] == 'triplets') and (triplet in enforce_gen['triplets']):
X_extra_enf_gen.append(X_extra_new_row)
elif (enforce_gen['eval'] == 'words') and any(word in enforce_gen['words'] for word in triplet):
X_extra_enf_gen.append(X_extra_new_row)
else:
X_extra.append(X_extra_new_row)
X_extra = np.array(X_extra) if X_extra != [] else None # IMPORTANT: we only make it a numpy array if we have something, because we use == [] as condition in models_learn
X_extra_enf_gen = np.array(X_extra_enf_gen) if X_extra_enf_gen != [] else None
return X, X_extra, y, y_pixl, X_extra_enf_gen, X_enf_gen, y_enf_gen, y_enf_gen_pixl, idx_IN_X_and_y, idx_enf_gen
def get_TRAIN_relevant(TRAIN, words):
# IMPORTANT: we preserve the ORDER of TRAIN (so that we can recover information afterwards)
TRAIN_relevant, rel_ids, OBJ_ctr_sd = {}, [], []
print('Getting *relevant* instances, from a total of: ' + str(len(TRAIN['subj'])))
var_names = [key for key in TRAIN]
# INITIALIZE TRAIN_relavant
for varname in var_names:
TRAIN_relevant[varname] = []
for i in range(len( TRAIN['subj'] )): # Samples loop
we_have_it = True if ((TRAIN['subj'][i] in words) and (TRAIN['rel'][i] in words) and (TRAIN['obj'][i] in words)) else False # if we have the complete triplet
if we_have_it == True:
for varname in var_names:
TRAIN_relevant[varname].append(TRAIN[varname][i])
rel_ids.append(TRAIN['rel_id'][i])
OBJ_ctr_sd.append([TRAIN['img_idx'][i], TRAIN['rel_id'][i], TRAIN['subj'][i], TRAIN['rel'][i],
TRAIN['obj'][i], TRAIN['subj_sd_x'][i], TRAIN['subj_sd_y'][i],
TRAIN['subj_ctr_x'][i], TRAIN['subj_ctr_y'][i], TRAIN['obj_sd_x'][i],
TRAIN['obj_sd_y'][i], TRAIN['obj_ctr_x'][i], TRAIN['obj_ctr_y'][i]])
OBJ_ctr_sd = np.array(OBJ_ctr_sd)
print('We have gotten ' + str(len(TRAIN_relevant['subj'])) + ' RELEVANT instances')
return OBJ_ctr_sd, rel_ids, TRAIN_relevant
def get_random_EMB(actual_EMB):
# Returns embedding matrix of the original shape with random normal vectors (dimension-wise)
mu, sigma, vec_size = np.mean(actual_EMB), np.mean(np.std(actual_EMB, axis=0)), len(actual_EMB[0, :])
rand_EMB = []
for i in range(actual_EMB.shape[0]): # build a dictionary of random vectors
rand_EMB.append(np.random.normal(mu, sigma, vec_size))
rand_EMB = np.array(rand_EMB)
return rand_EMB
def coord2pixel_indiv(obj_sd_x, obj_sd_y, obj_ctr_x, obj_ctr_y, n_side_pixl):
'''
This function works with an individual example (extending it to many examples, where e.g., obj_sd_x is a vector, is easy)
:param obj_sd_x (and the rest): real number (not vectors!)
:param n_side_pixl: number of pixels as output (hyperparameter)
:return y_pixl: matrix of pixels, i.e., a 2D tensor (n_side_pixl, n_side_pixl)
'''
# continuous bounding box corners (prevent problems of predictions outside [0,1])
A_left_x, A_right_x = max((obj_ctr_x - obj_sd_x), 0), min((obj_ctr_x + obj_sd_x), 1)
A_low_y, A_top_y = min((obj_ctr_y + obj_sd_y), 1), max((obj_ctr_y - obj_sd_y), 0)
# translate continuous bounding box corners into indices in a n_side_pixl x n_side_pixl matrix
i_left, i_right = np.rint( (n_side_pixl - 1)*A_left_x).astype(np.int), np.rint((n_side_pixl - 1)*A_right_x).astype(np.int)
| y.append(y_new_row)
if model_type == 'PIX':
y_pixl.append(y_pixl_new_row)
idx_IN_X_and_y.append(i) | conditional_block |
data_tools.py | _enf_gen_pixl, idx_IN_X_and_y, idx_enf_gen
def get_TRAIN_relevant(TRAIN, words):
# IMPORTANT: we preserve the ORDER of TRAIN (so that we can recover information afterwards)
TRAIN_relevant, rel_ids, OBJ_ctr_sd = {}, [], []
print('Getting *relevant* instances, from a total of: ' + str(len(TRAIN['subj'])))
var_names = [key for key in TRAIN]
# INITIALIZE TRAIN_relavant
for varname in var_names:
TRAIN_relevant[varname] = []
for i in range(len( TRAIN['subj'] )): # Samples loop
we_have_it = True if ((TRAIN['subj'][i] in words) and (TRAIN['rel'][i] in words) and (TRAIN['obj'][i] in words)) else False # if we have the complete triplet
if we_have_it == True:
for varname in var_names:
TRAIN_relevant[varname].append(TRAIN[varname][i])
rel_ids.append(TRAIN['rel_id'][i])
OBJ_ctr_sd.append([TRAIN['img_idx'][i], TRAIN['rel_id'][i], TRAIN['subj'][i], TRAIN['rel'][i],
TRAIN['obj'][i], TRAIN['subj_sd_x'][i], TRAIN['subj_sd_y'][i],
TRAIN['subj_ctr_x'][i], TRAIN['subj_ctr_y'][i], TRAIN['obj_sd_x'][i],
TRAIN['obj_sd_y'][i], TRAIN['obj_ctr_x'][i], TRAIN['obj_ctr_y'][i]])
OBJ_ctr_sd = np.array(OBJ_ctr_sd)
print('We have gotten ' + str(len(TRAIN_relevant['subj'])) + ' RELEVANT instances')
return OBJ_ctr_sd, rel_ids, TRAIN_relevant
def get_random_EMB(actual_EMB):
# Returns embedding matrix of the original shape with random normal vectors (dimension-wise)
mu, sigma, vec_size = np.mean(actual_EMB), np.mean(np.std(actual_EMB, axis=0)), len(actual_EMB[0, :])
rand_EMB = []
for i in range(actual_EMB.shape[0]): # build a dictionary of random vectors
rand_EMB.append(np.random.normal(mu, sigma, vec_size))
rand_EMB = np.array(rand_EMB)
return rand_EMB
def coord2pixel_indiv(obj_sd_x, obj_sd_y, obj_ctr_x, obj_ctr_y, n_side_pixl):
'''
This function works with an individual example (extending it to many examples, where e.g., obj_sd_x is a vector, is easy)
:param obj_sd_x (and the rest): real number (not vectors!)
:param n_side_pixl: number of pixels as output (hyperparameter)
:return y_pixl: matrix of pixels, i.e., a 2D tensor (n_side_pixl, n_side_pixl)
'''
# continuous bounding box corners (prevent problems of predictions outside [0,1])
A_left_x, A_right_x = max((obj_ctr_x - obj_sd_x), 0), min((obj_ctr_x + obj_sd_x), 1)
A_low_y, A_top_y = min((obj_ctr_y + obj_sd_y), 1), max((obj_ctr_y - obj_sd_y), 0)
# translate continuous bounding box corners into indices in a n_side_pixl x n_side_pixl matrix
i_left, i_right = np.rint( (n_side_pixl - 1)*A_left_x).astype(np.int), np.rint((n_side_pixl - 1)*A_right_x).astype(np.int)
j_low, j_top = np.rint((n_side_pixl - 1)*A_low_y).astype(np.int), np.rint((n_side_pixl - 1)*A_top_y).astype(np.int)
pixl_matr = np.zeros( (n_side_pixl, n_side_pixl) )
# add ones inside of the bounding box
i_range = range( i_left, i_right )
i_range = [i_left] if ((i_left == i_right) or (i_range == [])) else i_range # AVOID THE CASE where width is 0 AND i_range=[] (as upper bound < lower bound)
j_range = range( j_top, j_low )
j_range = [j_low] if ((j_low == j_top) or (j_range == [])) else j_range # AVOID THE CASE where height is 0 AND i_range=[] (as upper bound < lower bound)
pixl_matr[ np.array(i_range)[:, None], np.array(j_range)] = 1 # (IMPORTANT: indices must be np.arrays) put a 1 everywhere inside of the bounding box
pixl_matr = pixl_matr.reshape((-1))
return pixl_matr
def pixl_idx2coord_all_examples(y_pixl):
'''
Transforms the whole set of predicted matrices y_pixl into their continuous CENTER coordinates (Obj_ctr)
:param y_pixl: array of MATRICES with predicted heatmaps (pixels). Each matrix = 1 example
:return: PRED_obj_ctr_x, PRED_obj_ctr_y: arrays of length = number of examples
'''
PRED_obj_ctr_x, PRED_obj_ctr_y = [], []
n_side_pixl = y_pixl.shape[1] #get automatically the number of pixels from the pixel matrix side
for i in range( y_pixl.shape[0] ): # loop on number of examples
idx_maximums = get_maximums_idx(y_pixl[i]) # get indices of maximum (allow for multiple of them)
ctr_x, ctr_y = pixl_idx2coord_indiv(idx_maximums, n_side_pixl) # transform pixel indices into continuous coordinates
PRED_obj_ctr_x.append(ctr_x)
PRED_obj_ctr_y.append(ctr_y)
PRED_obj_ctr_x, PRED_obj_ctr_y = np.array(PRED_obj_ctr_x), np.array(PRED_obj_ctr_y)
return PRED_obj_ctr_x, PRED_obj_ctr_y
def get_maximums_idx( heat_matrix ):
# Given a matrix of activations, it outputs the indices corresponding to its maximum values
# INPUT: heat_matrix: matrix of continuous activations (within [0,1]) of size n_side_pixl x n_side_pixl
# OUTPUT: maximums: indices corresponding to where the activations are maximum (accounts for multiple maximums)
#maximums = np.unravel_index(np.argmax(heat_matrix), heat_matrix.shape) # gives the index of the FIRST largest element. Doesn't account for multiple maximums!
maximums = np.where(heat_matrix == heat_matrix.max()) # This one accounts for multiple maximums!
return np.array(maximums)
def pixl_idx2coord_indiv(idx_maximums, n_side_pixl):
'''
This function receives input from get_maximums_indices()
Given discrete pixels indices (i,j) where i,j = 0,...,n_side_pixl (where activations are maximal),
it transforms them to (continuous) coordinates in [0,1]
IMPORTANT: It only computes the CENTER of the Obj (not sd's). So it's useful for measures that only use Obj_ctr
:param idx_maximums: index of maximums from get_maximums_idx()
:param n_side_pixl: side of the activation matrix (necessary to transform indices to coordinates)
:return pred_obj_ctr_x, pred_obj_ctr_y: predicted (continuous) coordinates in [0,1] (Obj_ctr)
'''
coord = np.mean(idx_maximums, axis = 1)
PRED_coord = coord.astype(np.float)/float(n_side_pixl - 1) # Transform pixel indices to (continuous) coordinates
pred_obj_ctr_x, pred_obj_ctr_y = PRED_coord[0], PRED_coord[1]
return pred_obj_ctr_x, pred_obj_ctr_y
def get_folds(n_samples, n_folds):
indices = np.random.permutation(np.arange(n_samples))
n_test = int(np.floor(n_samples / n_folds))
kf = [(np.delete(indices, np.arange(i * n_test, (i + 1) * n_test)), # train
indices[i * n_test:(i + 1) * n_test]) for i in range(n_folds)] # test
return kf
def mirror_x(subj_ctr_x, obj_ctr_x):
# Computes the absolute value of the obj_ctr_x variable (to make it symmetric)
aux_obj_ctr_x = [ (1 - float(obj_ctr_x[i])) if float(obj_ctr_x[i]) <= float(subj_ctr_x[i]) else float(obj_ctr_x[i]) for i in range(len(obj_ctr_x)) ]
aux_subj_ctr_x = [ (1 - float(subj_ctr_x[i])) if float(obj_ctr_x[i]) <= float(subj_ctr_x[i]) else float(subj_ctr_x[i]) for i in range(len(obj_ctr_x)) ]
subj_ctr_x, obj_ctr_x = aux_subj_ctr_x, aux_obj_ctr_x
return subj_ctr_x, obj_ctr_x
def build_emb_dict(words, EMB):
#Input: words= word list, EMB= embeddings in a np.array format
#Output: Dictionary of embeddings
EMB_dict = {}
for i in range(len(words)):
EMB_dict[words[i]] = EMB[i,:]
return EMB_dict
def | wordlist2emb_matrix | identifier_name | |
optimizer-output.component.ts | .dots('Loading...');
this.apiServices.scenario_planner_listdetails(this.valueSelected).subscribe((res:any)=>{
console.log(res,"listDetails");
Notiflix.Loading.remove();
let response=res;
if(res.code==200 && res.status=='success'){
this.resetFilter();
let filterData:any=response['data'][0].json_data;
this.groupedOnPackType=groupByJson(filterData,'pack_type');
this.segmentList=Object.keys(this.groupedOnPackType);
this.selectedSegmentList = this.segmentList;
filterData = filterData.filter((data:any) => this.selectedSegmentList.includes(data["pack_type"]));
if(this.selectedplacementTypes.length!=0){
let to_find:any=[...this.selectedplacementTypes];
console.log(to_find,"to_find");
filterData=recursiveFind(filterData,to_find);
console.log(to_find,"to_find")
}
filterData=filterData.sort((a:any, b:any) => b.processed_lift - a.processed_lift);
console.log(filterData,"filterData");
this.dataSource = new MatTableDataSource<ScenarioPlanner>(filterData);
this.dataSource.paginator = this.paginator;
this.chartInit(filterData);
Notiflix.Notify.success('Senario is loaded successfully !!!');
this.filterData=filterData;
this.modalService.dismissAll();
}
});
}else{
//load default data
let filterData:any = this.defaultData.filter((data:any) => this.selectedSegmentList.includes(data["pack_type"]));
if(this.selectedplacementTypes.length!=0){
let to_find:any=[...this.selectedplacementTypes];
filterData=recursiveFind(filterData,to_find);
}
this.dataSource = new MatTableDataSource<ScenarioPlanner>(filterData);
this.dataSource.paginator = this.paginator;
this.chartInit(filterData);
this.modalService.dismissAll();
}
}
saveScenario(){
let planner_type='';
if(this.SOURCE=='from_opt_activation'){
planner_type='optimizer';
}else{
planner_type='simulation'
}
let payload={
"name":this.FileName,
"json_data":this.filterData,
"planner_type":planner_type
}
if(this.FileName.trim()!=''){
this.apiServices.scenario_planner_simulate_save(payload).subscribe((res:any)=>{
console.log(res,"res")
if(res.code==200){
this.modalService.dismissAll();
Notiflix.Notify.success('Simulation is Saved Successfully');
this.getSavedData();
this.FileName='';
}else{
if(res.status=='Failed'){
Notiflix.Notify.failure('Failed to save record');
}
}
});
}else{
Notiflix.Notify.failure('Please Enter The Scenario Name')
}
}
getSavedData(){
this.apiServices.scenario_planner_list().subscribe((res:any) =>{
console.log(res,"scenatio_list");
this.saveList=[];
if(res.code==200 && res.status=='success'){
if(this.SOURCE=='from_opt_activation'){
// planner_type='optimizer';
this.saveList=[{'name':'Default','id':0}];
this.saveList.push(...res.data['optimizer']);
}else{
// planner_type='simulation'
//this.saveList=res.data['simulation'];
this.saveList=[{'name':'Default','id':0}];
this.saveList.push(...res.data['simulation']);
}
}
console.log(this.saveList,"saveList");
});
}
getpackTypeList(filterData:any,byPacktype:any){
this.TATS_ARRAY=[];
for(let [key,value] of Object.entries(this.activationLIB)){
this.TATS_ARRAY.push({'name':value,'value':this.TATS[key]})
}
this.TATSPack_ARRAY=[];
if(this.packTypeList){
for(let [key,value] of Object.entries(this.packTypeList)){
let values:any=value;
this.TATSPack_ARRAY.push({'name':values.name,'value':this.TATS[key]})
}
for(let [key,value] of Object.entries(byPacktype)){
let lvalue:any=value;
this.TATS_BY_PACK[key.toLowerCase()]=lvalue.length;
}
}
}
downloadProducts(){
let filename="Scenario-Planner - OPTIMIZER"
var options = {
fieldSeparator: ',',
quoteStrings: '"',
decimalseparator: '.',
showLabels: true,
showTitle: true,
title: filename,
useBom: true,
noDownload: false,
headers: ['Pack Type', 'Product Sub Type', 'Activity','Cost','Incremental Sales','Expected Lift','CSV ROAS'],
nullToEmptyString: true,
};
this.renderedData.map((item:any)=>
{
for(let [key,value] of Object.entries(item)){
let values:any=value;
if(!this.displayedColumns.includes(key)){
delete item[key];
}else{
if(key=='processed_lift'){
item[key]=values.toFixed(2)+"%";
}
else if(key=='csv_roas'){
item[key]=values+"%";
}
else if(key=='total_activation_cost'){
item[key]=values.toFixed(2);
}
else if(key=='total_incremental_sales'){
item[key]=values.toFixed(2);
}
//'total_activation_cost','total_incremental_sales'
}
}
});
new Angular5Csv(this.renderedData, filename, options);
}
test_filter(){
}
decrementRange(value:any){
value.discount=value.discount-5;
}
incrementRange(value:any){
value.discount=value.discount+5;
}
goBack(){
console.log(this.SOURCE,"this.SOURCE")
if(this.SOURCE=='from_opt_activation'){
this.routes.navigate(['/optimizer'],{ state: {'source':'from_output','data':[this.ELEMENT_DATA_CONSTRAINTS,this.selectedData,this.response_data,this.Ratecardjson]}});
}else{
this.routes.navigate(['/simulator'],{ state: {'source':'from_output','data':[this.ELEMENT_DATA_CONSTRAINTS,this.selectedData,this.response_data,this.Ratecardjson]}});
}
}
resetFilter(){
this.dataSource = new MatTableDataSource<ScenarioPlanner>(this.ELEMENT_DATA);
this.dataSource.paginator = this.paginator;
this.chartInit(this.ELEMENT_DATA);
}
doFilter(){
this.incremantalCSV=0;
console.log(this.selectedSegmentList,"Segmentedlist")
let filterData:any = this.ELEMENT_DATA.filter((data:any) => this.selectedSegmentList.includes(data["pack_type"]));
if(this.selectedplacementTypes.length!=0){
let to_find:any=[...this.selectedplacementTypes];
filterData=recursiveFind(filterData,to_find);
}
this.dataSource = new MatTableDataSource<ScenarioPlanner>(filterData);
this.dataSource.paginator = this.paginator;
this.chartInit(filterData);
}
chartInit(filterData:any){
this.TATS={};
this.incremantalCSV=0;
this.totalActivationCost=0;
this.totalscvROAS=0;
this.optimizedLift=0;
this.totalLift=0;
this.DynActivationColumns.forEach((element:any) => {
this.TATS[element.value]=0;
//this.Chartpoints_pla_rev[element.value]=0;
//this.incremantalCSV+=element.total_incremental_sales;
});
let gbActivity=groupByJson(filterData,'activation_type');
console.log(gbActivity,"gbActivity")
let gbActivityList=Object.keys(gbActivity);
gbActivityList.forEach((item)=>{
this.Chartpoints_pla_rev[item]=0;
});
let predictedSales=0;
filterData.forEach((element:any)=>{
this.incremantalCSV+=element.total_incremental_sales;
this.totalActivationCost+=element.total_activation_cost;
this.totalscvROAS+=element.total_incremental_sales/element.total_activation_cost;
this.optimizedLift+=element.total_activation_cost;
//this.totalLift+=element.processed_lift;
// calculation = item["total_incremental_sales"] /(item["predicted_sales"] - item["total_incremental_sales"])
predictedSales+=element.predicted_sales;
});
this.totalLift=this.incremantalCSV/(predictedSales-this.incremantalCSV)*100;
this.optimizedLift=this.optimizedLift.toFixed()
this.optimizedLift= numberWithCommas(this.optimizedLift);
gbActivityList.forEach((item)=>{
filterData.forEach((element:any)=>{
if(element.activation_type.includes(item)){
this.Chartpoints_pla_rev[item]=element.total_incremental_sales
}
});
});
for(let [key,value] of Object.entries(this.activationLIB)){
filterData.forEach((element:any)=>{
if(element.activation_type.includes(value)){
this.TATS[key]+=1;
//this.Chartpoints_pla_rev[key]+=element.total_incremental_sales.toFixed(2);
}
});
}
console.log(this.Chartpoints_pla_rev,"===");
let byPacktype=groupByJson(filterData,'pack_type');
console.log(filterData,byPacktype,"1");
this.chartRender(this.Chartpoints_pla_rev,filterData);
this.chartExpLift(filterData,byPacktype);
this.getpackTypeList(filterData,byPacktype);
}
chartRender(data:any,filterData:any){
this.reload=false;
let data_points:any=[];
this.dataSetLabel=[];
console.log(data,"data")
let gbActivity=groupByJson(filterData,'activation_type');
console.log(gbActivity,"gbActivity") | random_line_split | ||
optimizer-output.component.ts | text: 'Incremental Revenue by Placements',
display: true
} };
dataSetLabel1:any=[];
saveList:any=[{'name':'SELECT','id':0},
{'name':'Load1','id':1}]
selectedplacementTypes='';
dataSet1:any={ data: [], label: 'Expected Lift by Pack type' };
//'total_activation_cost','total_incremental_sales','processed_lift'
displayedColumns: string[] = ['pack_sub_type','pack_type','activation_type','total_activation_cost','total_incremental_sales','csv_roas','processed_lift',];
dataSource = new MatTableDataSource<ScenarioPlanner>(this.ELEMENT_DATA);
selection = new SelectionModel<ScenarioPlanner>(true, []);
sortedData: ScenarioPlanner[]=[];
selectedData:any=[];
skuList: ScenarioPlanner[] = [];
activityType: ScenarioPlanner[] = [];
activityLift:any = '';
activityROI:any = '';
renderedData: any;
closeModal: any;
liftSliderValue:any = [5,60];
roiSliderValue:any = [5,40];
groupedOnPackType=[];
// Configuration for the filters
skuSelected:any = [];
placementTypes = new FormControl();
//segment
Segment = new FormControl();
segmentList: any[] = [];
selectedSegmentList: any = [];
constraint_list=[]
ngOnInit(): void {
Notiflix.Loading.dots('Loading...');
this.budgetConstraintSubscribe = this.dataservice.BudgetConstraintOb.subscribe((constraint:any) => {
if(constraint){
this.totalBudget=constraint['total'];
}
console.log(constraint,"constraintz");
console.log(this.totalBudget,"totalbudget")
});
this.apiServices.getActivationList().subscribe((res:any)=>{
console.log(res,"RES");
Notiflix.Loading.remove();
if(res.code==200){
this.DynActivationColumns=res.data;
for(let [key,value] of Object.entries(res.data)){
let values:any=value;
this.activationLIB[values.value]=values.name;
this.PlacementLabel.push(values.name);
}
if(this.datastream){
this.SOURCE=this.datastream.source
if(this.datastream.source=='from_opt_activation'){
this.ELEMENT_DATA_CONSTRAINTS=this.datastream.data[0] || [];
this.selectedData=this.datastream.data[1] || [];
this.response_data=this.datastream.data[2] || [];
this.filterData=this.datastream.data[3] || [];
this.defaultData=this.datastream.data[3] || [];
this.Ratecardjson=this.datastream.data[4] || [];
this.ELEMENT_DATA_CONSTRAINTS.forEach((element:any) => {
let itemlist=[];
for( const [key,value] of Object.entries(element)){
if((value) && (this.activationLIB[key]!=undefined)){
itemlist.push(this.activationLIB[key]);
}
}
this.activationLIBSelected[element.pack_type]=itemlist;
});
}
this.ELEMENT_DATA=this.filterData;
this.ngAfterViewInit();
this.getSavedData();
this.groupedOnPackType=groupByJson(this.filterData,'pack_type');
this.segmentList=Object.keys(this.groupedOnPackType);
this.selectedSegmentList = this.segmentList;
this.chartInit(this.ELEMENT_DATA);
}else{
this.routes.navigate(['/planner']);
}
}
});
}
@ViewChild(MatPaginator) paginator: any;
ngAfterViewInit() {
console.log(this.ELEMENT_DATA,"this.ELEMENT_DATA__");
this.ELEMENT_DATA=this.ELEMENT_DATA.sort((a:any, b:any) => b.processed_lift - a.processed_lift);
this.ELEMENT_DATA.forEach((element:any) => {
element['csv_roas']=((element.total_incremental_sales/element.total_activation_cost)*100).toFixed()
});
this.dataSource= new MatTableDataSource<ScenarioPlanner>(this.ELEMENT_DATA);
this.dataSource.paginator = this.paginator;
this.dataSource.connect().subscribe(d => {
this.renderedData = d});
}
// File Reader ( EXCEL OR CSV) to JSON Format
// Input Handler for the promocode upload
async testData(event:any){
// let promoList:any=await this.onFileChange(event);
// let FilteredSet=promoList['sheet1'];
// this.ELEMENT_DATA=FilteredSet;
// this.dataSource= new MatTableDataSource<ScenarioPlanner>(this.ELEMENT_DATA);
// this.ngAfterViewInit();
}
saveScenarioTrigger(content:any) {
this.modalService.open(content, this.modalOptions).result.then((result) => {
});
}
deleteSavedList(){
let that=this;
Notiflix.Confirm.show('Confirm Delete','Are you sure you want to delete this item?','Yes','No',
()=>{
//scenario_planner_listdelete
this.apiServices.scenario_planner_listdelete(this.valueSelected).subscribe((res:any)=>{
if(res.code==200 && res.status=='success'){
that.getSavedData();
Notiflix.Notify.success('Deleted Successfully ! ');
}
});
});
}
LoadSaveList(){
this.incremantalCSV=0;
this.totalActivationCost=0;
this.totalscvROAS=0;
if(this.valueSelected!=0){
//load data
Notiflix.Loading.dots('Loading...');
this.apiServices.scenario_planner_listdetails(this.valueSelected).subscribe((res:any)=>{
console.log(res,"listDetails");
Notiflix.Loading.remove();
let response=res;
if(res.code==200 && res.status=='success'){
this.resetFilter();
let filterData:any=response['data'][0].json_data;
this.groupedOnPackType=groupByJson(filterData,'pack_type');
this.segmentList=Object.keys(this.groupedOnPackType);
this.selectedSegmentList = this.segmentList;
filterData = filterData.filter((data:any) => this.selectedSegmentList.includes(data["pack_type"]));
if(this.selectedplacementTypes.length!=0){
let to_find:any=[...this.selectedplacementTypes];
console.log(to_find,"to_find");
filterData=recursiveFind(filterData,to_find);
console.log(to_find,"to_find")
}
filterData=filterData.sort((a:any, b:any) => b.processed_lift - a.processed_lift);
console.log(filterData,"filterData");
this.dataSource = new MatTableDataSource<ScenarioPlanner>(filterData);
this.dataSource.paginator = this.paginator;
this.chartInit(filterData);
Notiflix.Notify.success('Senario is loaded successfully !!!');
this.filterData=filterData;
this.modalService.dismissAll();
}
});
}else{
//load default data
let filterData:any = this.defaultData.filter((data:any) => this.selectedSegmentList.includes(data["pack_type"]));
if(this.selectedplacementTypes.length!=0){
let to_find:any=[...this.selectedplacementTypes];
filterData=recursiveFind(filterData,to_find);
}
this.dataSource = new MatTableDataSource<ScenarioPlanner>(filterData);
this.dataSource.paginator = this.paginator;
this.chartInit(filterData);
this.modalService.dismissAll();
}
}
saveScenario(){
let planner_type='';
if(this.SOURCE=='from_opt_activation'){
planner_type='optimizer';
}else{
planner_type='simulation'
}
let payload={
"name":this.FileName,
"json_data":this.filterData,
"planner_type":planner_type
}
if(this.FileName.trim()!=''){
this.apiServices.scenario_planner_simulate_save(payload).subscribe((res:any)=>{
console.log(res,"res")
if(res.code==200){
this.modalService.dismissAll();
Notiflix.Notify.success('Simulation is Saved Successfully');
this.getSavedData();
this.FileName='';
}else{
if(res.status=='Failed'){
Notiflix.Notify.failure('Failed to save record');
}
}
});
}else{
Notiflix.Notify.failure('Please Enter The Scenario Name')
}
}
getSavedData(){
this.apiServices.scenario_planner_list().subscribe((res:any) =>{
console.log(res,"scenatio_list");
this.saveList=[];
if(res.code==200 && res.status=='success'){
if(this.SOURCE=='from_opt_activation'){
// planner_type='optimizer';
this.saveList=[{'name':'Default','id':0}];
this.saveList.push(...res.data['optimizer']);
}else{
// planner_type='simulation'
//this.saveList=res.data['simulation'];
this.saveList=[{'name':'Default','id':0}];
this.saveList.push(...res.data['simulation']);
}
}
console.log(this.saveList,"saveList");
});
}
getpackTypeList(filterData:any,byPacktype:any) | {
this.TATS_ARRAY=[];
for(let [key,value] of Object.entries(this.activationLIB)){
this.TATS_ARRAY.push({'name':value,'value':this.TATS[key]})
}
this.TATSPack_ARRAY=[];
if(this.packTypeList){
for(let [key,value] of Object.entries(this.packTypeList)){
let values:any=value;
this.TATSPack_ARRAY.push({'name':values.name,'value':this.TATS[key]})
}
for(let [key,value] of Object.entries(byPacktype)){
let lvalue:any=value;
this.TATS_BY_PACK[key.toLowerCase()]=lvalue.length;
}
}
} | identifier_body | |
optimizer-output.component.ts | @Input() dataSet:any={ data: [0, 0, 0, 0, 0],
title: {
text: 'Incremental Revenue by Placements',
display: true
} };
dataSetLabel1:any=[];
saveList:any=[{'name':'SELECT','id':0},
{'name':'Load1','id':1}]
selectedplacementTypes='';
dataSet1:any={ data: [], label: 'Expected Lift by Pack type' };
//'total_activation_cost','total_incremental_sales','processed_lift'
displayedColumns: string[] = ['pack_sub_type','pack_type','activation_type','total_activation_cost','total_incremental_sales','csv_roas','processed_lift',];
dataSource = new MatTableDataSource<ScenarioPlanner>(this.ELEMENT_DATA);
selection = new SelectionModel<ScenarioPlanner>(true, []);
sortedData: ScenarioPlanner[]=[];
selectedData:any=[];
skuList: ScenarioPlanner[] = [];
activityType: ScenarioPlanner[] = [];
activityLift:any = '';
activityROI:any = '';
renderedData: any;
closeModal: any;
liftSliderValue:any = [5,60];
roiSliderValue:any = [5,40];
groupedOnPackType=[];
// Configuration for the filters
skuSelected:any = [];
placementTypes = new FormControl();
//segment
Segment = new FormControl();
segmentList: any[] = [];
selectedSegmentList: any = [];
constraint_list=[]
ngOnInit(): void {
Notiflix.Loading.dots('Loading...');
this.budgetConstraintSubscribe = this.dataservice.BudgetConstraintOb.subscribe((constraint:any) => {
if(constraint){
this.totalBudget=constraint['total'];
}
console.log(constraint,"constraintz");
console.log(this.totalBudget,"totalbudget")
});
this.apiServices.getActivationList().subscribe((res:any)=>{
console.log(res,"RES");
Notiflix.Loading.remove();
if(res.code==200){
this.DynActivationColumns=res.data;
for(let [key,value] of Object.entries(res.data)){
let values:any=value;
this.activationLIB[values.value]=values.name;
this.PlacementLabel.push(values.name);
}
if(this.datastream){
this.SOURCE=this.datastream.source
if(this.datastream.source=='from_opt_activation'){
this.ELEMENT_DATA_CONSTRAINTS=this.datastream.data[0] || [];
this.selectedData=this.datastream.data[1] || [];
this.response_data=this.datastream.data[2] || [];
this.filterData=this.datastream.data[3] || [];
this.defaultData=this.datastream.data[3] || [];
this.Ratecardjson=this.datastream.data[4] || [];
this.ELEMENT_DATA_CONSTRAINTS.forEach((element:any) => {
let itemlist=[];
for( const [key,value] of Object.entries(element)){
if((value) && (this.activationLIB[key]!=undefined)){
itemlist.push(this.activationLIB[key]);
}
}
this.activationLIBSelected[element.pack_type]=itemlist;
});
}
this.ELEMENT_DATA=this.filterData;
this.ngAfterViewInit();
this.getSavedData();
this.groupedOnPackType=groupByJson(this.filterData,'pack_type');
this.segmentList=Object.keys(this.groupedOnPackType);
this.selectedSegmentList = this.segmentList;
this.chartInit(this.ELEMENT_DATA);
}else{
this.routes.navigate(['/planner']);
}
}
});
}
@ViewChild(MatPaginator) paginator: any;
ngAfterViewInit() {
console.log(this.ELEMENT_DATA,"this.ELEMENT_DATA__");
this.ELEMENT_DATA=this.ELEMENT_DATA.sort((a:any, b:any) => b.processed_lift - a.processed_lift);
this.ELEMENT_DATA.forEach((element:any) => {
element['csv_roas']=((element.total_incremental_sales/element.total_activation_cost)*100).toFixed()
});
this.dataSource= new MatTableDataSource<ScenarioPlanner>(this.ELEMENT_DATA);
this.dataSource.paginator = this.paginator;
this.dataSource.connect().subscribe(d => {
this.renderedData = d});
}
// File Reader ( EXCEL OR CSV) to JSON Format
// Input Handler for the promocode upload
async testData(event:any){
// let promoList:any=await this.onFileChange(event);
// let FilteredSet=promoList['sheet1'];
// this.ELEMENT_DATA=FilteredSet;
// this.dataSource= new MatTableDataSource<ScenarioPlanner>(this.ELEMENT_DATA);
// this.ngAfterViewInit();
}
saveScenarioTrigger(content:any) {
this.modalService.open(content, this.modalOptions).result.then((result) => {
});
}
deleteSavedList(){
let that=this;
Notiflix.Confirm.show('Confirm Delete','Are you sure you want to delete this item?','Yes','No',
()=>{
//scenario_planner_listdelete
this.apiServices.scenario_planner_listdelete(this.valueSelected).subscribe((res:any)=>{
if(res.code==200 && res.status=='success'){
that.getSavedData();
Notiflix.Notify.success('Deleted Successfully ! ');
}
});
});
}
LoadSaveList(){
this.incremantalCSV=0;
this.totalActivationCost=0;
this.totalscvROAS=0;
if(this.valueSelected!=0){
//load data
Notiflix.Loading.dots('Loading...');
this.apiServices.scenario_planner_listdetails(this.valueSelected).subscribe((res:any)=>{
console.log(res,"listDetails");
Notiflix.Loading.remove();
let response=res;
if(res.code==200 && res.status=='success'){
this.resetFilter();
let filterData:any=response['data'][0].json_data;
this.groupedOnPackType=groupByJson(filterData,'pack_type');
this.segmentList=Object.keys(this.groupedOnPackType);
this.selectedSegmentList = this.segmentList;
filterData = filterData.filter((data:any) => this.selectedSegmentList.includes(data["pack_type"]));
if(this.selectedplacementTypes.length!=0){
let to_find:any=[...this.selectedplacementTypes];
console.log(to_find,"to_find");
filterData=recursiveFind(filterData,to_find);
console.log(to_find,"to_find")
}
filterData=filterData.sort((a:any, b:any) => b.processed_lift - a.processed_lift);
console.log(filterData,"filterData");
this.dataSource = new MatTableDataSource<ScenarioPlanner>(filterData);
this.dataSource.paginator = this.paginator;
this.chartInit(filterData);
Notiflix.Notify.success('Senario is loaded successfully !!!');
this.filterData=filterData;
this.modalService.dismissAll();
}
});
}else{
//load default data
let filterData:any = this.defaultData.filter((data:any) => this.selectedSegmentList.includes(data["pack_type"]));
if(this.selectedplacementTypes.length!=0){
let to_find:any=[...this.selectedplacementTypes];
filterData=recursiveFind(filterData,to_find);
}
this.dataSource = new MatTableDataSource<ScenarioPlanner>(filterData);
this.dataSource.paginator = this.paginator;
this.chartInit(filterData);
this.modalService.dismissAll();
}
}
saveScenario(){
let planner_type='';
if(this.SOURCE=='from_opt_activation'){
planner_type='optimizer';
}else |
let payload={
"name":this.FileName,
"json_data":this.filterData,
"planner_type":planner_type
}
if(this.FileName.trim()!=''){
this.apiServices.scenario_planner_simulate_save(payload).subscribe((res:any)=>{
console.log(res,"res")
if(res.code==200){
this.modalService.dismissAll();
Notiflix.Notify.success('Simulation is Saved Successfully');
this.getSavedData();
this.FileName='';
}else{
if(res.status=='Failed'){
Notiflix.Notify.failure('Failed to save record');
}
}
});
}else{
Notiflix.Notify.failure('Please Enter The Scenario Name')
}
}
getSavedData(){
this.apiServices.scenario_planner_list().subscribe((res:any) =>{
console.log(res,"scenatio_list");
this.saveList=[];
if(res.code==200 && res.status=='success'){
if(this.SOURCE=='from_opt_activation'){
// planner_type='optimizer';
this.saveList=[{'name':'Default','id':0}];
this.saveList.push(...res.data['optimizer']);
}else{
// planner_type='simulation'
//this.saveList=res.data['simulation'];
this.saveList=[{'name':'Default','id':0}];
this.saveList.push(...res.data['simulation']);
}
}
console.log(this.saveList,"saveList");
});
}
getpackTypeList(filterData:any,byPacktype:any){
this.TATS_ARRAY=[];
for(let [key,value] of Object.entries(this.activationLIB)){
this.TATS_ARRAY.push({'name':value,'value':this.TATS[key]})
}
this.TATSPack_ARRAY=[];
if(this.packTypeList){
for(let [key,value] of Object.entries(this.packTypeList)){
let values:any=value;
this.TATSPack_ARRAY.push({'name':values.name,'value':this.TATS[key]})
}
for(let [key,value] of Object.entries(byPacktype | {
planner_type='simulation'
} | conditional_block |
optimizer-output.component.ts | (private modalService: NgbModal,
private dataservice:DataControllerService,
private routes:Router,private apiServices:ScenarioPlannerService) {
// console.log(this.route.getCurrentNavigation()?.extras.state);
this.datastream=this.routes.getCurrentNavigation()?.extras.state;
this.currencySymbol=environment.currencySymbol;
this.modalOptions = {
backdrop:'static',
backdropClass:'customBackdrop'
}
};
ELEMENT_DATA: ScenarioPlanner[] = [];
activationLIB:any={};
TATS:any={};
packTypeList:any;
TATS_ARRAY:any=[];
DynActivationColumns:any=[];
TATS_BY_PACK:any={};
Chartpoints_pla_rev:any={};
FileName:string='';
activationLIBSelected:any={};
binaryOption=[
{id: 'Yes', name: "Yes"},
{id: 'No', name: "No"},];
reload:boolean=true;
ELEMENT_DATA_CONSTRAINTS:any=[];
//displayedColumnsConstraints: string[] = ['pack_type','fsi', 'fai','search', 'sot', 'bpp'];
//dataSourceConstraints = new MatTableDataSource<ScenarioPlannerConstraint>(this.ELEMENT_DATA_CONSTRAINTS);
PlacementLabel:any=[];
@Input() dataSetLabel:any=[ 'FAI', 'FSI', 'SOT', 'BBP','Search'];
@Input() dataSet:any={ data: [0, 0, 0, 0, 0],
title: {
text: 'Incremental Revenue by Placements',
display: true
} };
dataSetLabel1:any=[];
saveList:any=[{'name':'SELECT','id':0},
{'name':'Load1','id':1}]
selectedplacementTypes='';
dataSet1:any={ data: [], label: 'Expected Lift by Pack type' };
//'total_activation_cost','total_incremental_sales','processed_lift'
displayedColumns: string[] = ['pack_sub_type','pack_type','activation_type','total_activation_cost','total_incremental_sales','csv_roas','processed_lift',];
dataSource = new MatTableDataSource<ScenarioPlanner>(this.ELEMENT_DATA);
selection = new SelectionModel<ScenarioPlanner>(true, []);
sortedData: ScenarioPlanner[]=[];
selectedData:any=[];
skuList: ScenarioPlanner[] = [];
activityType: ScenarioPlanner[] = [];
activityLift:any = '';
activityROI:any = '';
renderedData: any;
closeModal: any;
liftSliderValue:any = [5,60];
roiSliderValue:any = [5,40];
groupedOnPackType=[];
// Configuration for the filters
skuSelected:any = [];
placementTypes = new FormControl();
//segment
Segment = new FormControl();
segmentList: any[] = [];
selectedSegmentList: any = [];
constraint_list=[]
// Bootstraps the view: subscribes to the shared budget constraint,
// fetches the activation (placement) list, and — when a data stream is
// present — hydrates the table, filters, saved scenarios and chart.
// Navigates back to /planner when no data stream exists.
ngOnInit(): void {
Notiflix.Loading.dots('Loading...');
// Budget constraint pushed by the data service; only 'total' is read here.
this.budgetConstraintSubscribe = this.dataservice.BudgetConstraintOb.subscribe((constraint:any) => {
if(constraint){
this.totalBudget=constraint['total'];
}
console.log(constraint,"constraintz");
console.log(this.totalBudget,"totalbudget")
});
this.apiServices.getActivationList().subscribe((res:any)=>{
console.log(res,"RES");
Notiflix.Loading.remove();
if(res.code==200){
this.DynActivationColumns=res.data;
// Build value->name lookup (activationLIB) and the list of placement labels.
for(let [key,value] of Object.entries(res.data)){
let values:any=value;
this.activationLIB[values.value]=values.name;
this.PlacementLabel.push(values.name);
}
if(this.datastream){
this.SOURCE=this.datastream.source
// Data arriving from the optimizer carries a positional payload:
// [0]=constraints, [1]=selection, [2]=raw response, [3]=filter/default rows, [4]=rate card.
if(this.datastream.source=='from_opt_activation'){
this.ELEMENT_DATA_CONSTRAINTS=this.datastream.data[0] || [];
this.selectedData=this.datastream.data[1] || [];
this.response_data=this.datastream.data[2] || [];
this.filterData=this.datastream.data[3] || [];
this.defaultData=this.datastream.data[3] || [];
this.Ratecardjson=this.datastream.data[4] || [];
// For each constraint row, collect the human-readable names of the
// activation types that have a truthy value, keyed by pack_type.
this.ELEMENT_DATA_CONSTRAINTS.forEach((element:any) => {
let itemlist=[];
for( const [key,value] of Object.entries(element)){
if((value) && (this.activationLIB[key]!=undefined)){
itemlist.push(this.activationLIB[key]);
}
}
this.activationLIBSelected[element.pack_type]=itemlist;
});
}
this.ELEMENT_DATA=this.filterData;
// Manually re-run the view hook so the table/paginator pick up the new data.
this.ngAfterViewInit();
this.getSavedData();
this.groupedOnPackType=groupByJson(this.filterData,'pack_type');
this.segmentList=Object.keys(this.groupedOnPackType);
this.selectedSegmentList = this.segmentList;
this.chartInit(this.ELEMENT_DATA);
}else{
// No upstream data stream: send the user back to the planner entry page.
this.routes.navigate(['/planner']);
}
}
});
}
@ViewChild(MatPaginator) paginator: any;
// Rebinds the table after ELEMENT_DATA changes: sorts rows by processed
// lift (descending), derives csv_roas as an integer percentage of
// incremental sales over activation cost, and wires up pagination.
// Also called manually from ngOnInit once data arrives.
ngAfterViewInit() {
console.log(this.ELEMENT_DATA,"this.ELEMENT_DATA__");
this.ELEMENT_DATA=this.ELEMENT_DATA.sort((a:any, b:any) => b.processed_lift - a.processed_lift);
this.ELEMENT_DATA.forEach((element:any) => {
// Guard against a zero/missing activation cost: the unguarded division
// previously produced "Infinity"/"NaN" strings via toFixed().
const cost = Number(element.total_activation_cost);
element['csv_roas'] = cost
? ((element.total_incremental_sales/cost)*100).toFixed()
: '0';
});
this.dataSource= new MatTableDataSource<ScenarioPlanner>(this.ELEMENT_DATA);
this.dataSource.paginator = this.paginator;
// Track the rows the paginator currently renders (used elsewhere, e.g. exports).
this.dataSource.connect().subscribe(d => {
this.renderedData = d});
}
// File Reader ( EXCEL OR CSV) to JSON Format
// Input Handler for the promocode upload
// NOTE(review): upload handling is currently disabled — the commented
// body shows the intended flow (parse the file, replace ELEMENT_DATA,
// rebuild the table). Confirm before re-enabling.
async testData(event:any){
// let promoList:any=await this.onFileChange(event);
// let FilteredSet=promoList['sheet1'];
// this.ELEMENT_DATA=FilteredSet;
// this.dataSource= new MatTableDataSource<ScenarioPlanner>(this.ELEMENT_DATA);
// this.ngAfterViewInit();
}
// Opens the "save scenario" modal for the given template.
// ng-bootstrap's result promise REJECTS when the modal is dismissed
// (ESC / backdrop click); the original code supplied no rejection
// handler, surfacing an unhandled promise rejection on every dismissal.
saveScenarioTrigger(content:any) {
this.modalService.open(content, this.modalOptions).result.then(
(result) => {
// Modal closed normally; saving itself is handled by saveScenario().
},
(reason) => {
// Modal dismissed — nothing to do.
});
}
// Asks for confirmation, then deletes the saved scenario identified by
// valueSelected and refreshes the saved-scenario list on success.
// The arrow callbacks preserve `this`, so the original `let that = this`
// alias (used inconsistently alongside `this`) has been removed.
deleteSavedList(){
Notiflix.Confirm.show('Confirm Delete','Are you sure you want to delete this item?','Yes','No',
()=>{
//scenario_planner_listdelete
this.apiServices.scenario_planner_listdelete(this.valueSelected).subscribe((res:any)=>{
if(res.code==200 && res.status=='success'){
this.getSavedData();
Notiflix.Notify.success('Deleted Successfully ! ');
}
});
});
}
// Loads a saved scenario (valueSelected != 0) or restores the default
// data set (valueSelected == 0), re-applies the segment and placement
// filters, and rebuilds the table and chart.
LoadSaveList(){
// Reset the summary totals before loading new data.
this.incremantalCSV=0;
this.totalActivationCost=0;
this.totalscvROAS=0;
if(this.valueSelected!=0){
//load data
Notiflix.Loading.dots('Loading...');
this.apiServices.scenario_planner_listdetails(this.valueSelected).subscribe((res:any)=>{
console.log(res,"listDetails");
Notiflix.Loading.remove();
let response=res;
if(res.code==200 && res.status=='success'){
this.resetFilter();
// json_data holds the rows exactly as they were saved.
let filterData:any=response['data'][0].json_data;
// Rebuild the segment filter options from the loaded rows, then
// select all segments by default.
this.groupedOnPackType=groupByJson(filterData,'pack_type');
this.segmentList=Object.keys(this.groupedOnPackType);
this.selectedSegmentList = this.segmentList;
filterData = filterData.filter((data:any) => this.selectedSegmentList.includes(data["pack_type"]));
// Narrow further by the placement-type filter, when one is active.
if(this.selectedplacementTypes.length!=0){
let to_find:any=[...this.selectedplacementTypes];
console.log(to_find,"to_find");
filterData=recursiveFind(filterData,to_find);
console.log(to_find,"to_find")
}
// Highest expected lift first.
filterData=filterData.sort((a:any, b:any) => b.processed_lift - a.processed_lift);
console.log(filterData,"filterData");
this.dataSource = new MatTableDataSource<ScenarioPlanner>(filterData);
this.dataSource.paginator = this.paginator;
this.chartInit(filterData);
Notiflix.Notify.success('Senario is loaded successfully !!!');
this.filterData=filterData;
this.modalService.dismissAll();
}
});
}else{
//load default data
// Same filtering pipeline as above, applied to the in-memory defaults
// (no server round-trip, no sort/reset — preserved as-is).
let filterData:any = this.defaultData.filter((data:any) => this.selectedSegmentList.includes(data["pack_type"]));
if(this.selectedplacementTypes.length!=0){
let to_find:any=[...this.selectedplacementTypes];
filterData=recursiveFind(filterData,to_find);
}
this.dataSource = new MatTableDataSource<ScenarioPlanner>(filterData);
this.dataSource.paginator = this.paginator;
this.chartInit(filterData);
this.modalService.dismissAll();
}
}
// Persists the currently filtered scenario under the user-supplied name.
// The planner type reflects where the data originated: optimizer output
// vs. a manual simulation.
saveScenario(){
const planner_type = this.SOURCE=='from_opt_activation' ? 'optimizer' : 'simulation';
const payload = {
"name":this.FileName,
"json_data":this.filterData,
"planner_type":planner_type
}
// A non-empty name is required before saving.
if(this.FileName.trim()==''){
Notiflix.Notify.failure('Please Enter The Scenario Name')
return;
}
this.apiServices.scenario_planner_simulate_save(payload).subscribe((res:any)=>{
console.log(res,"res")
if(res.code==200){
// Saved: close the modal, refresh the saved list, clear the name field.
this.modalService.dismissAll();
Notiflix.Notify.success('Simulation is Saved Successfully');
this.getSavedData();
this.FileName='';
}else if(res.status=='Failed'){
Notiflix.Notify.failure('Failed to save record');
}
});
}
getSaved | constructor | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.