Dataset columns:
  code — string (lengths 31 to 2.05k)
  label_name — string (5 classes)
  label — int64 (0 to 4)
func CsrfFromForm(param string) func(c *fiber.Ctx) (string, error) {
	return func(c *fiber.Ctx) (string, error) {
		token := c.FormValue(param)
		if token == "" {
			return "", errMissingForm
		}
		return token, nil
	}
}
Base
1
func NewSortParameter(sort admin.Sort) (SortParameter, error) {
	var gormOrderExpression string
	switch sort.Direction {
	case admin.Sort_DESCENDING:
		gormOrderExpression = fmt.Sprintf(gormDescending, sort.Key)
	case admin.Sort_ASCENDING:
		gormOrderExpression = fmt.Sprintf(gormAscending, sort.Key)
	default:
		return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "invalid sort order specified: %v", sort)
	}
	return &sortParamImpl{
		gormOrderExpression: gormOrderExpression,
	}, nil
}
Base
1
func TestSortParameter_Descending(t *testing.T) {
	sortParameter, err := NewSortParameter(admin.Sort{
		Direction: admin.Sort_DESCENDING,
		Key:       "project",
	})
	assert.Nil(t, err)
	assert.Equal(t, "project desc", sortParameter.GetGormOrderExpr())
}
Base
1
func TestSortParameter_Ascending(t *testing.T) {
	sortParameter, err := NewSortParameter(admin.Sort{
		Direction: admin.Sort_ASCENDING,
		Key:       "name",
	})
	assert.Nil(t, err)
	assert.Equal(t, "name asc", sortParameter.GetGormOrderExpr())
}
Base
1
func (m *LaunchPlanManager) ListLaunchPlans(ctx context.Context, request admin.ResourceListRequest) (
	*admin.LaunchPlanList, error) {
	// Check required fields
	if err := validation.ValidateResourceListRequest(request); err != nil {
		logger.Debugf(ctx, "ListLaunchPlans request [%+v] is invalid: %v", request, err)
		return nil, err
	}
	ctx = m.getNamedEntityContext(ctx, request.Id)
	filters, err := util.GetDbFilters(util.FilterSpec{
		Project:        request.Id.Project,
		Domain:         request.Id.Domain,
		Name:           request.Id.Name,
		RequestFilters: request.Filters,
	}, common.LaunchPlan)
	if err != nil {
		return nil, err
	}
	var sortParameter common.SortParameter
	if request.SortBy != nil {
		sortParameter, err = common.NewSortParameter(*request.SortBy)
		if err != nil {
			return nil, err
		}
	}
	offset, err := validation.ValidateToken(request.Token)
	if err != nil {
		return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
			"invalid pagination token %s for ListLaunchPlans", request.Token)
	}
	listLaunchPlansInput := repoInterfaces.ListResourceInput{
		Limit:         int(request.Limit),
		Offset:        offset,
		InlineFilters: filters,
		SortParameter: sortParameter,
	}
	output, err := m.db.LaunchPlanRepo().List(ctx, listLaunchPlansInput)
	if err != nil {
		logger.Debugf(ctx, "Failed to list launch plans for request [%+v] with err %v", request, err)
		return nil, err
	}
	launchPlanList, err := transformers.FromLaunchPlanModels(output.LaunchPlans)
	if err != nil {
		logger.Errorf(ctx, "Failed to transform launch plan models [%+v] with err: %v", output.LaunchPlans, err)
		return nil, err
	}
	var token string
	if len(output.LaunchPlans) == int(request.Limit) {
		token = strconv.Itoa(offset + len(output.LaunchPlans))
	}
	return &admin.LaunchPlanList{
		LaunchPlans: launchPlanList,
		Token:       token,
	}, nil
}
Base
1
func (m *LaunchPlanManager) ListLaunchPlanIds(ctx context.Context, request admin.NamedEntityIdentifierListRequest) (
	*admin.NamedEntityIdentifierList, error) {
	ctx = contextutils.WithProjectDomain(ctx, request.Project, request.Domain)
	filters, err := util.GetDbFilters(util.FilterSpec{
		Project: request.Project,
		Domain:  request.Domain,
	}, common.LaunchPlan)
	if err != nil {
		return nil, err
	}
	var sortParameter common.SortParameter
	if request.SortBy != nil {
		sortParameter, err = common.NewSortParameter(*request.SortBy)
		if err != nil {
			return nil, err
		}
	}
	offset, err := validation.ValidateToken(request.Token)
	if err != nil {
		return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "invalid pagination token %s", request.Token)
	}
	listLaunchPlansInput := repoInterfaces.ListResourceInput{
		Limit:         int(request.Limit),
		Offset:        offset,
		InlineFilters: filters,
		SortParameter: sortParameter,
	}
	output, err := m.db.LaunchPlanRepo().ListLaunchPlanIdentifiers(ctx, listLaunchPlansInput)
	if err != nil {
		logger.Debugf(ctx, "Failed to list launch plan ids for request [%+v] with err %v", request, err)
		return nil, err
	}
	var token string
	if len(output.LaunchPlans) == int(request.Limit) {
		token = strconv.Itoa(offset + len(output.LaunchPlans))
	}
	return &admin.NamedEntityIdentifierList{
		Entities: transformers.FromLaunchPlanModelsToIdentifiers(output.LaunchPlans),
		Token:    token,
	}, nil
}
Base
1
func (m *LaunchPlanManager) ListActiveLaunchPlans(ctx context.Context, request admin.ActiveLaunchPlanListRequest) (
	*admin.LaunchPlanList, error) {
	// Check required fields
	if err := validation.ValidateActiveLaunchPlanListRequest(request); err != nil {
		logger.Debugf(ctx, "ListActiveLaunchPlans request [%+v] is invalid: %v", request, err)
		return nil, err
	}
	ctx = contextutils.WithProjectDomain(ctx, request.Project, request.Domain)
	filters, err := util.ListActiveLaunchPlanVersionsFilters(request.Project, request.Domain)
	if err != nil {
		return nil, err
	}
	var sortParameter common.SortParameter
	if request.SortBy != nil {
		sortParameter, err = common.NewSortParameter(*request.SortBy)
		if err != nil {
			return nil, err
		}
	}
	offset, err := validation.ValidateToken(request.Token)
	if err != nil {
		return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
			"invalid pagination token %s for ListActiveLaunchPlans", request.Token)
	}
	listLaunchPlansInput := repoInterfaces.ListResourceInput{
		Limit:         int(request.Limit),
		Offset:        offset,
		InlineFilters: filters,
		SortParameter: sortParameter,
	}
	output, err := m.db.LaunchPlanRepo().List(ctx, listLaunchPlansInput)
	if err != nil {
		logger.Debugf(ctx, "Failed to list active launch plans for request [%+v] with err %v", request, err)
		return nil, err
	}
	launchPlanList, err := transformers.FromLaunchPlanModels(output.LaunchPlans)
	if err != nil {
		logger.Errorf(ctx, "Failed to transform active launch plan models [%+v] with err: %v", output.LaunchPlans, err)
		return nil, err
	}
	var token string
	if len(output.LaunchPlans) == int(request.Limit) {
		token = strconv.Itoa(offset + len(output.LaunchPlans))
	}
	return &admin.LaunchPlanList{
		LaunchPlans: launchPlanList,
		Token:       token,
	}, nil
}
Base
1
func (m *NodeExecutionManager) listNodeExecutions(
	ctx context.Context, identifierFilters []common.InlineFilter,
	requestFilters string, limit uint32, requestToken string, sortBy *admin.Sort, mapFilters []common.MapFilter) (
	*admin.NodeExecutionList, error) {
	filters, err := util.AddRequestFilters(requestFilters, common.NodeExecution, identifierFilters)
	if err != nil {
		return nil, err
	}
	var sortParameter common.SortParameter
	if sortBy != nil {
		sortParameter, err = common.NewSortParameter(*sortBy)
		if err != nil {
			return nil, err
		}
	}
	offset, err := validation.ValidateToken(requestToken)
	if err != nil {
		return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
			"invalid pagination token %s for ListNodeExecutions", requestToken)
	}
	listInput := repoInterfaces.ListResourceInput{
		Limit:         int(limit),
		Offset:        offset,
		InlineFilters: filters,
		SortParameter: sortParameter,
	}
	listInput.MapFilters = mapFilters
	output, err := m.db.NodeExecutionRepo().List(ctx, listInput)
	if err != nil {
		logger.Debugf(ctx, "Failed to list node executions for request with err %v", err)
		return nil, err
	}
	var token string
	if len(output.NodeExecutions) == int(limit) {
		token = strconv.Itoa(offset + len(output.NodeExecutions))
	}
	nodeExecutionList, err := m.transformNodeExecutionModelList(ctx, output.NodeExecutions)
	if err != nil {
		logger.Debugf(ctx, "failed to transform node execution models for request with err: %v", err)
		return nil, err
	}
	return &admin.NodeExecutionList{
		NodeExecutions: nodeExecutionList,
		Token:          token,
	}, nil
}
Base
1
func TestListNodeExecutions_NothingToReturn(t *testing.T) {
	repository := repositoryMocks.NewMockRepository()
	repository.NodeExecutionRepo().(*repositoryMocks.MockNodeExecutionRepo).SetListCallback(
		func(ctx context.Context, input interfaces.ListResourceInput) (
			interfaces.NodeExecutionCollectionOutput, error) {
			return interfaces.NodeExecutionCollectionOutput{}, nil
		})
	var listExecutionsCalled bool
	repository.ExecutionRepo().(*repositoryMocks.MockExecutionRepo).SetListCallback(
		func(ctx context.Context, input interfaces.ListResourceInput) (
			interfaces.ExecutionCollectionOutput, error) {
			listExecutionsCalled = true
			return interfaces.ExecutionCollectionOutput{}, nil
		})
	nodeExecManager := NewNodeExecutionManager(repository, getMockExecutionsConfigProvider(), make([]string, 0),
		getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockNodeExecutionRemoteURL,
		nil, nil, &eventWriterMocks.NodeExecutionEventWriter{})
	_, err := nodeExecManager.ListNodeExecutions(context.Background(), admin.NodeExecutionListRequest{
		WorkflowExecutionId: &core.WorkflowExecutionIdentifier{
			Project: "project",
			Domain:  "domain",
			Name:    "name",
		},
		Limit: 1,
		Token: "2",
		SortBy: &admin.Sort{
			Direction: admin.Sort_ASCENDING,
			Key:       "domain",
		},
	})
	assert.Nil(t, err)
	assert.False(t, listExecutionsCalled)
}
Base
1
func (m *ProjectManager) ListProjects(ctx context.Context, request admin.ProjectListRequest) (*admin.Projects, error) {
	spec := util.FilterSpec{
		RequestFilters: request.Filters,
	}
	filters, err := util.GetDbFilters(spec, common.Project)
	if err != nil {
		return nil, err
	}
	var sortParameter common.SortParameter
	if request.SortBy != nil {
		sortParameter, err = common.NewSortParameter(*request.SortBy)
		if err != nil {
			return nil, err
		}
	} else {
		sortParameter = alphabeticalSortParam
	}
	offset, err := validation.ValidateToken(request.Token)
	if err != nil {
		return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
			"invalid pagination token %s for ListProjects", request.Token)
	}
	// And finally, query the database
	listProjectsInput := repoInterfaces.ListResourceInput{
		Limit:         int(request.Limit),
		Offset:        offset,
		InlineFilters: filters,
		SortParameter: sortParameter,
	}
	projectModels, err := m.db.ProjectRepo().List(ctx, listProjectsInput)
	if err != nil {
		return nil, err
	}
	projects := transformers.FromProjectModels(projectModels, m.getDomains())
	var token string
	if len(projects) == int(request.Limit) {
		token = strconv.Itoa(offset + len(projects))
	}
	return &admin.Projects{
		Projects: projects,
		Token:    token,
	}, nil
}
Base
1
func (s *SignalManager) ListSignals(ctx context.Context, request admin.SignalListRequest) (*admin.SignalList, error) {
	if err := validation.ValidateSignalListRequest(ctx, request); err != nil {
		logger.Debugf(ctx, "ListSignals request [%+v] is invalid: %v", request, err)
		return nil, err
	}
	ctx = getExecutionContext(ctx, request.WorkflowExecutionId)
	identifierFilters, err := util.GetWorkflowExecutionIdentifierFilters(ctx, *request.WorkflowExecutionId)
	if err != nil {
		return nil, err
	}
	filters, err := util.AddRequestFilters(request.Filters, common.Signal, identifierFilters)
	if err != nil {
		return nil, err
	}
	var sortParameter common.SortParameter
	if request.SortBy != nil {
		sortParameter, err = common.NewSortParameter(*request.SortBy)
		if err != nil {
			return nil, err
		}
	}
	offset, err := validation.ValidateToken(request.Token)
	if err != nil {
		return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
			"invalid pagination token %s for ListSignals", request.Token)
	}
	signalModelList, err := s.db.SignalRepo().List(ctx, repoInterfaces.ListResourceInput{
		InlineFilters: filters,
		Offset:        offset,
		Limit:         int(request.Limit),
		SortParameter: sortParameter,
	})
	if err != nil {
		logger.Debugf(ctx, "Failed to list signals with request [%+v] with err %v", request, err)
		return nil, err
	}
	signalList, err := transformers.FromSignalModels(signalModelList)
	if err != nil {
		logger.Debugf(ctx, "failed to transform signal models for request [%+v] with err: %v", request, err)
		return nil, err
	}
	var token string
	if len(signalList) == int(request.Limit) {
		token = strconv.Itoa(offset + len(signalList))
	}
	return &admin.SignalList{
		Signals: signalList,
		Token:   token,
	}, nil
}
Base
1
func (t *TaskManager) ListTasks(ctx context.Context, request admin.ResourceListRequest) (*admin.TaskList, error) {
	// Check required fields
	if err := validation.ValidateResourceListRequest(request); err != nil {
		logger.Debugf(ctx, "Invalid request [%+v]: %v", request, err)
		return nil, err
	}
	ctx = contextutils.WithProjectDomain(ctx, request.Id.Project, request.Id.Domain)
	ctx = contextutils.WithTaskID(ctx, request.Id.Name)
	spec := util.FilterSpec{
		Project:        request.Id.Project,
		Domain:         request.Id.Domain,
		Name:           request.Id.Name,
		RequestFilters: request.Filters,
	}
	filters, err := util.GetDbFilters(spec, common.Task)
	if err != nil {
		return nil, err
	}
	var sortParameter common.SortParameter
	if request.SortBy != nil {
		sortParameter, err = common.NewSortParameter(*request.SortBy)
		if err != nil {
			return nil, err
		}
	}
	offset, err := validation.ValidateToken(request.Token)
	if err != nil {
		return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
			"invalid pagination token %s for ListTasks", request.Token)
	}
	// And finally, query the database
	listTasksInput := repoInterfaces.ListResourceInput{
		Limit:         int(request.Limit),
		Offset:        offset,
		InlineFilters: filters,
		SortParameter: sortParameter,
	}
	output, err := t.db.TaskRepo().List(ctx, listTasksInput)
	if err != nil {
		logger.Debugf(ctx, "Failed to list tasks with id [%+v] with err %v", request.Id, err)
		return nil, err
	}
	taskList, err := transformers.FromTaskModels(output.Tasks)
	if err != nil {
		logger.Errorf(ctx, "Failed to transform task models [%+v] with err: %v", output.Tasks, err)
		return nil, err
	}
	var token string
	if len(taskList) == int(request.Limit) {
		token = strconv.Itoa(offset + len(taskList))
	}
	return &admin.TaskList{
		Tasks: taskList,
		Token: token,
	}, nil
}
Base
1
func (t *TaskManager) ListUniqueTaskIdentifiers(ctx context.Context, request admin.NamedEntityIdentifierListRequest) (
	*admin.NamedEntityIdentifierList, error) {
	if err := validation.ValidateNamedEntityIdentifierListRequest(request); err != nil {
		logger.Debugf(ctx, "invalid request [%+v]: %v", request, err)
		return nil, err
	}
	ctx = contextutils.WithProjectDomain(ctx, request.Project, request.Domain)
	filters, err := util.GetDbFilters(util.FilterSpec{
		Project: request.Project,
		Domain:  request.Domain,
	}, common.Task)
	if err != nil {
		return nil, err
	}
	var sortParameter common.SortParameter
	if request.SortBy != nil {
		sortParameter, err = common.NewSortParameter(*request.SortBy)
		if err != nil {
			return nil, err
		}
	}
	offset, err := validation.ValidateToken(request.Token)
	if err != nil {
		return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
			"invalid pagination token %s for ListUniqueTaskIdentifiers", request.Token)
	}
	listTasksInput := repoInterfaces.ListResourceInput{
		Limit:         int(request.Limit),
		Offset:        offset,
		InlineFilters: filters,
		SortParameter: sortParameter,
	}
	output, err := t.db.TaskRepo().ListTaskIdentifiers(ctx, listTasksInput)
	if err != nil {
		logger.Debugf(ctx, "Failed to list tasks ids with project: %s and domain: %s with err %v",
			request.Project, request.Domain, err)
		return nil, err
	}
	idList := transformers.FromTaskModelsToIdentifiers(output.Tasks)
	var token string
	if len(idList) == int(request.Limit) {
		token = strconv.Itoa(offset + len(idList))
	}
	return &admin.NamedEntityIdentifierList{
		Entities: idList,
		Token:    token,
	}, nil
}
Base
1
func TestParseFilters(t *testing.T) {
	filterExpression := "eq(foo, 123)+ne(version, TheWorst)+value_in(bar, 4;5;6)"
	taskFilters, err := ParseFilters(filterExpression, common.Task)
	assert.NoError(t, err)
	assert.Len(t, taskFilters, 3)
	actualFilterExpression, _ := taskFilters[0].GetGormQueryExpr()
	assert.Equal(t, "foo = ?", actualFilterExpression.Query)
	assert.Equal(t, "123", actualFilterExpression.Args)
	actualFilterExpression, _ = taskFilters[1].GetGormQueryExpr()
	assert.Equal(t, "version <> ?", actualFilterExpression.Query)
	assert.Equal(t, "TheWorst", actualFilterExpression.Args)
	actualFilterExpression, _ = taskFilters[2].GetGormQueryExpr()
	assert.Equal(t, "bar in (?)", actualFilterExpression.Query)
	assert.Equal(t, []interface{}{"4", "5", "6"}, actualFilterExpression.Args)
	filterExpression = "invalid_function(foo,bar)"
	_, err = ParseFilters(filterExpression, common.Task)
	assert.Error(t, err)
	assert.EqualError(t, err, "unrecognized filter function: invalid_function")
}
Base
1
func TestAddRequestFilters(t *testing.T) {
	filters, err := AddRequestFilters(
		"ne(version, TheWorst)+eq(workflow.name, workflow)", common.Execution, make([]common.InlineFilter, 0))
	assert.Nil(t, err)
	assert.Len(t, filters, 2)
	expression, err := filters[0].GetGormQueryExpr()
	assert.Nil(t, err)
	assert.Equal(t, "version <> ?", expression.Query)
	assert.Equal(t, "TheWorst", expression.Args)
	expression, err = filters[1].GetGormQueryExpr()
	assert.Nil(t, err)
	assert.Equal(t, testutils.NameQueryPattern, expression.Query)
	assert.Equal(t, "workflow", expression.Args)
}
Base
1
func (w *WorkflowManager) ListWorkflows(
	ctx context.Context, request admin.ResourceListRequest) (*admin.WorkflowList, error) {
	// Check required fields
	if err := validation.ValidateResourceListRequest(request); err != nil {
		return nil, err
	}
	ctx = contextutils.WithProjectDomain(ctx, request.Id.Project, request.Id.Domain)
	ctx = contextutils.WithWorkflowID(ctx, request.Id.Name)
	filters, err := util.GetDbFilters(util.FilterSpec{
		Project:        request.Id.Project,
		Domain:         request.Id.Domain,
		Name:           request.Id.Name,
		RequestFilters: request.Filters,
	}, common.Workflow)
	if err != nil {
		return nil, err
	}
	var sortParameter common.SortParameter
	if request.SortBy != nil {
		sortParameter, err = common.NewSortParameter(*request.SortBy)
		if err != nil {
			return nil, err
		}
	}
	offset, err := validation.ValidateToken(request.Token)
	if err != nil {
		return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
			"invalid pagination token %s for ListWorkflows", request.Token)
	}
	listWorkflowsInput := repoInterfaces.ListResourceInput{
		Limit:         int(request.Limit),
		Offset:        offset,
		InlineFilters: filters,
		SortParameter: sortParameter,
	}
	output, err := w.db.WorkflowRepo().List(ctx, listWorkflowsInput)
	if err != nil {
		logger.Debugf(ctx, "Failed to list workflows with [%+v] with err %v", request.Id, err)
		return nil, err
	}
	workflowList, err := transformers.FromWorkflowModels(output.Workflows)
	if err != nil {
		logger.Errorf(ctx, "Failed to transform workflow models [%+v] with err: %v", output.Workflows, err)
		return nil, err
	}
	var token string
	if len(output.Workflows) == int(request.Limit) {
		token = strconv.Itoa(offset + len(output.Workflows))
	}
	return &admin.WorkflowList{
		Workflows: workflowList,
		Token:     token,
	}, nil
}
Base
1
func (w *WorkflowManager) ListWorkflowIdentifiers(ctx context.Context, request admin.NamedEntityIdentifierListRequest) (
	*admin.NamedEntityIdentifierList, error) {
	if err := validation.ValidateNamedEntityIdentifierListRequest(request); err != nil {
		logger.Debugf(ctx, "invalid request [%+v]: %v", request, err)
		return nil, err
	}
	ctx = contextutils.WithProjectDomain(ctx, request.Project, request.Domain)
	filters, err := util.GetDbFilters(util.FilterSpec{
		Project: request.Project,
		Domain:  request.Domain,
	}, common.Workflow)
	if err != nil {
		return nil, err
	}
	var sortParameter common.SortParameter
	if request.SortBy != nil {
		sortParameter, err = common.NewSortParameter(*request.SortBy)
		if err != nil {
			return nil, err
		}
	}
	offset, err := validation.ValidateToken(request.Token)
	if err != nil {
		return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument,
			"invalid pagination token %s for ListWorkflowIdentifiers", request.Token)
	}
	listWorkflowsInput := repoInterfaces.ListResourceInput{
		Limit:         int(request.Limit),
		Offset:        offset,
		InlineFilters: filters,
		SortParameter: sortParameter,
	}
	output, err := w.db.WorkflowRepo().ListIdentifiers(ctx, listWorkflowsInput)
	if err != nil {
		logger.Debugf(ctx, "Failed to list workflow ids with project: %s and domain: %s with err %v",
			request.Project, request.Domain, err)
		return nil, err
	}
	var token string
	if len(output.Workflows) == int(request.Limit) {
		token = strconv.Itoa(offset + len(output.Workflows))
	}
	entities := transformers.FromWorkflowModelsToIdentifiers(output.Workflows)
	return &admin.NamedEntityIdentifierList{
		Entities: entities,
		Token:    token,
	}, nil
}
Base
1
func TestListExecutions_Order(t *testing.T) {
	executionRepo := NewExecutionRepo(GetDbForTest(t), errors.NewTestErrorTransformer(), mockScope.NewTestScope())
	executions := make([]map[string]interface{}, 0)
	GlobalMock := mocket.Catcher.Reset()
	// Only match on queries that include ordering by name
	mockQuery := GlobalMock.NewMock().WithQuery(`name asc`)
	mockQuery.WithReply(executions)
	sortParameter, _ := common.NewSortParameter(admin.Sort{
		Direction: admin.Sort_ASCENDING,
		Key:       "name",
	})
	_, err := executionRepo.List(context.Background(), interfaces.ListResourceInput{
		SortParameter: sortParameter,
		InlineFilters: []common.InlineFilter{
			getEqualityFilter(common.Task, "project", project),
			getEqualityFilter(common.Task, "domain", domain),
			getEqualityFilter(common.Task, "name", name),
		},
		Limit: 20,
	})
	assert.NoError(t, err)
	assert.True(t, mockQuery.Triggered)
}
Base
1
func TestListExecutions_WithTags(t *testing.T) {
	executionRepo := NewExecutionRepo(GetDbForTest(t), errors.NewTestErrorTransformer(), mockScope.NewTestScope())
	executions := make([]map[string]interface{}, 0)
	GlobalMock := mocket.Catcher.Reset()
	// Only match on queries that include ordering by name
	mockQuery := GlobalMock.NewMock().WithQuery(`name asc`)
	mockQuery.WithReply(executions)
	sortParameter, _ := common.NewSortParameter(admin.Sort{
		Direction: admin.Sort_ASCENDING,
		Key:       "name",
	})
	vals := []string{"tag1", "tag2"}
	tagFilter, err := common.NewRepeatedValueFilter(common.ExecutionAdminTag, common.ValueIn, "admin_tag_name", vals)
	assert.NoError(t, err)
	_, err = executionRepo.List(context.Background(), interfaces.ListResourceInput{
		SortParameter: sortParameter,
		InlineFilters: []common.InlineFilter{
			getEqualityFilter(common.Task, "project", project),
			getEqualityFilter(common.Task, "domain", domain),
			getEqualityFilter(common.Task, "name", name),
			tagFilter,
		},
		Limit: 20,
	})
	assert.NoError(t, err)
	assert.True(t, mockQuery.Triggered)
}
Base
1
func TestListLaunchPlans_Order(t *testing.T) {
	launchPlanRepo := NewLaunchPlanRepo(GetDbForTest(t), errors.NewTestErrorTransformer(), mockScope.NewTestScope())
	launchPlans := make([]map[string]interface{}, 0)
	GlobalMock := mocket.Catcher.Reset()
	// Only match on queries that include ordering by project
	mockQuery := GlobalMock.NewMock()
	mockQuery.WithQuery(`project desc`)
	mockQuery.WithReply(launchPlans)
	sortParameter, _ := common.NewSortParameter(admin.Sort{
		Direction: admin.Sort_DESCENDING,
		Key:       "project",
	})
	_, err := launchPlanRepo.List(context.Background(), interfaces.ListResourceInput{
		SortParameter: sortParameter,
		InlineFilters: []common.InlineFilter{
			getEqualityFilter(common.LaunchPlan, "project", project),
			getEqualityFilter(common.LaunchPlan, "domain", domain),
			getEqualityFilter(common.LaunchPlan, "name", name),
			getEqualityFilter(common.LaunchPlan, "version", version),
		},
		Limit: 20,
	})
	assert.NoError(t, err)
	assert.True(t, mockQuery.Triggered)
}
Base
1
func TestListNamedEntity(t *testing.T) {
	metadataRepo := NewNamedEntityRepo(GetDbForTest(t), errors.NewTestErrorTransformer(), mockScope.NewTestScope())
	results := make([]map[string]interface{}, 0)
	metadata := getMockNamedEntityResponseFromDb(models.NamedEntity{
		NamedEntityKey: models.NamedEntityKey{
			ResourceType: resourceType,
			Project:      project,
			Domain:       domain,
			Name:         name,
		},
		NamedEntityMetadataFields: models.NamedEntityMetadataFields{
			Description: description,
		},
	})
	results = append(results, metadata)
	GlobalMock := mocket.Catcher.Reset()
	GlobalMock.Logging = true
	mockQuery := GlobalMock.NewMock()
	mockQuery.WithQuery(
		`SELECT entities.project,entities.domain,entities.name,'2' AS resource_type,named_entity_metadata.description,named_entity_metadata.state FROM "named_entity_metadata" RIGHT JOIN (SELECT project,domain,name FROM "workflows" WHERE "domain" = $1 AND "project" = $2 GROUP BY project, domain, name ORDER BY name desc LIMIT 20) AS entities ON named_entity_metadata.resource_type = 2 AND named_entity_metadata.project = entities.project AND named_entity_metadata.domain = entities.domain AND named_entity_metadata.name = entities.name GROUP BY entities.project, entities.domain, entities.name, named_entity_metadata.description, named_entity_metadata.state ORDER BY name desc`).WithReply(results)
	sortParameter, _ := common.NewSortParameter(admin.Sort{
		Direction: admin.Sort_DESCENDING,
		Key:       "name",
	})
	output, err := metadataRepo.List(context.Background(), interfaces.ListNamedEntityInput{
		ResourceType: resourceType,
		Project:      "admintests",
		Domain:       "development",
		ListResourceInput: interfaces.ListResourceInput{
			Limit:         20,
			SortParameter: sortParameter,
		},
	})
	assert.NoError(t, err)
	assert.Len(t, output.Entities, 1)
}
Base
1
func TestListNodeExecutions_Order(t *testing.T) {
	nodeExecutionRepo := NewNodeExecutionRepo(GetDbForTest(t), errors.NewTestErrorTransformer(), mockScope.NewTestScope())
	nodeExecutions := make([]map[string]interface{}, 0)
	GlobalMock := mocket.Catcher.Reset()
	// Only match on queries that include ordering by project
	mockQuery := GlobalMock.NewMock()
	mockQuery.WithQuery(`project desc`)
	mockQuery.WithReply(nodeExecutions)
	sortParameter, _ := common.NewSortParameter(admin.Sort{
		Direction: admin.Sort_DESCENDING,
		Key:       "project",
	})
	_, err := nodeExecutionRepo.List(context.Background(), interfaces.ListResourceInput{
		SortParameter: sortParameter,
		InlineFilters: []common.InlineFilter{
			getEqualityFilter(common.NodeExecution, "phase", nodePhase),
		},
		Limit: 20,
	})
	assert.NoError(t, err)
	assert.True(t, mockQuery.Triggered)
}
Base
1
func TestListProjects(t *testing.T) {
	filter, err := common.NewSingleValueFilter(common.Project, common.Equal, "name", "foo")
	assert.Nil(t, err)
	testListProjects(interfaces.ListResourceInput{
		Offset:        0,
		Limit:         1,
		InlineFilters: []common.InlineFilter{filter},
		SortParameter: alphabeticalSortParam,
	}, `SELECT * FROM "projects" WHERE name = $1 ORDER BY identifier asc LIMIT 1`, t)
}
Base
1
func TestListTasks_Order(t *testing.T) {
	taskRepo := NewTaskRepo(GetDbForTest(t), errors.NewTestErrorTransformer(), mockScope.NewTestScope())
	tasks := make([]map[string]interface{}, 0)
	GlobalMock := mocket.Catcher.Reset()
	GlobalMock.Logging = true
	// Only match on queries that include ordering by project
	mockQuery := GlobalMock.NewMock()
	mockQuery.WithQuery(`project desc`)
	mockQuery.WithReply(tasks)
	sortParameter, _ := common.NewSortParameter(admin.Sort{
		Direction: admin.Sort_DESCENDING,
		Key:       "project",
	})
	_, err := taskRepo.List(context.Background(), interfaces.ListResourceInput{
		SortParameter: sortParameter,
		InlineFilters: []common.InlineFilter{
			getEqualityFilter(common.Task, "project", project),
			getEqualityFilter(common.Task, "domain", domain),
			getEqualityFilter(common.Task, "name", name),
			getEqualityFilter(common.Task, "version", "ABC"),
		},
		Limit: 20,
	})
	assert.Empty(t, err)
	assert.True(t, mockQuery.Triggered)
}
Base
1
func TestListWorkflows_Order(t *testing.T) {
	workflowRepo := NewWorkflowRepo(GetDbForTest(t), errors.NewTestErrorTransformer(), mockScope.NewTestScope())
	workflows := make([]map[string]interface{}, 0)
	GlobalMock := mocket.Catcher.Reset()
	// Only match on queries that include ordering by project
	mockQuery := GlobalMock.NewMock()
	mockQuery.WithQuery(`project desc`)
	mockQuery.WithReply(workflows)
	sortParameter, _ := common.NewSortParameter(admin.Sort{
		Direction: admin.Sort_DESCENDING,
		Key:       "project",
	})
	_, err := workflowRepo.List(context.Background(), interfaces.ListResourceInput{
		SortParameter: sortParameter,
		InlineFilters: []common.InlineFilter{
			getEqualityFilter(common.Workflow, "project", project),
			getEqualityFilter(common.Workflow, "domain", domain),
			getEqualityFilter(common.Workflow, "name", name),
			getEqualityFilter(common.Workflow, "version", "ABC"),
		},
		Limit: 20,
	})
	assert.Empty(t, err)
	assert.True(t, mockQuery.Triggered)
}
Base
1
func (p *Parser) isPrefixHeading(data []byte) bool {
	if data[0] != '#' {
		return false
	}
	if p.extensions&SpaceHeadings != 0 {
		level := skipCharN(data, 0, '#', 6)
		if level == len(data) || data[level] != ' ' {
			return false
		}
	}
	return true
}
Base
1
func (builder *builder) Cmp(i1, i2 frontend.Variable) frontend.Variable {
	vars, _ := builder.toVariables(i1, i2)
	bi1 := builder.ToBinary(vars[0], builder.cs.FieldBitLen())
	bi2 := builder.ToBinary(vars[1], builder.cs.FieldBitLen())
	res := builder.cstZero()
	for i := builder.cs.FieldBitLen() - 1; i >= 0; i-- {
		iszeroi1 := builder.IsZero(bi1[i])
		iszeroi2 := builder.IsZero(bi2[i])
		i1i2 := builder.And(bi1[i], iszeroi2)
		i2i1 := builder.And(bi2[i], iszeroi1)
		n := builder.Select(i2i1, -1, 0)
		m := builder.Select(i1i2, 1, n)
		res = builder.Select(builder.IsZero(res), m, res).(expr.LinearExpression)
	}
	return res
}
Base
1
func (builder *builder) Cmp(i1, i2 frontend.Variable) frontend.Variable {
	vars, _ := builder.toVariables(i1, i2)
	bi1 := builder.ToBinary(vars[0], builder.cs.FieldBitLen())
	bi2 := builder.ToBinary(vars[1], builder.cs.FieldBitLen())
	res := builder.cstZero()
	for i := builder.cs.FieldBitLen() - 1; i >= 0; i-- {
		iszeroi1 := builder.IsZero(bi1[i])
		iszeroi2 := builder.IsZero(bi2[i])
		i1i2 := builder.And(bi1[i], iszeroi2)
		i2i1 := builder.And(bi2[i], iszeroi1)
		n := builder.Select(i2i1, -1, 0)
		m := builder.Select(i1i2, 1, n)
		res = builder.Select(builder.IsZero(res), m, res).(expr.LinearExpression)
	}
	return res
}
Pillar
3
func (builder *builder) mustBeLessOrEqVar(a, bound frontend.Variable) {
	// here bound is NOT a constant,
	// but a can be either constant or a wire.
	_, aConst := builder.constantValue(a)
	debug := builder.newDebugInfo("mustBeLessOrEq", a, " <= ", bound)
	nbBits := builder.cs.FieldBitLen()
	aBits := bits.ToBinary(builder, a, bits.WithNbDigits(nbBits), bits.WithUnconstrainedOutputs())
	boundBits := builder.ToBinary(bound, nbBits)
	// constraint added
	added := make([]int, 0, nbBits)
	p := make([]frontend.Variable, nbBits+1)
	p[nbBits] = builder.cstOne()
	zero := builder.cstZero()
	for i := nbBits - 1; i >= 0; i-- {
		// if bound[i] == 0
		//   p[i] = p[i+1]
		//   t = p[i+1]
		// else
		//   p[i] = p[i+1] * a[i]
		//   t = 0
		v := builder.Mul(p[i+1], aBits[i])
		p[i] = builder.Select(boundBits[i], v, p[i+1])
		t := builder.Select(boundBits[i], zero, p[i+1])
		// (1 - t - ai) * ai == 0
		var l frontend.Variable
		l = builder.cstOne()
		l = builder.Sub(l, t, aBits[i])
		// note if bound[i] == 1, this constraint is (1 - ai) * ai == 0
		// → this is a boolean constraint
		// if bound[i] == 0, t must be 0 or 1, thus ai must be 0 or 1 too
		if aConst {
			// aBits[i] is a constant;
			l = builder.Mul(l, aBits[i])
			// TODO @gbotrel this constraint seems useless.
			added = append(added, builder.cs.AddR1C(builder.newR1C(l, zero, zero), builder.genericGate))
		} else {
			added = append(added, builder.cs.AddR1C(builder.newR1C(l, aBits[i], zero), builder.genericGate))
		}
	}
	builder.cs.AttachDebugInfo(debug, added)
}
Base
1
func (builder *builder) mustBeLessOrEqVar(a, bound frontend.Variable) {
	// here bound is NOT a constant,
	// but a can be either constant or a wire.
	_, aConst := builder.constantValue(a)
	debug := builder.newDebugInfo("mustBeLessOrEq", a, " <= ", bound)
	nbBits := builder.cs.FieldBitLen()
	aBits := bits.ToBinary(builder, a, bits.WithNbDigits(nbBits), bits.WithUnconstrainedOutputs())
	boundBits := builder.ToBinary(bound, nbBits)
	// constraint added
	added := make([]int, 0, nbBits)
	p := make([]frontend.Variable, nbBits+1)
	p[nbBits] = builder.cstOne()
	zero := builder.cstZero()
	for i := nbBits - 1; i >= 0; i-- {
		// if bound[i] == 0
		//   p[i] = p[i+1]
		//   t = p[i+1]
		// else
		//   p[i] = p[i+1] * a[i]
		//   t = 0
		v := builder.Mul(p[i+1], aBits[i])
		p[i] = builder.Select(boundBits[i], v, p[i+1])
		t := builder.Select(boundBits[i], zero, p[i+1])
		// (1 - t - ai) * ai == 0
		var l frontend.Variable
		l = builder.cstOne()
		l = builder.Sub(l, t, aBits[i])
		// note if bound[i] == 1, this constraint is (1 - ai) * ai == 0
		// → this is a boolean constraint
		// if bound[i] == 0, t must be 0 or 1, thus ai must be 0 or 1 too
		if aConst {
			// aBits[i] is a constant;
			l = builder.Mul(l, aBits[i])
			// TODO @gbotrel this constraint seems useless.
			added = append(added, builder.cs.AddR1C(builder.newR1C(l, zero, zero), builder.genericGate))
		} else {
			added = append(added, builder.cs.AddR1C(builder.newR1C(l, aBits[i], zero), builder.genericGate))
		}
	}
	builder.cs.AttachDebugInfo(debug, added)
}
Pillar
3
func (builder *builder) AssertIsLessOrEqual(v frontend.Variable, bound frontend.Variable) {
	cv, vConst := builder.constantValue(v)
	cb, bConst := builder.constantValue(bound)
	// both inputs are constants
	if vConst && bConst {
		bv, bb := builder.cs.ToBigInt(cv), builder.cs.ToBigInt(cb)
		if bv.Cmp(bb) == 1 {
			panic(fmt.Sprintf("AssertIsLessOrEqual: %s > %s", bv.String(), bb.String()))
		}
	}
	// bound is constant
	if bConst {
		vv := builder.toVariable(v)
		builder.mustBeLessOrEqCst(vv, builder.cs.ToBigInt(cb))
		return
	}
	builder.mustBeLessOrEqVar(v, bound)
}
Base
1
func (builder *builder) AssertIsLessOrEqual(v frontend.Variable, bound frontend.Variable) {
	cv, vConst := builder.constantValue(v)
	cb, bConst := builder.constantValue(bound)
	// both inputs are constants
	if vConst && bConst {
		bv, bb := builder.cs.ToBigInt(cv), builder.cs.ToBigInt(cb)
		if bv.Cmp(bb) == 1 {
			panic(fmt.Sprintf("AssertIsLessOrEqual: %s > %s", bv.String(), bb.String()))
		}
	}
	// bound is constant
	if bConst {
		vv := builder.toVariable(v)
		builder.mustBeLessOrEqCst(vv, builder.cs.ToBigInt(cb))
		return
	}
	builder.mustBeLessOrEqVar(v, bound)
}
Pillar
3
func (builder *builder) mustBeLessOrEqCst(a expr.LinearExpression, bound *big.Int) {
	nbBits := builder.cs.FieldBitLen()
	// ensure the bound is positive; its bit-len doesn't matter
	if bound.Sign() == -1 {
		panic("AssertIsLessOrEqual: bound must be positive")
	}
	if bound.BitLen() > nbBits {
		panic("AssertIsLessOrEqual: bound is too large, constraint will never be satisfied")
	}
	// debug info
	debug := builder.newDebugInfo("mustBeLessOrEq", a, " <= ", builder.toVariable(bound))
	// note that at this stage, we didn't boolean-constrain these new variables yet
	// (as opposed to ToBinary)
	aBits := bits.ToBinary(builder, a, bits.WithNbDigits(nbBits), bits.WithUnconstrainedOutputs())
	// t = number of trailing one bits in the bound
	t := 0
	for i := 0; i < nbBits; i++ {
		if bound.Bit(i) == 0 {
			break
		}
		t++
	}
	// constraint added
	added := make([]int, 0, nbBits)
	p := make([]frontend.Variable, nbBits+1)
	// p[i] == 1 → a[j] == c[j] for all j ⩾ i
	p[nbBits] = builder.cstOne()
	for i := nbBits - 1; i >= t; i-- {
		if bound.Bit(i) == 0 {
			p[i] = p[i+1]
		} else {
			p[i] = builder.Mul(p[i+1], aBits[i])
		}
	}
	for i := nbBits - 1; i >= 0; i-- {
		if bound.Bit(i) == 0 {
			// (1 - p(i+1) - ai) * ai == 0
			l := builder.Sub(1, p[i+1])
			l = builder.Sub(l, aBits[i])
			added = append(added, builder.cs.AddR1C(builder.newR1C(l, aBits[i], builder.cstZero()), builder.genericGate))
		} else {
			builder.AssertIsBoolean(aBits[i])
		}
	}
	if len(added) != 0 {
		builder.cs.AttachDebugInfo(debug, added)
	}
}
Base
1
func (builder *builder) mustBeLessOrEqCst(a expr.LinearExpression, bound *big.Int) {
	nbBits := builder.cs.FieldBitLen()
	// ensure the bound is positive; its bit-len doesn't matter
	if bound.Sign() == -1 {
		panic("AssertIsLessOrEqual: bound must be positive")
	}
	if bound.BitLen() > nbBits {
		panic("AssertIsLessOrEqual: bound is too large, constraint will never be satisfied")
	}
	// debug info
	debug := builder.newDebugInfo("mustBeLessOrEq", a, " <= ", builder.toVariable(bound))
	// note that at this stage, we didn't boolean-constrain these new variables yet
	// (as opposed to ToBinary)
	aBits := bits.ToBinary(builder, a, bits.WithNbDigits(nbBits), bits.WithUnconstrainedOutputs())
	// t = number of trailing one bits in the bound
	t := 0
	for i := 0; i < nbBits; i++ {
		if bound.Bit(i) == 0 {
			break
		}
		t++
	}
	// constraint added
	added := make([]int, 0, nbBits)
	p := make([]frontend.Variable, nbBits+1)
	// p[i] == 1 → a[j] == c[j] for all j ⩾ i
	p[nbBits] = builder.cstOne()
	for i := nbBits - 1; i >= t; i-- {
		if bound.Bit(i) == 0 {
			p[i] = p[i+1]
		} else {
			p[i] = builder.Mul(p[i+1], aBits[i])
		}
	}
	for i := nbBits - 1; i >= 0; i-- {
		if bound.Bit(i) == 0 {
			// (1 - p(i+1) - ai) * ai == 0
			l := builder.Sub(1, p[i+1])
			l = builder.Sub(l, aBits[i])
			added = append(added, builder.cs.AddR1C(builder.newR1C(l, aBits[i], builder.cstZero()), builder.genericGate))
		} else {
			builder.AssertIsBoolean(aBits[i])
		}
	}
	if len(added) != 0 {
		builder.cs.AttachDebugInfo(debug, added)
	}
}
Pillar
3
func (builder *builder) Cmp(i1, i2 frontend.Variable) frontend.Variable {
	bi1 := builder.ToBinary(i1, builder.cs.FieldBitLen())
	bi2 := builder.ToBinary(i2, builder.cs.FieldBitLen())
	var res frontend.Variable
	res = 0
	for i := builder.cs.FieldBitLen() - 1; i >= 0; i-- {
		iszeroi1 := builder.IsZero(bi1[i])
		iszeroi2 := builder.IsZero(bi2[i])
		i1i2 := builder.And(bi1[i], iszeroi2)
		i2i1 := builder.And(bi2[i], iszeroi1)
		n := builder.Select(i2i1, -1, 0)
		m := builder.Select(i1i2, 1, n)
		res = builder.Select(builder.IsZero(res), m, res)
	}
	return res
}
Base
1
func (builder *builder) Cmp(i1, i2 frontend.Variable) frontend.Variable {
	bi1 := builder.ToBinary(i1, builder.cs.FieldBitLen())
	bi2 := builder.ToBinary(i2, builder.cs.FieldBitLen())
	var res frontend.Variable
	res = 0
	for i := builder.cs.FieldBitLen() - 1; i >= 0; i-- {
		iszeroi1 := builder.IsZero(bi1[i])
		iszeroi2 := builder.IsZero(bi2[i])
		i1i2 := builder.And(bi1[i], iszeroi2)
		i2i1 := builder.And(bi2[i], iszeroi1)
		n := builder.Select(i2i1, -1, 0)
		m := builder.Select(i1i2, 1, n)
		res = builder.Select(builder.IsZero(res), m, res)
	}
	return res
}
Pillar
3
func (builder *builder) AssertIsLessOrEqual(v frontend.Variable, bound frontend.Variable) {
	switch b := bound.(type) {
	case expr.Term:
		builder.mustBeLessOrEqVar(v, b)
	default:
		builder.mustBeLessOrEqCst(v, utils.FromInterface(b))
	}
}
Base
1
func (builder *builder) AssertIsLessOrEqual(v frontend.Variable, bound frontend.Variable) {
	switch b := bound.(type) {
	case expr.Term:
		builder.mustBeLessOrEqVar(v, b)
	default:
		builder.mustBeLessOrEqCst(v, utils.FromInterface(b))
	}
}
Pillar
3
func (builder *builder) mustBeLessOrEqCst(a frontend.Variable, bound big.Int) {
	nbBits := builder.cs.FieldBitLen()
	// ensure the bound is positive; its bit-len doesn't matter
	if bound.Sign() == -1 {
		panic("AssertIsLessOrEqual: bound must be positive")
	}
	if bound.BitLen() > nbBits {
		panic("AssertIsLessOrEqual: bound is too large, constraint will never be satisfied")
	}
	if ca, ok := builder.constantValue(a); ok {
		// a is constant, compare the big int values
		ba := builder.cs.ToBigInt(ca)
		if ba.Cmp(&bound) == 1 {
			panic(fmt.Sprintf("AssertIsLessOrEqual: %s > %s", ba.String(), bound.String()))
		}
	}
	// debug info
	debug := builder.newDebugInfo("mustBeLessOrEq", a, " <= ", bound)
	// note that at this stage, we didn't boolean-constrain these new variables yet
	// (as opposed to ToBinary)
	aBits := bits.ToBinary(builder, a, bits.WithNbDigits(nbBits), bits.WithUnconstrainedOutputs())
	// t = number of trailing one bits in the bound
	t := 0
	for i := 0; i < nbBits; i++ {
		if bound.Bit(i) == 0 {
			break
		}
		t++
	}
	p := make([]frontend.Variable, nbBits+1)
	// p[i] == 1 → a[j] == c[j] for all j ⩾ i
	p[nbBits] = 1
	for i := nbBits - 1; i >= t; i-- {
		if bound.Bit(i) == 0 {
			p[i] = p[i+1]
		} else {
			p[i] = builder.Mul(p[i+1], aBits[i])
		}
	}
	for i := nbBits - 1; i >= 0; i-- {
		if bound.Bit(i) == 0 {
			// (1 - p(i+1) - ai) * ai == 0
			l := builder.Sub(1, p[i+1], aBits[i]).(expr.Term)
			builder.addPlonkConstraint(sparseR1C{
				xa: l.VID,
				xb: aBits[i].(expr.Term).VID,
				qM: builder.tOne,
			}, debug)
		} else {
			builder.AssertIsBoolean(aBits[i])
		}
	}
}
Base
1
func (builder *builder) mustBeLessOrEqCst(a frontend.Variable, bound big.Int) {
	nbBits := builder.cs.FieldBitLen()
	// ensure the bound is positive; its bit-len doesn't matter
	if bound.Sign() == -1 {
		panic("AssertIsLessOrEqual: bound must be positive")
	}
	if bound.BitLen() > nbBits {
		panic("AssertIsLessOrEqual: bound is too large, constraint will never be satisfied")
	}
	if ca, ok := builder.constantValue(a); ok {
		// a is constant, compare the big int values
		ba := builder.cs.ToBigInt(ca)
		if ba.Cmp(&bound) == 1 {
			panic(fmt.Sprintf("AssertIsLessOrEqual: %s > %s", ba.String(), bound.String()))
		}
	}
	// debug info
	debug := builder.newDebugInfo("mustBeLessOrEq", a, " <= ", bound)
	// note that at this stage, we didn't boolean-constrain these new variables yet
	// (as opposed to ToBinary)
	aBits := bits.ToBinary(builder, a, bits.WithNbDigits(nbBits), bits.WithUnconstrainedOutputs())
	// t = number of trailing one bits in the bound
	t := 0
	for i := 0; i < nbBits; i++ {
		if bound.Bit(i) == 0 {
			break
		}
		t++
	}
	p := make([]frontend.Variable, nbBits+1)
	// p[i] == 1 → a[j] == c[j] for all j ⩾ i
	p[nbBits] = 1
	for i := nbBits - 1; i >= t; i-- {
		if bound.Bit(i) == 0 {
			p[i] = p[i+1]
		} else {
			p[i] = builder.Mul(p[i+1], aBits[i])
		}
	}
	for i := nbBits - 1; i >= 0; i-- {
		if bound.Bit(i) == 0 {
			// (1 - p(i+1) - ai) * ai == 0
			l := builder.Sub(1, p[i+1], aBits[i]).(expr.Term)
			builder.addPlonkConstraint(sparseR1C{
				xa: l.VID,
				xb: aBits[i].(expr.Term).VID,
				qM: builder.tOne,
			}, debug)
		} else {
			builder.AssertIsBoolean(aBits[i])
		}
	}
}
Pillar
3
func (builder *builder) mustBeLessOrEqVar(a frontend.Variable, bound expr.Term) {
	debug := builder.newDebugInfo("mustBeLessOrEq", a, " <= ", bound)
	nbBits := builder.cs.FieldBitLen()
	aBits := bits.ToBinary(builder, a, bits.WithNbDigits(nbBits), bits.WithUnconstrainedOutputs())
	boundBits := builder.ToBinary(bound, nbBits)
	p := make([]frontend.Variable, nbBits+1)
	p[nbBits] = 1
	for i := nbBits - 1; i >= 0; i-- {
		// if bound[i] == 0
		//   p[i] = p[i+1]
		//   t = p[i+1]
		// else
		//   p[i] = p[i+1] * a[i]
		//   t = 0
		v := builder.Mul(p[i+1], aBits[i])
		p[i] = builder.Select(boundBits[i], v, p[i+1])
		t := builder.Select(boundBits[i], 0, p[i+1])
		// (1 - t - ai) * ai == 0
		l := builder.Sub(1, t, aBits[i]).(expr.Term)
		// note if bound[i] == 1, this constraint is (1 - ai) * ai == 0
		// → this is a boolean constraint
		// if bound[i] == 0, t must be 0 or 1, thus ai must be 0 or 1 too
		if ai, ok := builder.constantValue(aBits[i]); ok {
			// a is constant; ensure l == 0
			l.Coeff = builder.cs.Mul(l.Coeff, ai)
			builder.addPlonkConstraint(sparseR1C{
				xa: l.VID,
				qL: l.Coeff,
			}, debug)
		} else {
			// l * a[i] == 0
			builder.addPlonkConstraint(sparseR1C{
				xa: l.VID,
				xb: aBits[i].(expr.Term).VID,
				qM: l.Coeff,
			}, debug)
		}
	}
}
Base
1
func (builder *builder) mustBeLessOrEqVar(a frontend.Variable, bound expr.Term) {
	debug := builder.newDebugInfo("mustBeLessOrEq", a, " <= ", bound)
	nbBits := builder.cs.FieldBitLen()
	aBits := bits.ToBinary(builder, a, bits.WithNbDigits(nbBits), bits.WithUnconstrainedOutputs())
	boundBits := builder.ToBinary(bound, nbBits)
	p := make([]frontend.Variable, nbBits+1)
	p[nbBits] = 1
	for i := nbBits - 1; i >= 0; i-- {
		// if bound[i] == 0
		//   p[i] = p[i+1]
		//   t = p[i+1]
		// else
		//   p[i] = p[i+1] * a[i]
		//   t = 0
		v := builder.Mul(p[i+1], aBits[i])
		p[i] = builder.Select(boundBits[i], v, p[i+1])
		t := builder.Select(boundBits[i], 0, p[i+1])
		// (1 - t - ai) * ai == 0
		l := builder.Sub(1, t, aBits[i]).(expr.Term)
		// note if bound[i] == 1, this constraint is (1 - ai) * ai == 0
		// → this is a boolean constraint
		// if bound[i] == 0, t must be 0 or 1, thus ai must be 0 or 1 too
		if ai, ok := builder.constantValue(aBits[i]); ok {
			// a is constant; ensure l == 0
			l.Coeff = builder.cs.Mul(l.Coeff, ai)
			builder.addPlonkConstraint(sparseR1C{
				xa: l.VID,
				qL: l.Coeff,
			}, debug)
		} else {
			// l * a[i] == 0
			builder.addPlonkConstraint(sparseR1C{
				xa: l.VID,
				xb: aBits[i].(expr.Term).VID,
				qM: l.Coeff,
			}, debug)
		}
	}
}
Pillar
3
func (circuit *recursiveHint) Define(api frontend.API) error {
	// first hint produces wire w1
	w1, _ := api.Compiler().NewHint(make3, 1)
	// this linear expression is not recorded in a R1CS just yet
	linearExpression := api.Add(circuit.A, w1[0])
	// api.ToBinary calls another hint (bits.NBits) with linearExpression as input;
	// however, when the solver resolves bits[...] it will need to detect w1 as a dependency
	// in order to compute the correct linearExpression value
	bits := api.ToBinary(linearExpression, 10)
	a := api.FromBinary(bits...)
	api.AssertIsEqual(a, 45)
	return nil
}
Base
1
func (circuit *recursiveHint) Define(api frontend.API) error {
	// first hint produces wire w1
	w1, _ := api.Compiler().NewHint(make3, 1)
	// this linear expression is not recorded in a R1CS just yet
	linearExpression := api.Add(circuit.A, w1[0])
	// api.ToBinary calls another hint (bits.NBits) with linearExpression as input;
	// however, when the solver resolves bits[...] it will need to detect w1 as a dependency
	// in order to compute the correct linearExpression value
	bits := api.ToBinary(linearExpression, 10)
	a := api.FromBinary(bits...)
	api.AssertIsEqual(a, 45)
	return nil
}
Pillar
3
func handleRequestBody(c *Client, r *Request) error {
	var bodyBytes []byte
	releaseBuffer(r.bodyBuf)
	r.bodyBuf = nil
	switch body := r.Body.(type) {
	case io.Reader:
		if c.setContentLength || r.setContentLength { // keep backward compatibility
			r.bodyBuf = acquireBuffer()
			if _, err := r.bodyBuf.ReadFrom(body); err != nil {
				return err
			}
			r.Body = nil
		} else {
			// otherwise, stream the io.Reader as-is without buffering
			return nil
		}
	case []byte:
		bodyBytes = body
	case string:
		bodyBytes = []byte(body)
	default:
		contentType := r.Header.Get(hdrContentTypeKey)
		kind := kindOf(r.Body)
		var err error
		if IsJSONType(contentType) && (kind == reflect.Struct || kind == reflect.Map || kind == reflect.Slice) {
			r.bodyBuf, err = jsonMarshal(c, r, r.Body)
		} else if IsXMLType(contentType) && (kind == reflect.Struct) {
			bodyBytes, err = c.XMLMarshal(r.Body)
		}
		if err != nil {
			return err
		}
	}
	if bodyBytes == nil && r.bodyBuf == nil {
		return errors.New("unsupported 'Body' type/value")
	}
	// copy []byte into the buffer
	if bodyBytes != nil && r.bodyBuf == nil {
		r.bodyBuf = acquireBuffer()
		_, _ = r.bodyBuf.Write(bodyBytes)
	}
	return nil
}
Class
2
func (s *memoryStore) Verify(id, answer string, clear bool) bool {
	v := s.Get(id, clear)
	return v == answer
}
Class
2
func (maap *MesheryApplicationPersister) GetMesheryApplications(search, order string, page, pageSize uint64, updatedAfter string) ([]byte, error) {
	order = sanitizeOrderInput(order, []string{"created_at", "updated_at", "name"})
	if order == "" {
		order = "updated_at desc"
	}
	count := int64(0)
	applications := []*MesheryApplication{}
	query := maap.DB.Where("updated_at > ?", updatedAfter).Order(order)
	if search != "" {
		like := "%" + strings.ToLower(search) + "%"
		query = query.Where("(lower(meshery_applications.name) like ?)", like)
	}
	query.Table("meshery_applications").Count(&count)
	Paginate(uint(page), uint(pageSize))(query).Find(&applications)
	mesheryApplicationPage := &MesheryApplicationPage{
		Page:         page,
		PageSize:     pageSize,
		TotalCount:   int(count),
		Applications: applications,
	}
	return marshalMesheryApplicationPage(mesheryApplicationPage), nil
}
Base
1
func (mfp *MesheryFilterPersister) GetMesheryCatalogFilters(page, pageSize, search, order string) ([]byte, error) {
	var err error
	order = sanitizeOrderInput(order, []string{"created_at", "updated_at", "name"})
	if order == "" {
		order = "updated_at desc"
	}
	var pg int
	if page != "" {
		pg, err = strconv.Atoi(page)
		if err != nil || pg < 0 {
			pg = 0
		}
	} else {
		pg = 0
	}
	// 0 page size is for all records
	var pgSize int
	if pageSize != "" {
		pgSize, err = strconv.Atoi(pageSize)
		if err != nil || pgSize < 0 {
			pgSize = 0
		}
	} else {
		pgSize = 0
	}
	filters := []MesheryFilter{}
	query := mfp.DB.Where("visibility = ?", Published).Order(order)
	if search != "" {
		like := "%" + strings.ToLower(search) + "%"
		query = query.Where("(lower(meshery_filters.name) like ?)", like)
	}
	var count int64
	err = query.Model(&MesheryFilter{}).Count(&count).Error
	if err != nil {
		return nil, err
	}
	if pgSize != 0 {
		Paginate(uint(pg), uint(pgSize))(query).Find(&filters)
	} else {
		query.Find(&filters)
	}
	response := FiltersAPIResponse{
		Page:       uint(pg),
		PageSize:   uint(pgSize),
		TotalCount: uint(count),
		Filters:    filters,
	}
	marshalledResponse, _ := json.Marshal(response)
	return marshalledResponse, nil
}
Base
1
func (mfp *MesheryFilterPersister) GetMesheryFilters(search, order string, page, pageSize uint64, visibility []string) ([]byte, error) {
	order = sanitizeOrderInput(order, []string{"created_at", "updated_at", "name"})
	if order == "" {
		order = "updated_at desc"
	}
	count := int64(0)
	filters := []*MesheryFilter{}
	query := mfp.DB.Order(order)
	// restrict by visibility only when values were supplied
	if len(visibility) > 0 {
		query = query.Where("visibility in (?)", visibility)
	}
	if search != "" {
		like := "%" + strings.ToLower(search) + "%"
		query = query.Where("(lower(meshery_filters.name) like ?)", like)
	}
	query.Table("meshery_filters").Count(&count)
	Paginate(uint(page), uint(pageSize))(query).Find(&filters)
	mesheryFilterPage := &MesheryFilterPage{
		Page:       page,
		PageSize:   pageSize,
		TotalCount: int(count),
		Filters:    filters,
	}
	return marshalMesheryFilterPage(mesheryFilterPage), nil
}
Base
1
func (mkcp *MesheryK8sContextPersister) GetMesheryK8sContexts(search, order string, page, pageSize uint64) ([]byte, error) {
	order = sanitizeOrderInput(order, []string{"created_at", "updated_at", "name"})
	if order == "" {
		order = "updated_at desc"
	}
	count := int64(0)
	contexts := []*K8sContext{}
	query := mkcp.DB.Order(order)
	if search != "" {
		like := "%" + strings.ToLower(search) + "%"
		query = query.Where("(lower(name) like ?)", like)
	}
	query.Model(K8sContext{}).Count(&count)
	Paginate(uint(page), uint(pageSize))(query).Find(&contexts)
	mesheryK8sContextPage := MesheryK8sContextPage{
		Page:       page,
		PageSize:   pageSize,
		TotalCount: int(count),
		Contexts:   contexts,
	}
	resp, _ := json.Marshal(mesheryK8sContextPage)
	return resp, nil
}
Base
1
func (mpp *MesheryPatternPersister) GetMesheryPatterns(search, order string, page, pageSize uint64, updatedAfter string, visibility []string) ([]byte, error) {
	order = sanitizeOrderInput(order, []string{"created_at", "updated_at", "name"})
	if order == "" {
		order = "updated_at desc"
	}
	count := int64(0)
	patterns := []*MesheryPattern{}
	query := mpp.DB.Where("updated_at > ?", updatedAfter).Order(order)
	// restrict by visibility only when values were supplied
	if len(visibility) > 0 {
		query = query.Where("visibility in (?)", visibility)
	}
	if search != "" {
		like := "%" + strings.ToLower(search) + "%"
		query = query.Where("(lower(meshery_patterns.name) like ?)", like)
	}
	query.Table("meshery_patterns").Count(&count)
	Paginate(uint(page), uint(pageSize))(query).Find(&patterns)
	mesheryPatternPage := &MesheryPatternPage{
		Page:       page,
		PageSize:   pageSize,
		TotalCount: int(count),
		Patterns:   patterns,
	}
	return marshalMesheryPatternPage(mesheryPatternPage), nil
}
Base
1
func (mpp *MesheryPatternPersister) GetMesheryCatalogPatterns(page, pageSize, search, order string) ([]byte, error) {
	var err error
	order = sanitizeOrderInput(order, []string{"created_at", "updated_at", "name"})
	if order == "" {
		order = "updated_at desc"
	}
	var pg int
	if page != "" {
		pg, err = strconv.Atoi(page)
		if err != nil || pg < 0 {
			pg = 0
		}
	} else {
		pg = 0
	}
	// 0 page size is for all records
	var pgSize int
	if pageSize != "" {
		pgSize, err = strconv.Atoi(pageSize)
		if err != nil || pgSize < 0 {
			pgSize = 0
		}
	} else {
		pgSize = 0
	}
	patterns := []MesheryPattern{}
	query := mpp.DB.Where("visibility = ?", Published).Order(order)
	if search != "" {
		like := "%" + strings.ToLower(search) + "%"
		query = query.Where("(lower(meshery_patterns.name) like ?)", like)
	}
	var count int64
	err = query.Model(&MesheryPattern{}).Count(&count).Error
	if err != nil {
		return nil, err
	}
	if pgSize != 0 {
		Paginate(uint(pg), uint(pgSize))(query).Find(&patterns)
	} else {
		query.Find(&patterns)
	}
	response := PatternsAPIResponse{
		Page:       uint(pg),
		PageSize:   uint(pgSize),
		TotalCount: uint(count),
		Patterns:   patterns,
	}
	marshalledResponse, _ := json.Marshal(response)
	return marshalledResponse, nil
}
Base
1
func (prp *PatternResourcePersister) GetPatternResources(search, order, name, namespace, typ, oamType string, page, pageSize uint64) (*PatternResourcePage, error) {
	order = sanitizeOrderInput(order, []string{"created_at", "updated_at", "name"})
	if order == "" {
		order = "updated_at desc"
	}
	count := int64(0)
	resources := []*PatternResource{}
	query := prp.DB.Order(order).Where("deleted = false")
	if search != "" {
		like := "%" + strings.ToLower(search) + "%"
		query = query.Where("(lower(pattern_resources.name) like ?)", like)
	}
	if name != "" {
		query = query.Where("name = ?", name)
	}
	if namespace != "" {
		query = query.Where("namespace = ?", namespace)
	}
	if typ != "" {
		query = query.Where("type = ?", typ)
	}
	if oamType != "" {
		query = query.Where("oam_type = ?", oamType)
	}
	query.Model(&PatternResource{}).Count(&count)
	Paginate(uint(page), uint(pageSize))(query).Find(&resources)
	patternResourcePage := &PatternResourcePage{
		Page:       page,
		PageSize:   pageSize,
		TotalCount: int(count),
		Resources:  resources,
	}
	return patternResourcePage, nil
}
Base
1
func (prp *PatternResourcePersister) Exists(name, namespace, typ, oamType string) bool {
	var result struct {
		Found bool
	}
	prp.DB.
		Raw(`
			SELECT EXISTS(
				SELECT 1 FROM pattern_resources
				WHERE name = ? AND namespace = ? AND type = ? AND oam_type = ? AND deleted = false
			) AS "found"`,
			name, namespace, typ, oamType,
		).
		Scan(&result)
	return result.Found
}
Base
1
func (ppp *PerformanceProfilePersister) GetPerformanceProfiles(_, search, order string, page, pageSize uint64) ([]byte, error) {
	order = sanitizeOrderInput(order, []string{"updated_at", "created_at", "name", "last_run"})
	if order == "" {
		order = "updated_at desc"
	}
	count := int64(0)
	profiles := []*PerformanceProfile{}
	query := ppp.DB.
		Select(`id, name, load_generators, endpoints, qps, service_mesh, duration, request_headers,
			request_cookies, request_body, content_type, created_at, updated_at,
			(?) as last_run, (?) as total_results`,
			ppp.DB.Table("meshery_results").Select("DATETIME(MAX(meshery_results.test_start_time))").Where("performance_profile = performance_profiles.id"),
			ppp.DB.Table("meshery_results").Select("COUNT(meshery_results.name)").Where("performance_profile = performance_profiles.id"),
		).
		Order(order)
	if search != "" {
		like := "%" + strings.ToLower(search) + "%"
		query = query.Where("(lower(performance_profiles.name) like ?)", like)
	}
	query.Table("performance_profiles").Count(&count)
	Paginate(uint(page), uint(pageSize))(query).Find(&profiles)
	performanceProfilePage := &PerformanceProfilePage{
		Page:       page,
		PageSize:   pageSize,
		TotalCount: int(count),
		Profiles:   profiles,
	}
	return marshalPerformanceProfilePage(performanceProfilePage), nil
}
Base
1
func sanitizeOrderInput(order string, validColumns []string) string {
	parsedOrderStr := strings.Split(order, " ")
	if len(parsedOrderStr) != 2 {
		return ""
	}
	inputCol := parsedOrderStr[0]
	typ := strings.ToLower(parsedOrderStr[1])
	for _, col := range validColumns {
		if col == inputCol {
			if typ == "desc" {
				return fmt.Sprintf("%s desc", col)
			}
			return fmt.Sprintf("%s asc", col)
		}
	}
	return ""
}
Base
1
func RandomString(length int, seed RandomSeed) string {
	runs := seed.Runes()
	result := ""
	// seed once before the loop; re-seeding on every iteration can repeat
	// characters when the clock has not advanced between calls
	rand.Seed(time.Now().UnixNano())
	for i := 0; i < length; i++ {
		randNumber := rand.Intn(len(runs))
		result += string(runs[randNumber])
	}
	return result
}
Class
2
return func(
	ctx context.Context,
	method string,
	req, reply interface{},
	cc *grpc.ClientConn,
	invoker grpc.UnaryInvoker,
	callOpts ...grpc.CallOption,
) error {
	i := &InterceptorInfo{
		Method: method,
		Type:   UnaryClient,
	}
	if cfg.Filter != nil && !cfg.Filter(i) {
		return invoker(ctx, method, req, reply, cc, callOpts...)
	}
	name, attr := spanInfo(method, cc.Target())
	startOpts := append([]trace.SpanStartOption{
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(attr...),
	}, cfg.SpanStartOptions...)
	ctx, span := tracer.Start(
		ctx,
		name,
		startOpts...,
	)
	defer span.End()
	ctx = inject(ctx, cfg.Propagators)
	if cfg.SentEvent {
		messageSent.Event(ctx, 1, req)
	}
	err := invoker(ctx, method, req, reply, cc, callOpts...)
	if cfg.ReceivedEvent {
		messageReceived.Event(ctx, 1, reply)
	}
	if err != nil {
		s, _ := status.FromError(err)
		span.SetStatus(codes.Error, s.Message())
		span.SetAttributes(statusCodeAttr(s.Code()))
	} else {
		span.SetAttributes(statusCodeAttr(grpc_codes.OK))
	}
	return err
}
Base
1
func checkUnaryServerRecords(t *testing.T, reader metric.Reader) {
	rm := metricdata.ResourceMetrics{}
	err := reader.Collect(context.Background(), &rm)
	assert.NoError(t, err)
	require.Len(t, rm.ScopeMetrics, 1)
	// TODO: Remove these #4322
	address, ok := findScopeMetricAttribute(rm.ScopeMetrics[0], semconv.NetSockPeerAddrKey)
	assert.True(t, ok)
	port, ok := findScopeMetricAttribute(rm.ScopeMetrics[0], semconv.NetSockPeerPortKey)
	assert.True(t, ok)
	want := metricdata.ScopeMetrics{
		Scope: wantInstrumentationScope,
		Metrics: []metricdata.Metrics{
			{
				Name:        "rpc.server.duration",
				Description: "Measures the duration of inbound RPC.",
				Unit:        "ms",
				Data: metricdata.Histogram[float64]{
					Temporality: metricdata.CumulativeTemporality,
					DataPoints: []metricdata.HistogramDataPoint[float64]{
						{
							Attributes: attribute.NewSet(
								semconv.RPCMethod("EmptyCall"),
								semconv.RPCService("grpc.testing.TestService"),
								otelgrpc.RPCSystemGRPC,
								otelgrpc.GRPCStatusCodeKey.Int64(int64(codes.OK)),
								address,
								port,
							),
						},
						{
							Attributes: attribute.NewSet(
								semconv.RPCMethod("UnaryCall"),
								semconv.RPCService("grpc.testing.TestService"),
								otelgrpc.RPCSystemGRPC,
								otelgrpc.GRPCStatusCodeKey.Int64(int64(codes.OK)),
								address,
								port,
							),
						},
					},
				},
			},
		},
	}
	metricdatatest.AssertEqual(t, want, rm.ScopeMetrics[0], metricdatatest.IgnoreTimestamp(), metricdatatest.IgnoreValue())
}
Base
1
func findScopeMetricAttribute(sm metricdata.ScopeMetrics, key attribute.Key) (attribute.KeyValue, bool) {
	for _, m := range sm.Metrics {
		// This only needs to cover data types used by the instrumentation.
		switch d := m.Data.(type) {
		case metricdata.Histogram[int64]:
			for _, dp := range d.DataPoints {
				if kv, ok := findAttribute(dp.Attributes.ToSlice(), key); ok {
					return kv, true
				}
			}
		case metricdata.Histogram[float64]:
			for _, dp := range d.DataPoints {
				if kv, ok := findAttribute(dp.Attributes.ToSlice(), key); ok {
					return kv, true
				}
			}
		default:
			panic(fmt.Sprintf("unexpected data type %T - name %s", d, m.Name))
		}
	}
	return attribute.KeyValue{}, false
}
Base
1
func NewClient(url string) (*gitrekor.Client, error) {
	return gitrekor.New(url, rekor.WithUserAgent("gitsign"))
}
Base
1
func rekorPubsFromClient(rekorClient *client.Rekor) (*cosign.TrustedTransparencyLogPubKeys, error) {
	publicKeys := cosign.NewTrustedTransparencyLogPubKeys()
	pubOK, err := rekorClient.Pubkey.GetPublicKey(nil)
	if err != nil {
		return nil, fmt.Errorf("unable to fetch rekor public key from rekor: %w", err)
	}
	if err := publicKeys.AddTransparencyLogPubKey([]byte(pubOK.Payload), tuf.Active); err != nil {
		return nil, fmt.Errorf("constructRekorPubKey: %w", err)
	}
	return &publicKeys, nil
}
Base
1
func New(url string, opts ...rekor.Option) (*Client, error) {
	c, err := rekor.GetRekorClient(url, opts...)
	if err != nil {
		return nil, err
	}
	pubs, err := rekorPubsFromClient(c)
	if err != nil {
		return nil, err
	}
	return &Client{
		Rekor:      c,
		publicKeys: pubs,
	}, nil
}
Base
1
func (t *handshakeTransport) readLoop() {
	first := true
	for {
		p, err := t.readOnePacket(first)
		first = false
		if err != nil {
			t.readError = err
			close(t.incoming)
			break
		}
		if p[0] == msgIgnore || p[0] == msgDebug {
			continue
		}
		t.incoming <- p
	}

	// Stop writers too.
	t.recordWriteError(t.readError)

	// Unblock the writer should it wait for this.
	close(t.startKex)

	// Don't close t.requestKex; it's also written to from writePacket.
}
Base
1
func (t *transport) readPacket() (p []byte, err error) {
	for {
		p, err = t.reader.readPacket(t.bufReader)
		if err != nil {
			break
		}
		if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) {
			break
		}
	}
	if debugTransport {
		t.printPacket(p, false)
	}
	return p, err
}
Base
1
func (t *transport) writePacket(packet []byte) error {
	if debugTransport {
		t.printPacket(packet, true)
	}
	return t.writer.writePacket(t.bufWriter, t.rand, packet)
}
Base
1
func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error {
	changeKeys := len(packet) > 0 && packet[0] == msgNewKeys

	err := s.packetCipher.writeCipherPacket(s.seqNum, w, rand, packet)
	if err != nil {
		return err
	}
	if err = w.Flush(); err != nil {
		return err
	}
	s.seqNum++
	if changeKeys {
		select {
		case cipher := <-s.pendingKeyChange:
			s.packetCipher = cipher
		default:
			panic("ssh: no key material for msgNewKeys")
		}
	}
	return err
}
Base
1
func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) {
	packet, err := s.packetCipher.readCipherPacket(s.seqNum, r)
	s.seqNum++
	if err == nil && len(packet) == 0 {
		err = errors.New("ssh: zero length packet")
	}

	if len(packet) > 0 {
		switch packet[0] {
		case msgNewKeys:
			select {
			case cipher := <-s.pendingKeyChange:
				s.packetCipher = cipher
			default:
				return nil, errors.New("ssh: got bogus newkeys message")
			}

		case msgDisconnect:
			// Transform a disconnect message into an
			// error. Since this is lowest level at which
			// we interpret message types, doing it here
			// ensures that we don't have to handle it
			// elsewhere.
			var msg disconnectMsg
			if err := Unmarshal(packet, &msg); err != nil {
				return nil, err
			}
			return nil, &msg
		}
	}

	// The packet may point to an internal buffer, so copy the
	// packet out here.
	fresh := make([]byte, len(packet))
	copy(fresh, packet)

	return fresh, err
}
Base
1
func CertHandler(c *gin.Context) {
	if strings.Contains(c.Request.UserAgent(), "Firefox") {
		c.Header("content-type", "application/x-x509-ca-cert")
		c.File("ca.cert.cer")
		return
	}
	noFirefoxTemplate.Execute(c.Writer, gin.H{
		"url": "http://" + c.Request.Host + c.Request.URL.String(),
	})
}
Base
1
func DeleteCertHandler(c *gin.Context) {
	DeleteCertificates(config.GetCertificatesDir())
}
Base
1
func Init(ds model.DataStore) {
	once.Do(func() {
		log.Info("Setting Session Timeout", "value", conf.Server.SessionTimeout)
		secret, err := ds.Property(context.TODO()).DefaultGet(consts.JWTSecretKey, "not so secret")
		if err != nil {
			log.Error("No JWT secret found in DB. Setting a temp one, but please report this error", err)
		}
		Secret = []byte(secret)
		TokenAuth = jwtauth.New("HS256", Secret, nil)
	})
}
Class
2
func New(ds model.DataStore, broker events.Broker) *Server {
	s := &Server{ds: ds, broker: broker}
	auth.Init(s.ds)
	initialSetup(ds)
	s.initRoutes()
	s.mountAuthenticationRoutes()
	s.mountRootRedirector()
	checkFfmpegInstallation()
	checkExternalCredentials()
	return s
}
Class
2
const recordDefinition = (id, highByte) => {
	let structure = read().map(property => property.toString()) // ensure that all keys are strings and that the array is mutable
	let firstByte = id
	if (highByte !== undefined) {
		id = id < 32 ? -((highByte << 5) + id) : ((highByte << 5) + id)
		structure.highByte = highByte
	}
	let existingStructure = currentStructures[id]
	// If it is a shared structure, we need to restore any changes after reading.
	// Also in sequential mode, we may get incomplete reads and thus errors, and we need to restore
	// to the state prior to an incomplete read in order to properly resume.
	if (existingStructure && (existingStructure.isShared || sequentialMode)) {
		(currentStructures.restoreStructures || (currentStructures.restoreStructures = []))[id] = existingStructure
	}
	currentStructures[id] = structure
	structure.read = createStructureReader(structure, firstByte)
	return structure.read()
}
Class
2
function readKey() {
	let length = src[position++]
	if (length >= 0xa0 && length < 0xc0) {
		// fixstr, potentially use key cache
		length = length - 0xa0
		if (srcStringEnd >= position) // if it has been extracted, must use it (and faster anyway)
			return srcString.slice(position - srcStringStart, (position += length) - srcStringStart)
		else if (!(srcStringEnd == 0 && srcEnd < 180))
			return readFixedString(length)
	} else { // not cacheable, go back and do a standard read
		position--
		return read().toString()
	}
	let key = ((length << 5) ^ (length > 1 ? dataView.getUint16(position) : length > 0 ? src[position] : 0)) & 0xfff
	let entry = keyCache[key]
	let checkPosition = position
	let end = position + length - 3
	let chunk
	let i = 0
	if (entry && entry.bytes == length) {
		while (checkPosition < end) {
			chunk = dataView.getUint32(checkPosition)
			if (chunk != entry[i++]) {
				checkPosition = 0x70000000
				break
			}
			checkPosition += 4
		}
		end += 3
		while (checkPosition < end) {
			chunk = src[checkPosition++]
			if (chunk != entry[i++]) {
				checkPosition = 0x70000000
				break
			}
		}
		if (checkPosition === end) {
			position = checkPosition
			return entry.string
		}
		end -= 3
		checkPosition = position
	}
	entry = []
	keyCache[key] = entry
	entry.bytes = length
	while (checkPosition < end) {
		chunk = dataView.getUint32(checkPosition)
		entry.push(chunk)
		checkPosition += 4
	}
	end += 3
	while (checkPosition < end) {
		chunk = src[checkPosition++]
		entry.push(chunk)
	}
	// for small blocks, avoiding the overhead of the extract call is helpful
	let string = length < 16 ? shortStringInJS(length) : longStringInJS(length)
	if (string != null)
		return entry.string = string
	return entry.string = readFixedString(length)
}
Class
2
func SaveSettings(c *gin.Context) {
	var json struct {
		Server settings.Server `json:"server"`
		Nginx  settings.Nginx  `json:"nginx"`
		Openai settings.OpenAI `json:"openai"`
	}

	if !api.BindAndValid(c, &json) {
		return
	}

	settings.ServerSettings = json.Server
	settings.NginxSettings = json.Nginx
	settings.OpenAISettings = json.Openai

	settings.ReflectFrom()

	err := settings.Save()
	if err != nil {
		api.ErrHandler(c, err)
		return
	}

	GetSettings(c)
}
Class
2
func GetSettings(c *gin.Context) {
	c.JSON(http.StatusOK, gin.H{
		"server": settings.ServerSettings,
		"nginx":  settings.NginxSettings,
		"openai": settings.OpenAISettings,
	})
}
Class
2
return func(db *gorm.DB) *gorm.DB {
		sort := c.ctx.DefaultQuery("order", "desc")
		order := fmt.Sprintf("%s %s", DefaultQuery(c.ctx, "sort_by", c.itemKey), sort)
		return db.Order(order)
	}
}
Base
1
return func(db *gorm.DB) *gorm.DB {
		sort := c.DefaultQuery("order", "desc")
		order := fmt.Sprintf("`%s` %s", DefaultQuery(c, "sort_by", "id"), sort)
		db = db.Order(order)

		page := cast.ToInt(c.Query("page"))
		if page == 0 {
			page = 1
		}
		pageSize := settings.ServerSettings.PageSize
		reqPageSize := c.Query("page_size")
		if reqPageSize != "" {
			pageSize = cast.ToInt(reqPageSize)
		}
		offset := (page - 1) * pageSize

		return db.Offset(offset).Limit(pageSize)
	}
}
Base
1
RENDERER_INLINE.paragraph = (text) => {
    return text;
};
Base
1
public ngOnChanges() {
    let html = '';

    const markdown = this.markdown;

    if (!markdown) {
        html = markdown;
    } else if (this.optional && markdown.indexOf('!') !== 0) {
        html = markdown;
    } else if (this.markdown) {
        const renderer = this.inline ? RENDERER_INLINE : RENDERER_DEFAULT;

        html = marked(this.markdown, { renderer });
    }

    if (!this.html && (!html || html === this.markdown || html.indexOf('<') < 0)) {
        this.renderer.setProperty(this.element.nativeElement, 'textContent', html);
    } else {
        this.renderer.setProperty(this.element.nativeElement, 'innerHTML', html);
    }
}
Base
1
RENDERER_DEFAULT.link = (href, _, text) => {
    if (href && href.startsWith('mailto')) {
        return text;
    } else {
        return `<a href="${href}" target="_blank", rel="noopener">${text} <i class="icon-external-link"></i></a>`;
    }
};
Base
1
inlinerRenderer.paragraph = (text) => {
    return text;
};
Base
1
renderer.link = (href, _, text) => {
    if (href && href.startsWith('mailto')) {
        return text;
    } else {
        return `<a href="${href}" target="_blank", rel="noopener">${text} <i class="icon-external-link"></i></a>`;
    }
};
Base
1
const userName = (userId: string) => {
    const parts = userId.split(':');

    if (parts.length === 1) {
        return users.getUser(parts[0], null).pipe(map(u => u.displayName));
    } else if (parts[0] === 'subject') {
        return users.getUser(parts[1], null).pipe(map(u => u.displayName));
    } else if (parts[1].endsWith('client')) {
        return of(parts[1]);
    } else {
        return of(`${parts[1]}-client`);
    }
};
Base
1
export function formatHistoryMessage(message: string, users: UsersProviderService): Observable<string> {
    const userName = (userId: string) => {
        const parts = userId.split(':');

        if (parts.length === 1) {
            return users.getUser(parts[0], null).pipe(map(u => u.displayName));
        } else if (parts[0] === 'subject') {
            return users.getUser(parts[1], null).pipe(map(u => u.displayName));
        } else if (parts[1].endsWith('client')) {
            return of(parts[1]);
        } else {
            return of(`${parts[1]}-client`);
        }
    };

    let foundUserId: string | null = null;

    message = message.replace(/{([^\s:]*):([^}]*)}/, (match: string, type: string, id: string) => {
        if (type === 'user') {
            foundUserId = id;
            return REPLACEMENT_TEMP;
        } else {
            return id;
        }
    });

    message = message.replace(/{([^}]*)}/g, (match: string, marker: string) => {
        return `<span class="marker-ref">${marker}</span>`;
    });

    if (foundUserId) {
        return userName(foundUserId).pipe(map(t => message.replace(REPLACEMENT_TEMP, `<span class="user-ref">${t}</span>`)));
    }

    return of(message);
}
Base
1
export function getCellWidth(field: TableField, sizes: FieldSizes | undefined | null) {
    const size = sizes?.[field.name] || 0;

    if (size > 0) {
        return size;
    }

    switch (field) {
        case META_FIELDS.id:
            return 280;
        case META_FIELDS.created:
            return 150;
        case META_FIELDS.createdByAvatar:
            return 55;
        case META_FIELDS.createdByName:
            return 150;
        case META_FIELDS.lastModified:
            return 150;
        case META_FIELDS.lastModifiedByAvatar:
            return 55;
        case META_FIELDS.lastModifiedByName:
            return 150;
        case META_FIELDS.status:
            return 200;
        case META_FIELDS.statusNext:
            return 240;
        case META_FIELDS.statusColor:
            return 50;
        case META_FIELDS.version:
            return 80;
        default:
            return 200;
    }
}
Compound
4
function _getPayloadURL (url: string, opts: LoadPayloadOptions = {}) {
  const u = new URL(url, 'http://localhost')
  if (u.search) {
    throw new Error('Payload URL cannot contain search params: ' + url)
  }
  if (u.host !== 'localhost') {
    throw new Error('Payload URL cannot contain host: ' + url)
  }
  const hash = opts.hash || (opts.fresh ? Date.now() : '')
  return joinURL(useRuntimeConfig().app.baseURL, u.pathname, hash ? `_payload.${hash}.js` : '_payload.js')
}
Base
1
export async function sendScanResults(
  payloads: ScanResultsPayload[],
): Promise<boolean> {
  for (const payload of payloads) {
    // Intentionally removing scan results as they would be too big to log
    const payloadWithoutScanResults = { ...payload, scanResults: undefined };
    try {
      const request: KubernetesUpstreamRequest = {
        method: 'post',
        url: `${upstreamUrl}/api/v1/scan-results`,
        payload,
      };
      const { response, attempt } = await reqQueue.push(request);
      if (!isSuccessStatusCode(response.statusCode)) {
        throw new Error(`${response.statusCode} ${response.statusMessage}`);
      } else {
        logger.info(
          { payload: payloadWithoutScanResults, attempt },
          'scan results sent upstream successfully',
        );
      }
    } catch (error) {
      logger.error(
        { error, payload: payloadWithoutScanResults },
        'could not send the scan results upstream',
      );
      return false;
    }
  }
  return true;
}
Class
2
export async function sendDepGraph(
  ...payloads: IDependencyGraphPayload[]
): Promise<void> {
  for (const payload of payloads) {
    // Intentionally removing dependencyGraph as it would be too big to log
    // eslint-disable-next-line @typescript-eslint/no-unused-vars
    const { dependencyGraph, ...payloadWithoutDepGraph } = payload;
    try {
      const request: KubernetesUpstreamRequest = {
        method: 'post',
        url: `${upstreamUrl}/api/v1/dependency-graph`,
        payload,
      };
      const { response, attempt } = await reqQueue.push(request);
      if (!isSuccessStatusCode(response.statusCode)) {
        throw new Error(`${response.statusCode} ${response.statusMessage}`);
      } else {
        logger.info(
          { payload: payloadWithoutDepGraph, attempt },
          'dependency graph sent upstream successfully',
        );
      }
    } catch (error) {
      logger.error(
        { error, payload: payloadWithoutDepGraph },
        'could not send the dependency scan result upstream',
      );
    }
  }
}
Class
2
export async function sendWorkloadMetadata(
  payload: IWorkloadMetadataPayload,
): Promise<void> {
  try {
    logger.info(
      { workloadLocator: payload.workloadLocator },
      'attempting to send workload metadata upstream',
    );

    const request: KubernetesUpstreamRequest = {
      method: 'post',
      url: `${upstreamUrl}/api/v1/workload`,
      payload,
    };
    const { response, attempt } = await reqQueue.push(request);

    if (!isSuccessStatusCode(response.statusCode)) {
      throw new Error(`${response.statusCode} ${response.statusMessage}`);
    } else {
      logger.info(
        { workloadLocator: payload.workloadLocator, attempt },
        'workload metadata sent upstream successfully',
      );
    }
  } catch (error) {
    logger.error(
      { error, workloadLocator: payload.workloadLocator },
      'could not send workload metadata upstream',
    );
  }
}
Class
2
export async function deleteWorkload(
  payload: IDeleteWorkloadPayload,
): Promise<void> {
  try {
    const { workloadLocator, agentId } = payload;
    const { userLocator, cluster, namespace, type, name } = workloadLocator;
    const query = `userLocator=${userLocator}&cluster=${cluster}&namespace=${namespace}&type=${type}&name=${name}&agentId=${agentId}`;

    const request: KubernetesUpstreamRequest = {
      method: 'delete',
      url: `${upstreamUrl}/api/v1/workload?${query}`,
      payload,
    };
    const { response, attempt } = await reqQueue.push(request);

    // TODO: Remove this check, the upstream no longer returns 404 in such cases
    if (response.statusCode === 404) {
      logger.info(
        { payload },
        'attempted to delete a workload the Upstream service could not find',
      );
      return;
    }

    if (!isSuccessStatusCode(response.statusCode)) {
      throw new Error(`${response.statusCode} ${response.statusMessage}`);
    } else {
      logger.info(
        { workloadLocator: payload.workloadLocator, attempt },
        'workload deleted successfully',
      );
    }
  } catch (error) {
    logger.error(
      { error, payload },
      'could not send delete a workload from the upstream',
    );
  }
}
Class
2
export async function sendRuntimeData(
  payload: IRuntimeDataPayload,
): Promise<void> {
  const logContext = {
    userLocator: payload.target.userLocator,
    cluster: payload.target.cluster,
    agentId: payload.target.agentId,
    identity: payload.identity,
  };
  try {
    logger.info(logContext, 'attempting to send runtime data');

    const request: KubernetesUpstreamRequest = {
      method: 'post',
      url: `${upstreamUrl}/api/v1/runtime-results`,
      payload,
    };
    const { response, attempt } = await reqQueue.push(request);

    if (!isSuccessStatusCode(response.statusCode)) {
      throw new Error(`${response.statusCode} ${response.statusMessage}`);
    }

    logger.info(
      {
        attempt,
        ...logContext,
      },
      'runtime data sent upstream successfully',
    );
  } catch (error) {
    logger.error(
      {
        error,
        ...logContext,
      },
      'could not send runtime data',
    );
  }
}
Class
2
export async function sendClusterMetadata(): Promise<void> {
  const payload: IClusterMetadataPayload = {
    userLocator: config.INTEGRATION_ID,
    cluster: config.CLUSTER_NAME,
    agentId: config.AGENT_ID,
    version: config.MONITOR_VERSION,
    namespace: config.NAMESPACE,
  };
  try {
    logger.info(
      {
        userLocator: payload.userLocator,
        cluster: payload.cluster,
        agentId: payload.agentId,
      },
      'attempting to send cluster metadata',
    );

    const request: KubernetesUpstreamRequest = {
      method: 'post',
      url: `${upstreamUrl}/api/v1/cluster`,
      payload,
    };
    const { response, attempt } = await reqQueue.push(request);

    if (!isSuccessStatusCode(response.statusCode)) {
      throw new Error(`${response.statusCode} ${response.statusMessage}`);
    }

    logger.info(
      {
        userLocator: payload.userLocator,
        cluster: payload.cluster,
        agentId: payload.agentId,
        attempt,
      },
      'cluster metadata sent upstream successfully',
    );
  } catch (error) {
    logger.error(
      {
        error,
        userLocator: payload.userLocator,
        cluster: payload.cluster,
        agentId: payload.agentId,
      },
      'could not send cluster metadata',
    );
  }
}
Class
2
export async function sendWorkloadEventsPolicy(
  payload: IWorkloadEventsPolicyPayload,
): Promise<void> {
  try {
    logger.info(
      {
        userLocator: payload.userLocator,
        cluster: payload.cluster,
        agentId: payload.agentId,
      },
      'attempting to send workload auto-import policy',
    );

    const { response, attempt } = await retryRequest(
      'post',
      `${upstreamUrl}/api/v1/policy`,
      payload,
    );
    if (!isSuccessStatusCode(response.statusCode)) {
      throw new Error(`${response.statusCode} ${response.statusMessage}`);
    }

    logger.info(
      {
        userLocator: payload.userLocator,
        cluster: payload.cluster,
        agentId: payload.agentId,
        attempt,
      },
      'workload auto-import policy sent upstream successfully',
    );
  } catch (error) {
    logger.error(
      {
        error,
        userLocator: payload.userLocator,
        cluster: payload.cluster,
        agentId: payload.agentId,
      },
      'could not send workload auto-import policy',
    );
  }
}
Class
2
async function deployKubernetesMonitor(
  imageOptions: IImageOptions,
  deployOptions: IDeployOptions,
): Promise<void> {
  if (!existsSync(helmPath)) {
    await downloadHelm();
  }

  await kubectl.applyK8sYaml('test/fixtures/proxying/tinyproxy-service.yaml');
  await kubectl.applyK8sYaml(
    'test/fixtures/proxying/tinyproxy-deployment.yaml',
  );
  await kubectl.waitForDeployment('forwarding-proxy', 'snyk-monitor');

  const imageNameAndTag = imageOptions.nameAndTag.split(':');
  const imageName = imageNameAndTag[0];
  const imageTag = imageNameAndTag[1];
  const imagePullPolicy = imageOptions.pullPolicy;

  await exec(
    `${helmPath} upgrade --install snyk-monitor ${helmChartPath} --namespace snyk-monitor ` +
      `--set image.repository=${imageName} ` +
      `--set image.tag=${imageTag} ` +
      `--set image.pullPolicy=${imagePullPolicy} ` +
      '--set integrationApi=https://kubernetes-upstream.dev.snyk.io ' +
      `--set clusterName=${deployOptions.clusterName} ` +
      '--set https_proxy=http://forwarding-proxy:8080',
  );
  console.log(
    `Deployed ${imageOptions.nameAndTag} with pull policy ${imageOptions.pullPolicy}`,
  );
}
Class
2
async function deployKubernetesMonitor(
  imageOptions: IImageOptions,
  deployOptions: IDeployOptions,
): Promise<void> {
  if (!existsSync(helmPath)) {
    await downloadHelm();
  }

  const imageNameAndTag = imageOptions.nameAndTag.split(':');
  const imageName = imageNameAndTag[0];
  const imageTag = imageNameAndTag[1];
  const imagePullPolicy = imageOptions.pullPolicy;

  await exec(
    `${helmPath} upgrade --install snyk-monitor ${helmChartPath} --namespace snyk-monitor ` +
      `--set image.repository=${imageName} ` +
      `--set image.tag=${imageTag} ` +
      `--set image.pullPolicy=${imagePullPolicy} ` +
      '--set integrationApi=https://kubernetes-upstream.dev.snyk.io ' +
      `--set clusterName=${deployOptions.clusterName} ` +
      '--set nodeSelector."kubernetes\\.io/os"=linux ' +
      '--set pvc.enabled=true ' +
      '--set pvc.create=true ' +
      '--set log_level="INFO" ' +
      '--set rbac.serviceAccount.annotations."foo"="bar" ' +
      '--set volumes.projected.serviceAccountToken=true ' +
      '--set securityContext.fsGroup=65534 ' +
      '--set skopeo.compression.level=1 ' +
      '--set workers.count=5 ' +
      '--set sysdig.enabled=true ',
  );
  console.log(
    `Deployed ${imageOptions.nameAndTag} with pull policy ${imageOptions.pullPolicy}`,
  );
}
Class
2
function createTestYamlDeployment(
  newYamlPath: string,
  imageNameAndTag: string,
  imagePullPolicy: string,
  clusterName: string,
): void {
  console.log('Creating YAML snyk-monitor deployment...');
  const originalDeploymentYaml = readFileSync(
    './snyk-monitor-deployment.yaml',
    'utf8',
  );
  const deployment = parse(originalDeploymentYaml);

  const container = deployment.spec.template.spec.containers.find(
    (container) => container.name === 'snyk-monitor',
  );
  container.image = imageNameAndTag;
  container.imagePullPolicy = imagePullPolicy;

  // Inject the baseUrl of kubernetes-upstream that snyk-monitor container use to send metadata
  const envVar = container.env.find(
    (env) => env.name === 'SNYK_INTEGRATION_API',
  );
  envVar.value = 'https://kubernetes-upstream.dev.snyk.io';
  delete envVar.valueFrom;

  if (clusterName) {
    const clusterNameEnvVar = container.env.find(
      (env) => env.name === 'SNYK_CLUSTER_NAME',
    );
    clusterNameEnvVar.value = clusterName;
    delete clusterNameEnvVar.valueFrom;
  }

  writeFileSync(newYamlPath, stringify(deployment));
  console.log('Created YAML snyk-monitor deployment');
}
Class
2