repo stringlengths 6 47 | file_url stringlengths 77 269 | file_path stringlengths 5 186 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-07 08:35:43 2026-01-07 08:55:24 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/importer/importer.go | app/services/importer/importer.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package importer
import (
"context"
"fmt"
"net/url"
"path"
"strings"
"time"
"github.com/harness/gitness/app/bootstrap"
repoevents "github.com/harness/gitness/app/events/repo"
"github.com/harness/gitness/app/githook"
"github.com/harness/gitness/app/paths"
"github.com/harness/gitness/app/services/keywordsearch"
"github.com/harness/gitness/app/services/publicaccess"
"github.com/harness/gitness/app/services/refcache"
"github.com/harness/gitness/app/services/settings"
"github.com/harness/gitness/app/sse"
"github.com/harness/gitness/app/store"
gitnessurl "github.com/harness/gitness/app/url"
"github.com/harness/gitness/audit"
"github.com/harness/gitness/errors"
"github.com/harness/gitness/git"
"github.com/harness/gitness/git/sha"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/drone/go-convert/convert/bitbucket"
"github.com/drone/go-convert/convert/circle"
"github.com/drone/go-convert/convert/drone"
"github.com/drone/go-convert/convert/github"
"github.com/drone/go-convert/convert/gitlab"
"github.com/rs/zerolog/log"
)
// Importer performs repository imports from remote git providers:
// it creates the git repository, syncs it from a clone URL, and
// optionally converts discovered CI pipelines to Harness pipelines.
type Importer struct {
	defaultBranch string // branch name used when creating the empty git repository
	urlProvider   gitnessurl.Provider
	git           git.Interface
	tx            dbtx.Transactor
	repoStore     store.RepoStore
	pipelineStore store.PipelineStore
	triggerStore  store.TriggerStore
	repoFinder    refcache.RepoFinder
	sseStreamer   sse.Streamer
	indexer       keywordsearch.Indexer
	publicAccess  publicaccess.Service
	eventReporter *repoevents.Reporter
	auditService  audit.Service
	settings      *settings.Service
}
// NewImporter creates a new repository Importer with all its dependencies.
func NewImporter(
	defaultBranch string,
	urlProvider gitnessurl.Provider,
	git git.Interface,
	tx dbtx.Transactor,
	repoStore store.RepoStore,
	pipelineStore store.PipelineStore,
	triggerStore store.TriggerStore,
	repoFinder refcache.RepoFinder,
	sseStreamer sse.Streamer,
	indexer keywordsearch.Indexer,
	publicAccess publicaccess.Service,
	eventReporter *repoevents.Reporter,
	auditService audit.Service,
	settings *settings.Service,
) *Importer {
	imp := new(Importer)
	imp.defaultBranch = defaultBranch
	imp.urlProvider = urlProvider
	imp.git = git
	imp.tx = tx
	imp.repoStore = repoStore
	imp.pipelineStore = pipelineStore
	imp.triggerStore = triggerStore
	imp.repoFinder = repoFinder
	imp.sseStreamer = sseStreamer
	imp.indexer = indexer
	imp.publicAccess = publicAccess
	imp.eventReporter = eventReporter
	imp.auditService = auditService
	imp.settings = settings
	return imp
}
// PipelineOption defines the supported pipeline import options for repository import.
type PipelineOption string

// Enum returns all valid values of PipelineOption.
func (PipelineOption) Enum() []any {
	return []any{PipelineOptionConvert, PipelineOptionIgnore}
}

const (
	// PipelineOptionConvert converts discovered CI pipelines to Harness pipelines.
	PipelineOptionConvert PipelineOption = "convert"
	// PipelineOptionIgnore skips pipeline conversion entirely.
	PipelineOptionIgnore PipelineOption = "ignore"
)
// Input is the payload describing a single repository import:
// the target repository, the source clone URL with credentials,
// and how pipelines should be handled.
type Input struct {
	RepoID    int64          `json:"repo_id"`   // ID of the (pre-created) target repository
	Public    bool           `json:"public"`    // requested visibility of the imported repository
	GitUser   string         `json:"git_user"`  // username for the remote git server
	GitPass   string         `json:"git_pass"`  // password/token for the remote git server
	CloneURL  string         `json:"clone_url"` // source repository clone URL
	Pipelines PipelineOption `json:"pipelines"` // convert or ignore CI pipeline files
}
// Import runs a full repository import: it configures the repository's
// access mode, creates an empty git repository, syncs it from the remote
// clone URL, optionally converts CI pipelines, and finalizes the repository
// state in the database. If any step after git repository creation fails,
// the git repository is deleted again and the error is returned.
func (r *Importer) Import(ctx context.Context, input Input) error {
	// capture the system principal once and reuse it for all calls below.
	systemPrincipal := bootstrap.NewSystemServiceSession().Principal
	if input.CloneURL == "" {
		return errors.InvalidArgument("missing git repository clone URL")
	}

	repoURL, err := url.Parse(input.CloneURL)
	if err != nil {
		return fmt.Errorf("failed to parse git clone URL: %w", err)
	}

	// embed the provided credentials into the clone URL used for syncing.
	repoURL.User = url.UserPassword(input.GitUser, input.GitPass)
	cloneURLWithAuth := repoURL.String()

	repo, err := r.repoStore.Find(ctx, input.RepoID)
	if err != nil {
		return fmt.Errorf("failed to find repo by id: %w", err)
	}
	if repo.State != enum.RepoStateGitImport {
		return errors.InvalidArgumentf("repository %s is not being imported", repo.Identifier)
	}

	log := log.Ctx(ctx).With().
		Int64("repo.id", repo.ID).
		Str("repo.path", repo.Path).
		Logger()

	log.Info().Msg("configure access mode")

	parentPath, _, err := paths.DisectLeaf(repo.Path)
	if err != nil {
		return fmt.Errorf("failed to disect path %q: %w", repo.Path, err)
	}
	isPublicAccessSupported, err := r.publicAccess.IsPublicAccessSupported(ctx, enum.PublicResourceTypeRepo, parentPath)
	if err != nil {
		return fmt.Errorf(
			"failed to check if public access is supported for parent space %q: %w",
			parentPath,
			err,
		)
	}

	// fall back to a private import when the instance doesn't support public repos.
	isRepoPublic := input.Public
	if !isPublicAccessSupported {
		log.Debug().Msg("public access is not supported, import public repo as private instead")
		isRepoPublic = false
	}
	err = r.publicAccess.Set(ctx, enum.PublicResourceTypeRepo, repo.Path, isRepoPublic)
	if err != nil {
		return fmt.Errorf("failed to set repo access mode: %w", err)
	}

	if isRepoPublic {
		// best-effort audit entry - a failure is logged but doesn't abort the import.
		err = r.auditService.Log(ctx,
			systemPrincipal,
			audit.NewResource(audit.ResourceTypeRepository, repo.Identifier),
			audit.ActionUpdated,
			paths.Parent(repo.Path),
			audit.WithOldObject(audit.RepositoryObject{
				Repository: *repo,
				IsPublic:   false,
			}),
			audit.WithNewObject(audit.RepositoryObject{
				Repository: *repo,
				IsPublic:   true,
			}),
		)
		if err != nil {
			log.Warn().Msgf("failed to insert audit log for updating repo to public: %s", err)
		}
	}

	// revert this when import fetches LFS objects
	if err := r.settings.RepoSet(ctx, repo.ID, settings.KeyGitLFSEnabled, false); err != nil {
		log.Warn().Err(err).Msg("failed to disable Git LFS in repository settings")
	}

	log.Info().Msg("create git repository")

	gitUID, err := r.createGitRepository(ctx, &systemPrincipal, repo.ID)
	if err != nil {
		return fmt.Errorf("failed to create empty git repository: %w", err)
	}
	log.Info().Msgf("successfully created git repository with git_uid '%s'", gitUID)

	// run the remaining steps inside a closure so that any failure
	// triggers cleanup of the freshly created git repository below.
	err = func() error {
		repo, err = r.repoStore.UpdateOptLock(ctx, repo, func(repo *types.Repository) error {
			if repo.State != enum.RepoStateGitImport {
				return errors.New("repository has already finished importing")
			}
			repo.GitUID = gitUID
			return nil
		})
		if err != nil {
			return fmt.Errorf("failed to update repository prior to the import: %w", err)
		}
		r.repoFinder.MarkChanged(ctx, repo.Core())

		log.Info().Msg("sync repository")

		err = r.syncGitRepository(ctx, &systemPrincipal, repo, cloneURLWithAuth)
		if err != nil {
			return fmt.Errorf("failed to sync git repository from '%s': %w", input.CloneURL, err)
		}
		log.Info().Msgf("successfully synced repository (with default branch: %q)", repo.DefaultBranch)

		log.Info().Msg("update repo in DB")

		repo, err = r.repoStore.UpdateOptLock(ctx, repo, func(repo *types.Repository) error {
			if repo.State != enum.RepoStateGitImport {
				return errors.New("repository has already finished importing")
			}
			repo.GitUID = gitUID
			repo.State = enum.RepoStateActive
			return nil
		})
		if err != nil {
			return fmt.Errorf("failed to update repository after import: %w", err)
		}
		r.repoFinder.MarkChanged(ctx, repo.Core())

		if input.Pipelines != PipelineOptionConvert {
			return nil // assumes the value is enum.PipelineOptionIgnore
		}

		// pipeline conversion is best-effort - failures are only logged.
		const convertPipelinesCommitMessage = "autoconvert pipeline"
		err = r.processPipelines(ctx, &systemPrincipal, repo, convertPipelinesCommitMessage)
		if err != nil {
			log.Warn().Err(err).Msg("failed to convert pipelines")
		}

		return nil
	}()
	if err != nil {
		log.Error().Err(err).Msg("failed repository import - cleanup git repository")
		repo.GitUID = gitUID // make sure to delete the correct directory
		// use a non-cancelable context so cleanup still runs on cancellation.
		if errDel := r.deleteGitRepository(context.WithoutCancel(ctx), &systemPrincipal, repo); errDel != nil {
			log.Warn().Err(errDel).
				Msg("failed to delete git repository after failed import")
		}
		return fmt.Errorf("failed to import repository: %w", err)
	}

	r.sseStreamer.Publish(ctx, repo.ParentID, enum.SSETypeRepositoryImportCompleted, repo)

	r.eventReporter.Created(ctx, &repoevents.CreatedPayload{
		Base: repoevents.Base{
			RepoID:      repo.ID,
			PrincipalID: systemPrincipal.ID,
		},
		IsPublic:     input.Public,
		ImportedFrom: input.CloneURL,
	})

	// indexing is best-effort - a failure doesn't fail the import.
	err = r.indexer.Index(ctx, repo)
	if err != nil {
		log.Warn().Err(err).Msg("failed to index repository")
	}

	log.Info().Msg("completed repository import")

	return nil
}
// createGitRepository creates an empty git repository on the git server,
// acting as the provided principal, and returns the new repository's UID.
func (r *Importer) createGitRepository(
	ctx context.Context,
	principal *types.Principal,
	repoID int64,
) (string, error) {
	envVars, err := r.createEnvVars(ctx, principal, repoID)
	if err != nil {
		return "", err
	}

	timestamp := time.Now()
	identity := git.Identity{
		Name:  principal.DisplayName,
		Email: principal.Email,
	}

	created, err := r.git.CreateRepository(ctx, &git.CreateRepositoryParams{
		Actor:         identity,
		EnvVars:       envVars,
		DefaultBranch: r.defaultBranch,
		Files:         nil, // no initial commit content - the sync fills the repo
		Author:        &identity,
		AuthorDate:    &timestamp,
		Committer:     &identity,
		CommitterDate: &timestamp,
	})
	if err != nil {
		return "", fmt.Errorf("failed to create empty git repository: %w", err)
	}

	return created.UID, nil
}
// syncGitRepository fetches all branches and tags from the source clone URL
// into the repository's git storage.
func (r *Importer) syncGitRepository(
	ctx context.Context,
	principal *types.Principal,
	repo *types.Repository,
	sourceCloneURL string,
) error {
	writeParams, err := r.createRPCWriteParams(ctx, principal, repo)
	if err != nil {
		return err
	}

	params := &git.SyncRepositoryParams{
		WriteParams:       writeParams,
		Source:            sourceCloneURL,
		CreateIfNotExists: false, // the repository was created beforehand
		RefSpecs:          []string{"refs/heads/*:refs/heads/*", "refs/tags/*:refs/tags/*"},
		DefaultBranch:     repo.DefaultBranch,
	}
	if _, err := r.git.SyncRepository(ctx, params); err != nil {
		return fmt.Errorf("failed to sync repository: %w", err)
	}

	return nil
}
// deleteGitRepository removes the repository's git storage.
// A not-found error is swallowed - the repository may never have existed.
func (r *Importer) deleteGitRepository(
	ctx context.Context,
	principal *types.Principal,
	repo *types.Repository,
) error {
	writeParams, err := r.createRPCWriteParams(ctx, principal, repo)
	if err != nil {
		return err
	}

	delErr := r.git.DeleteRepository(ctx, &git.DeleteRepositoryParams{
		WriteParams: writeParams,
	})
	if delErr == nil || errors.IsNotFound(delErr) {
		return nil
	}

	return fmt.Errorf("failed to delete git repository: %w", delErr)
}
// matchFiles returns candidate pipeline files matching pattern inside dirPath
// at the given ref, limited to files of at most maxSize bytes.
// Only OriginalPath and Content are populated - Name and ConvertedPath are
// filled in later during pipeline conversion.
func (r *Importer) matchFiles(
	ctx context.Context,
	repo *types.Repository,
	ref string,
	dirPath string,
	pattern string,
	maxSize int,
) ([]pipelineFile, error) {
	resp, err := r.git.MatchFiles(ctx, &git.MatchFilesParams{
		ReadParams: git.ReadParams{RepoUID: repo.GitUID},
		Ref:        ref,
		DirPath:    dirPath,
		Pattern:    pattern,
		MaxSize:    maxSize,
	})
	if err != nil {
		// fix: the previous message claimed pipeline conversion failed,
		// but this function only matches files - no conversion happens here.
		return nil, fmt.Errorf("failed to match pipeline files: %w", err)
	}

	pipelines := make([]pipelineFile, len(resp.Files))
	for i, pipeline := range resp.Files {
		pipelines[i] = pipelineFile{
			Name:          "",
			OriginalPath:  pipeline.Path,
			ConvertedPath: "",
			Content:       pipeline.Content,
		}
	}

	return pipelines, nil
}
// createRPCWriteParams assembles the write parameters (actor identity,
// repo UID, and githook environment variables) for git RPC write calls.
func (r *Importer) createRPCWriteParams(
	ctx context.Context,
	principal *types.Principal,
	repo *types.Repository,
) (git.WriteParams, error) {
	envVars, err := r.createEnvVars(ctx, principal, repo.ID)
	if err != nil {
		return git.WriteParams{}, err
	}

	params := git.WriteParams{
		Actor: git.Identity{
			Name:  principal.DisplayName,
			Email: principal.Email,
		},
		RepoUID: repo.GitUID,
		EnvVars: envVars,
	}
	return params, nil
}
// createEnvVars generates the githook environment variables required for
// server-side git operations performed on behalf of the principal.
func (r *Importer) createEnvVars(
	ctx context.Context,
	principal *types.Principal,
	repoID int64,
) (map[string]string, error) {
	apiURL := r.urlProvider.GetInternalAPIURL(ctx)
	vars, err := githook.GenerateEnvironmentVariables(
		ctx,
		apiURL,
		repoID,
		principal.ID,
		true,
		true,
	)
	if err != nil {
		return nil, fmt.Errorf("failed to generate git hook environment variables: %w", err)
	}

	return vars, nil
}
// pipelineFile describes a CI pipeline definition found in (or converted
// for) an imported repository.
type pipelineFile struct {
	Name          string // pipeline identifier (empty until conversion assigns one)
	OriginalPath  string // path of the source CI file in the imported repo
	ConvertedPath string // target path of the converted Harness pipeline file
	Content       []byte // file content (source before conversion, converted after)
}
// processPipelines converts any supported CI pipeline files found in the
// repository, commits the converted files to the default branch, and then
// records the new pipelines (each with a default PR trigger) in the database
// inside a single transaction.
func (r *Importer) processPipelines(ctx context.Context,
	principal *types.Principal,
	repo *types.Repository,
	commitMessage string,
) error {
	writeParams, err := r.createRPCWriteParams(ctx, principal, repo)
	if err != nil {
		return err
	}

	pipelineFiles := r.convertPipelines(ctx, repo)
	if len(pipelineFiles) == 0 {
		// nothing to convert - not an error.
		return nil
	}

	// one create-file action per converted pipeline.
	actions := make([]git.CommitFileAction, len(pipelineFiles))
	for i, file := range pipelineFiles {
		actions[i] = git.CommitFileAction{
			Action:  git.CreateAction,
			Path:    file.ConvertedPath,
			Payload: file.Content,
			SHA:     sha.None,
		}
	}

	now := time.Now()
	identity := &git.Identity{
		Name:  principal.DisplayName,
		Email: principal.Email,
	}

	// commit all converted pipeline files in one commit on the default branch.
	_, err = r.git.CommitFiles(ctx, &git.CommitFilesParams{
		WriteParams:   writeParams,
		Message:       commitMessage,
		Branch:        repo.DefaultBranch,
		NewBranch:     repo.DefaultBranch,
		Actions:       actions,
		Committer:     identity,
		CommitterDate: &now,
		Author:        identity,
		AuthorDate:    &now,
	})
	if err != nil {
		return fmt.Errorf("failed to commit converted pipeline files: %w", err)
	}

	nowMilli := now.UnixMilli()

	// insert all pipelines and their triggers atomically.
	err = r.tx.WithTx(ctx, func(ctx context.Context) error {
		for _, p := range pipelineFiles {
			pipeline := &types.Pipeline{
				Description:   "",
				RepoID:        repo.ID,
				Identifier:    p.Name,
				CreatedBy:     principal.ID,
				Seq:           0,
				DefaultBranch: repo.DefaultBranch,
				ConfigPath:    p.ConvertedPath,
				Created:       nowMilli,
				Updated:       nowMilli,
				Version:       0,
			}
			err = r.pipelineStore.Create(ctx, pipeline)
			if err != nil {
				return fmt.Errorf("pipeline creation failed: %w", err)
			}

			// Create a default trigger alongside the pipeline.
			// Default trigger operations are set on pull request created, reopened or updated.
			// NOTE(review): an earlier comment claimed trigger failures are only
			// logged, but the code below returns the error and rolls back the
			// whole transaction - confirm which behavior is intended.
			trigger := &types.Trigger{
				Description: "auto-created trigger on pipeline conversion",
				Created:     nowMilli,
				Updated:     nowMilli,
				PipelineID:  pipeline.ID,
				RepoID:      pipeline.RepoID,
				CreatedBy:   principal.ID,
				Identifier:  "default",
				Actions: []enum.TriggerAction{enum.TriggerActionPullReqCreated,
					enum.TriggerActionPullReqReopened, enum.TriggerActionPullReqBranchUpdated},
				Disabled: false,
				Version:  0,
			}
			err = r.triggerStore.Create(ctx, trigger)
			if err != nil {
				return fmt.Errorf("failed to create auto trigger on pipeline creation: %w", err)
			}
		}
		return nil
	}, dbtx.TxDefault)
	if err != nil {
		return fmt.Errorf("failed to insert pipelines and triggers: %w", err)
	}

	return nil
}
// convertPipelines converts pipelines found in the repository.
// Note: For GitHub actions, there can be multiple.
// Providers are probed in a fixed order; the first one that yields at least
// one successfully converted pipeline wins. Returns nil if nothing converted.
func (r *Importer) convertPipelines(ctx context.Context,
	repo *types.Repository,
) []pipelineFile {
	const maxSize = 65536 // ignore pipeline files larger than 64KiB

	// match finds pipeline candidates; lookup failures are logged and treated
	// as "no files found" so the remaining providers can still be probed.
	match := func(dirPath, regExpDef string) []pipelineFile {
		files, err := r.matchFiles(ctx, repo, repo.DefaultBranch, dirPath, regExpDef, maxSize)
		if err != nil {
			log.Ctx(ctx).Warn().Err(err).Msgf("failed to find pipeline file(s) '%s' in '%s'",
				regExpDef, dirPath)
			return nil
		}
		return files
	}

	// table of supported CI providers, replacing the previous copy-pasted
	// probe blocks - same probe order, same behavior.
	providers := []struct {
		dirPath  string
		patterns []string
		gen      func() pipelineConverter
	}{
		{"", []string{".drone.yml"}, func() pipelineConverter { return drone.New() }},
		{"", []string{"bitbucket-pipelines.yml"}, func() pipelineConverter { return bitbucket.New() }},
		{"", []string{".gitlab-ci.yml"}, func() pipelineConverter { return gitlab.New() }},
		{".circleci", []string{"config.yml"}, func() pipelineConverter { return circle.New() }},
		// GitHub actions allow multiple workflow files, with either extension.
		{".github/workflows", []string{"*.yml", "*.yaml"}, func() pipelineConverter { return github.New() }},
	}

	for _, provider := range providers {
		var files []pipelineFile
		for _, pattern := range provider.patterns {
			files = append(files, match(provider.dirPath, pattern)...)
		}
		if len(files) == 0 {
			continue
		}
		if converted := convertPipelineFiles(ctx, files, provider.gen); len(converted) > 0 {
			return converted
		}
	}

	return nil
}
// pipelineConverter converts a CI pipeline definition (raw file bytes) into
// a Harness pipeline definition. Satisfied by the drone/go-convert converters.
type pipelineConverter interface {
	ConvertBytes([]byte) ([]byte, error)
}
// convertPipelineFiles runs each source file through a freshly created
// converter and assigns it a Harness pipeline name and target path under
// the .harness directory. Files that fail conversion are logged and dropped.
// A single file gets the fixed default name/path; multiple files are named
// after their source file's base name.
func convertPipelineFiles(ctx context.Context,
	files []pipelineFile,
	gen func() pipelineConverter,
) []pipelineFile {
	const (
		harnessPipelineName     = "pipeline"
		harnessPipelineNameOnly = "default-" + harnessPipelineName
		harnessPipelineDir      = ".harness"
		harnessPipelineFileOnly = harnessPipelineDir + "/pipeline.yaml"
	)

	single := len(files) == 1
	out := make([]pipelineFile, 0, len(files))

	for _, src := range files {
		converted, err := gen().ConvertBytes(src.Content)
		if err != nil {
			log.Ctx(ctx).Warn().Err(err).Msgf("failed to convert pipeline file %s", src.OriginalPath)
			continue
		}

		name, target := harnessPipelineNameOnly, harnessPipelineFileOnly
		if !single {
			// derive the pipeline name from the source file's base name
			// without its extension.
			stem := path.Base(src.OriginalPath)
			stem = strings.TrimSuffix(stem, path.Ext(stem))
			name = harnessPipelineName + "-" + stem
			target = harnessPipelineDir + "/" + stem + ".yaml"
		}

		out = append(out, pipelineFile{
			Name:          name,
			OriginalPath:  src.OriginalPath,
			ConvertedPath: target,
			Content:       converted,
		})
	}

	return out
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/importer/connector_service.go | app/services/importer/connector_service.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package importer
import (
"context"
"github.com/harness/gitness/errors"
)
// ConnectorService resolves a connector definition into repository access
// information (clone URL and credentials).
type ConnectorService interface {
	GetAccessInfo(ctx context.Context, c ConnectorDef) (AccessInfo, error)
}

// connectorServiceNoop is the fallback ConnectorService used when connector
// support is unavailable - every call is rejected.
type connectorServiceNoop struct{}

// GetAccessInfo always returns an invalid-argument error.
func (connectorServiceNoop) GetAccessInfo(context.Context, ConnectorDef) (AccessInfo, error) {
	return AccessInfo{}, errors.InvalidArgument("This feature is not supported.")
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/importer/repo.go | app/services/importer/repo.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package importer
import (
"encoding/json"
"fmt"
"time"
"github.com/harness/gitness/app/paths"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
// NewRepo builds a new repository record in the git-import state. The git UID
// is a temporary placeholder derived from the space and identifier; the job
// handler replaces it with the real UID once the git repository exists.
func NewRepo(
	spaceID int64,
	spacePath string,
	identifier string,
	description string,
	principal *types.Principal,
	defaultBranch string,
) *types.Repository {
	createdAt := time.Now().UnixMilli()

	// temporary, unique-enough git UID until the real one is assigned.
	uidSeed := fmt.Sprintf("%d:%s", spaceID, identifier)
	tempGitUID := fmt.Sprintf("importing-%s-%d", hash(uidSeed), createdAt)

	repo := &types.Repository{
		Version:       0,
		ParentID:      spaceID,
		Identifier:    identifier,
		GitUID:        tempGitUID, // the correct git UID will be set by the job handler
		Description:   description,
		CreatedBy:     principal.ID,
		Created:       createdAt,
		Updated:       createdAt,
		LastGITPush:   createdAt, // even in case of an empty repo, the git repo got created.
		ForkID:        0,
		DefaultBranch: defaultBranch,
		State:         enum.RepoStateGitImport,
		Path:          paths.Concatenate(spacePath, identifier),
		Tags:          json.RawMessage(`{}`),
	}
	return repo
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/importer/job_repository.go | app/services/importer/job_repository.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package importer
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
"github.com/harness/gitness/encrypt"
"github.com/harness/gitness/errors"
"github.com/harness/gitness/job"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
const (
	// importJobMaxRetries disables retries - a failed import is not retried.
	importJobMaxRetries = 0
	// importJobMaxDuration caps how long a single import job may run.
	importJobMaxDuration = 45 * time.Minute
)

var (
	// ErrNotFound is returned if no import data was found.
	ErrNotFound = errors.New("import not found")
)
// JobRepository is the background job handler for repository imports.
// It schedules import jobs and executes them via the Importer.
type JobRepository struct {
	scheduler *job.Scheduler
	encrypter encrypt.Encrypter // encrypts the job payload (it contains git credentials)
	importer  *Importer
}

// compile-time interface compliance check.
var _ job.Handler = (*JobRepository)(nil)

// RepositoryInput is the serialized job payload for a repository import.
type RepositoryInput struct {
	Input
}

// jobTypeRepositoryImport is the job type under which import jobs are registered.
const jobTypeRepositoryImport = "repository_import"

// Register registers this handler for the repository import job type.
func (r *JobRepository) Register(executor *job.Executor) error {
	return executor.Register(jobTypeRepositoryImport, r)
}
// Run starts a background job that imports the provided repository from the provided clone URL.
func (r *JobRepository) Run(
	ctx context.Context,
	provider Provider,
	repo *types.Repository,
	public bool,
	cloneURL string,
	pipelines PipelineOption,
) error {
	input := RepositoryInput{
		Input: Input{
			RepoID:    repo.ID,
			Public:    public,
			GitUser:   provider.Username,
			GitPass:   provider.Password,
			CloneURL:  cloneURL,
			Pipelines: pipelines,
		},
	}

	def, err := r.getJobDef(r.jobIDFromRepoID(repo.ID), input)
	if err != nil {
		return err
	}

	return r.scheduler.RunJob(ctx, def)
}
// RunMany starts background jobs that import the provided repositories from
// the provided clone URLs. repoIDs, publics, and cloneURLs are parallel
// slices and must all have the same length.
func (r *JobRepository) RunMany(
	ctx context.Context,
	groupID string,
	provider Provider,
	repoIDs []int64,
	publics []bool,
	cloneURLs []string,
	pipelines PipelineOption,
) error {
	if len(repoIDs) != len(cloneURLs) {
		return fmt.Errorf("slice length mismatch: have %d repositories and %d clone URLs",
			len(repoIDs), len(cloneURLs))
	}
	// fix: publics is indexed below too - validate its length as well,
	// otherwise a short slice would panic with index out of range.
	if len(repoIDs) != len(publics) {
		return fmt.Errorf("slice length mismatch: have %d repositories and %d public flags",
			len(repoIDs), len(publics))
	}

	n := len(repoIDs)
	defs := make([]job.Definition, n)

	for k := range n {
		repoID := repoIDs[k]
		cloneURL := cloneURLs[k]

		jobID := r.jobIDFromRepoID(repoID)

		jobDef, err := r.getJobDef(jobID, RepositoryInput{
			Input: Input{
				RepoID:    repoID,
				Public:    publics[k],
				GitUser:   provider.Username,
				GitPass:   provider.Password,
				CloneURL:  cloneURL,
				Pipelines: pipelines,
			},
		})
		if err != nil {
			return err
		}

		defs[k] = jobDef
	}

	err := r.scheduler.RunJobs(ctx, groupID, defs)
	if err != nil {
		return fmt.Errorf("failed to run jobs: %w", err)
	}

	return nil
}
// jobIDFromRepoID derives the deterministic import-job ID for a repository.
func (*JobRepository) jobIDFromRepoID(repoID int64) string {
	return "import-repo-" + strconv.FormatInt(repoID, 10)
}
// getJobDef builds the job definition whose payload is the JSON-encoded
// input, encrypted (it carries git credentials) and base64 encoded.
func (r *JobRepository) getJobDef(jobUID string, input RepositoryInput) (job.Definition, error) {
	raw, err := json.Marshal(input)
	if err != nil {
		return job.Definition{}, fmt.Errorf("failed to marshal job input json: %w", err)
	}

	encryptedData, err := r.encrypter.Encrypt(strings.TrimSpace(string(raw)))
	if err != nil {
		return job.Definition{}, fmt.Errorf("failed to encrypt job input: %w", err)
	}

	def := job.Definition{
		UID:        jobUID,
		Type:       jobTypeRepositoryImport,
		MaxRetries: importJobMaxRetries,
		Timeout:    importJobMaxDuration,
		Data:       base64.StdEncoding.EncodeToString(encryptedData),
	}
	return def, nil
}
// getJobInput reverses getJobDef's encoding: base64 decode, decrypt,
// then JSON decode the payload back into the input struct.
func (r *JobRepository) getJobInput(data string) (RepositoryInput, error) {
	encrypted, err := base64.StdEncoding.DecodeString(data)
	if err != nil {
		return RepositoryInput{}, fmt.Errorf("failed to base64 decode job input: %w", err)
	}

	decrypted, err := r.encrypter.Decrypt(encrypted)
	if err != nil {
		return RepositoryInput{}, fmt.Errorf("failed to decrypt job input: %w", err)
	}

	var input RepositoryInput
	if err := json.NewDecoder(strings.NewReader(decrypted)).Decode(&input); err != nil {
		return RepositoryInput{}, fmt.Errorf("failed to unmarshal job input json: %w", err)
	}

	return input, nil
}
// Handle is repository import background job handler: it decodes the job
// payload and runs the import. The returned string (job result) is unused.
//
//nolint:gocognit // refactor if needed.
func (r *JobRepository) Handle(ctx context.Context, data string, _ job.ProgressReporter) (string, error) {
	input, err := r.getJobInput(data)
	if err != nil {
		return "", err
	}

	if err := r.importer.Import(ctx, input.Input); err != nil {
		return "", fmt.Errorf("failed to import repository: %w", err)
	}

	return "", nil
}
// GetProgress reports the state of the repository's import job. When no job
// record exists, the repository state decides the answer: importing means
// the job failed, migrator states mean work is still in progress, and
// anything else yields ErrNotFound.
func (r *JobRepository) GetProgress(ctx context.Context, repo *types.RepositoryCore) (job.Progress, error) {
	progress, err := r.scheduler.GetJobProgress(ctx, r.jobIDFromRepoID(repo.ID))
	switch {
	case errors.Is(err, gitness_store.ErrResourceNotFound):
		switch repo.State {
		case enum.RepoStateGitImport:
			// job missing but repo still marked importing -> report failure.
			return job.FailProgress(), nil
		case enum.RepoStateMigrateDataImport, enum.RepoStateMigrateGitPush:
			// migrator CLI imports have no job record -> report running.
			return job.Progress{
				State:    job.JobStateRunning,
				Progress: job.ProgressMin,
			}, nil
		default:
			// no import, or it completed long ago and the job was cleaned up.
			return job.Progress{}, ErrNotFound
		}
	case err != nil:
		return job.Progress{}, fmt.Errorf("failed to get job progress: %w", err)
	default:
		return progress, nil
	}
}
// Cancel aborts the repository's import job. It is a no-op unless the
// repository is currently in the git-import state.
func (r *JobRepository) Cancel(ctx context.Context, repo *types.Repository) error {
	if repo.State != enum.RepoStateGitImport {
		return nil
	}

	if err := r.scheduler.CancelJob(ctx, r.jobIDFromRepoID(repo.ID)); err != nil {
		return fmt.Errorf("failed to cancel job: %w", err)
	}

	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/importer/job_link.go | app/services/importer/job_link.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package importer
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"strconv"
"time"
"github.com/harness/gitness/app/bootstrap"
repoevents "github.com/harness/gitness/app/events/repo"
"github.com/harness/gitness/app/githook"
"github.com/harness/gitness/app/services/keywordsearch"
"github.com/harness/gitness/app/services/refcache"
"github.com/harness/gitness/app/sse"
"github.com/harness/gitness/app/store"
gitnessurl "github.com/harness/gitness/app/url"
"github.com/harness/gitness/errors"
"github.com/harness/gitness/git"
"github.com/harness/gitness/job"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/rs/zerolog/log"
)
const (
	// jobLinkRepoMaxRetries disables retries for the repository link job.
	jobLinkRepoMaxRetries = 0
	// jobLinkRepoMaxDuration caps how long a single link job may run.
	jobLinkRepoMaxDuration = 45 * time.Minute
)

// JobRepositoryLink is the background job handler that imports a repository
// linked through a connector (the clone URL and credentials come from the
// ConnectorService rather than from the job payload).
type JobRepositoryLink struct {
	scheduler        *job.Scheduler
	urlProvider      gitnessurl.Provider
	git              git.Interface
	connectorService ConnectorService
	repoStore        store.RepoStore
	linkedRepoStore  store.LinkedRepoStore
	repoFinder       refcache.RepoFinder
	sseStreamer      sse.Streamer
	indexer          keywordsearch.Indexer
	eventReporter    *repoevents.Reporter
}

// compile-time interface compliance check.
var _ job.Handler = (*JobRepositoryLink)(nil)
// NewJobRepositoryLink creates a new JobRepositoryLink job handler.
func NewJobRepositoryLink(
	scheduler *job.Scheduler,
	urlProvider gitnessurl.Provider,
	git git.Interface,
	connectorService ConnectorService,
	repoStore store.RepoStore,
	linkedRepoStore store.LinkedRepoStore,
	repoFinder refcache.RepoFinder,
	sseStreamer sse.Streamer,
	indexer keywordsearch.Indexer,
	eventReporter *repoevents.Reporter,
) *JobRepositoryLink {
	handler := new(JobRepositoryLink)
	handler.scheduler = scheduler
	handler.urlProvider = urlProvider
	handler.git = git
	handler.connectorService = connectorService
	handler.repoStore = repoStore
	handler.linkedRepoStore = linkedRepoStore
	handler.repoFinder = repoFinder
	handler.sseStreamer = sseStreamer
	handler.indexer = indexer
	handler.eventReporter = eventReporter
	return handler
}
// JobLinkRepoInput is the serialized job payload for a repository link job.
type JobLinkRepoInput struct {
	RepoID   int64 `json:"repo_id"`   // ID of the repository to link
	IsPublic bool  `json:"is_public"` // requested visibility of the linked repository
}

// jobTypeRepositoryLink is the job type under which link jobs are registered.
const jobTypeRepositoryLink = "link_repository_import"

// Register registers this handler for the repository link job type.
func (r *JobRepositoryLink) Register(executor *job.Executor) error {
	return executor.Register(jobTypeRepositoryLink, r)
}
// Run starts a background job that imports the linked repository identified
// by repoID. Access details are resolved later by the job handler via the
// connector service, so only the repo ID and visibility go into the payload.
func (r *JobRepositoryLink) Run(ctx context.Context, repoID int64, isPublic bool) error {
	input := JobLinkRepoInput{
		RepoID:   repoID,
		IsPublic: isPublic,
	}

	def, err := r.getJobDef(r.jobIDFromRepoID(repoID), input)
	if err != nil {
		return err
	}

	return r.scheduler.RunJob(ctx, def)
}
// jobIDFromRepoID derives the deterministic link-job ID for a repository.
func (*JobRepositoryLink) jobIDFromRepoID(repoID int64) string {
	return "link-repo-" + strconv.FormatInt(repoID, 10)
}
// getJobDef builds the job definition carrying the JSON + base64 encoded
// input. Unlike the import job, the payload holds no credentials and is
// therefore not encrypted.
func (r *JobRepositoryLink) getJobDef(jobUID string, input JobLinkRepoInput) (job.Definition, error) {
	payload, err := json.Marshal(input)
	if err != nil {
		return job.Definition{}, fmt.Errorf("failed to marshal job input json: %w", err)
	}

	def := job.Definition{
		UID:        jobUID,
		Type:       jobTypeRepositoryLink,
		MaxRetries: jobLinkRepoMaxRetries,
		Timeout:    jobLinkRepoMaxDuration,
		Data:       base64.StdEncoding.EncodeToString(bytes.TrimSpace(payload)),
	}
	return def, nil
}
// getJobInput decodes the base64 + JSON job payload back into its input struct.
func (r *JobRepositoryLink) getJobInput(data string) (JobLinkRepoInput, error) {
	decoded, err := base64.StdEncoding.DecodeString(data)
	if err != nil {
		return JobLinkRepoInput{}, fmt.Errorf("failed to base64 decode job input: %w", err)
	}

	var input JobLinkRepoInput
	if err := json.NewDecoder(bytes.NewReader(decoded)).Decode(&input); err != nil {
		return JobLinkRepoInput{}, fmt.Errorf("failed to unmarshal job input json: %w", err)
	}

	return input, nil
}
func (r *JobRepositoryLink) Handle(ctx context.Context, data string, _ job.ProgressReporter) (string, error) {
input, err := r.getJobInput(data)
if err != nil {
return "", err
}
systemPrincipal := bootstrap.NewSystemServiceSession().Principal
gitIdentity := git.Identity{
Name: systemPrincipal.DisplayName,
Email: systemPrincipal.Email,
}
repo, err := r.repoStore.Find(ctx, input.RepoID)
if err != nil {
return "", fmt.Errorf("failed to find repo by id: %w", err)
}
linkedRepo, err := r.linkedRepoStore.Find(ctx, repo.ID)
if err != nil {
return "", fmt.Errorf("failed to find linked repo by repo id: %w", err)
}
accessInfo, err := r.connectorService.GetAccessInfo(ctx, ConnectorDef{
Path: linkedRepo.ConnectorPath,
Identifier: linkedRepo.ConnectorIdentifier,
})
if err != nil {
return "", fmt.Errorf("failed to get repository access info from connector: %w", err)
}
cloneURLWithAuth, err := accessInfo.URLWithCredentials()
if err != nil {
return "", fmt.Errorf("failed to parse git clone URL: %w", err)
}
envVars, err := githook.GenerateEnvironmentVariables(
ctx,
r.urlProvider.GetInternalAPIURL(ctx),
repo.ID,
systemPrincipal.ID,
true,
true,
)
if err != nil {
return "", fmt.Errorf("failed to create environment variables: %w", err)
}
if repo.State != enum.RepoStateGitImport {
return "", fmt.Errorf("repository %s is not being imported", repo.Identifier)
}
log := log.Ctx(ctx).With().
Int64("repo.id", repo.ID).
Str("repo.path", repo.Path).
Logger()
now := time.Now()
respCreateRepo, err := r.git.CreateRepository(ctx, &git.CreateRepositoryParams{
Actor: gitIdentity,
EnvVars: envVars,
DefaultBranch: repo.DefaultBranch,
Files: nil,
Author: &gitIdentity,
AuthorDate: &now,
Committer: &gitIdentity,
CommitterDate: &now,
})
if err != nil {
return "", fmt.Errorf("failed to create empty git repository: %w", err)
}
gitUID := respCreateRepo.UID
writeParams := git.WriteParams{
Actor: gitIdentity,
RepoUID: gitUID,
EnvVars: envVars,
}
err = func() error {
_, err = r.git.SyncRepository(ctx, &git.SyncRepositoryParams{
WriteParams: writeParams,
Source: cloneURLWithAuth,
CreateIfNotExists: false,
RefSpecs: []string{"refs/heads/*:refs/heads/*", "refs/tags/*:refs/tags/*"},
DefaultBranch: repo.DefaultBranch,
})
if err != nil {
return fmt.Errorf("failed to sync repository: %w", err)
}
repo, err = r.repoStore.UpdateOptLock(ctx, repo, func(repo *types.Repository) error {
if repo.State != enum.RepoStateGitImport {
return errors.New("repository has already finished importing")
}
repo.State = enum.RepoStateActive
repo.GitUID = gitUID
return nil
})
if err != nil {
return fmt.Errorf("failed to update repository after import: %w", err)
}
r.repoFinder.MarkChanged(ctx, repo.Core())
return nil
}()
if err != nil {
log.Error().Err(err).Msg("failed repository import - cleanup git repository")
if errDel := r.git.DeleteRepository(context.WithoutCancel(ctx), &git.DeleteRepositoryParams{
WriteParams: writeParams,
}); errDel != nil {
log.Warn().Err(errDel).
Msg("failed to delete git repository after failed import")
}
return "", fmt.Errorf("failed to import repository: %w", err)
}
r.sseStreamer.Publish(ctx, repo.ParentID, enum.SSETypeRepositoryImportCompleted, repo)
r.eventReporter.Created(ctx, &repoevents.CreatedPayload{
Base: repoevents.Base{
RepoID: repo.ID,
PrincipalID: bootstrap.NewSystemServiceSession().Principal.ID,
},
IsPublic: input.IsPublic,
ImportedFrom: repo.GitURL,
})
err = r.indexer.Index(ctx, repo)
if err != nil {
log.Warn().Err(err).Msg("failed to index repository")
}
return "", nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/importer/connector.go | app/services/importer/connector.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package importer
import (
"fmt"
"net/url"
)
type ConnectorDef struct {
Path string `json:"path"`
Identifier string `json:"identifier"`
}
type AccessInfo struct {
Username string
Password string
URL string
}
func (info AccessInfo) URLWithCredentials() (string, error) {
repoURL, err := url.Parse(info.URL)
if err != nil {
return "", fmt.Errorf("failed to parse repository clone url, %q: %w", info.URL, err)
}
repoURL.User = url.UserPassword(info.Username, info.Password)
cloneURLWithAuth := repoURL.String()
return cloneURLWithAuth, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/importer/provider.go | app/services/importer/provider.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package importer
import (
"context"
"crypto/sha512"
"encoding/base32"
"errors"
"fmt"
"net/http"
"strings"
"github.com/harness/gitness/app/api/usererror"
"github.com/harness/gitness/types"
"github.com/drone/go-scm/scm"
"github.com/drone/go-scm/scm/driver/azure"
"github.com/drone/go-scm/scm/driver/bitbucket"
"github.com/drone/go-scm/scm/driver/gitea"
"github.com/drone/go-scm/scm/driver/github"
"github.com/drone/go-scm/scm/driver/gitlab"
"github.com/drone/go-scm/scm/driver/gogs"
"github.com/drone/go-scm/scm/driver/stash"
"github.com/drone/go-scm/scm/transport"
"github.com/drone/go-scm/scm/transport/oauth2"
)
type ProviderType string
const (
ProviderTypeGitHub ProviderType = "github"
ProviderTypeGitLab ProviderType = "gitlab"
ProviderTypeBitbucket ProviderType = "bitbucket"
ProviderTypeStash ProviderType = "stash"
ProviderTypeGitea ProviderType = "gitea"
ProviderTypeGogs ProviderType = "gogs"
ProviderTypeAzure ProviderType = "azure"
)
func (p ProviderType) Enum() []any {
return []any{
ProviderTypeGitHub,
ProviderTypeGitLab,
ProviderTypeBitbucket,
ProviderTypeStash,
ProviderTypeGitea,
ProviderTypeGogs,
ProviderTypeAzure,
}
}
type Provider struct {
Type ProviderType `json:"type"`
Host string `json:"host"`
Username string `json:"username"`
Password string `json:"password"`
}
type RepositoryInfo struct {
Space string
Identifier string
CloneURL string
IsPublic bool
DefaultBranch string
}
// ToRepo converts the RepositoryInfo into the types.Repository object marked as being imported and is-public flag.
func (r *RepositoryInfo) ToRepo(
spaceID int64,
spacePath string,
identifier string,
description string,
principal *types.Principal,
) (*types.Repository, bool) {
return NewRepo(
spaceID,
spacePath,
identifier,
description,
principal,
r.DefaultBranch,
), r.IsPublic
}
func hash(s string) string {
h := sha512.New()
_, _ = h.Write([]byte(s))
return base32.StdEncoding.EncodeToString(h.Sum(nil)[:10])
}
func oauthTransport(token string, scheme string) http.RoundTripper {
if token == "" {
return baseTransport
}
return &oauth2.Transport{
Base: baseTransport,
Scheme: scheme,
Source: oauth2.StaticTokenSource(&scm.Token{Token: token}),
}
}
func authHeaderTransport(token string) http.RoundTripper {
if token == "" {
return baseTransport
}
return &transport.Authorization{
Base: baseTransport,
Scheme: "token",
Credentials: token,
}
}
func basicAuthTransport(username, password string) http.RoundTripper {
if username == "" && password == "" {
return baseTransport
}
return &transport.BasicAuth{
Base: baseTransport,
Username: username,
Password: password,
}
}
// getScmClientWithTransport creates an SCM client along with the necessary transport
// layer depending on the provider. For example, for bitbucket we support app passwords
// so the auth transport is BasicAuth whereas it's Oauth for other providers.
// It validates that auth credentials are provided if authReq is true.
func getScmClientWithTransport(provider Provider, slug string, authReq bool) (*scm.Client, error) { //nolint:gocognit
if authReq && (provider.Username == "" || provider.Password == "") {
return nil, usererror.BadRequest("SCM provider authentication credentials missing")
}
var c *scm.Client
var err error
var transport http.RoundTripper
switch provider.Type {
case "":
return nil, errors.New("scm provider can not be empty")
case ProviderTypeGitHub:
if provider.Host != "" {
c, err = github.New(provider.Host)
if err != nil {
return nil, fmt.Errorf("scm provider Host invalid: %w", err)
}
} else {
c = github.NewDefault()
}
transport = oauthTransport(provider.Password, oauth2.SchemeBearer)
case ProviderTypeGitLab:
if provider.Host != "" {
c, err = gitlab.New(provider.Host)
if err != nil {
return nil, fmt.Errorf("scm provider Host invalid: %w", err)
}
} else {
c = gitlab.NewDefault()
}
transport = oauthTransport(provider.Password, oauth2.SchemeBearer)
case ProviderTypeBitbucket:
if provider.Host != "" {
c, err = bitbucket.New(provider.Host)
if err != nil {
return nil, fmt.Errorf("scm provider Host invalid: %w", err)
}
} else {
c = bitbucket.NewDefault()
}
transport = basicAuthTransport(provider.Username, provider.Password)
case ProviderTypeStash:
if provider.Host != "" {
c, err = stash.New(provider.Host)
if err != nil {
return nil, fmt.Errorf("scm provider Host invalid: %w", err)
}
} else {
c = stash.NewDefault()
}
transport = oauthTransport(provider.Password, oauth2.SchemeBearer)
case ProviderTypeGitea:
if provider.Host == "" {
return nil, errors.New("scm provider Host missing")
}
c, err = gitea.New(provider.Host)
if err != nil {
return nil, fmt.Errorf("scm provider Host invalid: %w", err)
}
transport = authHeaderTransport(provider.Password)
case ProviderTypeGogs:
if provider.Host == "" {
return nil, errors.New("scm provider Host missing")
}
c, err = gogs.New(provider.Host)
if err != nil {
return nil, fmt.Errorf("scm provider Host invalid: %w", err)
}
transport = oauthTransport(provider.Password, oauth2.SchemeToken)
case ProviderTypeAzure:
org, project, err := extractOrgAndProjectFromSlug(slug)
if err != nil {
return nil, fmt.Errorf("invalid slug format: %w", err)
}
if provider.Host != "" {
c, err = azure.New(provider.Host, org, project)
if err != nil {
return nil, fmt.Errorf("scm provider Host invalid: %w", err)
}
} else {
c = azure.NewDefault(org, project)
}
transport = basicAuthTransport(provider.Username, provider.Password)
default:
return nil, fmt.Errorf("unsupported scm provider: %s", provider)
}
c.Client = &http.Client{
Transport: transport,
CheckRedirect: func(*http.Request, []*http.Request) error {
return http.ErrUseLastResponse
},
}
return c, nil
}
func LoadRepositoryFromProvider(
ctx context.Context,
provider Provider,
repoSlug string,
) (RepositoryInfo, Provider, error) {
if repoSlug == "" {
return RepositoryInfo{}, provider, usererror.BadRequest("Provider repository identifier is missing")
}
scmClient, err := getScmClientWithTransport(provider, repoSlug, false)
if err != nil {
return RepositoryInfo{}, provider, usererror.BadRequestf("Could not create client: %s", err)
}
// Augment user information if it's not provided for certain vendors.
if provider.Password != "" && provider.Username == "" {
user, _, err := scmClient.Users.Find(ctx)
if err != nil {
return RepositoryInfo{}, provider, usererror.BadRequestf("Could not find user: %s", err)
}
provider.Username = user.Login
}
if provider.Type == ProviderTypeAzure {
repoSlug, err = extractRepoFromSlug(repoSlug)
if err != nil {
return RepositoryInfo{}, provider, usererror.BadRequestf("Invalid slug format: %s", err)
}
}
scmRepo, scmResp, err := scmClient.Repositories.Find(ctx, repoSlug)
if err = convertSCMError(provider, repoSlug, scmResp, err); err != nil {
return RepositoryInfo{}, provider, err
}
return RepositoryInfo{
Space: scmRepo.Namespace,
Identifier: scmRepo.Name,
CloneURL: scmRepo.Clone,
IsPublic: !scmRepo.Private,
DefaultBranch: scmRepo.Branch,
}, provider, nil
}
//nolint:gocognit
func LoadRepositoriesFromProviderSpace(
ctx context.Context,
provider Provider,
spaceSlug string,
) ([]RepositoryInfo, Provider, error) {
if spaceSlug == "" {
return nil, provider, usererror.BadRequest("Provider space identifier is missing")
}
var err error
scmClient, err := getScmClientWithTransport(provider, spaceSlug, false)
if err != nil {
return nil, provider, usererror.BadRequestf("Could not create client: %s", err)
}
opts := scm.ListOptions{
Size: 100,
}
// Augment user information if it's not provided for certain vendors.
if provider.Password != "" && provider.Username == "" {
user, _, err := scmClient.Users.Find(ctx)
if err != nil {
return nil, provider, usererror.BadRequestf("Could not find user: %s", err)
}
provider.Username = user.Login
}
var optsv2 scm.RepoListOptions
listv2 := false
if provider.Type == ProviderTypeGitHub {
listv2 = true
optsv2 = scm.RepoListOptions{
ListOptions: opts,
RepoSearchTerm: scm.RepoSearchTerm{
User: spaceSlug,
},
}
}
repos := make([]RepositoryInfo, 0)
var scmRepos []*scm.Repository
var scmResp *scm.Response
for {
if listv2 {
scmRepos, scmResp, err = scmClient.Repositories.ListV2(ctx, optsv2)
if err = convertSCMError(provider, spaceSlug, scmResp, err); err != nil {
return nil, provider, err
}
optsv2.Page = scmResp.Page.Next
optsv2.URL = scmResp.Page.NextURL
} else {
scmRepos, scmResp, err = scmClient.Repositories.List(ctx, opts)
if err = convertSCMError(provider, spaceSlug, scmResp, err); err != nil {
return nil, provider, err
}
opts.Page = scmResp.Page.Next
opts.URL = scmResp.Page.NextURL
}
if len(scmRepos) == 0 {
break
}
for _, scmRepo := range scmRepos {
// in some cases the namespace filter isn't working (e.g. Gitlab)
if !strings.EqualFold(scmRepo.Namespace, spaceSlug) {
continue
}
repos = append(repos, RepositoryInfo{
Space: scmRepo.Namespace,
Identifier: scmRepo.Name,
CloneURL: scmRepo.Clone,
IsPublic: !scmRepo.Private,
DefaultBranch: scmRepo.Branch,
})
}
if listv2 {
if optsv2.Page == 0 && optsv2.URL == "" {
break
}
} else {
if opts.Page == 0 && opts.URL == "" {
break
}
}
}
return repos, provider, nil
}
func extractOrgAndProjectFromSlug(slug string) (string, string, error) {
res := strings.Split(slug, "/")
if len(res) < 2 {
return "", "", fmt.Errorf("organization or project info missing")
}
if len(res) > 3 {
return "", "", fmt.Errorf("too many parts")
}
return res[0], res[1], nil
}
func extractRepoFromSlug(slug string) (string, error) {
res := strings.Split(slug, "/")
if len(res) == 3 {
return res[2], nil
}
return "", fmt.Errorf("repo name missing")
}
func convertSCMError(provider Provider, slug string, r *scm.Response, err error) error {
if err == nil {
return nil
}
if r == nil {
if provider.Host != "" {
return usererror.BadRequestf("Failed to make HTTP request to %s (host=%s): %s",
provider.Type, provider.Host, err)
}
return usererror.BadRequestf("Failed to make HTTP request to %s: %s",
provider.Type, err)
}
switch r.Status {
case http.StatusMovedPermanently, http.StatusFound, http.StatusSeeOther,
http.StatusTemporaryRedirect, http.StatusPermanentRedirect:
return usererror.BadRequestf("Redirects are not supported (HTTP status %d)", r.Status)
case http.StatusNotFound:
return usererror.BadRequestf("Couldn't find %s at %s: %s",
slug, provider.Type, err.Error())
case http.StatusUnauthorized:
return usererror.BadRequestf("Bad credentials provided for %s at %s: %s",
slug, provider.Type, err.Error())
case http.StatusForbidden:
return usererror.BadRequestf("Access denied to %s at %s: %s",
slug, provider.Type, err.Error())
default:
return usererror.BadRequestf("Failed to fetch %s from %s (HTTP status %d): %s",
slug, provider.Type, r.Status, err.Error())
}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/importer/base_transport.go | app/services/importer/base_transport.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package importer
import (
"context"
"fmt"
"net"
"net/http"
"time"
"github.com/harness/gitness/app/api/usererror"
)
var baseTransport http.RoundTripper
func init() {
tr := http.DefaultTransport.(*http.Transport).Clone() //nolint:errcheck
// the client verifies the server's certificate chain and host name
tr.TLSClientConfig.InsecureSkipVerify = false
// Overwrite DialContext method to block connections to localhost and private networks.
tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
// create basic net.Dialer (Similar to what is used by http.DefaultTransport)
dialer := &net.Dialer{Timeout: 30 * time.Second}
// dial connection using
con, err := dialer.DialContext(ctx, network, addr)
if err != nil {
return nil, err
}
tcpAddr, ok := con.RemoteAddr().(*net.TCPAddr)
if !ok { // not expected to happen, but to be sure
_ = con.Close()
return nil, fmt.Errorf("address resolved to a non-TCP address (original: '%s', resolved: '%s')",
addr, con.RemoteAddr())
}
if tcpAddr.IP.IsLoopback() {
_ = con.Close()
return nil, usererror.BadRequestf("Loopback address is not allowed.")
}
if tcpAddr.IP.IsPrivate() {
_ = con.Close()
return nil, usererror.BadRequestf("Private network address is not allowed.")
}
return con, nil
}
baseTransport = tr
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/importer/job_reference_sync.go | app/services/importer/job_reference_sync.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package importer
import (
"bytes"
"context"
"crypto/rand"
"encoding/base32"
"encoding/json"
"fmt"
"strings"
"time"
"github.com/harness/gitness/app/bootstrap"
repoevents "github.com/harness/gitness/app/events/repo"
"github.com/harness/gitness/app/githook"
"github.com/harness/gitness/app/services/keywordsearch"
"github.com/harness/gitness/app/services/refcache"
"github.com/harness/gitness/app/store"
gitnessurl "github.com/harness/gitness/app/url"
"github.com/harness/gitness/errors"
"github.com/harness/gitness/git"
"github.com/harness/gitness/git/api"
"github.com/harness/gitness/job"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/rs/zerolog/log"
)
const (
refSyncJobMaxRetries = 2
refSyncJobMaxDuration = 45 * time.Minute
refSyncJobType = "reference_sync"
)
type JobReferenceSync struct {
defaultBranch string
urlProvider gitnessurl.Provider
git git.Interface
repoStore store.RepoStore
repoFinder refcache.RepoFinder
scheduler *job.Scheduler
indexer keywordsearch.Indexer
eventReporter *repoevents.Reporter
}
var _ job.Handler = (*JobReferenceSync)(nil)
type RefSpecType string
const (
RefSpecTypeReference RefSpecType = "reference"
RefSpecTypeDefaultBranch RefSpecType = "default_branch"
RefSpecTypeAllBranches RefSpecType = "all_branches"
RefSpecTypeBranchesAndTags RefSpecType = "branches_and_tags"
)
type ReferenceSyncInput struct {
SourceRepoID int64 `json:"source_repo_id"`
TargetRepoID int64 `json:"target_repo_id"`
RefSpecType RefSpecType `json:"ref_spec_type"`
SourceRef string `json:"source_ref"`
TargetRef string `json:"target_ref"`
}
func (r *JobReferenceSync) Register(executor *job.Executor) error {
return executor.Register(refSyncJobType, r)
}
// Run starts a background job that imports the provided repository from the provided clone URL.
func (r *JobReferenceSync) Run(
ctx context.Context,
sourceRepoID, targetRepoID int64,
refSpecType RefSpecType,
sourceRef, targetRef string,
) error {
var idRaw [10]byte
if _, err := rand.Read(idRaw[:]); err != nil {
return fmt.Errorf("could not generate repository sync job ID: %w", err)
}
id := base32.StdEncoding.EncodeToString(idRaw[:])
jobDef, err := r.getJobDef(id, ReferenceSyncInput{
SourceRepoID: sourceRepoID,
TargetRepoID: targetRepoID,
RefSpecType: refSpecType,
SourceRef: sourceRef,
TargetRef: targetRef,
})
if err != nil {
return fmt.Errorf("could not get repository sync job definition: %w", err)
}
return r.scheduler.RunJob(ctx, jobDef)
}
func (r *JobReferenceSync) getJobDef(jobUID string, input ReferenceSyncInput) (job.Definition, error) {
data, err := json.Marshal(input)
if err != nil {
return job.Definition{}, fmt.Errorf("failed to marshal repository sync job input json: %w", err)
}
data = bytes.TrimSpace(data)
return job.Definition{
UID: jobUID,
Type: refSyncJobType,
MaxRetries: refSyncJobMaxRetries,
Timeout: refSyncJobMaxDuration,
Data: string(data),
}, nil
}
func (r *JobReferenceSync) getJobInput(data string) (ReferenceSyncInput, error) {
var input ReferenceSyncInput
err := json.NewDecoder(strings.NewReader(data)).Decode(&input)
if err != nil {
return ReferenceSyncInput{}, fmt.Errorf("failed to unmarshal repository sync job input json: %w", err)
}
return input, nil
}
// Handle is repository import background job handler.
//
//nolint:gocognit // refactor if needed.
func (r *JobReferenceSync) Handle(ctx context.Context, data string, _ job.ProgressReporter) (string, error) {
systemPrincipal := bootstrap.NewSystemServiceSession().Principal
input, err := r.getJobInput(data)
if err != nil {
return "", err
}
repoSource, err := r.repoFinder.FindByID(ctx, input.SourceRepoID)
if err != nil {
return "", fmt.Errorf("failed to find source repo by id: %w", err)
}
repoTarget, err := r.repoFinder.FindByID(ctx, input.TargetRepoID)
if err != nil {
return "", fmt.Errorf("failed to find target repo by id: %w", err)
}
writeParams, err := r.createRPCWriteParams(ctx, systemPrincipal, repoTarget.ID, repoTarget.GitUID)
if err != nil {
return "", fmt.Errorf("failed to create rpc write params: %w", err)
}
log := log.Ctx(ctx).With().
Int64("repo_source.id", repoSource.ID).
Str("repo_source.path", repoSource.Path).
Int64("repo_target.id", repoTarget.ID).
Str("repo_target.path", repoTarget.Path).
Logger()
var refSpec []string
var needIndexing bool
switch input.RefSpecType {
case RefSpecTypeReference:
refSpec = []string{input.SourceRef + ":" + input.TargetRef}
needIndexing = input.TargetRef == api.BranchPrefix+repoTarget.DefaultBranch
case RefSpecTypeDefaultBranch:
refSpec = []string{
api.BranchPrefix + repoSource.DefaultBranch + ":" + api.BranchPrefix + repoTarget.DefaultBranch,
}
needIndexing = true
case RefSpecTypeAllBranches:
refSpec = []string{api.BranchPrefix + "*:" + api.BranchPrefix + "*"}
needIndexing = true
case RefSpecTypeBranchesAndTags:
refSpec = []string{
api.BranchPrefix + "*:" + api.BranchPrefix + "*",
api.TagPrefix + "*:" + api.TagPrefix + "*",
}
needIndexing = true
}
_, err = r.git.SyncRepository(ctx, &git.SyncRepositoryParams{
WriteParams: writeParams,
Source: repoSource.GitUID,
CreateIfNotExists: false,
RefSpecs: refSpec,
DefaultBranch: repoTarget.DefaultBranch,
})
if err != nil {
return "", fmt.Errorf("failed to sync repository: %w", err)
}
repoTargetFull, err := r.repoStore.Find(ctx, repoTarget.ID)
if err != nil {
return "", fmt.Errorf("failed to find repository: %w", err)
}
// Clear the git import status if set
errNoChange := errors.New("no change")
repoTargetFull, err = r.repoStore.UpdateOptLock(ctx, repoTargetFull, func(r *types.Repository) error {
if r.State != enum.RepoStateGitImport {
return errNoChange
}
r.State = enum.RepoStateActive
return nil
})
if err != nil && !errors.Is(err, errNoChange) {
return "", fmt.Errorf("failed to update repo state: %w", err)
}
r.repoFinder.MarkChanged(ctx, repoTargetFull.Core())
if needIndexing {
err = r.indexer.Index(ctx, repoTargetFull)
if err != nil {
log.Warn().Err(err).Msg("failed to index repository")
}
}
log.Info().Msg("completed repository reference sync job")
return "", nil
}
func (r *JobReferenceSync) createRPCWriteParams(
ctx context.Context,
principal types.Principal,
repoID int64,
repoGitUID string,
) (git.WriteParams, error) {
envVars, err := githook.GenerateEnvironmentVariables(
ctx,
r.urlProvider.GetInternalAPIURL(ctx),
repoID,
principal.ID,
true,
true,
)
if err != nil {
return git.WriteParams{}, fmt.Errorf("failed to generate git hook environment variables: %w", err)
}
return git.WriteParams{
RepoUID: repoGitUID,
Actor: git.Identity{
Name: principal.DisplayName,
Email: principal.Email,
},
EnvVars: envVars,
}, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/aitaskevent/wire.go | app/services/aitaskevent/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aitaskevent
import (
"context"
aitaskevents "github.com/harness/gitness/app/events/aitask"
"github.com/harness/gitness/app/gitspace/orchestrator"
"github.com/harness/gitness/app/services/gitspace"
"github.com/harness/gitness/app/services/gitspaceevent"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/events"
"github.com/google/wire"
)
// WireSet provides a wire set for this package.
var WireSet = wire.NewSet(
ProvideService,
)
func ProvideService(
ctx context.Context,
config *gitspaceevent.Config,
aiTaskEventReaderFactory *events.ReaderFactory[*aitaskevents.Reader],
orchestrator orchestrator.Orchestrator,
gitspaceSvc *gitspace.Service,
aiTaskStore store.AITaskStore,
) (*Service, error) {
return NewService(
ctx,
config,
aiTaskEventReaderFactory,
orchestrator,
gitspaceSvc,
aiTaskStore,
)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/aitaskevent/service.go | app/services/aitaskevent/service.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aitaskevent
import (
"context"
"fmt"
"time"
aitaskevents "github.com/harness/gitness/app/events/aitask"
"github.com/harness/gitness/app/gitspace/orchestrator"
"github.com/harness/gitness/app/services/gitspace"
"github.com/harness/gitness/app/services/gitspaceevent"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/events"
"github.com/harness/gitness/stream"
)
const groupAITaskEvents = "gitness:aitask"
type Service struct {
config *gitspaceevent.Config
orchestrator orchestrator.Orchestrator
gitspaceSvc *gitspace.Service
aiTaskStore store.AITaskStore
}
func NewService(
ctx context.Context,
config *gitspaceevent.Config,
aiTaskEventReaderFactory *events.ReaderFactory[*aitaskevents.Reader],
orchestrator orchestrator.Orchestrator,
gitspaceSvc *gitspace.Service,
aiTaskStore store.AITaskStore,
) (*Service, error) {
if err := config.Sanitize(); err != nil {
return nil, fmt.Errorf("provided ai task event service config is invalid: %w", err)
}
service := &Service{
config: config,
orchestrator: orchestrator,
gitspaceSvc: gitspaceSvc,
aiTaskStore: aiTaskStore,
}
_, err := aiTaskEventReaderFactory.Launch(ctx, groupAITaskEvents, config.EventReaderName,
func(r *aitaskevents.Reader) error {
var idleTimeout = time.Duration(config.TimeoutInMins) * time.Minute
r.Configure(
stream.WithConcurrency(config.Concurrency),
stream.WithHandlerOptions(
stream.WithIdleTimeout(idleTimeout),
stream.WithMaxRetries(config.MaxRetries),
))
_ = r.RegisterAITaskEvent(service.handleAITaskEvent)
return nil
})
if err != nil {
return nil, fmt.Errorf("failed to launch ai task event reader: %w", err)
}
return service, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/aitaskevent/handler.go | app/services/aitaskevent/handler.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aitaskevent
import (
"context"
"errors"
"fmt"
"time"
aitaskevents "github.com/harness/gitness/app/events/aitask"
"github.com/harness/gitness/events"
"github.com/harness/gitness/store"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
)
var ErrNilResource = errors.New("nil resource")
// handleAITaskEvent processes a single AI task event: it loads the AI task and
// its gitspace config, marks the task as running, dispatches to the handler
// matching the event type, and persists the resulting task state (error state
// plus message on failure).
func (s *Service) handleAITaskEvent(
	ctx context.Context,
	event *events.Event[*aitaskevents.AITaskEventPayload],
) error {
	logr := log.With().Str("event", string(event.Payload.Type)).Logger()
	logr.Debug().Msgf("Received AI task event, identifier: %s", event.Payload.AITaskIdentifier)
	payload := event.Payload

	// bound the fetch-with-retry by the configured timeout.
	ctxWithTimeOut, cancel := context.WithTimeout(ctx, time.Duration(s.config.TimeoutInMins)*time.Minute)
	defer cancel()

	aiTask, err := s.fetchWithRetry(
		ctxWithTimeOut,
		payload.AITaskIdentifier,
		payload.AITaskSpaceID,
	)
	if err != nil {
		// FIX: aiTask is nil when fetchWithRetry fails - the original code
		// dereferenced it here. Log the identifier from the payload instead.
		logr.Error().Err(err).Msgf("failed to find AI task: %s", payload.AITaskIdentifier)
		return fmt.Errorf("failed to get AI task: %w", err)
	}
	if aiTask == nil {
		logr.Error().Msg("failed to find AI task: ai task is nil")
		return fmt.Errorf("failed to find ai task: %w", ErrNilResource)
	}

	// mark ai task as running
	aiTask.State = enum.AITaskStateRunning
	err = s.aiTaskStore.Update(ctx, aiTask)
	if err != nil {
		return fmt.Errorf("failed to update aiTask state: %w", err)
	}

	gitspaceConfig, err := s.gitspaceSvc.FindWithLatestInstanceByID(ctx, aiTask.GitspaceConfigID, false)
	if err != nil {
		return fmt.Errorf("failed to get gitspace config: %w", err)
	}
	if gitspaceConfig == nil {
		return fmt.Errorf("failed to find gitspace config: %w", ErrNilResource)
	}

	// Handle the AI task event based on the task state or other logic
	logr.Info().Msgf("Processing AI task %s event: %s", aiTask.Identifier, payload.Type)
	var handleEventErr error
	switch payload.Type {
	case enum.AITaskEventStart:
		handleEventErr = s.handleStartEvent(ctx, *aiTask, *gitspaceConfig, logr)
	case enum.AITaskEventStop:
		handleEventErr = s.handleStopEvent(ctx, payload)
	default:
		handleEventErr = fmt.Errorf("invalid AI task event type: %s", payload.Type)
	}

	// state is still "running" from above (handlers receive a value copy);
	// only overwrite it on failure.
	if handleEventErr != nil {
		logr.Error().Err(handleEventErr).Msgf("failed to handle AI task event: %s, aiTask ID: %s",
			payload.Type, aiTask.Identifier)
		aiTask.State = enum.AITaskStateError
		errStr := handleEventErr.Error()
		aiTask.ErrorMessage = &errStr
	}

	err = s.aiTaskStore.Update(ctx, aiTask)
	if err != nil {
		return fmt.Errorf("failed to update aiTask state: %w", err)
	}
	return nil
}
// fetchWithRetry tries to fetch the AI task from the db, retrying only when the
// task is not (yet) found - e.g. the event arrived before the db write landed.
// Any other lookup error is terminal. The wait between attempts respects ctx
// cancellation, and no wait is performed after the final attempt.
func (s *Service) fetchWithRetry(ctx context.Context, aiTaskID string, spaceID int64) (*types.AITask, error) {
	const attempts = 3
	for i := 0; i < attempts; i++ {
		aiTask, err := s.aiTaskStore.FindByIdentifier(ctx, spaceID, aiTaskID)
		if err == nil {
			return aiTask, nil
		}
		if !errors.Is(err, store.ErrResourceNotFound) {
			// if error is not resource not found, return error
			return nil, err
		}
		if i == attempts-1 {
			break // don't sleep after the final attempt
		}
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(3 * time.Second):
		}
	}
	return nil, fmt.Errorf("failed to find ai task: %w", ErrNilResource)
}
// handleStartEvent triggers the AI task via the orchestrator, provided the task
// is in a startable state and the backing gitspace is running.
func (s *Service) handleStartEvent(
	ctx context.Context,
	aiTask types.AITask,
	gitspaceConfig types.GitspaceConfig,
	logr zerolog.Logger,
) error {
	switch aiTask.State {
	case enum.AITaskStateCompleted:
		// nothing left to do for a completed task.
		logr.Debug().Msgf("ai task: %s already completed", aiTask.Identifier)
		return nil
	case enum.AITaskStateUninitialized, enum.AITaskStateRunning, enum.AITaskStateError:
		// startable states - proceed.
		logr.Debug().Msgf("ai task: %s is starting from %s state", aiTask.Identifier,
			aiTask.State)
	default:
		logr.Debug().Msgf("ai task: %s in invalid state %s", aiTask.Identifier, aiTask.State)
		return fmt.Errorf("ai task: %s in invalid state %s", aiTask.Identifier, aiTask.State)
	}

	// validate before triggering ai task
	if current := gitspaceConfig.State; current != enum.GitspaceStateRunning {
		return fmt.Errorf("gitspace is not running, current: %s, expected: %s", current,
			enum.GitspaceStateRunning)
	}

	return s.orchestrator.TriggerAITask(ctx, aiTask, gitspaceConfig)
}
// handleStopEvent is NOOP as we currently do not support stopping of ai task.
// The parameters are intentionally unused; the signature mirrors handleStartEvent
// so the dispatch switch in handleAITaskEvent stays uniform.
func (s *Service) handleStopEvent(ctx context.Context, eventPayload *aitaskevents.AITaskEventPayload) error {
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/auth/metadata.go | app/auth/metadata.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package auth
import (
"github.com/harness/gitness/app/jwt"
"github.com/harness/gitness/types/enum"
)
// Metadata is extra auth session information. ImpactsAuthorization indicates
// whether the metadata has to be taken into account during authorization checks.
type Metadata interface {
	ImpactsAuthorization() bool
}

// EmptyMetadata represents the state when the auth session doesn't have any extra metadata.
type EmptyMetadata struct{}

// ImpactsAuthorization always returns false - no metadata, no impact.
func (m *EmptyMetadata) ImpactsAuthorization() bool {
	return false
}

// TokenMetadata contains information about the token that was used during auth.
type TokenMetadata struct {
	TokenType enum.TokenType
	TokenID   int64
}

// ImpactsAuthorization returns false - the token only identifies the principal.
func (m *TokenMetadata) ImpactsAuthorization() bool {
	return false
}

// MembershipMetadata contains information about an ephemeral membership grant.
type MembershipMetadata struct {
	SpaceID int64
	Role    enum.MembershipRole
}

// ImpactsAuthorization returns true - an ephemeral membership takes precedence
// over the principal's persisted space memberships during authorization.
func (m *MembershipMetadata) ImpactsAuthorization() bool {
	return true
}

// AccessPermissionMetadata contains information about permissions per space.
type AccessPermissionMetadata struct {
	AccessPermissions *jwt.SubClaimsAccessPermissions
}

// ImpactsAuthorization returns true - the listed per-space permissions
// constrain what the session may do.
func (m *AccessPermissionMetadata) ImpactsAuthorization() bool {
	return true
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/auth/anonymous.go | app/auth/anonymous.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package auth
import (
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
// AnonymousPrincipal is an in-memory principal for users with no auth data.
// Authorizer is in charge of handling anonymous access.
// NOTE: the ID of -1 matches the sentinel used for anonymous JWT claims
// (see JWTAuthenticator.Authenticate).
var AnonymousPrincipal = types.Principal{
	ID:   -1,
	UID:  types.AnonymousPrincipalUID,
	Type: enum.PrincipalTypeUser,
}
// IsAnonymousSession reports whether the given session belongs to the anonymous principal.
func IsAnonymousSession(session *Session) bool {
	if session == nil {
		return false
	}
	return session.Principal.IsAnonymous()
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/auth/session.go | app/auth/session.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package auth
import (
"github.com/harness/gitness/types"
)
// Session contains information of the authenticated principal and auth related metadata.
type Session struct {
	// Principal is the authenticated principal.
	Principal types.Principal

	// Metadata contains auth related information (access grants, tokenId, sshKeyId, ...).
	// It may impact authorization decisions - see Metadata.ImpactsAuthorization.
	Metadata Metadata
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/auth/authz/wire.go | app/auth/authz/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package authz
import (
"time"
"github.com/harness/gitness/app/services/publicaccess"
"github.com/harness/gitness/app/services/refcache"
"github.com/harness/gitness/app/store"
"github.com/google/wire"
)
// WireSet provides a wire set for this package.
// It exposes the membership-based Authorizer and its backing permission cache.
var WireSet = wire.NewSet(
	ProvideAuthorizer,
	ProvidePermissionCache,
)
// ProvideAuthorizer provides the membership-based authorizer.
func ProvideAuthorizer(
	pCache PermissionCache,
	spaceFinder refcache.SpaceFinder,
	publicAccess publicaccess.Service,
) Authorizer {
	authorizer := NewMembershipAuthorizer(pCache, spaceFinder, publicAccess)
	return authorizer
}
// ProvidePermissionCache provides the permission cache used by the authorizer.
func ProvidePermissionCache(
	spaceFinder refcache.SpaceFinder,
	membershipStore store.MembershipStore,
) PermissionCache {
	// short-lived entries so membership changes propagate quickly.
	const permissionCacheTimeout = 15 * time.Second
	return NewPermissionCache(spaceFinder, membershipStore, permissionCacheTimeout)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/auth/authz/membership_cache.go | app/auth/authz/membership_cache.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package authz
import (
"context"
"errors"
"fmt"
"time"
"github.com/harness/gitness/app/paths"
"github.com/harness/gitness/app/services/refcache"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/cache"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"golang.org/x/exp/slices"
)
// PermissionCacheKey uniquely identifies a single permission check:
// a principal requesting a permission on a space (by reference).
type PermissionCacheKey struct {
	PrincipalID int64
	SpaceRef    string
	Permission  enum.Permission
}

// PermissionCache caches permission check results per PermissionCacheKey.
type PermissionCache cache.Cache[PermissionCacheKey, bool]
// NewPermissionCache creates a permission cache whose entries expire after cacheDuration.
func NewPermissionCache(
	spaceFinder refcache.SpaceFinder,
	membershipStore store.MembershipStore,
	cacheDuration time.Duration,
) PermissionCache {
	getter := permissionCacheGetter{
		spaceFinder:     spaceFinder,
		membershipStore: membershipStore,
	}
	return cache.New[PermissionCacheKey, bool](getter, cacheDuration)
}

// permissionCacheGetter resolves permission checks on cache misses.
type permissionCacheGetter struct {
	spaceFinder     refcache.SpaceFinder
	membershipStore store.MembershipStore
}
// Find resolves a cache miss: starting from the space identified by the key's
// SpaceRef (or its first existing ancestor), it walks up the space hierarchy
// and reports whether the principal holds a membership role granting the
// requested permission. Permissions are inherited from ancestor spaces.
func (g permissionCacheGetter) Find(ctx context.Context, key PermissionCacheKey) (bool, error) {
	spaceRef := key.SpaceRef
	principalID := key.PrincipalID
	// Find the first existing space.
	space, err := g.findFirstExistingSpace(ctx, spaceRef)
	// authz fails if no active space is found on the path; admins can still operate on deleted top-level spaces.
	if errors.Is(err, gitness_store.ErrResourceNotFound) {
		return false, nil
	}
	if err != nil {
		return false, fmt.Errorf("failed to find an existing space on path '%s': %w", spaceRef, err)
	}
	// limit the depth to be safe (e.g. root/space1/space2 => maxDepth of 3)
	maxDepth := len(paths.Segments(spaceRef))
	for range maxDepth {
		// Find the membership in the current space.
		membership, err := g.membershipStore.Find(ctx, types.MembershipKey{
			SpaceID:     space.ID,
			PrincipalID: principalID,
		})
		// a missing membership is not an error - it just means we continue with the parent space.
		if err != nil && !errors.Is(err, gitness_store.ErrResourceNotFound) {
			return false, fmt.Errorf("failed to find membership: %w", err)
		}
		// If the membership is defined in the current space, check if the user has the required permission.
		if membership != nil &&
			roleHasPermission(membership.Role, key.Permission) {
			return true, nil
		}
		// If membership with the requested permission has not been found in the current space,
		// move to the parent space, if any.
		if space.ParentID == 0 {
			return false, nil
		}
		space, err = g.spaceFinder.FindByID(ctx, space.ParentID)
		if err != nil {
			return false, fmt.Errorf("failed to find parent space with id %d: %w", space.ParentID, err)
		}
	}
	return false, nil
}
// roleHasPermission reports whether the given membership role grants the permission.
// NOTE(review): relies on slices.BinarySearch, which assumes role.Permissions()
// returns a sorted slice - confirm against the enum package.
func roleHasPermission(role enum.MembershipRole, permission enum.Permission) bool {
	_, hasRole := slices.BinarySearch(role.Permissions(), permission)
	return hasRole
}
// findFirstExistingSpace returns the space identified by spaceRef or - since
// permissions are inherited - its first existing ancestor space. It returns
// gitness_store.ErrResourceNotFound once the path is exhausted.
func (g permissionCacheGetter) findFirstExistingSpace(ctx context.Context, spaceRef string) (*types.SpaceCore, error) {
	for {
		space, err := g.spaceFinder.FindByRef(ctx, spaceRef)
		if err == nil {
			return space, nil
		}
		if !errors.Is(err, gitness_store.ErrResourceNotFound) {
			return nil, fmt.Errorf("failed to find space '%s': %w", spaceRef, err)
		}

		// the space doesn't exist - check its parent space as permissions are inherited.
		// FIX: assign to a fresh variable so a DisectLeaf failure can still report
		// the path that failed (the original overwrote spaceRef before formatting
		// the error, printing the post-failure value instead).
		parentRef, _, err := paths.DisectLeaf(spaceRef)
		if err != nil {
			return nil, fmt.Errorf("failed to disect path '%s': %w", spaceRef, err)
		}
		if parentRef == "" {
			return nil, gitness_store.ErrResourceNotFound
		}
		spaceRef = parentRef
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/auth/authz/membership.go | app/auth/authz/membership.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package authz
import (
"context"
"fmt"
"github.com/harness/gitness/app/auth"
"github.com/harness/gitness/app/paths"
"github.com/harness/gitness/app/services/publicaccess"
"github.com/harness/gitness/app/services/refcache"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/rs/zerolog/log"
"golang.org/x/exp/slices"
)
// compile-time check that MembershipAuthorizer satisfies the Authorizer interface.
var _ Authorizer = (*MembershipAuthorizer)(nil)

// MembershipAuthorizer authorizes access based on space memberships,
// with public-access and session-metadata overrides handled in Check.
type MembershipAuthorizer struct {
	permissionCache PermissionCache
	spaceFinder     refcache.SpaceFinder
	publicAccess    publicaccess.Service
}
// NewMembershipAuthorizer creates a membership-based authorizer.
func NewMembershipAuthorizer(
	permissionCache PermissionCache,
	spaceFinder refcache.SpaceFinder,
	publicAccess publicaccess.Service,
) *MembershipAuthorizer {
	authorizer := &MembershipAuthorizer{
		permissionCache: permissionCache,
		spaceFinder:     spaceFinder,
		publicAccess:    publicAccess,
	}
	return authorizer
}
// Check determines whether the session principal may perform the permission on
// the resource within the scope. Order of evaluation: public access, system
// admin, resource-type scoping, session metadata (ephemeral membership /
// access permissions), and finally the membership permission cache.
func (a *MembershipAuthorizer) Check(
	ctx context.Context,
	session *auth.Session,
	scope *types.Scope,
	resource *types.Resource,
	permission enum.Permission,
) (bool, error) {
	publicAccessAllowed, err := CheckPublicAccess(ctx, a.publicAccess, scope, resource, permission)
	if err != nil {
		return false, fmt.Errorf("failed to check public access: %w", err)
	}
	if publicAccessAllowed {
		return true, nil
	}

	log.Ctx(ctx).Debug().Msgf(
		"[MembershipAuthorizer] %s with id '%d' requests %s for %s '%s' in scope %#v with metadata %#v",
		session.Principal.Type,
		session.Principal.ID,
		permission,
		resource.Type,
		resource.Identifier,
		scope,
		session.Metadata,
	)

	if session.Principal.Admin {
		return true, nil // system admin can call any API
	}

	var spacePath string

	//nolint:exhaustive // we want to fail on anything else
	switch resource.Type {
	case enum.ResourceTypeSpace:
		spacePath = paths.Concatenate(scope.SpacePath, resource.Identifier)

	// all other space-scoped resource types resolve permissions against the scope's space path.
	case enum.ResourceTypeRepo,
		enum.ResourceTypeServiceAccount,
		enum.ResourceTypePipeline,
		enum.ResourceTypeSecret,
		enum.ResourceTypeConnector,
		enum.ResourceTypeTemplate,
		enum.ResourceTypeGitspace,
		enum.ResourceTypeInfraProvider,
		enum.ResourceTypeRegistry:
		spacePath = scope.SpacePath

	case enum.ResourceTypeUser:
		// a user is allowed to edit themselves
		if resource.Identifier == session.Principal.UID &&
			permission == enum.PermissionUserEdit {
			return true, nil
		}
		// user can see all other users in the system.
		if permission == enum.PermissionUserView {
			return true, nil
		}
		// everything else is reserved for admins only (like operations on users other than yourself, or setting admin)
		return false, nil

	// Service operations aren't exposed to users
	case enum.ResourceTypeService:
		return false, nil

	default:
		return false, nil
	}

	// ephemeral membership overrides any other space memberships of the principal
	if membershipMetadata, ok := session.Metadata.(*auth.MembershipMetadata); ok {
		return a.checkWithMembershipMetadata(ctx, membershipMetadata, spacePath, permission)
	}

	// accessPermissionMetadata contains the access permissions of per space
	if accessPermissionMetadata, ok := session.Metadata.(*auth.AccessPermissionMetadata); ok {
		return a.checkWithAccessPermissionMetadata(ctx, accessPermissionMetadata, spacePath, permission)
	}

	// ensure we aren't bypassing unknown metadata with impact on authorization
	if session.Metadata != nil && session.Metadata.ImpactsAuthorization() {
		return false, fmt.Errorf("session contains unknown metadata that impacts authorization: %T", session.Metadata)
	}

	return a.permissionCache.Get(
		ctx, PermissionCacheKey{
			PrincipalID: session.Principal.ID,
			SpaceRef:    spacePath,
			Permission:  permission,
		},
	)
}
// CheckAll reports whether ALL of the provided permission checks pass.
// It short-circuits on the first error or denied check.
func (a *MembershipAuthorizer) CheckAll(
	ctx context.Context, session *auth.Session,
	permissionChecks ...types.PermissionCheck,
) (bool, error) {
	for _, pc := range permissionChecks {
		permitted, err := a.Check(ctx, session, &pc.Scope, &pc.Resource, pc.Permission)
		if err != nil {
			return false, err
		}
		if !permitted {
			return false, nil
		}
	}
	return true, nil
}
// checkWithMembershipMetadata checks access using the ephemeral membership
// provided in the session metadata: the requested space must lie within the
// membership's space, and the membership role must grant the permission.
func (a *MembershipAuthorizer) checkWithMembershipMetadata(
	ctx context.Context,
	membershipMetadata *auth.MembershipMetadata,
	requestedSpacePath string,
	requestedPermission enum.Permission,
) (bool, error) {
	space, err := a.spaceFinder.FindByID(ctx, membershipMetadata.SpaceID)
	if err != nil {
		return false, fmt.Errorf("failed to find space: %w", err)
	}

	switch {
	case !paths.IsAncesterOf(space.Path, requestedSpacePath):
		return false, fmt.Errorf(
			"requested permission scope '%s' is outside of ephemeral membership scope '%s'",
			requestedSpacePath,
			space.Path,
		)
	case !roleHasPermission(membershipMetadata.Role, requestedPermission):
		return false, fmt.Errorf(
			"requested permission '%s' is outside of ephemeral membership role '%s'",
			requestedPermission,
			membershipMetadata.Role,
		)
	default:
		// access is granted by ephemeral membership
		return true, nil
	}
}
// checkWithAccessPermissionMetadata checks access using the per-space access
// permissions provided in the session metadata. Access is granted only if the
// metadata lists the requested permission for the requested space.
func (a *MembershipAuthorizer) checkWithAccessPermissionMetadata(
	ctx context.Context,
	accessPermissionMetadata *auth.AccessPermissionMetadata,
	requestedSpacePath string,
	requestedPermission enum.Permission,
) (bool, error) {
	space, err := a.spaceFinder.FindByRef(ctx, requestedSpacePath)
	if err != nil {
		return false, fmt.Errorf("failed to find space by ref: %w", err)
	}

	// NOTE: ranging over a nil Permissions slice is a no-op, so the explicit
	// nil check the original performed (returning the identical error) was
	// redundant and has been removed.
	for _, accessPermission := range accessPermissionMetadata.AccessPermissions.Permissions {
		if space.ID == accessPermission.SpaceID && slices.Contains(accessPermission.Permissions, requestedPermission) {
			return true, nil
		}
	}

	return false, fmt.Errorf("no %s permission provided", requestedPermission)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/auth/authz/authz.go | app/auth/authz/authz.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package authz
import (
"context"
"errors"
"github.com/harness/gitness/app/auth"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
var (
	// ErrNoPermissionCheckProvided is error that is thrown if no permission checks are provided.
	ErrNoPermissionCheckProvided = errors.New("no permission checks provided")
)

// Authorizer abstraction of an entity responsible for authorizing access to resources.
type Authorizer interface {
	// Check determines whether the principal of the current session with the
	// provided metadata has the permission to execute the action on the
	// resource within the scope.
	// Returns:
	//   (true, nil)  - the action is permitted
	//   (false, nil) - the action is not permitted
	//   (false, err) - an error occurred while performing the permission check
	//                  and the action should be denied
	Check(ctx context.Context,
		session *auth.Session,
		scope *types.Scope,
		resource *types.Resource,
		permission enum.Permission) (bool, error)

	// CheckAll determines whether the principal of the current session with the
	// provided metadata has the permission to execute ALL the actions on the
	// resources within the scope.
	// Returns:
	//   (true, nil)  - all requested actions are permitted
	//   (false, nil) - at least one requested action is not permitted
	//   (false, err) - an error occurred while performing the permission check
	//                  and all actions should be denied
	CheckAll(ctx context.Context,
		session *auth.Session,
		permissionChecks ...types.PermissionCheck) (bool, error)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/auth/authz/public_access.go | app/auth/authz/public_access.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package authz
import (
"context"
"fmt"
"github.com/harness/gitness/app/paths"
"github.com/harness/gitness/app/services/publicaccess"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
// CheckPublicAccess checks if the requested permission is covered by public
// access for the provided scope and resource. Only view-style permissions
// (and artifact downloads for registries) can ever be granted publicly; all
// other permissions fall through to the membership check.
func CheckPublicAccess(
	ctx context.Context,
	publicAccess publicaccess.Service,
	scope *types.Scope,
	resource *types.Resource,
	permission enum.Permission,
) (bool, error) {
	var pubResType enum.PublicResourceType
	var pubResPath string

	//nolint:exhaustive
	switch resource.Type {
	case enum.ResourceTypeSpace:
		pubResType = enum.PublicResourceTypeSpace
		pubResPath = paths.Concatenate(scope.SpacePath, resource.Identifier)

	case enum.ResourceTypeRepo:
		if resource.Identifier != "" {
			pubResType = enum.PublicResourceTypeRepo
			pubResPath = paths.Concatenate(scope.SpacePath, resource.Identifier)
		} else { // for spaceScope checks
			pubResType = enum.PublicResourceTypeSpace
			pubResPath = scope.SpacePath
		}

	case enum.ResourceTypePipeline:
		pubResType = enum.PublicResourceTypeRepo
		pubResPath = paths.Concatenate(scope.SpacePath, scope.Repo)

	case enum.ResourceTypeRegistry:
		if resource.Identifier != "" {
			pubResType = enum.PublicResourceTypeRegistry
			pubResPath = paths.Concatenate(scope.SpacePath, resource.Identifier)
		} else { // for spaceScope checks
			pubResType = enum.PublicResourceTypeSpace
			pubResPath = scope.SpacePath
		}

	default:
		return false, nil
	}

	// only view permissions allowed for public access.
	if pubResType == enum.PublicResourceTypeRepo &&
		(permission != enum.PermissionRepoView &&
			permission != enum.PermissionPipelineView) {
		return false, nil
	}
	if pubResType == enum.PublicResourceTypeSpace &&
		permission != enum.PermissionSpaceView {
		return false, nil
	}
	if pubResType == enum.PublicResourceTypeRegistry &&
		permission != enum.PermissionRegistryView &&
		permission != enum.PermissionArtifactsDownload {
		return false, nil
	}

	resourceIsPublic, err := publicAccess.Get(ctx, pubResType, pubResPath)
	if err != nil {
		// FIX: corrected misspelled "accessabillity" in the error message.
		return false, fmt.Errorf("failed to check public accessibility of %s %q: %w", pubResType, pubResPath, err)
	}
	return resourceIsPublic, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/auth/authn/wire.go | app/auth/authn/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package authn
import (
"crypto/rand"
"fmt"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/types"
"github.com/google/wire"
"github.com/rs/zerolog/log"
)
// WireSet provides a wire set for this package.
// It exposes the token-based Authenticator provider.
var WireSet = wire.NewSet(
	ProvideAuthenticator,
)
// ProvideAuthenticator provides the JWT-based authenticator. If no anonymous
// user secret is configured, a random one is generated (and a warning logged),
// since the secret is required to verify anonymous tokens.
func ProvideAuthenticator(
	config *types.Config,
	principalStore store.PrincipalStore,
	tokenStore store.TokenStore,
) Authenticator {
	if config.Auth.AnonymousUserSecret == "" {
		secret := make([]byte, 32)
		if _, err := rand.Read(secret); err != nil {
			panic(fmt.Sprintf("could not generate random bytes for anonymous user secret: %v", err))
		}
		config.Auth.AnonymousUserSecret = string(secret)
		log.Warn().Msg("No anonymous secret provided - generated random secret.")
	}

	return NewTokenAuthenticator(principalStore, tokenStore, config.Token.CookieName, config.Auth.AnonymousUserSecret)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/auth/authn/authenticator.go | app/auth/authn/authenticator.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package authn
import (
"errors"
"net/http"
"github.com/harness/gitness/app/auth"
)
var (
	// ErrNoAuthData is returned if the authorizer doesn't find any data in the request that can be used for auth.
	ErrNoAuthData = errors.New("the request doesn't contain any auth data that can be used by the Authorizer")
)

// Authenticator is an abstraction of an entity that's responsible for authenticating principals
// that are making calls via HTTP.
type Authenticator interface {
	// Authenticate tries to authenticate the acting principal if credentials are available.
	// Returns:
	//   (session, nil)       - request contains auth data and principal was verified
	//   (nil, ErrNoAuthData) - request doesn't contain any auth data
	//   (nil, err)           - request contains auth data but verification failed
	Authenticate(r *http.Request) (*auth.Session, error)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/auth/authn/jwt.go | app/auth/authn/jwt.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package authn
import (
"context"
"errors"
"fmt"
"net/http"
"strings"
"github.com/harness/gitness/app/api/request"
"github.com/harness/gitness/app/auth"
"github.com/harness/gitness/app/jwt"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/types"
gojwt "github.com/golang-jwt/jwt/v5"
)
const (
	// headerTokenPrefixBearer is the Authorization header prefix for bearer tokens.
	headerTokenPrefixBearer = "Bearer "

	// HeaderTokenPrefixRemoteAuth is the Authorization header prefix for remote auth tokens.
	//nolint:gosec // wrong flagging
	HeaderTokenPrefixRemoteAuth = "RemoteAuth "
)

// compile-time check that JWTAuthenticator satisfies the Authenticator interface.
var _ Authenticator = (*JWTAuthenticator)(nil)

// JWTAuthenticator uses the provided JWT to authenticate the caller.
// Tokens are verified against the principal's salt(s); the anonymous principal
// is verified against the configured anonymous user secret.
type JWTAuthenticator struct {
	cookieName          string
	principalStore      store.PrincipalStore
	tokenStore          store.TokenStore
	anonymousUserSecret string
}
// NewTokenAuthenticator creates a JWT-based authenticator that reads tokens
// from the query, the Authorization header, or the named cookie.
func NewTokenAuthenticator(
	principalStore store.PrincipalStore,
	tokenStore store.TokenStore,
	cookieName string,
	anonymousUserSecret string,
) *JWTAuthenticator {
	authenticator := &JWTAuthenticator{
		cookieName:          cookieName,
		principalStore:      principalStore,
		tokenStore:          tokenStore,
		anonymousUserSecret: anonymousUserSecret,
	}
	return authenticator
}
// Authenticate implements Authenticator. It extracts a JWT from the request,
// resolves the principal from the (still unverified) claims, and then verifies
// the token signature against each of the principal's salts - the salt is
// comma-separated to support salt rotation.
func (a *JWTAuthenticator) Authenticate(r *http.Request) (*auth.Session, error) {
	ctx := r.Context()
	str := extractToken(r, a.cookieName)
	if len(str) == 0 {
		return nil, ErrNoAuthData
	}
	// First, parse claims just to get the principal ID (minimal parsing)
	claims := &jwt.Claims{}
	token, _, err := new(gojwt.Parser).ParseUnverified(str, claims)
	if err != nil {
		return nil, fmt.Errorf("failed to parse token format: %w", err)
	}
	// Check if it's the expected token format before proceeding
	if _, ok := token.Method.(*gojwt.SigningMethodHMAC); !ok {
		return nil, errors.New("invalid signature method for JWT")
	}
	// Fetch the principal
	var principal *types.Principal
	if claims.PrincipalID == -1 {
		// principal ID -1 marks the anonymous principal - its token is verified
		// against the configured anonymous user secret instead of a db salt.
		principal = &types.Principal{
			ID:   auth.AnonymousPrincipal.ID,
			UID:  auth.AnonymousPrincipal.UID,
			Salt: a.anonymousUserSecret,
		}
	} else {
		principal, err = a.principalStore.Find(ctx, claims.PrincipalID)
		if err != nil {
			return nil, fmt.Errorf("failed to get principal for token: %w", err)
		}
	}
	// Support for multiple secrets (comma-separated)
	saltValues := strings.Split(principal.Salt, ",")
	var lastErr error
	// Try each salt value until one works
	for _, salt := range saltValues {
		salt = strings.TrimSpace(salt)
		// Parse with this salt
		verifiedClaims := &jwt.Claims{}
		parsedToken, err := gojwt.ParseWithClaims(
			str,
			verifiedClaims,
			func(_ *gojwt.Token) (any, error) {
				return []byte(salt), nil
			},
		)
		if err == nil && parsedToken.Valid {
			// Use the helper function to create the session
			return createSessionFromClaims(ctx, a, principal, verifiedClaims)
		}
		lastErr = err
	}
	// All verification attempts failed
	if lastErr != nil {
		return nil, fmt.Errorf("JWT verification failed: %w", lastErr)
	}
	return nil, errors.New("JWT verification failed with all provided salts")
}
func (a *JWTAuthenticator) metadataFromTokenClaims(
ctx context.Context,
principal *types.Principal,
tknClaims *jwt.SubClaimsToken,
) (auth.Metadata, error) {
// ensure tkn exists
tkn, err := a.tokenStore.Find(ctx, tknClaims.ID)
if err != nil {
return nil, fmt.Errorf("failed to find token in db: %w", err)
}
// protect against faked JWTs for other principals in case of single salt leak
if principal.ID != tkn.PrincipalID {
return nil, fmt.Errorf(
"JWT was for principal %d while db token was for principal %d",
principal.ID, tkn.PrincipalID,
)
}
return &auth.TokenMetadata{
TokenType: tkn.Type,
TokenID: tkn.ID,
}, nil
}
func (a *JWTAuthenticator) metadataFromMembershipClaims(
mbsClaims *jwt.SubClaimsMembership,
) auth.Metadata {
// We could check if space exists - but also okay to fail later (saves db call)
return &auth.MembershipMetadata{
SpaceID: mbsClaims.SpaceID,
Role: mbsClaims.Role,
}
}
func (a *JWTAuthenticator) metadataFromAccessPermissions(
s *jwt.SubClaimsAccessPermissions,
) auth.Metadata {
return &auth.AccessPermissionMetadata{
AccessPermissions: s,
}
}
func extractToken(r *http.Request, cookieName string) string {
// Check query param first (as that's most immediately visible to caller)
if queryToken, ok := request.GetAccessTokenFromQuery(r); ok {
return queryToken
}
// check authorization header next
headerToken := r.Header.Get(request.HeaderAuthorization)
switch {
// in case of git push / pull it would be basic auth and token is in password
case strings.HasPrefix(headerToken, "Basic "):
// return pwd either way - if it's invalid pwd is empty string which we'd return anyway
_, pwd, _ := r.BasicAuth()
return pwd
// strip bearer prefix if present
case strings.HasPrefix(headerToken, headerTokenPrefixBearer):
return headerToken[len(headerTokenPrefixBearer):]
// for ssh git-lfs-authenticate the returned token prefix would be RemoteAuth of type JWT
case strings.HasPrefix(headerToken, HeaderTokenPrefixRemoteAuth):
return headerToken[len(HeaderTokenPrefixRemoteAuth):]
// otherwise use value as is
case headerToken != "":
return headerToken
}
// check cookies last (as that's least visible to caller)
if cookieToken, ok := request.GetTokenFromCookie(r, cookieName); ok {
return cookieToken
}
// no token found
return ""
}
// createSessionFromClaims creates an auth session from verified JWT claims.
func createSessionFromClaims(
ctx context.Context,
a *JWTAuthenticator,
principal *types.Principal,
claims *jwt.Claims,
) (*auth.Session, error) {
var metadata auth.Metadata
switch {
case claims.Token != nil:
tokenMetadata, err := a.metadataFromTokenClaims(ctx, principal, claims.Token)
if err != nil {
return nil, fmt.Errorf("failed to get metadata from token claims: %w", err)
}
return &auth.Session{
Principal: *principal,
Metadata: tokenMetadata,
}, nil
case claims.Membership != nil:
metadata = a.metadataFromMembershipClaims(claims.Membership)
case claims.AccessPermissions != nil:
metadata = a.metadataFromAccessPermissions(claims.AccessPermissions)
default:
return nil, fmt.Errorf("jwt is missing sub-claims")
}
return &auth.Session{
Principal: *principal,
Metadata: metadata,
}, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/wire.go | app/store/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package store
import (
"github.com/google/wire"
)
// WireSet provides a wire set for this package.
var WireSet = wire.NewSet(
ProvidePathTransformation,
ProvidePrincipalUIDTransformation,
)
func ProvidePathTransformation() SpacePathTransformation {
return ToLowerSpacePathTransformation
}
func ProvidePrincipalUIDTransformation() PrincipalUIDTransformation {
return ToLowerPrincipalUIDTransformation
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/cache.go | app/store/cache.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package store
import (
"github.com/harness/gitness/cache"
"github.com/harness/gitness/types"
)
type (
// PrincipalInfoCache caches principal IDs to principal info.
PrincipalInfoCache cache.ExtendedCache[int64, *types.PrincipalInfo]
// SpaceIDCache holds the immutable part of Space objects fetched by space ID.
SpaceIDCache cache.Cache[int64, *types.SpaceCore]
// SpacePathCache caches a raw path to a space path.
SpacePathCache cache.Cache[string, *types.SpacePath]
// RepoIDCache holds Repository objects fetched by their ID.
RepoIDCache cache.Cache[int64, *types.RepositoryCore]
// RepoRefCache holds repository IDs fetched by spaceID and repository identifier.
RepoRefCache cache.Cache[types.RepoCacheKey, int64]
// InfraProviderResourceCache caches infraprovider resourceIDs to infraprovider resource.
InfraProviderResourceCache cache.ExtendedCache[int64, *types.InfraProviderResource]
)
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database.go | app/store/database.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package store defines the data storage interfaces.
package store
import (
"context"
"encoding/json"
"time"
"github.com/harness/gitness/git/sha"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
type (
// PrincipalStore defines the principal data storage.
PrincipalStore interface {
/*
* PRINCIPAL RELATED OPERATIONS.
*/
// Find finds the principal by id.
Find(ctx context.Context, id int64) (*types.Principal, error)
// FindByUID finds the principal by uid.
FindByUID(ctx context.Context, uid string) (*types.Principal, error)
// FindManyByUID returns all principals found for the provided UIDs.
// If a UID isn't found, it's not returned in the list.
FindManyByUID(ctx context.Context, uids []string) ([]*types.Principal, error)
// FindByEmail finds the principal by email.
FindByEmail(ctx context.Context, email string) (*types.Principal, error)
// FindManyByEmail finds all principals for the provided emails.
FindManyByEmail(ctx context.Context, emails []string) ([]*types.Principal, error)
/*
* USER RELATED OPERATIONS.
*/
// FindUser finds the user by id.
FindUser(ctx context.Context, id int64) (*types.User, error)
// List lists the principals matching the provided filter.
List(ctx context.Context, fetchQuery *types.PrincipalFilter) ([]*types.Principal, error)
// FindUserByUID finds the user by uid.
FindUserByUID(ctx context.Context, uid string) (*types.User, error)
// FindUserByEmail finds the user by email.
FindUserByEmail(ctx context.Context, email string) (*types.User, error)
// CreateUser saves the user details.
CreateUser(ctx context.Context, user *types.User) error
// UpdateUser updates an existing user.
UpdateUser(ctx context.Context, user *types.User) error
// DeleteUser deletes the user.
DeleteUser(ctx context.Context, id int64) error
// ListUsers returns a list of users.
ListUsers(ctx context.Context, params *types.UserFilter) ([]*types.User, error)
// CountUsers returns a count of users which match the given filter.
CountUsers(ctx context.Context, opts *types.UserFilter) (int64, error)
/*
* SERVICE ACCOUNT RELATED OPERATIONS.
*/
// FindServiceAccount finds the service account by id.
FindServiceAccount(ctx context.Context, id int64) (*types.ServiceAccount, error)
// FindServiceAccountByUID finds the service account by uid.
FindServiceAccountByUID(ctx context.Context, uid string) (*types.ServiceAccount, error)
FindManyServiceAccountByUID(ctx context.Context, uid []string) ([]*types.ServiceAccount, error)
// CreateServiceAccount saves the service account.
CreateServiceAccount(ctx context.Context, sa *types.ServiceAccount) error
// UpdateServiceAccount updates the service account details.
UpdateServiceAccount(ctx context.Context, sa *types.ServiceAccount) error
// DeleteServiceAccount deletes the service account.
DeleteServiceAccount(ctx context.Context, id int64) error
// ListServiceAccounts returns a list of service accounts for a specific parent.
ListServiceAccounts(
ctx context.Context,
parentInfos []*types.ServiceAccountParentInfo,
opts *types.PrincipalFilter,
) ([]*types.ServiceAccount, error)
// CountServiceAccounts returns a count of service accounts for a specific parent.
CountServiceAccounts(
ctx context.Context,
parentInfos []*types.ServiceAccountParentInfo,
opts *types.PrincipalFilter,
) (int64, error)
/*
* SERVICE RELATED OPERATIONS.
*/
// FindService finds the service by id.
FindService(ctx context.Context, id int64) (*types.Service, error)
// FindServiceByUID finds the service by uid.
FindServiceByUID(ctx context.Context, uid string) (*types.Service, error)
// CreateService saves the service.
CreateService(ctx context.Context, sa *types.Service) error
// UpdateService updates the service.
UpdateService(ctx context.Context, sa *types.Service) error
// DeleteService deletes the service.
DeleteService(ctx context.Context, id int64) error
// ListServices returns a list of service for a specific parent.
ListServices(ctx context.Context) ([]*types.Service, error)
// CountServices returns a count of service for a specific parent.
CountServices(ctx context.Context) (int64, error)
}
// PrincipalInfoView defines helper utility for fetching types.PrincipalInfo objects.
// It uses the same underlying data storage as PrincipalStore.
PrincipalInfoView interface {
Find(ctx context.Context, id int64) (*types.PrincipalInfo, error)
FindMany(ctx context.Context, ids []int64) ([]*types.PrincipalInfo, error)
}
// InfraProviderResourceView defines helper utility for fetching types.InfraProviderResource objects.
// It uses the same underlying data storage as InfraProviderResourceStore.
InfraProviderResourceView interface {
Find(ctx context.Context, id int64) (*types.InfraProviderResource, error)
FindMany(ctx context.Context, ids []int64) ([]*types.InfraProviderResource, error)
}
// SpacePathStore defines the path data storage for spaces.
SpacePathStore interface {
// InsertSegment inserts a space path segment to the table.
InsertSegment(ctx context.Context, segment *types.SpacePathSegment) error
// FindPrimaryBySpaceID finds the primary path of a space given its ID.
FindPrimaryBySpaceID(ctx context.Context, spaceID int64) (*types.SpacePath, error)
// FindByPath returns the space path for a given raw path.
FindByPath(ctx context.Context, path string) (*types.SpacePath, error)
// DeletePrimarySegment deletes the primary segment of a space.
DeletePrimarySegment(ctx context.Context, spaceID int64) error
// DeletePathsAndDescendandPaths deletes all space paths reachable from spaceID including itself.
DeletePathsAndDescendandPaths(ctx context.Context, spaceID int64) error
}
// SpaceStore defines the space data storage.
SpaceStore interface {
// Find the space by id.
Find(ctx context.Context, id int64) (*types.Space, error)
// FindByIDs finds all spaces with specified ids.
FindByIDs(ctx context.Context, ids ...int64) ([]*types.Space, error)
// FindByRef finds the space using the spaceRef as either the id or the space path.
FindByRef(ctx context.Context, spaceRef string) (*types.Space, error)
// FindByRefCaseInsensitive finds the space using the spaceRef.
FindByRefCaseInsensitive(ctx context.Context, spaceRef string) (int64, error)
// FindByRefAndDeletedAt finds the space using the spaceRef and deleted timestamp.
FindByRefAndDeletedAt(ctx context.Context, spaceRef string, deletedAt int64) (*types.Space, error)
// GetRootSpace returns a space where space_parent_id is NULL.
GetRootSpace(ctx context.Context, spaceID int64) (*types.Space, error)
// GetRootSpaces returns all spaces where space_parent_id is NULL.
GetAllRootSpaces(ctx context.Context, opts *types.SpaceFilter) ([]*types.Space, error)
// GetAncestorIDs returns a list of all space IDs along the recursive path to the root space.
// NB: it returns also the spaceID itself in the []int64 slice.
GetAncestorIDs(ctx context.Context, spaceID int64) ([]int64, error)
// GetTreeLevel returns the level of a space in a space tree.
GetTreeLevel(ctx context.Context, spaceID int64) (int64, error)
// GetAncestors returns a list of all spaces along the recursive path to the root space.
GetAncestors(ctx context.Context, spaceID int64) ([]*types.Space, error)
// GetAncestorsData returns a list of space parent data for spaces that are ancestors of the space.
GetAncestorsData(ctx context.Context, spaceID int64) ([]types.SpaceParentData, error)
// GetDescendantsData returns a list of space parent data for spaces that are descendants of the space.
GetDescendantsData(ctx context.Context, spaceID int64) ([]types.SpaceParentData, error)
// GetDescendantsIDs returns a list of space ids for spaces that are descendants of the specified space.
GetDescendantsIDs(ctx context.Context, spaceID int64) ([]int64, error)
// Create creates a new space
Create(ctx context.Context, space *types.Space) error
// Update updates the space details.
Update(ctx context.Context, space *types.Space) error
// UpdateOptLock updates the space using the optimistic locking mechanism.
UpdateOptLock(
ctx context.Context, space *types.Space,
mutateFn func(space *types.Space) error,
) (*types.Space, error)
// FindForUpdate finds the space and locks it for an update.
FindForUpdate(ctx context.Context, id int64) (*types.Space, error)
// SoftDelete deletes the space.
SoftDelete(ctx context.Context, space *types.Space, deletedAt int64) error
// Purge deletes a space permanently.
Purge(ctx context.Context, id int64, deletedAt *int64) error
// Restore restores a soft deleted space.
Restore(
ctx context.Context, space *types.Space,
newIdentifier *string, newParentID *int64,
) (*types.Space, error)
// Count the child spaces of a space.
Count(ctx context.Context, id int64, opts *types.SpaceFilter) (int64, error)
// List returns a list of child spaces in a space.
List(ctx context.Context, id int64, opts *types.SpaceFilter) ([]*types.Space, error)
// GetRootSpacesSize returns the size of the root spaces
GetRootSpacesSize(ctx context.Context) ([]types.SpaceStorage, error)
}
// RepoStore defines the repository data storage.
RepoStore interface {
// Find the repo by id.
Find(ctx context.Context, id int64) (*types.Repository, error)
// FindDeleted the deleted repo by id.
FindDeleted(ctx context.Context, id int64, deleted *int64) (*types.Repository, error)
// FindActiveByUID finds a non-deleted repo by UID.
FindActiveByUID(ctx context.Context, parentSpaceID int64, uid string) (*types.Repository, error)
// FindDeletedByUID finds a deleted repo by UID.
FindDeletedByUID(
ctx context.Context,
parentSpaceID int64,
uid string,
deletedAt int64,
) (*types.Repository, error)
// Create a new repo.
Create(ctx context.Context, repo *types.Repository) error
// Update the repo details.
Update(ctx context.Context, repo *types.Repository) error
// UpdateSize updates the size of a specific repository in the database (size is in KiB).
UpdateSize(ctx context.Context, id int64, sizeInKiB, sizeLFSInKiB int64) error
// GetSize returns the repo size.
GetSize(ctx context.Context, id int64) (int64, error)
// GetLFSSize returns LFS size.
GetLFSSize(ctx context.Context, id int64) (int64, error)
// UpdateOptLock the repo details using the optimistic locking mechanism.
UpdateOptLock(
ctx context.Context, repo *types.Repository,
mutateFn func(repository *types.Repository) error,
) (*types.Repository, error)
// SoftDelete a repo.
SoftDelete(ctx context.Context, repo *types.Repository, deletedAt int64) error
// Purge the soft deleted repo permanently.
Purge(ctx context.Context, id int64, deletedAt *int64) error
// Restore a deleted repo using the optimistic locking mechanism.
Restore(
ctx context.Context, repo *types.Repository,
newIdentifier *string, newParentID *int64,
) (*types.Repository, error)
// Count of active repos in a space. With "DeletedBeforeOrAt" filter, counts deleted repos.
Count(ctx context.Context, parentID int64, opts *types.RepoFilter) (int64, error)
CountByRootSpaces(ctx context.Context) ([]types.RepositoryCount, error)
// List returns a list of repos in a space. With "DeletedBeforeOrAt" filter, lists deleted repos.
List(ctx context.Context, parentID int64, opts *types.RepoFilter) ([]*types.Repository, error)
// ListAll returns a list of all repos across spaces with the provided filters.
ListAll(ctx context.Context, filter *types.RepoFilter) ([]*types.Repository, error)
// ListSizeInfos returns a list of all active repo sizes.
ListSizeInfos(ctx context.Context) ([]*types.RepositorySizeInfo, error)
// UpdateNumForks increases or decreases number of forks of the repository.
UpdateNumForks(ctx context.Context, repoID int64, delta int64) error
// ClearForkID clears fork ID of all repositories that have this fork ID.
ClearForkID(ctx context.Context, repoUpstreamID int64) error
// UpdateParent updates parent_id for all repos with currentParentID to newParentID.
UpdateParent(ctx context.Context, currentParentID, newParentID int64) (int64, error)
}
LinkedRepoStore interface {
Find(ctx context.Context, repoID int64) (*types.LinkedRepo, error)
Create(ctx context.Context, v *types.LinkedRepo) error
Update(ctx context.Context, linked *types.LinkedRepo) error
UpdateOptLock(
ctx context.Context,
r *types.LinkedRepo,
mutateFn func(*types.LinkedRepo) error,
) (*types.LinkedRepo, error)
List(ctx context.Context, limit int) ([]types.LinkedRepo, error)
}
// SettingsStore defines the settings storage.
SettingsStore interface {
// Find returns the value of the setting with the given key for the provided scope.
Find(
ctx context.Context,
scope enum.SettingsScope,
scopeID int64,
key string,
) (json.RawMessage, error)
// FindMany returns the values of the settings with the given keys for the provided scope.
// NOTE: if a setting key doesn't exist the map just won't contain an entry for it (no error returned).
FindMany(
ctx context.Context,
scope enum.SettingsScope,
scopeID int64,
keys ...string,
) (map[string]json.RawMessage, error)
// Upsert upserts the value of the setting with the given key for the provided scope.
Upsert(
ctx context.Context,
scope enum.SettingsScope,
scopeID int64,
key string,
value json.RawMessage,
) error
}
// MembershipStore defines the membership data storage.
MembershipStore interface {
Find(ctx context.Context, key types.MembershipKey) (*types.Membership, error)
FindUser(ctx context.Context, key types.MembershipKey) (*types.MembershipUser, error)
Create(ctx context.Context, membership *types.Membership) error
Update(ctx context.Context, membership *types.Membership) error
Delete(ctx context.Context, key types.MembershipKey) error
CountUsers(ctx context.Context, spaceID int64, filter types.MembershipUserFilter) (int64, error)
ListUsers(ctx context.Context, spaceID int64, filter types.MembershipUserFilter) ([]types.MembershipUser, error)
CountSpaces(ctx context.Context, userID int64, filter types.MembershipSpaceFilter) (int64, error)
ListSpaces(
ctx context.Context,
userID int64,
filter types.MembershipSpaceFilter,
) ([]types.MembershipSpace, error)
}
// PublicAccessStore defines the publicly accessible resources data storage.
PublicAccessStore interface {
Find(ctx context.Context, typ enum.PublicResourceType, id int64) (bool, error)
Create(ctx context.Context, typ enum.PublicResourceType, id int64) error
Delete(ctx context.Context, typ enum.PublicResourceType, id int64) error
}
// TokenStore defines the token data storage.
TokenStore interface {
// Find finds the token by id
Find(ctx context.Context, id int64) (*types.Token, error)
// FindByIdentifier finds the token by principalId and token identifier.
FindByIdentifier(ctx context.Context, principalID int64, identifier string) (*types.Token, error)
// Create saves the token details.
Create(ctx context.Context, token *types.Token) error
// Delete deletes the token with the given id.
Delete(ctx context.Context, id int64) error
// DeleteExpiredBefore deletes all tokens that expired before the provided time.
// If tokenTypes are provided, then only tokens of that type are deleted.
DeleteExpiredBefore(ctx context.Context, before time.Time, tknTypes []enum.TokenType) (int64, error)
// List returns a list of tokens of a specific type for a specific principal.
List(ctx context.Context, principalID int64, tokenType enum.TokenType) ([]*types.Token, error)
// Count returns a count of tokens of a specifc type for a specific principal.
Count(ctx context.Context, principalID int64, tokenType enum.TokenType) (int64, error)
}
// PullReqStore defines the pull request data storage.
PullReqStore interface {
// Find the pull request by id.
Find(ctx context.Context, id int64) (*types.PullReq, error)
// FindByNumberWithLock finds the pull request by repo ID and the pull request number
// and acquires an exclusive lock of the pull request database row for the duration of the transaction.
FindByNumberWithLock(ctx context.Context, repoID, number int64) (*types.PullReq, error)
// FindByNumber finds the pull request by repo ID and the pull request number.
FindByNumber(ctx context.Context, repoID, number int64) (*types.PullReq, error)
// Create a new pull request.
Create(ctx context.Context, pullreq *types.PullReq) error
// Update the pull request. It will set new values to the Version and Updated fields.
Update(ctx context.Context, pr *types.PullReq) error
// UpdateOptLock the pull request details using the optimistic locking mechanism.
UpdateOptLock(
ctx context.Context, pr *types.PullReq,
mutateFn func(pr *types.PullReq) error,
) (*types.PullReq, error)
// UpdateMergeCheckMetadataOptLock updates only the pull request's merge check metadata
// using the optimistic locking mechanism without updating the updated field.
UpdateMergeCheckMetadataOptLock(
ctx context.Context, pr *types.PullReq,
mutateFn func(pr *types.PullReq) error,
) (*types.PullReq, error)
// UpdateActivitySeq the pull request's activity sequence number.
// It will set new values to the ActivitySeq, Version and Updated fields.
UpdateActivitySeq(ctx context.Context, pr *types.PullReq) (*types.PullReq, error)
// ResetMergeCheckStatus resets the pull request's mergeability status to unchecked
// for all prs with target branch pointing to targetBranch.
ResetMergeCheckStatus(ctx context.Context, targetRepo int64, targetBranch string) error
// Delete the pull request.
Delete(ctx context.Context, id int64) error
// Count of pull requests in a space.
Count(ctx context.Context, opts *types.PullReqFilter) (int64, error)
// List returns a list of pull requests in a repository.
List(ctx context.Context, opts *types.PullReqFilter) ([]*types.PullReq, error)
// Stream returns streams pull requests from repositories.
Stream(ctx context.Context, opts *types.PullReqFilter) (<-chan *types.PullReq, <-chan error)
// ListOpenByBranchName returns open pull requests for each branch.
ListOpenByBranchName(
ctx context.Context,
repoID int64,
branchNames []string,
) (map[string][]*types.PullReq, error)
}
PullReqActivityStore interface {
// Find the pull request activity by id.
Find(ctx context.Context, id int64) (*types.PullReqActivity, error)
// Create a new pull request activity. Value of the Order field should be fetched with UpdateActivitySeq.
// Value of the SubOrder field (for replies) should be the incremented ReplySeq field (non-replies have 0).
Create(ctx context.Context, act *types.PullReqActivity) error
// CreateWithPayload create a new system activity from the provided payload.
CreateWithPayload(
ctx context.Context,
pr *types.PullReq,
principalID int64,
payload types.PullReqActivityPayload,
metadata *types.PullReqActivityMetadata,
) (*types.PullReqActivity, error)
// Update the pull request activity. It will set new values to the Version and Updated fields.
Update(ctx context.Context, act *types.PullReqActivity) error
// UpdateOptLock updates the pull request activity using the optimistic locking mechanism.
UpdateOptLock(
ctx context.Context,
act *types.PullReqActivity,
mutateFn func(act *types.PullReqActivity) error,
) (*types.PullReqActivity, error)
// Count returns number of pull request activities in a pull request.
Count(ctx context.Context, prID int64, opts *types.PullReqActivityFilter) (int64, error)
// CountUnresolved returns number of unresolved comments.
CountUnresolved(ctx context.Context, prID int64) (int, error)
// List returns a list of pull request activities in a pull request (a timeline).
List(ctx context.Context, prID int64, opts *types.PullReqActivityFilter) ([]*types.PullReqActivity, error)
// ListAuthorIDs returns a list of pull request activity author ids in a thread (order).
ListAuthorIDs(ctx context.Context, prID int64, order int64) ([]int64, error)
}
// CodeCommentView is to manipulate only code-comment subset of PullReqActivity.
// It's used by internal service that migrates code comment line numbers after new commits.
CodeCommentView interface {
// ListNotAtSourceSHA loads code comments that need to be updated after a new commit.
// Resulting list is ordered by the file name and the relevant line number.
ListNotAtSourceSHA(ctx context.Context, prID int64, sourceSHA string) ([]*types.CodeComment, error)
// ListNotAtMergeBaseSHA loads code comments that need to be updated after merge base update.
// Resulting list is ordered by the file name and the relevant line number.
ListNotAtMergeBaseSHA(ctx context.Context, prID int64, targetSHA string) ([]*types.CodeComment, error)
// UpdateAll updates code comments (pull request activity of types code-comment).
// entities coming from the input channel.
UpdateAll(ctx context.Context, codeComments []*types.CodeComment) error
}
// PullReqReviewStore defines the pull request review storage.
PullReqReviewStore interface {
// Find returns the pull request review entity or an error if it doesn't exist.
Find(ctx context.Context, id int64) (*types.PullReqReview, error)
// Create creates a new pull request review.
Create(ctx context.Context, v *types.PullReqReview) error
}
PullReqReviewerStore interface {
// Find returns the pull request reviewer or an error if it doesn't exist.
Find(ctx context.Context, prID, principalID int64) (*types.PullReqReviewer, error)
// Create creates the new pull request reviewer.
Create(ctx context.Context, v *types.PullReqReviewer) error
// Update updates the pull request reviewer.
Update(ctx context.Context, v *types.PullReqReviewer) error
// Delete the Pull request reviewer
Delete(ctx context.Context, prID, principalID int64) error
// List returns all pull request reviewers for the pull request.
List(ctx context.Context, prID int64) ([]*types.PullReqReviewer, error)
}
// UserGroupReviewerStore defines the pull request usergroup reviewer storage.
UserGroupReviewerStore interface {
Create(ctx context.Context, v *types.UserGroupReviewer) error
Delete(ctx context.Context, prID, principalID int64) error
List(ctx context.Context, prID int64) ([]*types.UserGroupReviewer, error)
Find(
ctx context.Context,
prID,
userGroupReviewerID int64,
) (*types.UserGroupReviewer, error)
}
// PullReqFileViewStore stores information about what file a user viewed.
PullReqFileViewStore interface {
// Upsert inserts or updates the latest viewed sha for a file in a PR.
Upsert(ctx context.Context, fileView *types.PullReqFileView) error
// DeleteByFileForPrincipal deletes the entry for the specified PR, principal, and file.
DeleteByFileForPrincipal(ctx context.Context, prID int64, principalID int64, filePath string) error
// MarkObsolete updates all entries of the files as obsolete for the PR.
MarkObsolete(ctx context.Context, prID int64, filePaths []string) error
// List lists all files marked as viewed by the user for the specified PR.
List(ctx context.Context, prID int64, principalID int64) ([]*types.PullReqFileView, error)
}
// RuleStore defines database interface for protection rules.
RuleStore interface {
// Find finds a protection rule by ID.
Find(ctx context.Context, id int64) (*types.Rule, error)
// FindByIdentifier finds a protection rule by parent ID and identifier.
FindByIdentifier(
ctx context.Context,
parentType enum.RuleParent,
parentID int64,
identifier string,
) (*types.Rule, error)
// Create inserts a new protection rule.
Create(ctx context.Context, rule *types.Rule) error
// Update updates an existing protection rule.
Update(ctx context.Context, rule *types.Rule) error
// Delete removes a protection rule by its ID.
Delete(ctx context.Context, id int64) error
// Count returns count of protection rules of a repository or a space.
Count(
ctx context.Context,
parents []types.RuleParentInfo,
filter *types.RuleFilter,
) (int64, error)
// List returns a list of protection rules of a repository or a space.
List(
ctx context.Context,
parents []types.RuleParentInfo,
filter *types.RuleFilter,
) ([]types.Rule, error)
// ListAllRepoRules returns a list of all protection rules that can be applied on a repository.
ListAllRepoRules(
ctx context.Context,
repoID int64,
ruleTypes ...enum.RuleType,
) ([]types.RuleInfoInternal, error)
// UpdateParentSpace updates the parent space of rules.
UpdateParentSpace(ctx context.Context, srcParentSpaceID int64, targetParentSpaceID int64) (int64, error)
}
	// WebhookStore defines the webhook data storage.
	WebhookStore interface {
		// Find finds the webhook by its ID.
		Find(ctx context.Context, id int64) (*types.Webhook, error)
		// FindByIdentifier finds the webhook with the given identifier for the given parent.
		FindByIdentifier(
			ctx context.Context,
			parentType enum.WebhookParent,
			parentID int64,
			identifier string,
		) (*types.Webhook, error)
		// Create creates a new webhook.
		Create(ctx context.Context, hook *types.Webhook) error
		// Update updates an existing webhook.
		Update(ctx context.Context, hook *types.Webhook) error
		// UpdateOptLock updates the webhook using the optimistic locking mechanism;
		// mutateFn is used to apply the desired changes to the webhook being updated.
		UpdateOptLock(
			ctx context.Context, hook *types.Webhook,
			mutateFn func(hook *types.Webhook) error,
		) (*types.Webhook, error)
		// Delete deletes the webhook for the given ID.
		Delete(ctx context.Context, id int64) error
		// DeleteByIdentifier deletes the webhook with the given identifier for the given parent.
		DeleteByIdentifier(ctx context.Context, parentType enum.WebhookParent, parentID int64, identifier string) error
		// Count counts the webhooks for the given parents, applying the provided filter.
		Count(
			ctx context.Context,
			parents []types.WebhookParentInfo,
			opts *types.WebhookFilter,
		) (int64, error)
		// List lists the webhooks for the given parents, applying the provided filter.
		List(
			ctx context.Context,
			parents []types.WebhookParentInfo,
			opts *types.WebhookFilter,
		) ([]*types.Webhook, error)
		// UpdateParentSpace updates the parent space of webhooks.
		UpdateParentSpace(ctx context.Context, srcParentSpaceID int64, targetParentSpaceID int64) (int64, error)
	}
	// WebhookExecutionStore defines the webhook execution data storage.
	WebhookExecutionStore interface {
		// Find finds the webhook execution by ID.
		Find(ctx context.Context, id int64) (*types.WebhookExecution, error)
		// Create creates a new webhook execution entry.
		Create(ctx context.Context, hook *types.WebhookExecution) error
		// DeleteOld removes all executions that are older than the provided time.
		DeleteOld(ctx context.Context, olderThan time.Time) (int64, error)
		// ListForWebhook lists the webhook executions for a given webhook ID.
		ListForWebhook(
			ctx context.Context, webhookID int64,
			opts *types.WebhookExecutionFilter,
		) ([]*types.WebhookExecution, error)
		// CountForWebhook counts the webhook executions for a given webhook ID.
		CountForWebhook(ctx context.Context, webhookID int64) (int64, error)
		// ListForTrigger lists the webhook executions for a given trigger ID.
		ListForTrigger(ctx context.Context, triggerID string) ([]*types.WebhookExecution, error)
	}
	// CheckStore defines the status check results data storage.
	CheckStore interface {
		// FindByIdentifier returns status check result for given unique key.
		FindByIdentifier(ctx context.Context, repoID int64, commitSHA string, identifier string) (types.Check, error)
		// Upsert creates new or updates an existing status check result.
		Upsert(ctx context.Context, check *types.Check) error
		// Count counts status check results for a specific commit in a repo.
		Count(ctx context.Context, repoID int64, commitSHA string, opts types.CheckListOptions) (int, error)
		// List returns a list of status check results for a specific commit in a repo.
		List(ctx context.Context, repoID int64, commitSHA string, opts types.CheckListOptions) ([]types.Check, error)
		// ListRecent returns a list of recently executed status checks in a repository.
		ListRecent(
			ctx context.Context,
			repoID int64,
			opts types.CheckRecentOptions,
		) ([]string, error)
		// ListRecentSpace returns a list of recently executed status checks in
		// repositories in spaces with specified space IDs.
		ListRecentSpace(
			ctx context.Context,
			spaceIDs []int64,
			opts types.CheckRecentOptions,
		) ([]string, error)
		// ListResults returns a list of status check results for a specific commit in a repo.
		ListResults(ctx context.Context, repoID int64, commitSHA string) ([]types.CheckResult, error)
		// ResultSummary returns a list of status check result summaries for the provided list of commits in a repo.
		ResultSummary(
			ctx context.Context,
			repoID int64,
			commitSHAs []string,
		) (map[sha.SHA]types.CheckCountSummary, error)
	}
	// GitspaceConfigStore defines the gitspace configuration data storage.
	GitspaceConfigStore interface {
		// Find returns a gitspace config given a ID from the datastore.
		Find(ctx context.Context, id int64, includeDeleted bool) (*types.GitspaceConfig, error)
		// FindAll returns list of gitspace configs given a IDs from the datastore.
		FindAll(ctx context.Context, id []int64) ([]*types.GitspaceConfig, error)
		// FindByIdentifier returns a gitspace config with a given UID in a space.
		FindByIdentifier(ctx context.Context, spaceID int64, identifier string) (*types.GitspaceConfig, error)
		// FindAllByIdentifier returns a list of gitspace configs with the given UIDs for a given space.
		FindAllByIdentifier(ctx context.Context, spaceID int64, identifiers []string) ([]types.GitspaceConfig, error)
		// Create creates a new gitspace config in the datastore.
		Create(ctx context.Context, gitspaceConfig *types.GitspaceConfig) error
		// Update tries to update a gitspace config in the datastore with optimistic locking.
		Update(ctx context.Context, gitspaceConfig *types.GitspaceConfig) error
		// ListWithLatestInstance returns gitspace configs for the given filter with the latest gitspace instance
		// information.
		ListWithLatestInstance(ctx context.Context, filter *types.GitspaceFilter) ([]*types.GitspaceConfig, error)
		// Count the number of gitspace configs in a space matching the given filter.
		Count(ctx context.Context, filter *types.GitspaceFilter) (int64, error)
		// ListActiveConfigsForInfraProviderResource returns all active configs for the given infra resource.
		ListActiveConfigsForInfraProviderResource(
			ctx context.Context,
			infraProviderResourceID int64,
		) ([]*types.GitspaceConfig, error)
	}
GitspaceInstanceStore interface {
// Find returns a gitspace instance given a gitspace instance ID from the datastore.
Find(ctx context.Context, id int64) (*types.GitspaceInstance, error)
// FindByIdentifier returns a gitspace instance given a gitspace instance identifier from the datastore.
// TODO: Fix this. It needs to use space ID as well.
FindByIdentifier(ctx context.Context, identifier string) (*types.GitspaceInstance, error)
// FindLatestByGitspaceConfigID returns the latest gitspace instance given a gitspace config ID from the datastore.
FindLatestByGitspaceConfigID(
ctx context.Context,
gitspaceConfigID int64,
) (*types.GitspaceInstance, error)
// Create creates a new gitspace instance in the datastore.
Create(ctx context.Context, gitspaceInstance *types.GitspaceInstance) error
// Update tries to update a gitspace instance in the datastore with optimistic locking.
Update(ctx context.Context, gitspaceInstance *types.GitspaceInstance) error
// List lists the gitspace instance present in a parent space ID in the datastore.
List(ctx context.Context, filter *types.GitspaceInstanceFilter) ([]*types.GitspaceInstance, error)
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | true |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/store_test.go | app/store/store_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package store
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/logs.go | app/store/logs.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package store
import (
"context"
"io"
)
// LogStore provides an interface for the persistent log store backend.
type LogStore interface {
	// Find returns a log stream from the datastore.
	Find(ctx context.Context, stepID int64) (io.ReadCloser, error)
	// Create copies the log stream from Reader r to the datastore.
	Create(ctx context.Context, stepID int64, r io.Reader) error
	// Update copies the log stream from Reader r to the datastore,
	// replacing any previously stored content for the step.
	Update(ctx context.Context, stepID int64, r io.Reader) error
	// Delete purges the log stream from the datastore.
	Delete(ctx context.Context, stepID int64) error
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/transformation.go | app/store/transformation.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package store
import (
"strings"
)
// PrincipalUIDTransformation transforms a principalUID to a value that should be duplicate free.
// This allows us to simply switch between principalUIDs being case sensitive, insensitive or anything in between.
// A transformation returns the canonical form of the UID, or an error if it can't be canonicalized.
type PrincipalUIDTransformation func(uid string) (string, error)
// ToLowerPrincipalUIDTransformation canonicalizes a principal UID by lowercasing it,
// effectively making principal UIDs case insensitive. It never returns an error.
func ToLowerPrincipalUIDTransformation(uid string) (string, error) {
	lowered := strings.ToLower(uid)
	return lowered, nil
}
// SpacePathTransformation transforms a path to a value that should be duplicate free.
// This allows us to simply switch between paths being case sensitive, insensitive or anything in between.
// The boolean argument reports whether the given segment is the root segment of the path.
type SpacePathTransformation func(original string, isRoot bool) string
// ToLowerSpacePathTransformation canonicalizes a space path segment by lowercasing it.
// Root and non-root segments are treated identically (the flag is ignored).
func ToLowerSpacePathTransformation(original string, _ bool) string {
	lowered := strings.ToLower(original)
	return lowered
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/logs/wire.go | app/store/logs/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package logs
import (
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/types"
"github.com/google/wire"
"github.com/jmoiron/sqlx"
)
// WireSet provides a wire set for this package.
// It exposes ProvideLogStore as the store.LogStore provider.
var WireSet = wire.NewSet(
	ProvideLogStore,
)
// ProvideLogStore provides the log store: database-backed by default,
// combined with an S3 store (S3 primary, database fallback) when an
// S3 bucket is configured.
func ProvideLogStore(db *sqlx.DB, config *types.Config) store.LogStore {
	dbStore := NewDatabaseLogStore(db)
	if config.Logs.S3.Bucket == "" {
		return dbStore
	}
	s3Store := NewS3LogStore(
		config.Logs.S3.Bucket,
		config.Logs.S3.Prefix,
		config.Logs.S3.Endpoint,
		config.Logs.S3.PathStyle,
	)
	return NewCombined(s3Store, dbStore)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/logs/s3.go | app/store/logs/s3.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package logs
import (
"context"
"fmt"
"io"
"path"
"strings"
"github.com/harness/gitness/app/store"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
)
// NewS3LogStore returns a new S3-backed log store.
func NewS3LogStore(bucket, prefix, endpoint string, pathStyle bool) store.LogStore {
	// SSL is disabled only for custom endpoints that are not served over https.
	disableSSL := endpoint != "" && !strings.HasPrefix(endpoint, "https://")

	sess := session.Must(
		session.NewSession(&aws.Config{
			Endpoint:         aws.String(endpoint),
			DisableSSL:       aws.Bool(disableSSL),
			S3ForcePathStyle: aws.Bool(pathStyle),
		}),
	)

	return &s3store{
		bucket:  bucket,
		prefix:  prefix,
		session: sess,
	}
}
// s3store stores logs as objects in an S3 (or S3-compatible) bucket.
type s3store struct {
	bucket  string           // target bucket name
	prefix  string           // key prefix under which all log objects are stored
	session *session.Session // shared AWS session used to create service clients
}
// Find returns a reader over the log object stored for the given step.
// The caller is responsible for closing the returned body.
func (s *s3store) Find(_ context.Context, step int64) (io.ReadCloser, error) {
	out, err := s3.New(s.session).GetObject(&s3.GetObjectInput{
		Bucket: aws.String(s.bucket),
		Key:    aws.String(s.key(step)),
	})
	if err != nil {
		return nil, err
	}
	return out.Body, nil
}
// Create uploads the log stream from r as a private object keyed by the step ID.
func (s *s3store) Create(_ context.Context, step int64, r io.Reader) error {
	_, err := s3manager.NewUploader(s.session).Upload(&s3manager.UploadInput{
		ACL:    aws.String("private"),
		Bucket: aws.String(s.bucket),
		Key:    aws.String(s.key(step)),
		Body:   r,
	})
	return err
}
// Update overwrites the log object for the given step. S3 puts replace
// existing objects, so this simply delegates to Create.
func (s *s3store) Update(ctx context.Context, step int64, r io.Reader) error {
	return s.Create(ctx, step, r)
}
// Delete removes the log object for the given step from the bucket.
func (s *s3store) Delete(_ context.Context, step int64) error {
	input := &s3.DeleteObjectInput{
		Bucket: aws.String(s.bucket),
		Key:    aws.String(s.key(step)),
	}
	_, err := s3.New(s.session).DeleteObject(input)
	return err
}
// key builds the object key for a step: "/<prefix>/<step-id>".
func (s *s3store) key(step int64) string {
	return path.Join("/", s.prefix, fmt.Sprint(step))
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/logs/combine.go | app/store/logs/combine.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package logs
import (
"context"
"io"
"github.com/harness/gitness/app/store"
)
// NewCombined returns a log store that reads from a primary store and falls
// back to a secondary store when necessary. This supports migrations (e.g.
// database -> s3) where logs of older builds still live in the old backend
// while new logs are written to the new one.
func NewCombined(primary, secondary store.LogStore) store.LogStore {
	c := &combined{
		primary:   primary,
		secondary: secondary,
	}
	return c
}
// combined is a store.LogStore that queries a primary store and falls back
// to a secondary store (see NewCombined).
type combined struct {
	primary, secondary store.LogStore
}
// Find tries the primary store first and falls back to the secondary store
// if the primary lookup fails.
func (s *combined) Find(ctx context.Context, step int64) (io.ReadCloser, error) {
	if rc, err := s.primary.Find(ctx, step); err == nil {
		return rc, nil
	}
	return s.secondary.Find(ctx, step)
}
// Create writes the log stream to the primary store only.
func (s *combined) Create(ctx context.Context, step int64, r io.Reader) error {
	return s.primary.Create(ctx, step, r)
}
// Update overwrites the log stream in the primary store only.
func (s *combined) Update(ctx context.Context, step int64, r io.Reader) error {
	return s.primary.Update(ctx, step, r)
}
// Delete removes the log from the primary store; if that fails, it attempts
// removal from the secondary store and returns that result instead.
func (s *combined) Delete(ctx context.Context, step int64) error {
	err := s.primary.Delete(ctx, step)
	if err != nil {
		err = s.secondary.Delete(ctx, step)
	}
	return err
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/logs/db.go | app/store/logs/db.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package logs
import (
"bytes"
"context"
"fmt"
"io"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/jmoiron/sqlx"
)
// Compile-time check that logStore implements store.LogStore.
var _ store.LogStore = (*logStore)(nil)
// logs is the database row model for the logs table;
// not used out of this package.
type logs struct {
	ID   int64  `db:"log_id"`
	Data []byte `db:"log_data"`
}
// NewDatabaseLogStore returns a LogStore that persists logs in the database.
func NewDatabaseLogStore(db *sqlx.DB) store.LogStore {
	return &logStore{db: db}
}
// logStore implements store.LogStore on top of a SQL database.
type logStore struct {
	db *sqlx.DB
}
// Find returns a log given a log ID.
// The full log content is loaded into memory and wrapped in a no-op closer.
func (s *logStore) Find(ctx context.Context, stepID int64) (io.ReadCloser, error) {
	const findQueryStmt = `
		SELECT
		 log_id, log_data
		FROM logs
		WHERE log_id = $1`
	db := dbtx.GetAccessor(ctx, s.db)

	var err error
	dst := new(logs)
	if err = db.GetContext(ctx, dst, findQueryStmt, stepID); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find log")
	}
	// err is guaranteed nil at this point.
	return io.NopCloser(
		bytes.NewBuffer(dst.Data),
	), err
}
// Create creates a log.
// The full stream r is read into memory and inserted as a single row keyed by the step ID.
func (s *logStore) Create(ctx context.Context, stepID int64, r io.Reader) error {
	const logInsertStmt = `
		INSERT INTO logs (
			log_id
			,log_data
		) values (
			:log_id
			,:log_data
		)`
	data, err := io.ReadAll(r)
	if err != nil {
		return fmt.Errorf("could not read log data: %w", err)
	}
	params := &logs{
		ID:   stepID,
		Data: data,
	}

	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(logInsertStmt, params)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind log object")
	}

	if _, err := db.ExecContext(ctx, query, arg...); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "log query failed")
	}

	return nil
}
// Update overrides existing logs data.
// Like Create, it reads the full stream into memory before writing the row.
func (s *logStore) Update(ctx context.Context, stepID int64, r io.Reader) error {
	const logUpdateStmt = `
		UPDATE logs
		SET
			log_data = :log_data
		WHERE log_id = :log_id`
	data, err := io.ReadAll(r)
	if err != nil {
		return fmt.Errorf("could not read log data: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(logUpdateStmt, &logs{ID: stepID, Data: data})
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind log object")
	}

	_, err = db.ExecContext(ctx, query, arg...)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to update log")
	}

	return nil
}
// Delete deletes a log given a log ID.
// Deleting a non-existing log is not reported as an error by the query itself.
func (s *logStore) Delete(ctx context.Context, stepID int64) error {
	const logDeleteStmt = `
		DELETE FROM logs
		WHERE log_id = $1`

	db := dbtx.GetAccessor(ctx, s.db)

	if _, err := db.ExecContext(ctx, logDeleteStmt, stepID); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Could not delete log")
	}

	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/cache/repo_id.go | app/store/cache/repo_id.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cache
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/cache"
"github.com/harness/gitness/types"
)
// NewRepoIDCache returns a cache mapping repository IDs to repository core
// objects, wired to evict entries when repository or space cores change.
func NewRepoIDCache(
	appCtx context.Context,
	repoStore store.RepoStore,
	evictorSpace Evictor[*types.SpaceCore],
	evictorRepo Evictor[*types.RepositoryCore],
	dur time.Duration,
) store.RepoIDCache {
	repoCache := cache.New[int64, *types.RepositoryCore](repoIDCacheGetter{repoStore: repoStore}, dur)

	// A space update can affect any cached repo below that space or its parents. Rather
	// than digging through the cache for the affected entries, clear it entirely: space
	// core updates (identifier/path changes) are rare, so this is justified.
	evictorSpace.Subscribe(appCtx, func(*types.SpaceCore) error {
		repoCache.EvictAll(appCtx)
		return nil
	})

	// A repository update evicts exactly that repository's entry.
	evictorRepo.Subscribe(appCtx, func(core *types.RepositoryCore) error {
		repoCache.Evict(appCtx, core.ID)
		return nil
	})

	return repoCache
}
// repoIDCacheGetter loads repository cores from the repo store by repository ID;
// it serves as the cache-miss source for the repo ID cache.
type repoIDCacheGetter struct {
	repoStore store.RepoStore
}
// Find fetches the repository by ID and returns its core representation.
func (c repoIDCacheGetter) Find(ctx context.Context, repoID int64) (*types.RepositoryCore, error) {
	repo, err := c.repoStore.Find(ctx, repoID)
	if err == nil {
		return repo.Core(), nil
	}
	return nil, fmt.Errorf("failed to find repo by ID: %w", err)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/cache/repo_ref.go | app/store/cache/repo_ref.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cache
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/cache"
"github.com/harness/gitness/types"
)
// NewRepoRefCache returns a cache mapping (space ID, repo identifier) keys to
// repository IDs, wired to evict entries when repository or space cores change.
func NewRepoRefCache(
	appCtx context.Context,
	repoStore store.RepoStore,
	evictorSpace Evictor[*types.SpaceCore],
	evictorRepo Evictor[*types.RepositoryCore],
	dur time.Duration,
) store.RepoRefCache {
	refCache := cache.New[types.RepoCacheKey, int64](repoCacheGetter{repoStore: repoStore}, dur)

	// A space update can affect any cached repo below that space or its parents. Rather
	// than digging through the cache for the affected keys, clear it entirely: space
	// core updates (identifier/path changes) are rare, so this is justified.
	evictorSpace.Subscribe(appCtx, func(*types.SpaceCore) error {
		refCache.EvictAll(appCtx)
		return nil
	})

	// A repository update evicts exactly that repository's key.
	evictorRepo.Subscribe(appCtx, func(core *types.RepositoryCore) error {
		key := types.RepoCacheKey{
			SpaceID:        core.ParentID,
			RepoIdentifier: core.Identifier,
		}
		refCache.Evict(appCtx, key)
		return nil
	})

	return refCache
}
// repoCacheGetter resolves repository cache keys (space ID + identifier) to
// repository IDs via the repo store; it is the cache-miss source for the ref cache.
type repoCacheGetter struct {
	repoStore store.RepoStore
}
// Find resolves the cache key to the ID of the matching active repository.
func (c repoCacheGetter) Find(ctx context.Context, repoKey types.RepoCacheKey) (int64, error) {
	repo, err := c.repoStore.FindActiveByUID(ctx, repoKey.SpaceID, repoKey.RepoIdentifier)
	if err == nil {
		return repo.ID, nil
	}
	return 0, fmt.Errorf("failed to find repo by space ID and repo uid: %w", err)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/cache/wire.go | app/store/cache/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cache
import (
"context"
"time"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/cache"
"github.com/harness/gitness/pubsub"
"github.com/harness/gitness/types"
"github.com/google/wire"
)
// WireSetSpace provides a wire set for this package.
// It wires the space-related caches and their eviction plumbing.
var WireSetSpace = wire.NewSet(
	ProvidePrincipalInfoCache,
	ProvideEvictorSpaceCore,
	ProvideSpaceIDCache,
	ProvideSpacePathCache,
	ProvideInfraProviderResourceCache,
)
// WireSetRepo provides a repository related wire set for this package.
// It wires the repository caches and the repository eviction plumbing.
var WireSetRepo = wire.NewSet(
	ProvideEvictorRepositoryCore,
	ProvideRepoIDCache,
	ProvideRepoRefCache,
)
// Cache lifetimes for the different cached entities.
const (
	principalInfoCacheDuration = 30 * time.Second
	spaceCacheDuration         = 15 * time.Minute
	repositoryCacheDuration    = 15 * time.Minute
)
// Pubsub namespace and topics used to broadcast cache eviction events across nodes.
const (
	pubsubNamespace            = "cache-evictor"
	pubsubTopicSpaceCoreUpdate = "space-core-update"
	pubsubTopicRepoCoreUpdate  = "repo-core-update"
)
// ProvideEvictorSpaceCore provides the evictor that broadcasts space core updates.
func ProvideEvictorSpaceCore(pubsub pubsub.PubSub) Evictor[*types.SpaceCore] {
	return NewEvictor[*types.SpaceCore](pubsubNamespace, pubsubTopicSpaceCoreUpdate, pubsub)
}
// ProvideEvictorRepositoryCore provides the evictor that broadcasts repository core updates.
func ProvideEvictorRepositoryCore(pubsub pubsub.PubSub) Evictor[*types.RepositoryCore] {
	return NewEvictor[*types.RepositoryCore](pubsubNamespace, pubsubTopicRepoCoreUpdate, pubsub)
}
// ProvidePrincipalInfoCache provides a cache for storing types.PrincipalInfo objects,
// keyed by principal ID.
func ProvidePrincipalInfoCache(getter store.PrincipalInfoView) store.PrincipalInfoCache {
	return cache.NewExtended[int64, *types.PrincipalInfo](getter, principalInfoCacheDuration)
}
// ProvideSpaceIDCache provides a cache for storing types.SpaceCore objects, keyed by space ID.
func ProvideSpaceIDCache(
	appCtx context.Context,
	spaceStore store.SpaceStore,
	evictor Evictor[*types.SpaceCore],
) store.SpaceIDCache {
	return NewSpaceIDCache(appCtx, spaceStore, evictor, spaceCacheDuration)
}
// ProvideSpacePathCache provides a cache for storing routing paths and their types.SpacePath objects.
func ProvideSpacePathCache(
	appCtx context.Context,
	pathStore store.SpacePathStore,
	evictor Evictor[*types.SpaceCore],
	spacePathTransformation store.SpacePathTransformation,
) store.SpacePathCache {
	return New(appCtx, pathStore, spacePathTransformation, evictor, spaceCacheDuration)
}
// ProvideRepoIDCache provides a cache for storing types.RepositoryCore objects, keyed by repository ID.
func ProvideRepoIDCache(
	appCtx context.Context,
	repoStore store.RepoStore,
	evictorSpace Evictor[*types.SpaceCore],
	evictorRepo Evictor[*types.RepositoryCore],
) store.RepoIDCache {
	return NewRepoIDCache(appCtx, repoStore, evictorSpace, evictorRepo, repositoryCacheDuration)
}
// ProvideRepoRefCache provides a cache that resolves repository references
// (space ID + repo identifier) to repository IDs.
func ProvideRepoRefCache(
	appCtx context.Context,
	repoStore store.RepoStore,
	evictorSpace Evictor[*types.SpaceCore],
	evictorRepo Evictor[*types.RepositoryCore],
) store.RepoRefCache {
	return NewRepoRefCache(appCtx, repoStore, evictorSpace, evictorRepo, repositoryCacheDuration)
}
// ProvideInfraProviderResourceCache provides a cache for storing types.InfraProviderResource objects,
// keyed by resource ID.
func ProvideInfraProviderResourceCache(getter store.InfraProviderResourceView) store.InfraProviderResourceCache {
	// Named for consistency with the other cache durations declared in this package.
	const infraProviderResourceCacheDuration = 5 * time.Minute
	return cache.NewExtended[int64, *types.InfraProviderResource](getter, infraProviderResourceCacheDuration)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/cache/evictor.go | app/store/cache/evictor.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cache
import (
"bytes"
"context"
"encoding/gob"
"github.com/harness/gitness/pubsub"
"github.com/rs/zerolog/log"
)
// Evictor broadcasts cache eviction events carrying a key of type T over a
// pubsub bus, so that every subscriber (e.g. other nodes) can drop stale entries.
type Evictor[T any] struct {
	nameSpace string // pubsub channel namespace
	topicName string // pubsub topic the events are published on
	bus       pubsub.PubSub
}
// NewEvictor creates an Evictor that publishes and subscribes on the given
// pubsub namespace/topic pair. A nil bus yields a no-op evictor.
func NewEvictor[T any](
	nameSpace string,
	topicName string,
	bus pubsub.PubSub,
) Evictor[T] {
	e := Evictor[T]{
		nameSpace: nameSpace,
		topicName: topicName,
		bus:       bus,
	}
	return e
}
// Subscribe registers fn to be invoked with every key published on the
// evictor's topic. With a nil bus the call is a no-op.
func (e Evictor[T]) Subscribe(ctx context.Context, fn func(key T) error) {
	if e.bus == nil {
		return
	}

	handler := func(payload []byte) error {
		var key T
		if err := gob.NewDecoder(bytes.NewReader(payload)).Decode(&key); err != nil {
			log.Ctx(ctx).Warn().Err(err).Msgf("failed to process update event from type: %T", key)
			return err
		}
		return fn(key)
	}

	_ = e.bus.Subscribe(ctx, e.topicName, handler, pubsub.WithChannelNamespace(e.nameSpace))
}
// Evict publishes an eviction event for the given key so that all subscribers
// (including other nodes) drop the corresponding cache entries.
// With a nil bus the call is a no-op.
func (e Evictor[T]) Evict(ctx context.Context, key T) {
	if e.bus == nil {
		return
	}

	buf := bytes.NewBuffer(nil)
	if err := gob.NewEncoder(buf).Encode(key); err != nil {
		// Don't publish an empty/truncated payload - subscribers would fail to decode it.
		log.Ctx(ctx).Warn().Err(err).Msgf("failed to encode update event for type %T", key)
		return
	}

	err := e.bus.Publish(
		ctx,
		e.topicName,
		buf.Bytes(),
		pubsub.WithPublishNamespace(e.nameSpace),
	)
	if err != nil {
		log.Ctx(ctx).Warn().Err(err).Msgf("failed to publish update event for type %T", key)
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/cache/space_id.go | app/store/cache/space_id.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cache
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/cache"
"github.com/harness/gitness/types"
)
// NewSpaceIDCache returns a cache mapping space IDs to space core objects,
// wired to clear itself whenever any space core changes.
func NewSpaceIDCache(
	appCtx context.Context,
	spaceStore store.SpaceStore,
	evictor Evictor[*types.SpaceCore],
	dur time.Duration,
) store.SpaceIDCache {
	spaceCache := cache.New[int64, *types.SpaceCore](spaceIDCacheGetter{spaceStore: spaceStore}, dur)

	// A space update also invalidates all of its subspaces. Rather than digging through
	// the cache for descendants, clear everything: space core updates are rare, so
	// clearing the whole cache is justified.
	evictor.Subscribe(appCtx, func(*types.SpaceCore) error {
		spaceCache.EvictAll(appCtx)
		return nil
	})

	return spaceCache
}
// spaceIDCacheGetter loads space cores from the space store by space ID;
// it serves as the cache-miss source for the space ID cache.
type spaceIDCacheGetter struct {
	spaceStore store.SpaceStore
}
// Find fetches the space by ID and returns its core representation.
func (g spaceIDCacheGetter) Find(ctx context.Context, spaceID int64) (*types.SpaceCore, error) {
	space, err := g.spaceStore.Find(ctx, spaceID)
	if err == nil {
		return space.Core(), nil
	}
	return nil, fmt.Errorf("failed to find space by id: %w", err)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/cache/space_path.go | app/store/cache/space_path.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cache
import (
"context"
"time"
"github.com/harness/gitness/app/paths"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/cache"
"github.com/harness/gitness/types"
)
// pathCacheGetter is used to hook a spacePathStore as source of a PathCache.
// IMPORTANT: It assumes that the spacePathCache already transformed the key
// (see spacePathCache.Get, which normalizes the path before the lookup).
type pathCacheGetter struct {
	spacePathStore store.SpacePathStore
}
// New creates a SpacePathCache that resolves (transformed) space paths to
// *types.SpacePath values backed by the given path store, with entries
// expiring after dur. Whenever the evictor signals a space core change the
// entire cache is flushed: such a change invalidates the space's path and
// every sub-path, and core updates are rare enough that a full flush is the
// simplest correct reaction.
func New(
	appCtx context.Context,
	pathStore store.SpacePathStore,
	spacePathTransformation store.SpacePathTransformation,
	evictor Evictor[*types.SpaceCore],
	dur time.Duration,
) store.SpacePathCache {
	backing := cache.New[string, *types.SpacePath](&pathCacheGetter{spacePathStore: pathStore}, dur)

	evictor.Subscribe(appCtx, func(*types.SpaceCore) error {
		backing.EvictAll(appCtx)
		return nil
	})

	return spacePathCache{
		inner:                   backing,
		spacePathTransformation: spacePathTransformation,
	}
}
// Find loads the space path for the given key. The key must already be
// transformed (this getter is only reached through spacePathCache.Get).
func (g *pathCacheGetter) Find(ctx context.Context, key string) (*types.SpacePath, error) {
	spacePath, err := g.spacePathStore.FindByPath(ctx, key)
	if err != nil {
		return nil, err
	}

	return spacePath, nil
}
// spacePathCache is a decorator of a Cache required to handle path
// transformations: it normalizes raw path input into the unique cache key
// before delegating to the inner cache.
type spacePathCache struct {
	inner                   cache.Cache[string, *types.SpacePath]
	spacePathTransformation store.SpacePathTransformation
}
// Get normalizes the provided path into its unique cache key and looks it up
// in the inner cache.
func (c spacePathCache) Get(ctx context.Context, key string) (*types.SpacePath, error) {
	// Build the unique key segment by segment; the transformation is told
	// whether the segment is the topmost one (idx == 0).
	var uniqueKey string
	for idx, segment := range paths.Segments(key) {
		uniqueKey = paths.Concatenate(uniqueKey, c.spacePathTransformation(segment, idx == 0))
	}

	return c.inner.Get(ctx, uniqueKey)
}
// Stats delegates to the inner cache's statistics counters.
func (c spacePathCache) Stats() (int64, int64) {
	return c.inner.Stats()
}

// Evict removes the entry for the given key from the inner cache.
// NOTE(review): the key is passed through as-is, without the path
// transformation applied in Get — callers must supply the transformed key.
func (c spacePathCache) Evict(ctx context.Context, key string) {
	c.inner.Evict(ctx, key)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/space_test.go | app/store/database/space_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database_test
import (
"context"
"encoding/json"
"fmt"
"testing"
"time"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/stretchr/testify/require"
)
// TestDatabase_GetRootSpace verifies that GetRootSpace resolves every space of
// a nested hierarchy to the first-created root space (ID 1).
func TestDatabase_GetRootSpace(t *testing.T) {
	db, teardown := setupDB(t)
	defer teardown()
	principalStore, spaceStore, spacePathStore, _ := setupStores(t, db)

	ctx := context.Background()

	createUser(ctx, t, principalStore)

	numSpaces := createNestedSpaces(ctx, t, spaceStore, spacePathStore)
	for i := 1; i <= numSpaces; i++ {
		rootSpc, err := spaceStore.GetRootSpace(ctx, int64(i))
		// use testify's require for consistency with the other tests in this file.
		require.NoError(t, err, "failed to get root space for space %d", i)
		require.Equal(t, int64(1), rootSpc.ID, "space %d resolved to the wrong root", i)
	}
}
// TestSpaceStore_FindByIDs verifies that FindByIDs returns exactly the
// requested spaces, in ID order.
func TestSpaceStore_FindByIDs(t *testing.T) {
	db, teardown := setupDB(t)
	defer teardown()
	principalStore, spaceStore, spacePathStore, _ := setupStores(t, db)

	ctx := context.Background()

	createUser(ctx, t, principalStore)
	_ = createNestedSpaces(ctx, t, spaceStore, spacePathStore)

	spaces, err := spaceStore.FindByIDs(ctx, 4, 5, 6)
	require.NoError(t, err)
	require.Len(t, spaces, 3)

	for i, wantID := range []int64{4, 5, 6} {
		require.Equal(t, wantID, spaces[i].ID)
	}
}
// createNestedSpacesForStorageSize creates three root spaces, each with three
// nested subspaces, and returns the root spaces with their store-assigned IDs.
func createNestedSpacesForStorageSize(
	ctx context.Context,
	t *testing.T,
	spaceStore store.SpaceStore,
) []types.Space {
	// Create root spaces
	rootSpaces := []types.Space{
		{Identifier: "root-space-1", Description: "Root Space 1"},
		{Identifier: "root-space-2", Description: "Root Space 2"},
		{Identifier: "root-space-3", Description: "Root Space 3"},
	}

	// Iterate by index: ranging by value would hand Create a pointer to a loop
	// copy, so the IDs assigned by the store would never reach the returned
	// slice (callers would see zero IDs on every root space).
	for i := range rootSpaces {
		rootSpace := &rootSpaces[i]
		err := spaceStore.Create(ctx, rootSpace)
		require.NoError(t, err)

		// Create 3 nested subspaces for each root space.
		for j := 1; j <= 3; j++ {
			subSpace := types.Space{
				Identifier:  fmt.Sprintf("sub-space-%d-of-%s", j, rootSpace.Identifier),
				ParentID:    rootSpace.ID, // Set the parent ID to the root space ID
				Description: fmt.Sprintf("Sub Space %d of %s", j, rootSpace.Identifier),
			}
			err = spaceStore.Create(ctx, &subSpace)
			require.NoError(t, err)
		}
	}

	return rootSpaces
}
// createRepositoriesForSpaces builds the nested space fixture, then attaches
// one repository (size and LFS size both 100) to every space found in the DB,
// plus one additional soft-deleted repository under the first space. It
// returns the root spaces and the summed size of all non-deleted repositories.
func createRepositoriesForSpaces(
	ctx context.Context,
	t *testing.T,
	db dbtx.Accessor,
	repoStore store.RepoStore,
	spaceStore store.SpaceStore,
) (rootSpaces []types.Space, total int64) {
	rootSpaces = createNestedSpacesForStorageSize(ctx, t, spaceStore)

	// row mirrors the three columns selected from the spaces table below.
	type row struct {
		ID         int64
		Identifier string
		ParentID   *int64
	}

	// Directly query the database for all spaces
	var spaces []row
	query := "SELECT space_id, space_uid, space_parent_id FROM spaces"
	rows, err := db.QueryContext(ctx, query)
	require.NoError(t, err)
	defer rows.Close()

	for rows.Next() {
		var space row
		err := rows.Scan(
			&space.ID,
			&space.Identifier,
			&space.ParentID,
		)
		require.NoError(t, err)
		spaces = append(spaces, space)
	}
	require.NoError(t, rows.Err())

	defaultSize := int64(100)

	// Create one repository per space (root and nested alike) and set its size.
	for i, space := range spaces {
		t.Logf("Space ID: %d, Identifier: %s, Size: %d", space.ID, space.Identifier, defaultSize)
		repo := &types.Repository{
			ParentID:   space.ID,
			GitUID:     fmt.Sprintf("repo-%d", i),
			Identifier: fmt.Sprintf("repo-%d", i),
			Tags:       json.RawMessage(`{}`),
		}
		err := repoStore.Create(ctx, repo) // Assuming CreateRepository is defined
		require.NoError(t, err)
		err = repoStore.UpdateSize(ctx, repo.ID, defaultSize, defaultSize)
		require.NoError(t, err)
		total += defaultSize
	}

	// add one deleted repo — deliberately NOT counted into the returned total,
	// so callers can assert that size aggregation ignores soft-deleted repos.
	repo := &types.Repository{
		ParentID:   spaces[0].ID,
		GitUID:     "repo-deleted",
		Identifier: "repo-deleted",
		Tags:       json.RawMessage(`{}`),
	}
	err = repoStore.Create(ctx, repo) // Assuming CreateRepository is defined
	require.NoError(t, err)
	err = repoStore.UpdateSize(ctx, repo.ID, defaultSize, defaultSize)
	require.NoError(t, err)
	err = repoStore.SoftDelete(ctx, repo, time.Now().Unix())
	require.NoError(t, err)

	return rootSpaces, total
}
// TestSpaceStore_GetRootSpacesSize checks that GetRootSpacesSize aggregates
// repository sizes per root space and excludes soft-deleted repositories
// (the fixture adds one soft-deleted repo whose size is not in totalSize).
func TestSpaceStore_GetRootSpacesSize(t *testing.T) {
	db, teardown := setupDB(t)
	defer teardown()
	principalStore, spaceStore, _, repoStore := setupStores(t, db)

	ctx := context.Background()

	// Create a user for context
	createUser(ctx, t, principalStore)

	// Create repositories for each space
	rootSpaces, totalSize := createRepositoriesForSpaces(ctx, t, db, repoStore, spaceStore)

	// Call the GetRootSpacesSize function
	spaces, err := spaceStore.GetRootSpacesSize(ctx)
	require.NoError(t, err)

	// Verify the results
	require.NotNil(t, spaces)
	require.Greater(t, len(spaces), 0, "Expected at least one root space")

	// The fixture spreads equally-sized repositories evenly across the root
	// spaces, so each root space holds an equal share of the total.
	expectedSize := totalSize / int64(len(rootSpaces))
	// NOTE(review): this assumes GetRootSpacesSize returns root spaces in the
	// same order they were created — confirm the query guarantees ordering.
	for i, space := range rootSpaces {
		require.Equal(t, space.Identifier, spaces[i].Identifier)
		require.Equal(t, expectedSize, spaces[i].Size)
		require.Equal(t, expectedSize, spaces[i].LFSSize)
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/label_pullreq.go | app/store/database/label_pullreq.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/Masterminds/squirrel"
"github.com/gotidy/ptr"
"github.com/guregu/null"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
)
// Compile-time check: pullReqLabelStore implements PullReqLabelAssignmentStore.
var _ store.PullReqLabelAssignmentStore = (*pullReqLabelStore)(nil)

// NewPullReqLabelStore returns a new PullReqLabelAssignmentStore backed by a
// relational database.
func NewPullReqLabelStore(db *sqlx.DB) store.PullReqLabelAssignmentStore {
	return &pullReqLabelStore{
		db: db,
	}
}

// pullReqLabelStore implements store.PullReqLabelAssignmentStore.
type pullReqLabelStore struct {
	db *sqlx.DB
}
// pullReqLabel is the DB representation of a label assigned to a pull request.
type pullReqLabel struct {
	PullReqID    int64    `db:"pullreq_label_pullreq_id"`
	LabelID      int64    `db:"pullreq_label_label_id"`
	LabelValueID null.Int `db:"pullreq_label_label_value_id"` // nullable: a label can be assigned without a value
	Created      int64    `db:"pullreq_label_created"`
	Updated      int64    `db:"pullreq_label_updated"`
	CreatedBy    int64    `db:"pullreq_label_created_by"`
	UpdatedBy    int64    `db:"pullreq_label_updated_by"`
}

// pullReqAssignmentInfo is the joined row of a pull request label assignment
// together with its label data and (optional, nullable) label value data.
type pullReqAssignmentInfo struct {
	PullReqID  int64           `db:"pullreq_label_pullreq_id"`
	LabelID    int64           `db:"label_id"`
	LabelKey   string          `db:"label_key"`
	LabelColor enum.LabelColor `db:"label_color"`
	LabelScope int64           `db:"label_scope"`
	ValueCount int64           `db:"label_value_count"`
	ValueID    null.Int        `db:"label_value_id"`
	Value      null.String     `db:"label_value_value"`
	ValueColor null.String     `db:"label_value_color"` // gets converted to *enum.LabelColor
}
const (
	// pullReqLabelColumns lists the columns of the pullreq_labels table in the
	// order used by the INSERT and SELECT statements in this file.
	pullReqLabelColumns = `
	pullreq_label_pullreq_id
	,pullreq_label_label_id
	,pullreq_label_label_value_id
	,pullreq_label_created
	,pullreq_label_updated
	,pullreq_label_created_by
	,pullreq_label_updated_by`
)
// Assign creates or updates the assignment of a label to a pull request
// (upsert). On conflict — the label is already assigned — only the value and
// the updated/updated-by fields are overwritten; the RETURNING clause feeds
// the original created/created-by values back into the passed label.
func (s *pullReqLabelStore) Assign(ctx context.Context, label *types.PullReqLabel) error {
	const sqlQuery = `
	INSERT INTO pullreq_labels (` + pullReqLabelColumns + `)
	values (
		:pullreq_label_pullreq_id
		,:pullreq_label_label_id
		,:pullreq_label_label_value_id
		,:pullreq_label_created
		,:pullreq_label_updated
		,:pullreq_label_created_by
		,:pullreq_label_updated_by
	)
	ON CONFLICT (pullreq_label_pullreq_id, pullreq_label_label_id)
	DO UPDATE SET
		pullreq_label_label_value_id = EXCLUDED.pullreq_label_label_value_id,
		pullreq_label_updated = EXCLUDED.pullreq_label_updated,
		pullreq_label_updated_by = EXCLUDED.pullreq_label_updated_by
	RETURNING pullreq_label_created, pullreq_label_created_by
	`

	db := dbtx.GetAccessor(ctx, s.db)

	// Bind the named parameters from the DB representation of the label.
	query, args, err := db.BindNamed(sqlQuery, mapInternalPullReqLabel(label))
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "failed to bind query")
	}

	if err = db.QueryRowContext(ctx, query, args...).Scan(&label.Created, &label.CreatedBy); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "failed to create pull request label")
	}

	return nil
}
// Unassign removes the label from the pull request. Deleting a label that is
// not currently assigned is not an error.
func (s *pullReqLabelStore) Unassign(ctx context.Context, pullreqID int64, labelID int64) error {
	const sqlQuery = `
	DELETE FROM pullreq_labels
	WHERE pullreq_label_pullreq_id = $1 AND pullreq_label_label_id = $2`

	db := dbtx.GetAccessor(ctx, s.db)

	_, err := db.ExecContext(ctx, sqlQuery, pullreqID, labelID)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "failed to delete pullreq label")
	}

	return nil
}
// FindByLabelID returns the assignment of the given label to the given pull
// request.
func (s *pullReqLabelStore) FindByLabelID(
	ctx context.Context,
	pullreqID int64,
	labelID int64,
) (*types.PullReqLabel, error) {
	const sqlQuery = `SELECT ` + pullReqLabelColumns + `
	FROM pullreq_labels
	WHERE pullreq_label_pullreq_id = $1 AND pullreq_label_label_id = $2`

	var assignment pullReqLabel

	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.GetContext(ctx, &assignment, sqlQuery, pullreqID, labelID); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to find pullreq label by id")
	}

	return mapPullReqLabel(&assignment), nil
}
// ListAssigned returns all labels assigned to the pull request, keyed by
// label ID, each bundled with its assigned value. The label_values join is a
// LEFT JOIN because an assignment may carry no value (value columns NULL).
func (s *pullReqLabelStore) ListAssigned(
	ctx context.Context,
	pullreqID int64,
) (map[int64]*types.LabelAssignment, error) {
	const sqlQuery = `
	SELECT
		label_id
		,label_repo_id
		,label_space_id
		,label_key
		,label_value_id
		,label_value_label_id
		,label_value_value
		,label_color
		,label_value_color
		,label_scope
		,label_type
	FROM pullreq_labels
	INNER JOIN labels ON pullreq_label_label_id = label_id
	LEFT JOIN label_values ON pullreq_label_label_value_id = label_value_id
	WHERE pullreq_label_pullreq_id = $1`

	db := dbtx.GetAccessor(ctx, s.db)

	// Each row scans into a combined label + (possibly NULL) value struct.
	var dst []*struct {
		labelInfo
		labelValueInfo
	}
	if err := db.SelectContext(ctx, &dst, sqlQuery, pullreqID); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to list assigned label")
	}

	ret := make(map[int64]*types.LabelAssignment, len(dst))
	for _, res := range dst {
		li := mapLabelInfo(&res.labelInfo)
		lvi := mapLabeValuelInfo(&res.labelValueInfo)
		ret[li.ID] = &types.LabelAssignment{
			LabelInfo:     *li,
			AssignedValue: lvi,
		}
	}

	return ret, nil
}
// ListAssignedByPullreqIDs returns label assignment info for a batch of pull
// requests, grouped by pull request ID. Pull requests without assignments are
// simply absent from the returned map. The label_values join is a LEFT JOIN
// because an assignment may carry no value.
func (s *pullReqLabelStore) ListAssignedByPullreqIDs(
	ctx context.Context,
	pullreqIDs []int64,
) (map[int64][]*types.LabelPullReqAssignmentInfo, error) {
	stmt := database.Builder.Select(`
		 pullreq_label_pullreq_id
		,label_id
		,label_key
		,label_color
		,label_scope
		,label_value_count
		,label_value_id
		,label_value_value
		,label_value_color
	`).
		From("pullreq_labels").
		InnerJoin("labels ON pullreq_label_label_id = label_id").
		LeftJoin("label_values ON pullreq_label_label_value_id = label_value_id").
		Where(squirrel.Eq{"pullreq_label_pullreq_id": pullreqIDs})

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert query to sql")
	}

	db := dbtx.GetAccessor(ctx, s.db)

	var dst []*pullReqAssignmentInfo
	if err := db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to list assigned label")
	}

	return mapPullReqAssignmentInfos(dst), nil
}
// FindValueByLabelID returns the label value currently assigned to the pull
// request for the given label.
func (s *pullReqLabelStore) FindValueByLabelID(
	ctx context.Context,
	pullreqID int64,
	labelID int64,
) (*types.LabelValue, error) {
	const sqlQuery = `SELECT label_value_id, ` + labelValueColumns + `
	FROM pullreq_labels
	JOIN label_values ON pullreq_label_label_value_id = label_value_id
	WHERE pullreq_label_pullreq_id = $1 AND pullreq_label_label_id = $2`

	var value labelValue

	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.GetContext(ctx, &value, sqlQuery, pullreqID, labelID); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find label")
	}

	return mapLabelValue(&value), nil
}
// CountPullreqAssignments returns, for each of the given label IDs, the number
// of pull requests the label is assigned to. Labels with zero assignments are
// absent from the returned map.
func (s *pullReqLabelStore) CountPullreqAssignments(
	ctx context.Context,
	labelIDs []int64,
) (map[int64]int64, error) {
	// Alias the aggregate explicitly: without "AS count" the result column name
	// is driver-dependent (e.g. SQLite reports the expression text itself),
	// which breaks scanning into the `db:"count"` field below.
	stmt := database.Builder.Select("pullreq_label_label_id, COUNT(pullreq_label_pullreq_id) AS count").
		From("pullreq_labels").
		Where(squirrel.Eq{"pullreq_label_label_id": labelIDs}).
		GroupBy("pullreq_label_label_id")

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert query to sql")
	}

	db := dbtx.GetAccessor(ctx, s.db)

	var dst []struct {
		LabelID int64 `db:"pullreq_label_label_id"`
		Count   int64 `db:"count"`
	}
	if err := db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to count PR label assignments")
	}

	counts := make(map[int64]int64, len(dst))
	for _, res := range dst {
		counts[res.LabelID] = res.Count
	}

	return counts, nil
}
// mapInternalPullReqLabel converts the API pull request label into its DB
// representation (nil ValueID becomes a NULL null.Int).
func mapInternalPullReqLabel(in *types.PullReqLabel) *pullReqLabel {
	return &pullReqLabel{
		PullReqID:    in.PullReqID,
		LabelID:      in.LabelID,
		LabelValueID: null.IntFromPtr(in.ValueID),
		CreatedBy:    in.CreatedBy,
		UpdatedBy:    in.UpdatedBy,
		Created:      in.Created,
		Updated:      in.Updated,
	}
}

// mapPullReqLabel converts the DB representation back into the API type
// (NULL LabelValueID becomes a nil pointer).
func mapPullReqLabel(in *pullReqLabel) *types.PullReqLabel {
	return &types.PullReqLabel{
		PullReqID: in.PullReqID,
		LabelID:   in.LabelID,
		ValueID:   in.LabelValueID.Ptr(),
		CreatedBy: in.CreatedBy,
		UpdatedBy: in.UpdatedBy,
		Created:   in.Created,
		Updated:   in.Updated,
	}
}
// mapPullReqAssignmentInfo converts a joined DB assignment row into the API
// type, exposing the nullable value color as a typed pointer.
func mapPullReqAssignmentInfo(in *pullReqAssignmentInfo) *types.LabelPullReqAssignmentInfo {
	out := &types.LabelPullReqAssignmentInfo{
		PullReqID:  in.PullReqID,
		LabelID:    in.LabelID,
		LabelKey:   in.LabelKey,
		LabelColor: in.LabelColor,
		LabelScope: in.LabelScope,
		ValueCount: in.ValueCount,
		ValueID:    in.ValueID.Ptr(),
		Value:      in.Value.Ptr(),
	}

	// Only populate the color when the DB column was non-NULL.
	if in.ValueColor.Valid {
		color := enum.LabelColor(in.ValueColor.String)
		out.ValueColor = &color
	}

	return out
}
// mapPullReqAssignmentInfos groups the assignment rows by pull request ID.
func mapPullReqAssignmentInfos(
	dbLabels []*pullReqAssignmentInfo,
) map[int64][]*types.LabelPullReqAssignmentInfo {
	byPullReq := make(map[int64][]*types.LabelPullReqAssignmentInfo)
	for _, row := range dbLabels {
		byPullReq[row.PullReqID] = append(byPullReq[row.PullReqID], mapPullReqAssignmentInfo(row))
	}
	return byPullReq
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/execution_map.go | app/store/database/execution_map.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"github.com/harness/gitness/types"
)
// mapInternalToExecution converts a DB execution row into the API type.
// The Params column is stored as JSON; it is unmarshalled here, and the
// function fails if the payload cannot be decoded.
func mapInternalToExecution(in *execution) (*types.Execution, error) {
	var params map[string]string
	err := in.Params.Unmarshal(&params)
	if err != nil {
		return nil, err
	}
	return &types.Execution{
		ID:           in.ID,
		PipelineID:   in.PipelineID,
		CreatedBy:    in.CreatedBy,
		RepoID:       in.RepoID,
		Trigger:      in.Trigger,
		Number:       in.Number,
		Parent:       in.Parent,
		Status:       in.Status,
		Error:        in.Error,
		Event:        in.Event,
		Action:       in.Action,
		Link:         in.Link,
		Timestamp:    in.Timestamp,
		Title:        in.Title,
		Message:      in.Message,
		Before:       in.Before,
		After:        in.After,
		Ref:          in.Ref,
		Fork:         in.Fork,
		Source:       in.Source,
		Target:       in.Target,
		Author:       in.Author,
		AuthorName:   in.AuthorName,
		AuthorEmail:  in.AuthorEmail,
		AuthorAvatar: in.AuthorAvatar,
		Sender:       in.Sender,
		Params:       params,
		Cron:         in.Cron,
		Deploy:       in.Deploy,
		DeployID:     in.DeployID,
		Debug:        in.Debug,
		Started:      in.Started,
		Finished:     in.Finished,
		Created:      in.Created,
		Updated:      in.Updated,
		Version:      in.Version,
	}, nil
}
// mapExecutionToInternal converts an API execution into its DB representation.
// Params is JSON-encoded for storage (the inverse of mapInternalToExecution).
func mapExecutionToInternal(in *types.Execution) *execution {
	return &execution{
		ID:           in.ID,
		PipelineID:   in.PipelineID,
		CreatedBy:    in.CreatedBy,
		RepoID:       in.RepoID,
		Trigger:      in.Trigger,
		Number:       in.Number,
		Parent:       in.Parent,
		Status:       in.Status,
		Error:        in.Error,
		Event:        in.Event,
		Action:       in.Action,
		Link:         in.Link,
		Timestamp:    in.Timestamp,
		Title:        in.Title,
		Message:      in.Message,
		Before:       in.Before,
		After:        in.After,
		Ref:          in.Ref,
		Fork:         in.Fork,
		Source:       in.Source,
		Target:       in.Target,
		Author:       in.Author,
		AuthorName:   in.AuthorName,
		AuthorEmail:  in.AuthorEmail,
		AuthorAvatar: in.AuthorAvatar,
		Sender:       in.Sender,
		Params:       EncodeToSQLXJSON(in.Params),
		Cron:         in.Cron,
		Deploy:       in.Deploy,
		DeployID:     in.DeployID,
		Debug:        in.Debug,
		Started:      in.Started,
		Finished:     in.Finished,
		Created:      in.Created,
		Updated:      in.Updated,
		Version:      in.Version,
	}
}
// mapInternalToExecutionList converts a slice of DB executions into API types,
// failing on the first row whose params payload cannot be decoded.
func mapInternalToExecutionList(in []*execution) ([]*types.Execution, error) {
	out := make([]*types.Execution, 0, len(in))
	for _, dbExec := range in {
		exec, err := mapInternalToExecution(dbExec)
		if err != nil {
			return nil, err
		}
		out = append(out, exec)
	}
	return out, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/wire.go | app/store/database/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/app/store/database/migrate"
"github.com/harness/gitness/job"
"github.com/harness/gitness/store/database"
"github.com/google/wire"
"github.com/jmoiron/sqlx"
)
// WireSet provides a wire set for this package: the database connection and
// every store provider defined below.
var WireSet = wire.NewSet(
	ProvideDatabase,
	ProvidePrincipalStore,
	ProvideUserGroupStore,
	ProvideUserGroupReviewerStore,
	ProvidePrincipalInfoView,
	ProvideInfraProviderResourceView,
	ProvideSpacePathStore,
	ProvideSpaceStore,
	ProvideRepoStore,
	ProvideLinkRepoStore,
	ProvideBranchStore,
	ProvideRuleStore,
	ProvideJobStore,
	ProvideExecutionStore,
	ProvidePipelineStore,
	ProvideStageStore,
	ProvideStepStore,
	ProvideSecretStore,
	ProvideMembershipStore,
	ProvideTokenStore,
	ProvidePullReqStore,
	ProvidePullReqActivityStore,
	ProvideCodeCommentView,
	ProvidePullReqReviewStore,
	ProvidePullReqReviewerStore,
	ProvidePullReqFileViewStore,
	ProvideWebhookStore,
	ProvideWebhookExecutionStore,
	ProvideSettingsStore,
	ProvidePublicAccessStore,
	ProvideCheckStore,
	ProvideConnectorStore,
	ProvideTemplateStore,
	ProvideTriggerStore,
	ProvidePluginStore,
	ProvidePublicKeyStore,
	ProvidePublicKeySubKeyStore,
	ProvideGitSignatureResultStore,
	ProvideInfraProviderConfigStore,
	ProvideInfraProviderResourceStore,
	ProvideGitspaceConfigStore,
	ProvideGitspaceInstanceStore,
	ProvideGitspaceEventStore,
	ProvideLabelStore,
	ProvideLabelValueStore,
	ProvidePullReqLabelStore,
	ProvideLFSObjectStore,
	ProvideInfraProviderTemplateStore,
	ProvideInfraProvisionedStore,
	ProvideUsageMetricStore,
	ProvideCDEGatewayStore,
	ProvideFavoriteStore,
	ProvideGitspaceSettingsStore,
	ProvideAITaskStore,
)
// migrator is a helper function to set up the database by performing automated
// database migration steps.
func migrator(ctx context.Context, db *sqlx.DB) error {
	return migrate.Migrate(ctx, db)
}

// ProvideDatabase provides a database connection, running migrations on connect.
func ProvideDatabase(ctx context.Context, config database.Config) (*sqlx.DB, error) {
	return database.ConnectAndMigrate(
		ctx,
		config.Driver,
		config.Datasource,
		migrator,
	)
}
// ProvidePrincipalStore provides a principal store.
func ProvidePrincipalStore(db *sqlx.DB, uidTransformation store.PrincipalUIDTransformation) store.PrincipalStore {
	return NewPrincipalStore(db, uidTransformation)
}

// ProvideUserGroupStore provides a user group store.
func ProvideUserGroupStore(db *sqlx.DB) store.UserGroupStore {
	return NewUserGroupStore(db)
}

// ProvideUserGroupReviewerStore provides a usergroup reviewer store.
func ProvideUserGroupReviewerStore(
	db *sqlx.DB,
	pInfoCache store.PrincipalInfoCache,
	userGroupStore store.UserGroupStore,
) store.UserGroupReviewerStore {
	return NewUsergroupReviewerStore(db, pInfoCache, userGroupStore)
}

// ProvidePrincipalInfoView provides a principal info view.
func ProvidePrincipalInfoView(db *sqlx.DB) store.PrincipalInfoView {
	return NewPrincipalInfoView(db)
}

// ProvideInfraProviderResourceView provides an infraprovider resource view.
func ProvideInfraProviderResourceView(
	db *sqlx.DB, spaceStore store.SpaceStore,
) store.InfraProviderResourceView {
	return NewInfraProviderResourceView(db, spaceStore)
}

// ProvideSpacePathStore provides a space path store.
func ProvideSpacePathStore(
	db *sqlx.DB,
	spacePathTransformation store.SpacePathTransformation,
) store.SpacePathStore {
	return NewSpacePathStore(db, spacePathTransformation)
}

// ProvideSpaceStore provides a space store.
func ProvideSpaceStore(
	db *sqlx.DB,
	spacePathCache store.SpacePathCache,
	spacePathStore store.SpacePathStore,
) store.SpaceStore {
	return NewSpaceStore(db, spacePathCache, spacePathStore)
}

// ProvideRepoStore provides a repo store.
func ProvideRepoStore(
	db *sqlx.DB,
	spacePathCache store.SpacePathCache,
	spacePathStore store.SpacePathStore,
	spaceStore store.SpaceStore,
) store.RepoStore {
	return NewRepoStore(db, spacePathCache, spacePathStore, spaceStore)
}

// ProvideLinkRepoStore provides a linked repo store.
func ProvideLinkRepoStore(
	db *sqlx.DB,
) store.LinkedRepoStore {
	return NewLinkedRepoStore(db)
}

// ProvideRuleStore provides a rule store.
func ProvideRuleStore(
	db *sqlx.DB,
	principalInfoCache store.PrincipalInfoCache,
) store.RuleStore {
	return NewRuleStore(db, principalInfoCache)
}

// ProvideJobStore provides a job store.
func ProvideJobStore(db *sqlx.DB) job.Store {
	return NewJobStore(db)
}

// ProvidePipelineStore provides a pipeline store.
func ProvidePipelineStore(db *sqlx.DB) store.PipelineStore {
	return NewPipelineStore(db)
}

// ProvideInfraProviderConfigStore provides an infraprovider config store.
func ProvideInfraProviderConfigStore(
	db *sqlx.DB,
	spaceIDCache store.SpaceIDCache,
) store.InfraProviderConfigStore {
	return NewInfraProviderConfigStore(db, spaceIDCache)
}

// ProvideInfraProviderResourceStore provides an infraprovider resource store.
func ProvideInfraProviderResourceStore(
	db *sqlx.DB,
	spaceIDCache store.SpaceIDCache,
) store.InfraProviderResourceStore {
	return NewInfraProviderResourceStore(db, spaceIDCache)
}

// ProvideGitspaceConfigStore provides a gitspace config store.
func ProvideGitspaceConfigStore(
	db *sqlx.DB,
	pCache store.PrincipalInfoCache,
	rCache store.InfraProviderResourceCache,
	spaceIDCache store.SpaceIDCache,
) store.GitspaceConfigStore {
	return NewGitspaceConfigStore(db, pCache, rCache, spaceIDCache)
}

// ProvideGitspaceSettingsStore provides a gitspace settings store.
func ProvideGitspaceSettingsStore(db *sqlx.DB) store.GitspaceSettingsStore {
	return NewGitspaceSettingsStore(db)
}

// ProvideGitspaceInstanceStore provides a gitspace instance store.
func ProvideGitspaceInstanceStore(
	db *sqlx.DB,
	spaceIDCache store.SpaceIDCache,
) store.GitspaceInstanceStore {
	return NewGitspaceInstanceStore(db, spaceIDCache)
}

// ProvideStageStore provides a stage store.
func ProvideStageStore(db *sqlx.DB) store.StageStore {
	return NewStageStore(db)
}

// ProvideStepStore provides a step store.
func ProvideStepStore(db *sqlx.DB) store.StepStore {
	return NewStepStore(db)
}

// ProvideSecretStore provides a secret store.
func ProvideSecretStore(db *sqlx.DB) store.SecretStore {
	return NewSecretStore(db)
}

// ProvideConnectorStore provides a connector store.
func ProvideConnectorStore(db *sqlx.DB, secretStore store.SecretStore) store.ConnectorStore {
	return NewConnectorStore(db, secretStore)
}

// ProvideTemplateStore provides a template store.
func ProvideTemplateStore(db *sqlx.DB) store.TemplateStore {
	return NewTemplateStore(db)
}

// ProvideTriggerStore provides a trigger store.
func ProvideTriggerStore(db *sqlx.DB) store.TriggerStore {
	return NewTriggerStore(db)
}

// ProvideExecutionStore provides an execution store.
func ProvideExecutionStore(db *sqlx.DB) store.ExecutionStore {
	return NewExecutionStore(db)
}

// ProvidePluginStore provides a plugin store.
func ProvidePluginStore(db *sqlx.DB) store.PluginStore {
	return NewPluginStore(db)
}

// ProvideMembershipStore provides a membership store.
func ProvideMembershipStore(
	db *sqlx.DB,
	principalInfoCache store.PrincipalInfoCache,
	spacePathStore store.SpacePathStore,
	spaceStore store.SpaceStore,
) store.MembershipStore {
	return NewMembershipStore(db, principalInfoCache, spacePathStore, spaceStore)
}

// ProvideTokenStore provides a token store.
func ProvideTokenStore(db *sqlx.DB) store.TokenStore {
	return NewTokenStore(db)
}

// ProvidePullReqStore provides a pull request store.
func ProvidePullReqStore(
	db *sqlx.DB,
	principalInfoCache store.PrincipalInfoCache,
) store.PullReqStore {
	return NewPullReqStore(db, principalInfoCache)
}

// ProvidePullReqActivityStore provides a pull request activity store.
func ProvidePullReqActivityStore(
	db *sqlx.DB,
	principalInfoCache store.PrincipalInfoCache,
) store.PullReqActivityStore {
	return NewPullReqActivityStore(db, principalInfoCache)
}

// ProvideCodeCommentView provides a code comment view.
func ProvideCodeCommentView(db *sqlx.DB) store.CodeCommentView {
	return NewCodeCommentView(db)
}

// ProvidePullReqReviewStore provides a pull request review store.
func ProvidePullReqReviewStore(db *sqlx.DB) store.PullReqReviewStore {
	return NewPullReqReviewStore(db)
}

// ProvidePullReqReviewerStore provides a pull request reviewer store.
func ProvidePullReqReviewerStore(
	db *sqlx.DB,
	principalInfoCache store.PrincipalInfoCache,
) store.PullReqReviewerStore {
	return NewPullReqReviewerStore(db, principalInfoCache)
}

// ProvidePullReqFileViewStore provides a pull request file view store.
func ProvidePullReqFileViewStore(db *sqlx.DB) store.PullReqFileViewStore {
	return NewPullReqFileViewStore(db)
}

// ProvideWebhookStore provides a webhook store.
func ProvideWebhookStore(db *sqlx.DB) store.WebhookStore {
	return NewWebhookStore(db)
}

// ProvideWebhookExecutionStore provides a webhook execution store.
func ProvideWebhookExecutionStore(db *sqlx.DB) store.WebhookExecutionStore {
	return NewWebhookExecutionStore(db)
}

// ProvideCheckStore provides a status check result store.
func ProvideCheckStore(
	db *sqlx.DB,
	principalInfoCache store.PrincipalInfoCache,
) store.CheckStore {
	return NewCheckStore(db, principalInfoCache)
}

// ProvideSettingsStore provides a settings store.
func ProvideSettingsStore(db *sqlx.DB) store.SettingsStore {
	return NewSettingsStore(db)
}

// ProvidePublicAccessStore provides a public access store.
func ProvidePublicAccessStore(db *sqlx.DB) store.PublicAccessStore {
	return NewPublicAccessStore(db)
}

// ProvidePublicKeyStore provides a public key store.
func ProvidePublicKeyStore(db *sqlx.DB) store.PublicKeyStore {
	return NewPublicKeyStore(db)
}

// ProvidePublicKeySubKeyStore provides a public key sub key store.
func ProvidePublicKeySubKeyStore(db *sqlx.DB) store.PublicKeySubKeyStore {
	return NewPublicKeySubKeyStore(db)
}

// ProvideGitSignatureResultStore provides a git signature result store.
func ProvideGitSignatureResultStore(db *sqlx.DB) store.GitSignatureResultStore {
	return NewGitSignatureResultStore(db)
}

// ProvideBranchStore provides a branch store.
func ProvideBranchStore(db *sqlx.DB) store.BranchStore {
	return NewBranchStore(db)
}

// ProvideGitspaceEventStore provides a gitspace event store.
func ProvideGitspaceEventStore(db *sqlx.DB) store.GitspaceEventStore {
	return NewGitspaceEventStore(db)
}

// ProvideLabelStore provides a label store.
func ProvideLabelStore(db *sqlx.DB) store.LabelStore {
	return NewLabelStore(db)
}

// ProvideLabelValueStore provides a label value store.
func ProvideLabelValueStore(db *sqlx.DB) store.LabelValueStore {
	return NewLabelValueStore(db)
}

// ProvidePullReqLabelStore provides a pull request label assignment store.
func ProvidePullReqLabelStore(db *sqlx.DB) store.PullReqLabelAssignmentStore {
	return NewPullReqLabelStore(db)
}

// ProvideLFSObjectStore provides an lfs object store.
func ProvideLFSObjectStore(db *sqlx.DB) store.LFSObjectStore {
	return NewLFSObjectStore(db)
}

// ProvideInfraProviderTemplateStore provides an infraprovider template store.
func ProvideInfraProviderTemplateStore(db *sqlx.DB) store.InfraProviderTemplateStore {
	return NewInfraProviderTemplateStore(db)
}

// ProvideInfraProvisionedStore provides a provisioned infra store.
func ProvideInfraProvisionedStore(db *sqlx.DB) store.InfraProvisionedStore {
	return NewInfraProvisionedStore(db)
}

// ProvideUsageMetricStore provides a usage metric store.
func ProvideUsageMetricStore(db *sqlx.DB) store.UsageMetricStore {
	return NewUsageMetricsStore(db)
}

// ProvideCDEGatewayStore provides a CDE gateway store.
func ProvideCDEGatewayStore(db *sqlx.DB) store.CDEGatewayStore {
	return NewCDEGatewayStore(db)
}

// ProvideFavoriteStore provides a favorite store.
func ProvideFavoriteStore(db *sqlx.DB) store.FavoriteStore {
	return NewFavoriteStore(db)
}

// ProvideAITaskStore provides an AI task store.
func ProvideAITaskStore(db *sqlx.DB) store.AITaskStore {
	return NewAITaskStore(db)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/pullreq_file_view_store.go | app/store/database/pullreq_file_view_store.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"time"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/Masterminds/squirrel"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
)
var _ store.PullReqFileViewStore = (*PullReqFileViewStore)(nil)
// NewPullReqFileViewStore returns a new PullReqFileViewStore.
func NewPullReqFileViewStore(
	db *sqlx.DB,
) *PullReqFileViewStore {
	return &PullReqFileViewStore{
		db: db,
	}
}

// PullReqFileViewStore implements store.PullReqFileViewStore backed by a relational database.
type PullReqFileViewStore struct {
	db *sqlx.DB
}

// pullReqFileView is the DB representation of a "file viewed" marker:
// which principal last viewed which file of a PR at which SHA.
type pullReqFileView struct {
	PullReqID   int64  `db:"pullreq_file_view_pullreq_id"`
	PrincipalID int64  `db:"pullreq_file_view_principal_id"`
	Path        string `db:"pullreq_file_view_path"`
	SHA         string `db:"pullreq_file_view_sha"`
	// Obsolete is set to true via MarkObsolete for files that changed since they were viewed.
	Obsolete bool  `db:"pullreq_file_view_obsolete"`
	Created  int64 `db:"pullreq_file_view_created"`
	Updated  int64 `db:"pullreq_file_view_updated"`
}

const (
	// pullReqFileViewsColumn lists all columns of the pullreq_file_views table
	// as selected/inserted by the queries in this file.
	pullReqFileViewsColumn = `
	pullreq_file_view_pullreq_id
	,pullreq_file_view_principal_id
	,pullreq_file_view_path
	,pullreq_file_view_sha
	,pullreq_file_view_obsolete
	,pullreq_file_view_created
	,pullreq_file_view_updated`
)
// Upsert inserts or updates the latest viewed sha for a file in a PR.
// The upsert key is (pullreq_id, principal_id, path); on conflict only the
// sha, obsolete flag, and updated timestamp change, while the original
// created timestamp is preserved and read back into view.Created.
func (s *PullReqFileViewStore) Upsert(ctx context.Context, view *types.PullReqFileView) error {
	const sqlQuery = `
	INSERT INTO pullreq_file_views (
		 pullreq_file_view_pullreq_id
		,pullreq_file_view_principal_id
		,pullreq_file_view_path
		,pullreq_file_view_sha
		,pullreq_file_view_obsolete
		,pullreq_file_view_created
		,pullreq_file_view_updated
	) VALUES (
		 :pullreq_file_view_pullreq_id
		,:pullreq_file_view_principal_id
		,:pullreq_file_view_path
		,:pullreq_file_view_sha
		,:pullreq_file_view_obsolete
		,:pullreq_file_view_created
		,:pullreq_file_view_updated
	)
	ON CONFLICT (pullreq_file_view_pullreq_id, pullreq_file_view_principal_id, pullreq_file_view_path) DO
	UPDATE SET
		 pullreq_file_view_updated = :pullreq_file_view_updated
		,pullreq_file_view_sha = :pullreq_file_view_sha
		,pullreq_file_view_obsolete = :pullreq_file_view_obsolete
	RETURNING pullreq_file_view_created`

	db := dbtx.GetAccessor(ctx, s.db)

	// Bind the named parameters from the DB representation of the view.
	query, arg, err := db.BindNamed(sqlQuery, mapToInternalPullreqFileView(view))
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind pullreq file view object")
	}

	// RETURNING gives us the row's (possibly pre-existing) creation timestamp.
	if err = db.QueryRowContext(ctx, query, arg...).Scan(&view.Created); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Upsert query failed")
	}

	return nil
}
// DeleteByFileForPrincipal removes the viewed-file entry of the given
// principal for a single file of the specified pull request.
func (s *PullReqFileViewStore) DeleteByFileForPrincipal(
	ctx context.Context,
	prID int64,
	principalID int64,
	filePath string,
) error {
	const deleteQuery = `
	DELETE from pullreq_file_views
	WHERE pullreq_file_view_pullreq_id = $1 AND
		  pullreq_file_view_principal_id = $2 AND
		  pullreq_file_view_path = $3`

	accessor := dbtx.GetAccessor(ctx, s.db)

	_, err := accessor.ExecContext(ctx, deleteQuery, prID, principalID, filePath)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "delete query failed")
	}

	return nil
}
// MarkObsolete updates all entries of the files as obsolete for the PR.
// Entries that are already obsolete are left untouched (their updated
// timestamp is not bumped).
func (s *PullReqFileViewStore) MarkObsolete(ctx context.Context, prID int64, filePaths []string) error {
	// No file paths means nothing can match - skip the DB round trip entirely.
	if len(filePaths) == 0 {
		return nil
	}

	stmt := database.Builder.
		Update("pullreq_file_views").
		Set("pullreq_file_view_obsolete", true).
		Set("pullreq_file_view_updated", time.Now().UnixMilli()).
		Where("pullreq_file_view_pullreq_id = ?", prID).
		Where(squirrel.Eq{"pullreq_file_view_path": filePaths}). // expands to an IN clause
		Where("pullreq_file_view_obsolete = ?", false)

	sql, args, err := stmt.ToSql()
	if err != nil {
		return errors.Wrap(err, "Failed to create sql query")
	}

	db := dbtx.GetAccessor(ctx, s.db)

	if _, err := db.ExecContext(ctx, sql, args...); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "failed to execute update query")
	}

	return nil
}
// List lists all files marked as viewed by the user for the specified PR.
func (s *PullReqFileViewStore) List(
	ctx context.Context,
	prID int64,
	principalID int64,
) ([]*types.PullReqFileView, error) {
	query, args, err := database.Builder.
		Select(pullReqFileViewsColumn).
		From("pullreq_file_views").
		Where("pullreq_file_view_pullreq_id = ?", prID).
		Where("pullreq_file_view_principal_id = ?", principalID).
		ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert query to sql")
	}

	accessor := dbtx.GetAccessor(ctx, s.db)

	var rows []*pullReqFileView
	if err = accessor.SelectContext(ctx, &rows, query, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to execute list query")
	}

	return mapToPullreqFileViews(rows), nil
}
// mapToInternalPullreqFileView converts the API type to its DB representation.
func mapToInternalPullreqFileView(view *types.PullReqFileView) *pullReqFileView {
	var internal pullReqFileView
	internal.PullReqID = view.PullReqID
	internal.PrincipalID = view.PrincipalID
	internal.Path = view.Path
	internal.SHA = view.SHA
	internal.Obsolete = view.Obsolete
	internal.Created = view.Created
	internal.Updated = view.Updated
	return &internal
}
// mapToPullreqFileView converts the DB representation back to the API type.
func mapToPullreqFileView(view *pullReqFileView) *types.PullReqFileView {
	var out types.PullReqFileView
	out.PullReqID = view.PullReqID
	out.PrincipalID = view.PrincipalID
	out.Path = view.Path
	out.SHA = view.SHA
	out.Obsolete = view.Obsolete
	out.Created = view.Created
	out.Updated = view.Updated
	return &out
}
// mapToPullreqFileViews converts a slice of DB rows to a slice of API types,
// preserving order.
func mapToPullreqFileViews(views []*pullReqFileView) []*types.PullReqFileView {
	result := make([]*types.PullReqFileView, 0, len(views))
	for _, v := range views {
		result = append(result, mapToPullreqFileView(v))
	}
	return result
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/gitspace_instance.go | app/store/database/gitspace_instance.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"database/sql"
"fmt"
"strings"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/Masterminds/squirrel"
"github.com/guregu/null"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
)
var _ store.GitspaceInstanceStore = (*gitspaceInstanceStore)(nil)
const (
	// gitspaceInstanceInsertColumns lists all columns except the
	// auto-generated primary key; the Values(...) call in Create must stay in
	// exactly this order.
	gitspaceInstanceInsertColumns = `
		gits_gitspace_config_id,
		gits_url,
		gits_state,
		gits_user_uid,
		gits_resource_usage,
		gits_space_id,
		gits_created,
		gits_updated,
		gits_last_used,
		gits_total_time_used,
		gits_access_type,
		gits_machine_user,
		gits_uid,
		gits_access_key_ref,
		gits_last_heartbeat,
		gits_active_time_started,
		gits_active_time_ended,
		gits_has_git_changes,
		gits_error_message,
		gits_ssh_command`

	// gitspaceInstanceSelectColumns additionally includes the primary key.
	gitspaceInstanceSelectColumns = "gits_id," + gitspaceInstanceInsertColumns

	gitspaceInstanceTable = `gitspaces`
)

// gitspaceInstance is the DB representation of a single gitspace instance row.
type gitspaceInstance struct {
	ID               int64                          `db:"gits_id"`
	GitSpaceConfigID int64                          `db:"gits_gitspace_config_id"`
	URL              null.String                    `db:"gits_url"`
	SSHCommand       null.String                    `db:"gits_ssh_command"`
	State            enum.GitspaceInstanceStateType `db:"gits_state"`
	// TODO: migrate to principal int64 id to use principal cache and consistent with Harness code.
	UserUID           string                  `db:"gits_user_uid"`
	ResourceUsage     null.String             `db:"gits_resource_usage"`
	SpaceID           int64                   `db:"gits_space_id"`
	LastUsed          null.Int                `db:"gits_last_used"`
	TotalTimeUsed     int64                   `db:"gits_total_time_used"`
	AccessType        enum.GitspaceAccessType `db:"gits_access_type"`
	AccessKeyRef      null.String             `db:"gits_access_key_ref"`
	MachineUser       null.String             `db:"gits_machine_user"`
	Identifier        string                  `db:"gits_uid"`
	Created           int64                   `db:"gits_created"`
	Updated           int64                   `db:"gits_updated"`
	LastHeartbeat     null.Int                `db:"gits_last_heartbeat"`
	ActiveTimeStarted null.Int                `db:"gits_active_time_started"`
	ActiveTimeEnded   null.Int                `db:"gits_active_time_ended"`
	HasGitChanges     null.Bool               `db:"gits_has_git_changes"`
	ErrorMessage      null.String             `db:"gits_error_message"`
}

// NewGitspaceInstanceStore returns a new GitspaceInstanceStore.
func NewGitspaceInstanceStore(
	db *sqlx.DB,
	spaceIDCache store.SpaceIDCache,
) store.GitspaceInstanceStore {
	return &gitspaceInstanceStore{
		db:           db,
		spaceIDCache: spaceIDCache,
	}
}

// gitspaceInstanceStore implements store.GitspaceInstanceStore backed by a
// relational database; spaceIDCache resolves space IDs to space paths.
type gitspaceInstanceStore struct {
	db           *sqlx.DB
	spaceIDCache store.SpaceIDCache
}
// FindTotalUsage sums the total active time (in the same time unit the
// gits_active_time_* columns use) of all instances in the given spaces,
// clamped to the window [fromTime, toTime]. Instances still running
// (NULL active_time_ended) are counted up to toTime.
func (g gitspaceInstanceStore) FindTotalUsage(
	ctx context.Context,
	fromTime int64,
	toTime int64,
	spaceIDs []int64,
) (int64, error) {
	// Non-postgres drivers (presumably SQLite) use scalar MAX/MIN instead of
	// GREATEST/LEAST.
	var greatest = "MAX"
	var least = "MIN"
	if g.db.DriverName() == "postgres" {
		greatest = "GREATEST"
		least = "LEAST"
	}
	// NOTE: the '?' placeholders inside the select expressions carry no bound
	// args of their own - squirrel treats Select() columns as plain strings -
	// so their values are prepended manually to the arg list below.
	innerQuery := squirrel.Select(
		greatest+"(gits_active_time_started, ?) AS effective_start_time",
		least+"(COALESCE(gits_active_time_ended, ?), ?) AS effective_end_time",
	).
		From(gitspaceInstanceTable).
		Where(
			squirrel.And{
				// The instance's active window overlaps [fromTime, toTime):
				// it started before the window end and either is still
				// running or ended after the window start.
				squirrel.Lt{"gits_active_time_started": toTime},
				squirrel.Or{
					squirrel.Expr("gits_active_time_ended IS NULL"),
					squirrel.Gt{"gits_active_time_ended": fromTime},
				},
				squirrel.Eq{"gits_space_id": spaceIDs},
			},
		)

	innerQry, innerArgs, err := innerQuery.ToSql()
	if err != nil {
		return 0, err
	}

	// The Dollar placeholder format rewrites every '?' - including the ones
	// embedded in the inner query text - when the outer ToSql renders.
	query := squirrel.
		Select("SUM(effective_end_time - effective_start_time) AS total_active_time").
		From("(" + innerQry + ") AS subquery").PlaceholderFormat(squirrel.Dollar)

	qry, _, err := query.ToSql()
	if err != nil {
		return 0, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}

	// Arg order must match placeholder order in the final SQL: the SELECT
	// column args first (fromTime, then toTime twice), then the WHERE args.
	args := append([]any{fromTime, toTime, toTime}, innerArgs...)

	var totalActiveTime sql.NullInt64
	db := dbtx.GetAccessor(ctx, g.db)
	err = db.GetContext(ctx, &totalActiveTime, qry, args...)
	if err != nil {
		return 0, err
	}
	// A NULL SUM (no overlapping instances) is reported as zero usage.
	if totalActiveTime.Valid {
		return totalActiveTime.Int64, nil
	}
	return 0, nil
}
// Find looks up a gitspace instance by its primary key.
func (g gitspaceInstanceStore) Find(ctx context.Context, id int64) (*types.GitspaceInstance, error) {
	sqlQuery, args, err := database.Builder.
		Select(gitspaceInstanceSelectColumns).
		From(gitspaceInstanceTable).
		Where("gits_id = ?", id).
		ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}

	dst := new(gitspaceInstance)
	if err := dbtx.GetAccessor(ctx, g.db).GetContext(ctx, dst, sqlQuery, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find gitspace %d", id)
	}

	return g.mapDBToGitspaceInstance(ctx, dst)
}
// FindByIdentifier looks up a gitspace instance by its unique identifier.
func (g gitspaceInstanceStore) FindByIdentifier(
	ctx context.Context,
	identifier string,
) (*types.GitspaceInstance, error) {
	sqlQuery, args, err := database.Builder.
		Select(gitspaceInstanceSelectColumns).
		From(gitspaceInstanceTable).
		Where("gits_uid = ?", identifier).
		ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}

	dst := new(gitspaceInstance)
	if err := dbtx.GetAccessor(ctx, g.db).GetContext(ctx, dst, sqlQuery, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find gitspace %s", identifier)
	}

	return g.mapDBToGitspaceInstance(ctx, dst)
}
// Create inserts a new gitspace instance row and writes the generated
// primary key back into gitspaceInstance.ID.
func (g gitspaceInstanceStore) Create(ctx context.Context, gitspaceInstance *types.GitspaceInstance) error {
	stmt := database.Builder.
		Insert(gitspaceInstanceTable).
		Columns(gitspaceInstanceInsertColumns).
		// The value order below must match gitspaceInstanceInsertColumns exactly.
		Values(
			gitspaceInstance.GitSpaceConfigID,
			gitspaceInstance.URL,
			gitspaceInstance.State,
			gitspaceInstance.UserID,
			gitspaceInstance.ResourceUsage,
			gitspaceInstance.SpaceID,
			gitspaceInstance.Created,
			gitspaceInstance.Updated,
			gitspaceInstance.LastUsed,
			gitspaceInstance.TotalTimeUsed,
			gitspaceInstance.AccessType,
			gitspaceInstance.MachineUser,
			gitspaceInstance.Identifier,
			gitspaceInstance.AccessKeyRef,
			gitspaceInstance.LastHeartbeat,
			gitspaceInstance.ActiveTimeStarted,
			gitspaceInstance.ActiveTimeEnded,
			gitspaceInstance.HasGitChanges,
			gitspaceInstance.ErrorMessage,
			gitspaceInstance.SSHCommand,
		).
		Suffix(ReturningClause + "gits_id")
	sql, args, err := stmt.ToSql()
	if err != nil {
		return errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	db := dbtx.GetAccessor(ctx, g.db)
	// Scan the returned primary key into the caller's struct.
	if err = db.QueryRowContext(ctx, sql, args...).Scan(&gitspaceInstance.ID); err != nil {
		return database.ProcessSQLErrorf(
			ctx, err, "gitspace instance query failed for %s", gitspaceInstance.Identifier)
	}
	return nil
}
// Update persists the mutable fields of a gitspace instance. State, total
// time used, and the updated timestamp are always written; pointer fields
// are only written when non-nil, so a nil pointer leaves the stored value
// untouched (partial update semantics).
func (g gitspaceInstanceStore) Update(
	ctx context.Context,
	gitspaceInstance *types.GitspaceInstance,
) error {
	// Only logs inconsistencies between state and active-time fields; it
	// never blocks the update.
	validateActiveTimeDetails(gitspaceInstance)
	stmt := database.Builder.
		Update(gitspaceInstanceTable).
		Set("gits_state", gitspaceInstance.State).
		Set("gits_total_time_used", gitspaceInstance.TotalTimeUsed).
		Set("gits_updated", gitspaceInstance.Updated).
		Where("gits_id = ?", gitspaceInstance.ID)

	// Conditionally set pointer fields
	if gitspaceInstance.LastUsed != nil {
		stmt = stmt.Set("gits_last_used", *gitspaceInstance.LastUsed)
	}
	if gitspaceInstance.LastHeartbeat != nil {
		stmt = stmt.Set("gits_last_heartbeat", *gitspaceInstance.LastHeartbeat)
	}
	if gitspaceInstance.URL != nil {
		stmt = stmt.Set("gits_url", *gitspaceInstance.URL)
	}
	if gitspaceInstance.ActiveTimeStarted != nil {
		stmt = stmt.Set("gits_active_time_started", *gitspaceInstance.ActiveTimeStarted)
	}
	if gitspaceInstance.ActiveTimeEnded != nil {
		stmt = stmt.Set("gits_active_time_ended", *gitspaceInstance.ActiveTimeEnded)
	}
	if gitspaceInstance.HasGitChanges != nil {
		stmt = stmt.Set("gits_has_git_changes", *gitspaceInstance.HasGitChanges)
	}
	if gitspaceInstance.ErrorMessage != nil {
		stmt = stmt.Set("gits_error_message", *gitspaceInstance.ErrorMessage)
	}
	if gitspaceInstance.SSHCommand != nil {
		stmt = stmt.Set("gits_ssh_command", *gitspaceInstance.SSHCommand)
	}

	sql, args, err := stmt.ToSql()
	if err != nil {
		return errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	db := dbtx.GetAccessor(ctx, g.db)
	if _, err := db.ExecContext(ctx, sql, args...); err != nil {
		return database.ProcessSQLErrorf(
			ctx, err, "Failed to update gitspace instance for %s", gitspaceInstance.Identifier)
	}
	return nil
}
// FindLatestByGitspaceConfigID returns the most recently created instance
// of the given gitspace config.
func (g gitspaceInstanceStore) FindLatestByGitspaceConfigID(
	ctx context.Context,
	gitspaceConfigID int64,
) (*types.GitspaceInstance, error) {
	stmt := database.Builder.
		Select(gitspaceInstanceSelectColumns).
		From(gitspaceInstanceTable).
		Where("gits_gitspace_config_id = ?", gitspaceConfigID).
		OrderBy("gits_created DESC").
		// Only the newest row is consumed (GetContext reads a single row);
		// LIMIT 1 lets the DB stop scanning early instead of materializing
		// every instance of the config.
		Limit(1)

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	gitspace := new(gitspaceInstance)
	db := dbtx.GetAccessor(ctx, g.db)
	if err := db.GetContext(ctx, gitspace, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(
			ctx, err, "Failed to find latest gitspace instance for %d", gitspaceConfigID)
	}
	return g.mapDBToGitspaceInstance(ctx, gitspace)
}
// List returns all gitspace instances matching the filter, oldest first.
func (g gitspaceInstanceStore) List(
	ctx context.Context,
	filter *types.GitspaceInstanceFilter,
) ([]*types.GitspaceInstance, error) {
	qry, args, err := addGitspaceInstanceFilter(
		database.Builder.
			Select(gitspaceInstanceSelectColumns).
			From(gitspaceInstanceTable).
			OrderBy("gits_created ASC"),
		filter,
	).ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}

	var instances []*gitspaceInstance
	if err := dbtx.GetAccessor(ctx, g.db).SelectContext(ctx, &instances, qry, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing gitspace instance list query")
	}

	return g.mapToGitspaceInstances(ctx, instances)
}
// Count returns the number of gitspace instances matching the filter.
func (g gitspaceInstanceStore) Count(ctx context.Context, filter *types.GitspaceInstanceFilter) (int64, error) {
	qry, args, err := addGitspaceInstanceFilter(
		database.Builder.
			Select("COUNT(*)").
			From(gitspaceInstanceTable),
		filter,
	).ToSql()
	if err != nil {
		return 0, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}

	var count int64
	row := dbtx.GetAccessor(ctx, g.db).QueryRowContext(ctx, qry, args...)
	if err := row.Scan(&count); err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "Failed executing custom count query")
	}

	return count, nil
}
// FindAllLatestByGitspaceConfigID returns, for each of the given config IDs,
// the most recently created gitspace instance (one row per config).
func (g gitspaceInstanceStore) FindAllLatestByGitspaceConfigID(
	ctx context.Context,
	gitspaceConfigIDs []int64,
) ([]*types.GitspaceInstance, error) {
	// An empty ID list yields a clause that matches nothing.
	var whereClause = "(1=0)"
	if len(gitspaceConfigIDs) > 0 {
		// The int64 IDs are inlined into the SQL rather than bound as args.
		// NOTE: this builder is plain squirrel.Select (not database.Builder),
		// so '?' placeholders would not be rewritten for postgres - inlining
		// keeps the statement arg-free. No injection risk: values are int64s.
		whereClause = fmt.Sprintf("gits_gitspace_config_id IN (%s)",
			strings.Trim(strings.Join(strings.Split(fmt.Sprint(gitspaceConfigIDs), " "), ","), "[]"))
	}
	// Rank each config's instances by creation time, newest first.
	baseSelect := squirrel.Select("*",
		"ROW_NUMBER() OVER (PARTITION BY gits_gitspace_config_id "+
			"ORDER BY gits_created DESC) AS rn").
		From(gitspaceInstanceTable).
		Where(whereClause)

	// Use the base select query in a common table expression (CTE)
	// and keep only the newest row (rn = 1) per config.
	stmt := squirrel.Select(gitspaceInstanceSelectColumns).
		FromSelect(baseSelect, "RankedRows").
		Where("rn = 1")

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}

	db := dbtx.GetAccessor(ctx, g.db)
	var dst []*gitspaceInstance
	if err := db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(
			ctx, err, "Failed executing all latest gitspace instance list query")
	}
	return g.mapToGitspaceInstances(ctx, dst)
}
// addGitspaceInstanceFilter applies the fields of the filter to the given
// select builder; zero-valued filter fields are ignored.
func addGitspaceInstanceFilter(
	stmt squirrel.SelectBuilder,
	filter *types.GitspaceInstanceFilter,
) squirrel.SelectBuilder {
	if !filter.AllowAllSpaces {
		stmt = stmt.Where(squirrel.Eq{"gits_space_id": filter.SpaceIDs})
	}

	if filter.UserIdentifier != "" {
		// FIX: the user identifier column is gits_user_uid (see the insert
		// columns and the gitspaceInstance db tags); the previous filter
		// referenced a nonexistent gits_user_id column.
		stmt = stmt.Where(squirrel.Eq{"gits_user_uid": filter.UserIdentifier})
	}

	if filter.LastHeartBeatBefore > 0 {
		stmt = stmt.Where(squirrel.Lt{"gits_last_heartbeat": filter.LastHeartBeatBefore})
	}

	if filter.LastUsedBefore > 0 {
		stmt = stmt.Where(squirrel.Lt{"gits_last_used": filter.LastUsedBefore})
	}

	if filter.LastUpdatedBefore > 0 {
		stmt = stmt.Where(squirrel.Lt{"gits_updated": filter.LastUpdatedBefore})
	}

	if len(filter.States) > 0 {
		stmt = stmt.Where(squirrel.Eq{"gits_state": filter.States})
	}

	if filter.Limit > 0 {
		stmt = stmt.Limit(database.Limit(filter.Limit))
	}

	return stmt
}
// mapDBToGitspaceInstance converts a DB row to the API type and resolves the
// instance's space path via the space ID cache.
func (g gitspaceInstanceStore) mapDBToGitspaceInstance(
	ctx context.Context,
	in *gitspaceInstance,
) (*types.GitspaceInstance, error) {
	res := toGitspaceInstance(in)
	spaceCore, err := g.spaceIDCache.Get(ctx, in.SpaceID)
	if err != nil {
		// Wrap with %w so callers can inspect the underlying cache error;
		// the previous version silently discarded it.
		return nil, fmt.Errorf("couldn't set space path to the gitspace instance in DB: %d: %w", in.SpaceID, err)
	}
	res.SpacePath = spaceCore.Path
	return res, nil
}
// mapToGitspaceInstances converts a slice of DB rows to API types, preserving
// order; it fails fast on the first mapping error.
func (g gitspaceInstanceStore) mapToGitspaceInstances(
	ctx context.Context,
	instances []*gitspaceInstance,
) ([]*types.GitspaceInstance, error) {
	res := make([]*types.GitspaceInstance, len(instances))
	for i, instance := range instances {
		mapped, err := g.mapDBToGitspaceInstance(ctx, instance)
		if err != nil {
			return nil, err
		}
		res[i] = mapped
	}
	return res, nil
}
// validateActiveTimeDetails sanity-checks the active-time bookkeeping fields
// of a gitspace instance against its state and logs a warning on mismatch.
// It only reports inconsistencies; it never blocks the update.
func validateActiveTimeDetails(gitspaceInstance *types.GitspaceInstance) {
	// deref unwraps an optional timestamp for logging. FIX: the previous
	// version passed the *int64 pointers directly to %d, which formats the
	// pointer ADDRESS as an integer instead of the timestamp value; the
	// messages now use %v on the dereferenced value (or <nil>).
	deref := func(p *int64) any {
		if p == nil {
			return nil
		}
		return *p
	}
	activeStart := deref(gitspaceInstance.ActiveTimeStarted)
	activeEnd := deref(gitspaceInstance.ActiveTimeEnded)

	// Starting/uninitialized instances must not have active time recorded yet.
	if (gitspaceInstance.State == enum.GitspaceInstanceStateStarting ||
		gitspaceInstance.State == enum.GitspaceInstanceStateUninitialized) &&
		(gitspaceInstance.ActiveTimeStarted != nil ||
			gitspaceInstance.ActiveTimeEnded != nil ||
			gitspaceInstance.TotalTimeUsed != 0) {
		log.Warn().Msgf("instance has incorrect active time, details: identifier %s state %s active time start "+
			"%v active time end %v total time used %d", gitspaceInstance.Identifier, gitspaceInstance.State,
			activeStart, activeEnd, gitspaceInstance.TotalTimeUsed)
	}
	// Running instances must have a start but neither an end nor a total yet.
	if (gitspaceInstance.State == enum.GitspaceInstanceStateRunning) &&
		(gitspaceInstance.ActiveTimeStarted == nil ||
			gitspaceInstance.ActiveTimeEnded != nil ||
			gitspaceInstance.TotalTimeUsed != 0) {
		log.Warn().Msgf(
			"instance is missing active time start or has incorrect end/total timestamps, details: "+
				" identifier %s state %s active time start %v active time end %v total time used %d", // nolint:goconst
			gitspaceInstance.Identifier, gitspaceInstance.State, activeStart,
			activeEnd, gitspaceInstance.TotalTimeUsed)
	}
	// Deleted/stopping instances must have start, end, and total all recorded.
	if (gitspaceInstance.State == enum.GitspaceInstanceStateDeleted ||
		gitspaceInstance.State == enum.GitspaceInstanceStateStopping) &&
		(gitspaceInstance.ActiveTimeStarted == nil ||
			gitspaceInstance.ActiveTimeEnded == nil ||
			gitspaceInstance.TotalTimeUsed == 0) {
		log.Warn().Msgf("instance is missing active time start/end/total timestamp, details: "+
			" identifier %s state %s active time start %v active time end %v total time used %d", // nolint:goconst
			gitspaceInstance.Identifier, gitspaceInstance.State, activeStart,
			activeEnd, gitspaceInstance.TotalTimeUsed)
	}
	// Errored instances must have start and end either both set or both unset.
	if gitspaceInstance.State == enum.GitspaceInstanceStateError &&
		(gitspaceInstance.ActiveTimeStarted == nil) != (gitspaceInstance.ActiveTimeEnded == nil) {
		log.Warn().Msgf("instance has incorrect active time start/end/total timestamp, details: "+
			" identifier %s state %s active time start %v active time end %v total time used %d", // nolint:goconst
			gitspaceInstance.Identifier, gitspaceInstance.State, activeStart,
			activeEnd, gitspaceInstance.TotalTimeUsed)
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/pullreq_reviewers.go | app/store/database/pullreq_reviewers.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/guregu/null"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
)
var _ store.PullReqReviewerStore = (*PullReqReviewerStore)(nil)
// maxPullRequestReviewers caps the number of reviewers returned by List
// as a memory safety limit.
const maxPullRequestReviewers = 100

// NewPullReqReviewerStore returns a new PullReqReviewerStore.
func NewPullReqReviewerStore(db *sqlx.DB,
	pCache store.PrincipalInfoCache) *PullReqReviewerStore {
	return &PullReqReviewerStore{
		db:     db,
		pCache: pCache,
	}
}

// PullReqReviewerStore implements store.PullReqReviewerStore backed by a relational database.
type PullReqReviewerStore struct {
	db *sqlx.DB
	// pCache resolves principal IDs to principal infos for AddedBy/Reviewer.
	pCache store.PrincipalInfoCache
}

// pullReqReviewer is used to fetch pull request reviewer data from the database.
type pullReqReviewer struct {
	PullReqID   int64 `db:"pullreq_reviewer_pullreq_id"`
	PrincipalID int64 `db:"pullreq_reviewer_principal_id"`
	CreatedBy   int64 `db:"pullreq_reviewer_created_by"`
	Created     int64 `db:"pullreq_reviewer_created"`
	Updated     int64 `db:"pullreq_reviewer_updated"`

	RepoID         int64                      `db:"pullreq_reviewer_repo_id"`
	Type           enum.PullReqReviewerType   `db:"pullreq_reviewer_type"`
	LatestReviewID null.Int                   `db:"pullreq_reviewer_latest_review_id"`
	ReviewDecision enum.PullReqReviewDecision `db:"pullreq_reviewer_review_decision"`
	SHA            string                     `db:"pullreq_reviewer_sha"`
}

const (
	// pullreqReviewerColumns lists all columns of the pullreq_reviewers table
	// as used by the queries in this file.
	pullreqReviewerColumns = `
		 pullreq_reviewer_pullreq_id
		,pullreq_reviewer_principal_id
		,pullreq_reviewer_created_by
		,pullreq_reviewer_created
		,pullreq_reviewer_updated
		,pullreq_reviewer_repo_id
		,pullreq_reviewer_type
		,pullreq_reviewer_latest_review_id
		,pullreq_reviewer_review_decision
		,pullreq_reviewer_sha`

	pullreqReviewerSelectBase = `
	SELECT` + pullreqReviewerColumns + `
	FROM pullreq_reviewers`
)
// Find finds the pull request reviewer by pull request id and principal id.
func (s *PullReqReviewerStore) Find(ctx context.Context, prID, principalID int64) (*types.PullReqReviewer, error) {
	const sqlQuery = pullreqReviewerSelectBase + `
	WHERE pullreq_reviewer_pullreq_id = $1 AND pullreq_reviewer_principal_id = $2`

	accessor := dbtx.GetAccessor(ctx, s.db)

	dst := new(pullReqReviewer)
	if err := accessor.GetContext(ctx, dst, sqlQuery, prID, principalID); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find pull request reviewer")
	}

	return s.mapPullReqReviewer(ctx, dst), nil
}
// Create creates a new pull request reviewer.
func (s *PullReqReviewerStore) Create(ctx context.Context, v *types.PullReqReviewer) error {
	const sqlQuery = `
	INSERT INTO pullreq_reviewers (
		 pullreq_reviewer_pullreq_id
		,pullreq_reviewer_principal_id
		,pullreq_reviewer_created_by
		,pullreq_reviewer_created
		,pullreq_reviewer_updated
		,pullreq_reviewer_repo_id
		,pullreq_reviewer_type
		,pullreq_reviewer_latest_review_id
		,pullreq_reviewer_review_decision
		,pullreq_reviewer_sha
	) values (
		 :pullreq_reviewer_pullreq_id
		,:pullreq_reviewer_principal_id
		,:pullreq_reviewer_created_by
		,:pullreq_reviewer_created
		,:pullreq_reviewer_updated
		,:pullreq_reviewer_repo_id
		,:pullreq_reviewer_type
		,:pullreq_reviewer_latest_review_id
		,:pullreq_reviewer_review_decision
		,:pullreq_reviewer_sha
	)`

	db := dbtx.GetAccessor(ctx, s.db)

	// Bind named parameters from the DB representation of the reviewer.
	query, arg, err := db.BindNamed(sqlQuery, mapInternalPullReqReviewer(v))
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind pull request reviewer object")
	}

	if _, err = db.ExecContext(ctx, query, arg...); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to insert pull request reviewer")
	}

	return nil
}
// Update updates the mutable fields of a pull request reviewer (latest review
// id, review decision, sha) and bumps the updated timestamp, which is also
// written back into v.Updated.
func (s *PullReqReviewerStore) Update(ctx context.Context, v *types.PullReqReviewer) error {
	const sqlQuery = `
	UPDATE pullreq_reviewers
	SET
		 pullreq_reviewer_updated = :pullreq_reviewer_updated
		,pullreq_reviewer_latest_review_id = :pullreq_reviewer_latest_review_id
		,pullreq_reviewer_review_decision = :pullreq_reviewer_review_decision
		,pullreq_reviewer_sha = :pullreq_reviewer_sha
	WHERE pullreq_reviewer_pullreq_id = :pullreq_reviewer_pullreq_id AND
		  pullreq_reviewer_principal_id = :pullreq_reviewer_principal_id`

	db := dbtx.GetAccessor(ctx, s.db)

	updatedAt := time.Now()

	dbv := mapInternalPullReqReviewer(v)
	dbv.Updated = updatedAt.UnixMilli()

	// FIX: error messages previously said "pull request activity" - a
	// copy-paste leftover from the activity store.
	query, arg, err := db.BindNamed(sqlQuery, dbv)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind pull request reviewer object")
	}

	_, err = db.ExecContext(ctx, query, arg...)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to update pull request reviewer")
	}

	v.Updated = dbv.Updated

	return nil
}
// Delete deletes the pull request reviewer.
func (s *PullReqReviewerStore) Delete(ctx context.Context, prID, reviewerID int64) error {
	const sqlQuery = `
	DELETE from pullreq_reviewers
	WHERE pullreq_reviewer_pullreq_id = $1 AND
		  pullreq_reviewer_principal_id = $2`

	accessor := dbtx.GetAccessor(ctx, s.db)

	_, err := accessor.ExecContext(ctx, sqlQuery, prID, reviewerID)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "delete reviewer query failed")
	}

	return nil
}
// List returns a list of pull reviewers for a pull request, oldest first,
// capped at maxPullRequestReviewers.
func (s *PullReqReviewerStore) List(ctx context.Context, prID int64) ([]*types.PullReqReviewer, error) {
	qry, args, err := database.Builder.
		Select(pullreqReviewerColumns).
		From("pullreq_reviewers").
		Where("pullreq_reviewer_pullreq_id = ?", prID).
		OrderBy("pullreq_reviewer_created asc").
		Limit(maxPullRequestReviewers). // memory safety limit
		ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert pull request reviewer list query to sql")
	}

	dst := make([]*pullReqReviewer, 0)
	if err = dbtx.GetAccessor(ctx, s.db).SelectContext(ctx, &dst, qry, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing pull request reviewer list query")
	}

	return s.mapSlicePullReqReviewer(ctx, dst)
}
// mapPullReqReviewer converts a DB reviewer row to the API type without
// resolving any principal infos.
func mapPullReqReviewer(v *pullReqReviewer) *types.PullReqReviewer {
	return &types.PullReqReviewer{
		PullReqID:      v.PullReqID,
		PrincipalID:    v.PrincipalID,
		CreatedBy:      v.CreatedBy,
		Created:        v.Created,
		Updated:        v.Updated,
		RepoID:         v.RepoID,
		Type:           v.Type,
		LatestReviewID: v.LatestReviewID.Ptr(),
		ReviewDecision: v.ReviewDecision,
		SHA:            v.SHA,
	}
}
// mapInternalPullReqReviewer converts the API type to its DB representation.
func mapInternalPullReqReviewer(v *types.PullReqReviewer) *pullReqReviewer {
	return &pullReqReviewer{
		PullReqID:      v.PullReqID,
		PrincipalID:    v.PrincipalID,
		CreatedBy:      v.CreatedBy,
		Created:        v.Created,
		Updated:        v.Updated,
		RepoID:         v.RepoID,
		Type:           v.Type,
		LatestReviewID: null.IntFromPtr(v.LatestReviewID),
		ReviewDecision: v.ReviewDecision,
		SHA:            v.SHA,
	}
}
// mapPullReqReviewer converts a DB reviewer row to the API type and resolves
// the AddedBy/Reviewer principal infos from the principal cache. Cache
// failures are logged but not fatal - the principal info is left empty.
func (s *PullReqReviewerStore) mapPullReqReviewer(ctx context.Context, v *pullReqReviewer) *types.PullReqReviewer {
	// Reuse the plain field mapping instead of duplicating it here; this keeps
	// the method consistent with mapSlicePullReqReviewer.
	m := mapPullReqReviewer(v)

	addedBy, err := s.pCache.Get(ctx, v.CreatedBy)
	if err != nil {
		log.Ctx(ctx).Err(err).Msg("failed to load PR reviewer addedBy")
	}
	if addedBy != nil {
		m.AddedBy = *addedBy
	}

	reviewer, err := s.pCache.Get(ctx, v.PrincipalID)
	if err != nil {
		log.Ctx(ctx).Err(err).Msg("failed to load PR reviewer principal")
	}
	if reviewer != nil {
		m.Reviewer = *reviewer
	}

	return m
}
// mapSlicePullReqReviewer converts a slice of DB reviewer rows to API types
// and resolves all referenced principal infos in a single cache call.
func (s *PullReqReviewerStore) mapSlicePullReqReviewer(ctx context.Context,
	reviewers []*pullReqReviewer) ([]*types.PullReqReviewer, error) {
	// gather every principal id referenced by the reviewers
	principalIDs := make([]int64, 0, 2*len(reviewers))
	for _, r := range reviewers {
		principalIDs = append(principalIDs, r.CreatedBy, r.PrincipalID)
	}

	// resolve all principal infos with one cache lookup
	infoMap, err := s.pCache.Map(ctx, principalIDs)
	if err != nil {
		return nil, fmt.Errorf("failed to load PR principal infos: %w", err)
	}

	// map each row and attach the resolved principal infos
	out := make([]*types.PullReqReviewer, len(reviewers))
	for i, r := range reviewers {
		out[i] = mapPullReqReviewer(r)
		if info, ok := infoMap[r.CreatedBy]; ok {
			out[i].AddedBy = *info
		}
		if info, ok := infoMap[r.PrincipalID]; ok {
			out[i].Reviewer = *info
		}
	}

	return out, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/cde_gateway.go | app/store/database/cde_gateway.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/Masterminds/squirrel"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
)
// Compile-time check that CDEGatewayStore satisfies the store interface.
var _ store.CDEGatewayStore = (*CDEGatewayStore)(nil)

const (
	// cdeGatewayIDColumn is the auto-generated primary key, excluded from inserts.
	cdeGatewayIDColumn      = `cgate_id`
	cdeGatewayInsertColumns = `
		cgate_name,
		cgate_group_name,
		cgate_space_id,
		cgate_infra_provider_config_id,
		cgate_region,
		cgate_zone,
		cgate_version,
		cgate_health,
		cgate_envoy_health,
		cgate_created,
		cgate_updated
	`
	cdeGatewaySelectColumns = cdeGatewayIDColumn + "," + cdeGatewayInsertColumns
	cdeGatewayTable         = `cde_gateways`
)
// NewCDEGatewayStore returns a new CDEGatewayStore backed by the given database.
func NewCDEGatewayStore(db *sqlx.DB) *CDEGatewayStore {
	return &CDEGatewayStore{
		db: db,
	}
}

// CDEGatewayStore implements store.CDEGatewayStore backed by a relational database.
type CDEGatewayStore struct {
	db *sqlx.DB // shared DB handle; per-call accessor resolved via dbtx
}
// Upsert inserts a CDE gateway row or, when a row with the same
// (space, infra-config, region, group, name) already exists, refreshes its
// health/envoy-health/zone/version and updated timestamp.
func (c *CDEGatewayStore) Upsert(ctx context.Context, in *types.CDEGateway) error {
	stmt := database.Builder.
		Insert(cdeGatewayTable).
		Columns(cdeGatewayInsertColumns).
		Values(
			in.Name,
			in.GroupName,
			in.SpaceID,
			in.InfraProviderConfigID,
			in.Region,
			in.Zone,
			in.Version,
			in.Health,
			in.EnvoyHealth,
			in.Created,
			in.Updated).
		// Identity columns stay fixed on conflict; only the mutable
		// health-report fields are overwritten.
		Suffix(`
            ON CONFLICT (cgate_space_id, cgate_infra_provider_config_id, cgate_region, cgate_group_name, cgate_name)
            DO UPDATE
            SET
			cgate_health = EXCLUDED.cgate_health,
			cgate_envoy_health = EXCLUDED.cgate_envoy_health,
			cgate_updated = EXCLUDED.cgate_updated,
			cgate_zone = EXCLUDED.cgate_zone,
			cgate_version = EXCLUDED.cgate_version`)
	sql, args, err := stmt.ToSql()
	if err != nil {
		return errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	db := dbtx.GetAccessor(ctx, c.db)
	if _, err = db.ExecContext(ctx, sql, args...); err != nil {
		return database.ProcessSQLErrorf(
			ctx, err, "cde gateway upsert create query failed for %s", in.Name)
	}
	return nil
}
// List returns CDE gateways matching the given filter; a nil filter returns
// all rows. Health filtering combines the reported health columns with report
// freshness: a gateway is "healthy" only when both health columns say so AND
// the last report is younger than HealthReportValidityInMins; it is
// "unhealthy" when the report is stale OR envoy reports unhealthy.
// With IsLatest set, only the most recently updated entry per region is kept.
func (c *CDEGatewayStore) List(ctx context.Context, filter *types.CDEGatewayFilter) ([]*types.CDEGateway, error) {
	stmt := database.Builder.
		Select(cdeGatewaySelectColumns).
		From(cdeGatewayTable)
	if filter != nil {
		if len(filter.InfraProviderConfigIDs) > 0 {
			stmt = stmt.Where(squirrel.Eq{"cgate_infra_provider_config_id": filter.InfraProviderConfigIDs})
		}
		// Reports older than this cutoff are considered stale.
		validityCutoff := time.Now().Add(
			-time.Duration(filter.HealthReportValidityInMins) * time.Minute).UnixMilli()
		switch filter.Health {
		case types.GatewayHealthHealthy:
			stmt = stmt.Where(squirrel.Eq{"cgate_health": filter.Health}).
				Where(squirrel.Eq{"cgate_envoy_health": filter.Health}).
				Where(squirrel.Gt{"cgate_updated": validityCutoff})
		case types.GatewayHealthUnhealthy:
			stmt = stmt.Where(
				squirrel.Or{
					squirrel.LtOrEq{"cgate_updated": validityCutoff},
					squirrel.Eq{"cgate_envoy_health": filter.Health},
				},
			)
		}
		if filter.IsLatest {
			// Join against the per-region maximum of cgate_updated so only
			// the newest row of each region survives.
			subQuery := squirrel.
				Select("MAX(cgate_updated) AS max_updated", "cgate_region AS max_region").
				From(cdeGatewayTable).
				GroupBy("cgate_region")
			subQuerySQL, subQueryArgs, err := subQuery.ToSql()
			if err != nil {
				return nil, errors.Wrap(err, "failed to build subquery for latest entries")
			}
			stmt = stmt.JoinClause(fmt.Sprintf(
				"JOIN (%s) AS latest ON latest.max_region = cgate_region AND latest.max_updated = cgate_updated", subQuerySQL),
				subQueryArgs...,
			)
		}
	}
	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	db := dbtx.GetAccessor(ctx, c.db)
	dst := new([]*cdeGateway)
	if err := db.SelectContext(ctx, dst, sql, args...); err != nil {
		// typo fix: "gatways" -> "gateways"
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to list cde gateways")
	}
	return entitiesToDTOs(*dst), nil
}
// cdeGateway is the internal DB representation of a CDE gateway row
// (column tags match the cde_gateways table).
type cdeGateway struct {
	ID                    int64  `db:"cgate_id"`
	Name                  string `db:"cgate_name"`
	GroupName             string `db:"cgate_group_name"`
	SpaceID               int64  `db:"cgate_space_id"`
	InfraProviderConfigID int64  `db:"cgate_infra_provider_config_id"`
	Region                string `db:"cgate_region"`
	Zone                  string `db:"cgate_zone"`
	Version               string `db:"cgate_version"`
	Health                string `db:"cgate_health"`
	EnvoyHealth           string `db:"cgate_envoy_health"`
	Created               int64  `db:"cgate_created"` // unix millis
	Updated               int64  `db:"cgate_updated"` // unix millis; also drives health staleness checks
}
// entitiesToDTOs converts a slice of internal gateway rows into API DTOs.
// A nil/empty input yields a nil slice.
func entitiesToDTOs(entities []*cdeGateway) []*types.CDEGateway {
	var result []*types.CDEGateway
	for i := range entities {
		result = append(result, entityToDTO(*entities[i]))
	}
	return result
}
// entityToDTO converts one internal gateway row to the API type.
// The internal ID is intentionally not copied to the DTO.
func entityToDTO(entity cdeGateway) *types.CDEGateway {
	dto := &types.CDEGateway{}
	dto.Name = entity.Name
	dto.GroupName = entity.GroupName
	dto.SpaceID = entity.SpaceID
	dto.InfraProviderConfigID = entity.InfraProviderConfigID
	dto.Region = entity.Region
	dto.Zone = entity.Zone
	dto.Version = entity.Version
	dto.Health = entity.Health
	dto.EnvoyHealth = entity.EnvoyHealth
	dto.Created = entity.Created
	dto.Updated = entity.Updated
	return dto
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/job.go | app/store/database/job.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"database/sql"
"errors"
"fmt"
"time"
"github.com/harness/gitness/job"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types/enum"
"github.com/jmoiron/sqlx"
)
// Compile-time check that JobStore satisfies the job.Store interface.
var _ job.Store = (*JobStore)(nil)

// NewJobStore returns a new JobStore backed by the given database.
func NewJobStore(db *sqlx.DB) *JobStore {
	return &JobStore{
		db: db,
	}
}

// JobStore implements job.Store backed by a relational database.
type JobStore struct {
	db *sqlx.DB
}
const (
	// jobColumns is the full column list of the jobs table; it is shared by
	// the SELECT base and the named INSERT statements so the two stay in sync.
	jobColumns = `
		 job_uid
		,job_created
		,job_updated
		,job_type
		,job_priority
		,job_data
		,job_result
		,job_max_duration_seconds
		,job_max_retries
		,job_state
		,job_scheduled
		,job_total_executions
		,job_run_by
		,job_run_deadline
		,job_run_progress
		,job_last_executed
		,job_is_recurring
		,job_recurring_cron
		,job_consecutive_failures
		,job_last_failure_error
		,job_group_id`

	jobSelectBase = `
	SELECT` + jobColumns + `
	FROM jobs`
)
// Find fetches a job by its unique identifier.
// Returns a not-found error (via ProcessSQLErrorf) when no row matches.
func (s *JobStore) Find(ctx context.Context, uid string) (*job.Job, error) {
	const sqlQuery = jobSelectBase + `
	WHERE job_uid = $1`

	db := dbtx.GetAccessor(ctx, s.db)

	result := &job.Job{}
	if err := db.GetContext(ctx, result, sqlQuery, uid); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find job by uid")
	}

	return result, nil
}
// DeleteByGroupID deletes all jobs for a group id and returns the number of
// deleted rows.
func (s *JobStore) DeleteByGroupID(ctx context.Context, groupID string) (int64, error) {
	stmt := database.Builder.
		Delete("jobs").
		Where("(job_group_id = ?)", groupID)

	sql, args, err := stmt.ToSql()
	if err != nil {
		return 0, fmt.Errorf("failed to convert delete by group id jobs query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	result, err := db.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "failed to execute delete jobs by group id query")
	}

	n, err := result.RowsAffected()
	if err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "failed to get number of deleted jobs in group")
	}

	return n, nil
}
// ListByGroupID fetches all jobs for a group id.
// Returns an empty slice (not nil) when the group has no jobs.
func (s *JobStore) ListByGroupID(ctx context.Context, groupID string) ([]*job.Job, error) {
	const sqlQuery = jobSelectBase + `
	WHERE job_group_id = $1`

	db := dbtx.GetAccessor(ctx, s.db)

	dst := make([]*job.Job, 0)
	if err := db.SelectContext(ctx, &dst, sqlQuery, groupID); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find job by group id")
	}

	return dst, nil
}
// Create creates a new job.
// The named parameters are bound from the job struct's db tags via BindNamed.
func (s *JobStore) Create(ctx context.Context, job *job.Job) error {
	const sqlQuery = `
	INSERT INTO jobs (` + jobColumns + `
	) VALUES (
		 :job_uid
		,:job_created
		,:job_updated
		,:job_type
		,:job_priority
		,:job_data
		,:job_result
		,:job_max_duration_seconds
		,:job_max_retries
		,:job_state
		,:job_scheduled
		,:job_total_executions
		,:job_run_by
		,:job_run_deadline
		,:job_run_progress
		,:job_last_executed
		,:job_is_recurring
		,:job_recurring_cron
		,:job_consecutive_failures
		,:job_last_failure_error
		,:job_group_id
	)`

	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(sqlQuery, job)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind job object")
	}

	if _, err := db.ExecContext(ctx, query, arg...); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Insert query failed")
	}

	return nil
}
// Upsert creates or updates a job. If the job didn't exist it will insert it in the database,
// otherwise it will update it but only if its definition has changed.
// The trailing WHERE on the DO UPDATE clause makes the update a no-op when
// none of the definition fields (type, priority, data, durations, retries,
// recurrence) differ from the stored row.
func (s *JobStore) Upsert(ctx context.Context, job *job.Job) error {
	const sqlQuery = `
	INSERT INTO jobs (` + jobColumns + `
	) VALUES (
		 :job_uid
		,:job_created
		,:job_updated
		,:job_type
		,:job_priority
		,:job_data
		,:job_result
		,:job_max_duration_seconds
		,:job_max_retries
		,:job_state
		,:job_scheduled
		,:job_total_executions
		,:job_run_by
		,:job_run_deadline
		,:job_run_progress
		,:job_last_executed
		,:job_is_recurring
		,:job_recurring_cron
		,:job_consecutive_failures
		,:job_last_failure_error
		,:job_group_id
	)
	ON CONFLICT (job_uid) DO
	UPDATE SET
		 job_updated = :job_updated
		,job_type = :job_type
		,job_priority = :job_priority
		,job_data = :job_data
		,job_result = :job_result
		,job_max_duration_seconds = :job_max_duration_seconds
		,job_max_retries = :job_max_retries
		,job_state = :job_state
		,job_scheduled = :job_scheduled
		,job_is_recurring = :job_is_recurring
		,job_recurring_cron = :job_recurring_cron
	WHERE
		jobs.job_type <> :job_type OR
		jobs.job_priority <> :job_priority OR
		jobs.job_data <> :job_data OR
		jobs.job_max_duration_seconds <> :job_max_duration_seconds OR
		jobs.job_max_retries <> :job_max_retries OR
		jobs.job_is_recurring <> :job_is_recurring OR
		jobs.job_recurring_cron <> :job_recurring_cron`

	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(sqlQuery, job)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind job object")
	}

	if _, err := db.ExecContext(ctx, query, arg...); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Upsert query failed")
	}

	return nil
}
// UpdateDefinition is used to update a job definition.
// Returns gitness_store.ErrResourceNotFound when no row with the uid exists.
func (s *JobStore) UpdateDefinition(ctx context.Context, job *job.Job) error {
	const sqlQuery = `
	UPDATE jobs
	SET
		 job_updated = :job_updated
		,job_type = :job_type
		,job_priority = :job_priority
		,job_data = :job_data
		,job_result = :job_result
		,job_max_duration_seconds = :job_max_duration_seconds
		,job_max_retries = :job_max_retries
		,job_state = :job_state
		,job_scheduled = :job_scheduled
		,job_is_recurring = :job_is_recurring
		,job_recurring_cron = :job_recurring_cron
		,job_group_id = :job_group_id
	WHERE job_uid = :job_uid`

	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(sqlQuery, job)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind job object for update")
	}

	result, err := db.ExecContext(ctx, query, arg...)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to update job definition")
	}

	count, err := result.RowsAffected()
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to get number of updated rows")
	}

	if count == 0 {
		return gitness_store.ErrResourceNotFound
	}

	return nil
}
// UpdateExecution is used to update a job before and after execution.
// Only execution-related columns (state, scheduling, run bookkeeping,
// failure counters) are touched; the definition columns stay as-is.
// Returns gitness_store.ErrResourceNotFound when no row with the uid exists.
func (s *JobStore) UpdateExecution(ctx context.Context, job *job.Job) error {
	const sqlQuery = `
	UPDATE jobs
	SET
		 job_updated = :job_updated
		,job_result = :job_result
		,job_state = :job_state
		,job_scheduled = :job_scheduled
		,job_total_executions = :job_total_executions
		,job_run_by = :job_run_by
		,job_run_deadline = :job_run_deadline
		,job_last_executed = :job_last_executed
		,job_consecutive_failures = :job_consecutive_failures
		,job_last_failure_error = :job_last_failure_error
	WHERE job_uid = :job_uid`

	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(sqlQuery, job)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind job object for update")
	}

	result, err := db.ExecContext(ctx, query, arg...)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to update job execution")
	}

	count, err := result.RowsAffected()
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to get number of updated rows")
	}

	if count == 0 {
		return gitness_store.ErrResourceNotFound
	}

	return nil
}
// UpdateProgress updates the progress (and intermediate result) of a job.
// The update only applies while the job is in the 'running' state; otherwise
// gitness_store.ErrResourceNotFound is returned.
func (s *JobStore) UpdateProgress(ctx context.Context, job *job.Job) error {
	const sqlQuery = `
	UPDATE jobs
	SET
		 job_updated = :job_updated
		,job_result = :job_result
		,job_run_progress = :job_run_progress
	WHERE job_uid = :job_uid AND job_state = 'running'`

	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(sqlQuery, job)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind job object for update")
	}

	result, err := db.ExecContext(ctx, query, arg...)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to update job progress")
	}

	count, err := result.RowsAffected()
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to get number of updated rows")
	}

	if count == 0 {
		return gitness_store.ErrResourceNotFound
	}

	return nil
}
// CountRunning returns number of jobs that are currently being run.
func (s *JobStore) CountRunning(ctx context.Context) (int, error) {
	stmt := database.Builder.
		Select("count(*)").
		From("jobs").
		Where("job_state = ?", enum.JobStateRunning)

	sql, args, err := stmt.ToSql()
	if err != nil {
		return 0, fmt.Errorf("failed to convert count running jobs query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	var count int64
	err = db.QueryRowContext(ctx, sql, args...).Scan(&count)
	if err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "failed executing count running jobs query")
	}

	return int(count), nil
}
// ListReady returns a list of jobs that are ready for execution:
// The jobs with state="scheduled" and scheduled time in the past.
// Results are ordered by priority (desc), then scheduled time, then uid,
// and capped at limit.
func (s *JobStore) ListReady(ctx context.Context, now time.Time, limit int) ([]*job.Job, error) {
	stmt := database.Builder.
		Select(jobColumns).
		From("jobs").
		Where("job_state = ?", enum.JobStateScheduled).
		Where("job_scheduled <= ?", now.UnixMilli()).
		OrderBy("job_priority desc, job_scheduled asc, job_uid asc").
		Limit(uint64(limit)) //nolint:gosec

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, fmt.Errorf("failed to convert list scheduled jobs query to sql: %w", err)
	}

	result := make([]*job.Job, 0)

	db := dbtx.GetAccessor(ctx, s.db)

	if err = db.SelectContext(ctx, &result, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to execute list scheduled jobs query")
	}

	return result, nil
}
// ListDeadlineExceeded returns a list of jobs that have exceeded their execution deadline,
// i.e. still marked running but with a run deadline in the past.
func (s *JobStore) ListDeadlineExceeded(ctx context.Context, now time.Time) ([]*job.Job, error) {
	stmt := database.Builder.
		Select(jobColumns).
		From("jobs").
		Where("job_state = ?", enum.JobStateRunning).
		Where("job_run_deadline < ?", now.UnixMilli()).
		OrderBy("job_run_deadline asc")

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, fmt.Errorf("failed to convert list overdue jobs query to sql: %w", err)
	}

	result := make([]*job.Job, 0)

	db := dbtx.GetAccessor(ctx, s.db)

	if err = db.SelectContext(ctx, &result, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to execute list overdue jobs query")
	}

	return result, nil
}
// NextScheduledTime returns a scheduled time of the next ready job or zero time if no such job exists.
// A missing row (sql.ErrNoRows) is not an error: the zero time.Time is returned.
func (s *JobStore) NextScheduledTime(ctx context.Context, now time.Time) (time.Time, error) {
	stmt := database.Builder.
		Select("job_scheduled").
		From("jobs").
		Where("job_state = ?", enum.JobStateScheduled).
		Where("job_scheduled > ?", now.UnixMilli()).
		OrderBy("job_scheduled asc").
		Limit(1)

	query, args, err := stmt.ToSql()
	if err != nil {
		return time.Time{}, fmt.Errorf("failed to convert next scheduled time query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	var result int64

	err = db.QueryRowContext(ctx, query, args...).Scan(&result)
	if errors.Is(err, sql.ErrNoRows) {
		return time.Time{}, nil
	}
	if err != nil {
		return time.Time{}, database.ProcessSQLErrorf(ctx, err, "failed to execute next scheduled time query")
	}

	return time.UnixMilli(result), nil
}
// DeleteOld removes non-recurring jobs that have finished execution or have failed
// (finished, failed, or canceled) and were last executed before olderThan.
// Returns the number of deleted rows.
func (s *JobStore) DeleteOld(ctx context.Context, olderThan time.Time) (int64, error) {
	stmt := database.Builder.
		Delete("jobs").
		Where("(job_state = ? OR job_state = ? OR job_state = ?)",
			enum.JobStateFinished, enum.JobStateFailed, enum.JobStateCanceled).
		Where("job_is_recurring = false").
		Where("job_last_executed < ?", olderThan.UnixMilli())

	sql, args, err := stmt.ToSql()
	if err != nil {
		return 0, fmt.Errorf("failed to convert delete done jobs query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	result, err := db.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "failed to execute delete done jobs query")
	}

	n, err := result.RowsAffected()
	if err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "failed to get number of deleted jobs")
	}

	return n, nil
}
// DeleteByUID deletes a job by its unique identifier.
// Deleting a non-existent job is not an error.
func (s *JobStore) DeleteByUID(ctx context.Context, jobUID string) error {
	stmt := database.Builder.
		Delete("jobs").
		Where("job_uid = ?", jobUID)

	sql, args, err := stmt.ToSql()
	if err != nil {
		return fmt.Errorf("failed to convert delete job query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	_, err = db.ExecContext(ctx, sql, args...)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "failed to execute delete job query")
	}

	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/stage_map.go | app/store/database/stage_map.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"database/sql"
"encoding/json"
"fmt"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
sqlxtypes "github.com/jmoiron/sqlx/types"
"github.com/pkg/errors"
)
// nullstep mirrors the steps table with nullable columns. It is used on the
// right side of a LEFT JOIN with stages, where a stage without steps yields
// all-NULL step columns.
type nullstep struct {
	ID            sql.NullInt64      `db:"step_id"`
	StageID       sql.NullInt64      `db:"step_stage_id"`
	Number        sql.NullInt64      `db:"step_number"`
	ParentGroupID sql.NullInt64      `db:"step_parent_group_id"` // NOTE(review): scanned nowhere and not copied by convertFromNullStep — confirm whether intentional
	Name          sql.NullString     `db:"step_name"`
	Status        sql.NullString     `db:"step_status"`
	Error         sql.NullString     `db:"step_error"`
	ErrIgnore     sql.NullBool       `db:"step_errignore"`
	ExitCode      sql.NullInt64      `db:"step_exit_code"`
	Started       sql.NullInt64      `db:"step_started"`
	Stopped       sql.NullInt64      `db:"step_stopped"`
	Version       sql.NullInt64      `db:"step_version"`
	DependsOn     sqlxtypes.JSONText `db:"step_depends_on"` // JSON array of step names
	Image         sql.NullString     `db:"step_image"`
	Detached      sql.NullBool       `db:"step_detached"`
	Schema        sql.NullString     `db:"step_schema"`
}
// convertFromNullStep converts a nullable step row (from a join) to the API
// step type; NULL columns collapse to their Go zero values. Callers should
// check nullstep.ID.Valid before converting — used for join operations where
// fields may be null.
func convertFromNullStep(nullstep *nullstep) (*types.Step, error) {
	var dependsOn []string
	err := json.Unmarshal(nullstep.DependsOn, &dependsOn)
	if err != nil {
		return nil, fmt.Errorf("could not unmarshal step.depends_on: %w", err)
	}

	return &types.Step{
		ID:        nullstep.ID.Int64,
		StageID:   nullstep.StageID.Int64,
		Number:    nullstep.Number.Int64,
		Name:      nullstep.Name.String,
		Status:    enum.ParseCIStatus(nullstep.Status.String),
		Error:     nullstep.Error.String,
		ErrIgnore: nullstep.ErrIgnore.Bool,
		ExitCode:  int(nullstep.ExitCode.Int64),
		Started:   nullstep.Started.Int64,
		Stopped:   nullstep.Stopped.Int64,
		Version:   nullstep.Version.Int64,
		DependsOn: dependsOn,
		Image:     nullstep.Image.String,
		Detached:  nullstep.Detached.Bool,
		Schema:    nullstep.Schema.String,
	}, nil
}
// mapInternalToStage converts the internal DB stage row to the API type,
// decoding the JSON-encoded depends_on and labels columns.
func mapInternalToStage(in *stage) (*types.Stage, error) {
	var dependsOn []string
	err := json.Unmarshal(in.DependsOn, &dependsOn)
	if err != nil {
		return nil, errors.Wrap(err, "could not unmarshal stage.depends_on")
	}
	var labels map[string]string
	err = json.Unmarshal(in.Labels, &labels)
	if err != nil {
		return nil, errors.Wrap(err, "could not unmarshal stage.labels")
	}
	return &types.Stage{
		ID:          in.ID,
		ExecutionID: in.ExecutionID,
		RepoID:      in.RepoID,
		Number:      in.Number,
		Name:        in.Name,
		Kind:        in.Kind,
		Type:        in.Type,
		Status:      in.Status,
		Error:       in.Error,
		ErrIgnore:   in.ErrIgnore,
		ExitCode:    in.ExitCode,
		Machine:     in.Machine,
		OS:          in.OS,
		Arch:        in.Arch,
		Variant:     in.Variant,
		Kernel:      in.Kernel,
		Limit:       in.Limit,
		LimitRepo:   in.LimitRepo,
		Started:     in.Started,
		Stopped:     in.Stopped,
		Created:     in.Created,
		Updated:     in.Updated,
		Version:     in.Version,
		OnSuccess:   in.OnSuccess,
		OnFailure:   in.OnFailure,
		DependsOn:   dependsOn,
		Labels:      labels,
	}, nil
}
// mapStageToInternal converts the API stage type to its internal DB row,
// JSON-encoding depends_on and labels (inverse of mapInternalToStage).
func mapStageToInternal(in *types.Stage) *stage {
	return &stage{
		ID:          in.ID,
		ExecutionID: in.ExecutionID,
		RepoID:      in.RepoID,
		Number:      in.Number,
		Name:        in.Name,
		Kind:        in.Kind,
		Type:        in.Type,
		Status:      in.Status,
		Error:       in.Error,
		ErrIgnore:   in.ErrIgnore,
		ExitCode:    in.ExitCode,
		Machine:     in.Machine,
		OS:          in.OS,
		Arch:        in.Arch,
		Variant:     in.Variant,
		Kernel:      in.Kernel,
		Limit:       in.Limit,
		LimitRepo:   in.LimitRepo,
		Started:     in.Started,
		Stopped:     in.Stopped,
		Created:     in.Created,
		Updated:     in.Updated,
		Version:     in.Version,
		OnSuccess:   in.OnSuccess,
		OnFailure:   in.OnFailure,
		DependsOn:   EncodeToSQLXJSON(in.DependsOn),
		Labels:      EncodeToSQLXJSON(in.Labels),
	}
}
// mapInternalToStageList converts a slice of internal stage rows to API
// types, failing on the first row that cannot be converted.
func mapInternalToStageList(in []*stage) ([]*types.Stage, error) {
	out := make([]*types.Stage, len(in))
	for idx := range in {
		converted, err := mapInternalToStage(in[idx])
		if err != nil {
			return nil, err
		}
		out[idx] = converted
	}
	return out, nil
}
// scanRowsWithSteps scans rows of a stage LEFT JOIN steps query and groups
// them into stages with nested step slices. Rows for the same stage must be
// adjacent in the result set (the grouping relies on the stage ID changing
// between consecutive rows). Stages without steps yield NULL step columns
// and get no step appended.
func scanRowsWithSteps(rows *sql.Rows) ([]*types.Stage, error) {
	defer rows.Close()
	stages := []*types.Stage{}
	var curr *types.Stage
	for rows.Next() {
		stage := new(types.Stage)
		step := new(nullstep)
		err := scanRowStep(rows, stage, step)
		if err != nil {
			return nil, err
		}
		// Start a new stage whenever the stage ID changes between rows.
		if curr == nil || curr.ID != stage.ID {
			curr = stage
			stages = append(stages, curr)
		}
		// A valid step ID means the LEFT JOIN matched an actual step row.
		if step.ID.Valid {
			convertedStep, err := convertFromNullStep(step)
			if err != nil {
				return nil, err
			}
			curr.Steps = append(curr.Steps, convertedStep)
		}
	}
	return stages, nil
}
// scanRowStep scans one row of a stage LEFT JOIN steps query into the given
// stage and nullable step. The Scan destination order must exactly match the
// column order of the caller's SELECT. JSON columns (stage depends_on/labels,
// step depends_on) are scanned into intermediates and unmarshalled afterwards;
// the step's depends_on is decoded only when a step row is actually present.
func scanRowStep(rows *sql.Rows, stage *types.Stage, step *nullstep) error {
	depJSON := sqlxtypes.JSONText{}
	labJSON := sqlxtypes.JSONText{}
	stepDepJSON := sqlxtypes.JSONText{}
	err := rows.Scan(
		&stage.ID,
		&stage.ExecutionID,
		&stage.RepoID,
		&stage.Number,
		&stage.Name,
		&stage.Kind,
		&stage.Type,
		&stage.Status,
		&stage.Error,
		&stage.ErrIgnore,
		&stage.ExitCode,
		&stage.Machine,
		&stage.OS,
		&stage.Arch,
		&stage.Variant,
		&stage.Kernel,
		&stage.Limit,
		&stage.LimitRepo,
		&stage.Started,
		&stage.Stopped,
		&stage.Created,
		&stage.Updated,
		&stage.Version,
		&stage.OnSuccess,
		&stage.OnFailure,
		&depJSON,
		&labJSON,
		&step.ID,
		&step.StageID,
		&step.Number,
		&step.Name,
		&step.Status,
		&step.Error,
		&step.ErrIgnore,
		&step.ExitCode,
		&step.Started,
		&step.Stopped,
		&step.Version,
		&stepDepJSON,
		&step.Image,
		&step.Detached,
		&step.Schema,
	)
	if err != nil {
		return fmt.Errorf("failed to scan row: %w", err)
	}
	err = json.Unmarshal(depJSON, &stage.DependsOn)
	if err != nil {
		return fmt.Errorf("failed to unmarshal depJSON: %w", err)
	}
	err = json.Unmarshal(labJSON, &stage.Labels)
	if err != nil {
		return fmt.Errorf("failed to unmarshal labJSON: %w", err)
	}
	if step.ID.Valid {
		// try to unmarshal step dependencies if step exists
		err = json.Unmarshal(stepDepJSON, &step.DependsOn)
		if err != nil {
			return fmt.Errorf("failed to unmarshal stepDepJSON: %w", err)
		}
	}
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/gitspace_config.go | app/store/database/gitspace_config.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"fmt"
"strings"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/Masterminds/squirrel"
"github.com/guregu/null"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
)
const (
	gitspaceConfigsTable = `gitspace_configs`
	// gitspaceConfigInsertColumns excludes the auto-generated gconf_id.
	gitspaceConfigInsertColumns = `
		gconf_uid,
		gconf_display_name,
		gconf_ide,
		gconf_infra_provider_resource_id,
		gconf_code_auth_type,
		gconf_code_auth_id,
		gconf_code_repo_type,
		gconf_code_repo_is_private,
		gconf_code_repo_url,
		gconf_devcontainer_path,
		gconf_branch,
		gconf_user_uid,
		gconf_space_id,
		gconf_created,
		gconf_updated,
		gconf_is_deleted,
		gconf_code_repo_ref,
		gconf_ssh_token_identifier,
		gconf_created_by,
		gconf_is_marked_for_deletion,
		gconf_is_marked_for_reset,
		gconf_is_marked_for_infra_reset,
		gconf_ai_agents`
	// ReturningClause is the SQL RETURNING keyword used to fetch generated IDs.
	ReturningClause             = "RETURNING "
	gitspaceConfigSelectColumns = "gconf_id," + gitspaceConfigInsertColumns
)
// gitspaceConfig is the internal DB representation of a gitspace
// configuration row (column tags match the gitspace_configs table).
type gitspaceConfig struct {
	ID                      int64                     `db:"gconf_id"`
	Identifier              string                    `db:"gconf_uid"`
	Name                    string                    `db:"gconf_display_name"`
	IDE                     enum.IDEType              `db:"gconf_ide"`
	InfraProviderResourceID int64                     `db:"gconf_infra_provider_resource_id"`
	CodeAuthType            string                    `db:"gconf_code_auth_type"`
	CodeRepoRef             null.String               `db:"gconf_code_repo_ref"`
	CodeAuthID              string                    `db:"gconf_code_auth_id"`
	CodeRepoType            enum.GitspaceCodeRepoType `db:"gconf_code_repo_type"`
	CodeRepoIsPrivate       bool                      `db:"gconf_code_repo_is_private"`
	CodeRepoURL             string                    `db:"gconf_code_repo_url"`
	DevcontainerPath        null.String               `db:"gconf_devcontainer_path"`
	Branch                  string                    `db:"gconf_branch"`
	// TODO: migrate to principal int64 id to use principal cache and consistent with Harness code.
	UserUID               string      `db:"gconf_user_uid"`
	SpaceID               int64       `db:"gconf_space_id"`
	Created               int64       `db:"gconf_created"`
	Updated               int64       `db:"gconf_updated"`
	IsDeleted             bool        `db:"gconf_is_deleted"`
	SSHTokenIdentifier    string      `db:"gconf_ssh_token_identifier"`
	CreatedBy             null.Int    `db:"gconf_created_by"`
	IsMarkedForDeletion   bool        `db:"gconf_is_marked_for_deletion"`
	IsMarkedForReset      bool        `db:"gconf_is_marked_for_reset"`
	IsMarkedForInfraReset bool        `db:"gconf_is_marked_for_infra_reset"`
	AIAgents              null.String `db:"gconf_ai_agents"`
}
// gitspaceConfigWithLatestInstance joins a config row with its most recent
// gitspace instance row (both sets of columns scanned from one joined query).
type gitspaceConfigWithLatestInstance struct {
	gitspaceConfig

	// gitspace instance information
	gitspaceInstance
}
// Compile-time check that gitspaceConfigStore satisfies the store interface.
var _ store.GitspaceConfigStore = (*gitspaceConfigStore)(nil)

// NewGitspaceConfigStore returns a new GitspaceConfigStore.
func NewGitspaceConfigStore(
	db *sqlx.DB,
	pCache store.PrincipalInfoCache,
	rCache store.InfraProviderResourceCache,
	spaceIDCache store.SpaceIDCache,
) store.GitspaceConfigStore {
	return &gitspaceConfigStore{
		db:           db,
		pCache:       pCache,
		rCache:       rCache,
		spaceIDCache: spaceIDCache,
	}
}

// gitspaceConfigStore implements store.GitspaceConfigStore backed by a
// relational database plus caches used when mapping rows to API types.
type gitspaceConfigStore struct {
	db           *sqlx.DB
	pCache       store.PrincipalInfoCache        // principal infos for created-by enrichment
	rCache       store.InfraProviderResourceCache // infra provider resources
	spaceIDCache store.SpaceIDCache
}
// Count returns the number of gitspace configs matching the filter. It joins
// each config against its latest instance (rn = 1 from the windowed subquery)
// so instance-dependent filters can apply.
func (s gitspaceConfigStore) Count(ctx context.Context, filter *types.GitspaceFilter) (int64, error) {
	db := dbtx.GetAccessor(ctx, s.db)
	gitsSelectStr := getLatestInstanceQuery()
	countStmt := squirrel.Select("COUNT(*)").
		From(gitspaceConfigsTable).
		LeftJoin("(" + gitsSelectStr +
			") AS gits ON gitspace_configs.gconf_id = gits.gits_gitspace_config_id AND gits.rn = 1").
		PlaceholderFormat(squirrel.Dollar)

	countStmt = addGitspaceFilter(countStmt, filter)
	countStmt = addGitspaceQueryFilter(countStmt, filter.QueryFilter)

	sql, args, err := countStmt.ToSql()
	if err != nil {
		return 0, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	var count int64
	err = db.QueryRowContext(ctx, sql, args...).Scan(&count)
	if err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "Failed executing custom count query")
	}
	return count, nil
}
// Find fetches a gitspace config by its internal ID. Soft-deleted configs are
// excluded unless includeDeleted is set.
func (s gitspaceConfigStore) Find(ctx context.Context, id int64, includeDeleted bool) (*types.GitspaceConfig, error) {
	stmt := database.Builder.
		Select(gitspaceConfigSelectColumns).
		From(gitspaceConfigsTable).
		Where("gconf_id = ?", id) //nolint:goconst
	if !includeDeleted {
		stmt = stmt.Where("gconf_is_deleted = ?", false)
	}
	dst := new(gitspaceConfig)
	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.GetContext(ctx, dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find gitspace config for %d", id)
	}
	return s.mapDBToGitspaceConfig(ctx, dst)
}
// FindByIdentifier fetches a non-deleted gitspace config by its identifier
// (matched case-insensitively) within the given space.
// NOTE(review): these Where clauses embed explicit $1/$2/$3 placeholders into
// a squirrel builder — verify the numbering stays correct if clauses are ever
// added or reordered.
func (s gitspaceConfigStore) FindByIdentifier(
	ctx context.Context,
	spaceID int64,
	identifier string,
) (*types.GitspaceConfig, error) {
	stmt := database.Builder.
		Select(gitspaceConfigSelectColumns).
		From(gitspaceConfigsTable).
		Where("LOWER(gconf_uid) = $1", strings.ToLower(identifier)).
		Where("gconf_space_id = $2", spaceID).
		Where("gconf_is_deleted = $3", false)
	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	dst := new(gitspaceConfig)
	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.GetContext(ctx, dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find gitspace config for %s", identifier)
	}
	return s.mapDBToGitspaceConfig(ctx, dst)
}
// FindAllByIdentifier fetches the gitspace configs with the given identifiers
// in a space and returns them ordered to match the input identifier order.
// Unlike FindByIdentifier, soft-deleted configs are not filtered out here.
func (s gitspaceConfigStore) FindAllByIdentifier(
	ctx context.Context,
	spaceID int64,
	identifiers []string,
) ([]types.GitspaceConfig, error) {
	stmt := database.Builder.
		Select(gitspaceConfigSelectColumns).
		From(gitspaceConfigsTable).
		Where(squirrel.Eq{"gconf_uid": identifiers}).
		Where(squirrel.Eq{"gconf_space_id": spaceID})

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}

	var dst []*gitspaceConfig
	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find gitspace configs, identifiers: %s",
			identifiers)
	}

	gitspaceConfigs, err := s.mapToGitspaceConfigs(ctx, dst)
	if err != nil {
		return nil, err
	}

	// restore the caller-specified ordering lost by the SQL IN clause
	return sortBy(gitspaceConfigs, identifiers), nil
}
// Create inserts a new gitspace config row and populates gitspaceConfig.ID
// with the generated primary key.
func (s gitspaceConfigStore) Create(ctx context.Context, gitspaceConfig *types.GitspaceConfig) error {
	// AI agents are persisted as a single comma-delimited string column.
	aiAgentsStr := aiAgentsToStringPtr(gitspaceConfig.AIAgents)
	stmt := database.Builder.
		Insert(gitspaceConfigsTable).
		Columns(gitspaceConfigInsertColumns).
		// NOTE: the positional value order below must stay in sync with
		// gitspaceConfigInsertColumns (declared elsewhere in this file).
		Values(
			gitspaceConfig.Identifier,
			gitspaceConfig.Name,
			gitspaceConfig.IDE,
			gitspaceConfig.InfraProviderResource.ID,
			gitspaceConfig.CodeRepo.AuthType,
			gitspaceConfig.CodeRepo.AuthID,
			gitspaceConfig.CodeRepo.Type,
			gitspaceConfig.CodeRepo.IsPrivate,
			gitspaceConfig.CodeRepo.URL,
			gitspaceConfig.DevcontainerPath,
			gitspaceConfig.Branch,
			gitspaceConfig.GitspaceUser.Identifier,
			gitspaceConfig.SpaceID,
			gitspaceConfig.Created,
			gitspaceConfig.Updated,
			gitspaceConfig.IsDeleted,
			gitspaceConfig.CodeRepo.Ref,
			gitspaceConfig.SSHTokenIdentifier,
			gitspaceConfig.GitspaceUser.ID,
			gitspaceConfig.IsMarkedForDeletion,
			gitspaceConfig.IsMarkedForReset,
			gitspaceConfig.IsMarkedForInfraReset,
			aiAgentsStr,
		).
		Suffix(ReturningClause + "gconf_id")
	sql, args, err := stmt.ToSql()
	if err != nil {
		return errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	db := dbtx.GetAccessor(ctx, s.db)
	// Scan the returned primary key straight into the caller's struct.
	if err = db.QueryRowContext(ctx, sql, args...).Scan(&gitspaceConfig.ID); err != nil {
		return database.ProcessSQLErrorf(
			ctx, err, "gitspace config create query failed for %s", gitspaceConfig.Identifier)
	}
	return nil
}
// Update persists the mutable fields of the given gitspace config,
// addressed by its primary key.
func (s gitspaceConfigStore) Update(ctx context.Context,
	gitspaceConfig *types.GitspaceConfig) error {
	dbConfig := mapToInternalGitspaceConfig(gitspaceConfig)
	query := database.Builder.
		Update(gitspaceConfigsTable).
		Set("gconf_display_name", dbConfig.Name).
		Set("gconf_ide", dbConfig.IDE).
		Set("gconf_updated", dbConfig.Updated).
		Set("gconf_infra_provider_resource_id", dbConfig.InfraProviderResourceID).
		Set("gconf_is_deleted", dbConfig.IsDeleted).
		Set("gconf_is_marked_for_deletion", dbConfig.IsMarkedForDeletion).
		Set("gconf_is_marked_for_reset", dbConfig.IsMarkedForReset).
		Set("gconf_is_marked_for_infra_reset", dbConfig.IsMarkedForInfraReset).
		Set("gconf_ssh_token_identifier", dbConfig.SSHTokenIdentifier).
		Set("gconf_ai_agents", dbConfig.AIAgents.Ptr()).
		Where("gconf_id = ?", gitspaceConfig.ID)
	rawSQL, params, err := query.ToSql()
	if err != nil {
		return errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	db := dbtx.GetAccessor(ctx, s.db)
	if _, err := db.ExecContext(ctx, rawSQL, params...); err != nil {
		return database.ProcessSQLErrorf(
			ctx, err, "Failed to update gitspace config for %s", gitspaceConfig.Identifier)
	}
	return nil
}
// mapToInternalGitspaceConfig converts the API gitspace config into the
// internal DB representation. Nullable columns are wrapped via the null
// package so that nil pointers map to SQL NULL.
func mapToInternalGitspaceConfig(config *types.GitspaceConfig) *gitspaceConfig {
	// AI agents are flattened to a comma-delimited string column.
	aiAgentsStr := aiAgentsToStringPtr(config.AIAgents)
	return &gitspaceConfig{
		ID:                      config.ID,
		Identifier:              config.Identifier,
		Name:                    config.Name,
		IDE:                     config.IDE,
		InfraProviderResourceID: config.InfraProviderResource.ID,
		CodeAuthType:            config.CodeRepo.AuthType,
		CodeAuthID:              config.CodeRepo.AuthID,
		CodeRepoIsPrivate:       config.CodeRepo.IsPrivate,
		CodeRepoType:            config.CodeRepo.Type,
		CodeRepoRef:             null.StringFromPtr(config.CodeRepo.Ref),
		CodeRepoURL:             config.CodeRepo.URL,
		DevcontainerPath:        null.StringFromPtr(config.DevcontainerPath),
		Branch:                  config.Branch,
		UserUID:                 config.GitspaceUser.Identifier,
		SpaceID:                 config.SpaceID,
		IsDeleted:               config.IsDeleted,
		IsMarkedForDeletion:     config.IsMarkedForDeletion,
		IsMarkedForReset:        config.IsMarkedForReset,
		IsMarkedForInfraReset:   config.IsMarkedForInfraReset,
		Created:                 config.Created,
		Updated:                 config.Updated,
		SSHTokenIdentifier:      config.SSHTokenIdentifier,
		CreatedBy:               null.IntFromPtr(config.GitspaceUser.ID),
		AIAgents:                null.StringFromPtr(aiAgentsStr),
	}
}
// ListWithLatestInstance returns gitspace configs for the given filter with the latest gitspace instance information.
func (s gitspaceConfigStore) ListWithLatestInstance(
	ctx context.Context,
	filter *types.GitspaceFilter,
) ([]*types.GitspaceConfig, error) {
	latestInstanceSubquery := getLatestInstanceQuery()
	// Join each config to its newest instance (ranked rn = 1 by the subquery).
	query := squirrel.Select(
		gitspaceConfigSelectColumns,
		gitspaceInstanceSelectColumns).
		From(gitspaceConfigsTable).
		LeftJoin("(" + latestInstanceSubquery +
			") AS gits ON gitspace_configs.gconf_id = gits.gits_gitspace_config_id AND gits.rn = 1").
		PlaceholderFormat(squirrel.Dollar)
	query = addGitspaceFilter(query, filter)
	query = addGitspaceQueryFilter(query, filter.QueryFilter)
	query = addOrderBy(query, filter)
	query = query.
		Limit(database.Limit(filter.QueryFilter.Size)).
		Offset(database.Offset(filter.QueryFilter.Page, filter.QueryFilter.Size))
	rawSQL, params, err := query.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	var rows []*gitspaceConfigWithLatestInstance
	db := dbtx.GetAccessor(ctx, s.db)
	if err = db.SelectContext(ctx, &rows, rawSQL, params...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing list gitspace config query")
	}
	return s.ToGitspaceConfigs(ctx, rows)
}
// getLatestInstanceQuery builds the subquery that ranks each config's
// instances newest-first via ROW_NUMBER, so the caller can join on rn = 1
// to pick the latest instance per config.
func getLatestInstanceQuery() string {
	const rowNumber = "ROW_NUMBER() OVER (PARTITION BY gits_gitspace_config_id ORDER BY gits_created DESC) AS rn"
	return fmt.Sprintf("SELECT %s, %s FROM %s", gitspaceInstanceSelectColumns, rowNumber, gitspaceInstanceTable)
}
// addGitspaceFilter applies the gitspace list filter to the query built by
// ListWithLatestInstance. It assumes the query is joined against the latest
// instance alias "gits" (see getLatestInstanceQuery).
func addGitspaceFilter(stmt squirrel.SelectBuilder, filter *types.GitspaceFilter) squirrel.SelectBuilder {
	// Require a joined instance row; configs without any instance are excluded.
	stmt = stmt.Where(squirrel.Gt{"gits_id": 0})
	if filter.Deleted != nil {
		stmt = stmt.Where(squirrel.Eq{"gconf_is_deleted": filter.Deleted})
	}
	// Restrict to the requesting user's own gitspaces when asked.
	if filter.Owner == enum.GitspaceOwnerSelf && filter.UserIdentifier != "" {
		stmt = stmt.Where(squirrel.Eq{"gconf_user_uid": filter.UserIdentifier})
	}
	if filter.MarkedForDeletion != nil {
		stmt = stmt.Where(squirrel.Eq{"gconf_is_marked_for_deletion": filter.MarkedForDeletion})
	}
	if !filter.AllowAllSpaces {
		stmt = stmt.Where(squirrel.Eq{"gconf_space_id": filter.SpaceIDs})
	}
	if len(filter.CodeRepoTypes) > 0 {
		stmt = stmt.Where(squirrel.Eq{"gconf_code_repo_type": filter.CodeRepoTypes})
	}
	// Timestamp filters operate on the joined latest-instance columns.
	if filter.LastHeartBeatBefore > 0 {
		stmt = stmt.Where(squirrel.Lt{"gits_last_heartbeat": filter.LastHeartBeatBefore})
	}
	if filter.LastUsedBefore > 0 {
		stmt = stmt.Where(squirrel.Lt{"gits_last_used": filter.LastUsedBefore})
	}
	if filter.LastUpdatedBefore > 0 {
		stmt = stmt.Where(squirrel.Lt{"gits_updated": filter.LastUpdatedBefore})
	}
	// GitspaceFilterStates (coarse UI states) takes precedence over the raw
	// instance States filter; warn when both are supplied.
	if len(filter.GitspaceFilterStates) > 0 && len(filter.States) > 0 {
		log.Warn().Msgf("both view list filter and states are set for gitspace, the states[] are ignored")
	}
	if len(filter.GitspaceFilterStates) > 0 {
		// Expand each coarse filter state into the concrete instance states it covers.
		instanceStateTypes := make([]enum.GitspaceInstanceStateType, 0, len(filter.GitspaceFilterStates))
		for _, state := range filter.GitspaceFilterStates {
			switch state {
			case enum.GitspaceFilterStateError:
				instanceStateTypes =
					append(
						instanceStateTypes,
						enum.GitspaceInstanceStateError,
						enum.GitspaceInstanceStateUnknown,
					)
			case enum.GitspaceFilterStateRunning:
				instanceStateTypes =
					append(instanceStateTypes, enum.GitspaceInstanceStateRunning)
			case enum.GitspaceFilterStateStopped:
				instanceStateTypes = append(
					instanceStateTypes,
					enum.GitspaceInstanceStateStopped,
					enum.GitspaceInstanceStateCleaned,
					enum.GitspaceInstanceStateDeleted,
					enum.GitspaceInstanceStateUninitialized,
				)
			}
		}
		stmt = stmt.Where(squirrel.Eq{"gits_state": instanceStateTypes})
	} else if len(filter.States) > 0 {
		stmt = stmt.Where(squirrel.Eq{"gits_state": filter.States})
	}
	return stmt
}
// addOrderBy applies the sort column requested by the filter; the latest
// instance creation time (gits_created) is the default.
func addOrderBy(stmt squirrel.SelectBuilder, filter *types.GitspaceFilter) squirrel.SelectBuilder {
	column := "gits_created"
	switch filter.Sort {
	case enum.GitspaceSortLastUsed:
		column = "gits_last_used"
	case enum.GitspaceSortCreated:
		column = "gconf_created"
	case enum.GitspaceSortLastActivated:
		column = "gits_created"
	}
	return stmt.OrderBy(column + " " + filter.Order.String())
}
// FindAll fetches all non-deleted gitspace configs with the given IDs.
func (s gitspaceConfigStore) FindAll(ctx context.Context, ids []int64) ([]*types.GitspaceConfig, error) {
	query := database.Builder.
		Select(gitspaceConfigSelectColumns).
		From(gitspaceConfigsTable).
		Where(squirrel.Eq{"gconf_id": ids}). //nolint:goconst
		Where("gconf_is_deleted = ?", false)
	rawSQL, params, err := query.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	var records []*gitspaceConfig
	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.SelectContext(ctx, &records, rawSQL, params...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find all gitspace configs for %v", ids)
	}
	return s.mapToGitspaceConfigs(ctx, records)
}
// ListActiveConfigsForInfraProviderResource lists all gitspace configs that
// use the given infra provider resource and are neither deleted nor marked
// for deletion.
func (s gitspaceConfigStore) ListActiveConfigsForInfraProviderResource(
	ctx context.Context,
	infraProviderResourceID int64,
) ([]*types.GitspaceConfig, error) {
	stmt := database.Builder.
		Select(gitspaceConfigSelectColumns).
		From(gitspaceConfigsTable).
		Where("gconf_infra_provider_resource_id = ?", infraProviderResourceID).
		Where("gconf_is_deleted = false").
		Where("gconf_is_marked_for_deletion = false")
	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	db := dbtx.GetAccessor(ctx, s.db)
	// NOTE(review): only config columns are selected, but rows are scanned
	// into gitspaceConfigWithLatestInstance, so the embedded instance part
	// stays at its zero value and ToGitspaceConfigs will report these
	// configs without live instance state — confirm this is intentional.
	var dst []*gitspaceConfigWithLatestInstance
	if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing list gitspace config query")
	}
	return s.ToGitspaceConfigs(ctx, dst)
}
// mapDBToGitspaceConfig converts the internal DB row into the API type,
// resolving the infra provider resource and owning space path via the
// store's caches.
func (s gitspaceConfigStore) mapDBToGitspaceConfig(
	ctx context.Context,
	in *gitspaceConfig,
) (*types.GitspaceConfig, error) {
	codeRepo := types.CodeRepo{
		URL:              in.CodeRepoURL,
		Ref:              in.CodeRepoRef.Ptr(),
		Type:             in.CodeRepoType,
		Branch:           in.Branch,
		DevcontainerPath: in.DevcontainerPath.Ptr(),
		IsPrivate:        in.CodeRepoIsPrivate,
		AuthType:         in.CodeAuthType,
		AuthID:           in.CodeAuthID,
	}
	// Expand the comma-delimited AI agents column back into a slice.
	aiAgentTypes := stringToAIAgents(in.AIAgents.Ptr())
	var result = &types.GitspaceConfig{
		ID:                    in.ID,
		Identifier:            in.Identifier,
		Name:                  in.Name,
		IDE:                   in.IDE,
		SpaceID:               in.SpaceID,
		Created:               in.Created,
		Updated:               in.Updated,
		SSHTokenIdentifier:    in.SSHTokenIdentifier,
		IsMarkedForDeletion:   in.IsMarkedForDeletion,
		IsMarkedForReset:      in.IsMarkedForReset,
		IsMarkedForInfraReset: in.IsMarkedForInfraReset,
		IsDeleted:             in.IsDeleted,
		AIAgents:              aiAgentTypes,
		CodeRepo:              codeRepo,
		GitspaceUser: types.GitspaceUser{
			ID:         in.CreatedBy.Ptr(),
			Identifier: in.UserUID},
	}
	if result.GitspaceUser.ID != nil {
		// Best-effort enrichment of author details: a cache miss/error is
		// deliberately ignored and leaves display name/email empty.
		author, _ := s.pCache.Get(ctx, *result.GitspaceUser.ID)
		if author != nil {
			result.GitspaceUser.DisplayName = author.DisplayName
			result.GitspaceUser.Email = author.Email
		}
	}
	// The infra provider resource is mandatory — fail if it can't be resolved.
	resource, err := s.rCache.Get(ctx, in.InfraProviderResourceID)
	if err != nil {
		return nil, fmt.Errorf("couldn't set resource to the gitspace config in DB: %s", in.Identifier)
	}
	result.InfraProviderResource = *resource
	spaceCore, err := s.spaceIDCache.Get(ctx, in.SpaceID)
	if err != nil {
		return nil, fmt.Errorf("couldn't set space path to the gitspace config in DB: %d", in.SpaceID)
	}
	result.SpacePath = spaceCore.Path
	return result, nil
}
// ToGitspaceConfig converts a joined config+latest-instance DB row into the
// API type, deriving the gitspace state from the latest instance (falling
// back to Uninitialized when no usable instance exists).
func (s gitspaceConfigStore) ToGitspaceConfig(
	ctx context.Context,
	in *gitspaceConfigWithLatestInstance,
) (*types.GitspaceConfig, error) {
	config, err := s.mapDBToGitspaceConfig(ctx, &in.gitspaceConfig)
	if err != nil {
		return nil, err
	}
	// Instance conversion failures are logged and treated as "no instance".
	instance, instErr := s.mapDBToGitspaceInstance(ctx, &in.gitspaceInstance)
	if instErr != nil {
		log.Ctx(ctx).Error().Err(instErr).Msgf("Failed to convert to gitspace instance, gitspace configID: %d",
			in.gitspaceInstance.ID,
		)
		instance = nil
	}
	if instance == nil {
		config.State = enum.GitspaceStateUninitialized
	} else {
		state, err := instance.GetGitspaceState()
		if err != nil {
			return nil, err
		}
		config.State = state
	}
	config.GitspaceInstance = instance
	return config, nil
}
// mapDBToGitspaceInstance converts a DB instance row into the API type,
// resolving the owning space path from the space cache.
func (s gitspaceConfigStore) mapDBToGitspaceInstance(
	ctx context.Context,
	in *gitspaceInstance,
) (*types.GitspaceInstance, error) {
	instance := toGitspaceInstance(in)
	space, err := s.spaceIDCache.Get(ctx, in.SpaceID)
	if err != nil {
		return nil, fmt.Errorf("couldn't set space path to the config in DB: %d", in.SpaceID)
	}
	instance.SpacePath = space.Path
	return instance, nil
}
// toGitspaceInstance converts the internal DB instance row into the API
// type. Nullable columns are unwrapped to pointers via .Ptr(); SpacePath is
// not set here (see mapDBToGitspaceInstance).
func toGitspaceInstance(in *gitspaceInstance) *types.GitspaceInstance {
	var res = &types.GitspaceInstance{
		ID:                in.ID,
		Identifier:        in.Identifier,
		GitSpaceConfigID:  in.GitSpaceConfigID,
		URL:               in.URL.Ptr(),
		SSHCommand:        in.SSHCommand.Ptr(),
		State:             in.State,
		UserID:            in.UserUID,
		ResourceUsage:     in.ResourceUsage.Ptr(),
		LastUsed:          in.LastUsed.Ptr(),
		TotalTimeUsed:     in.TotalTimeUsed,
		AccessType:        in.AccessType,
		AccessKeyRef:      in.AccessKeyRef.Ptr(),
		MachineUser:       in.MachineUser.Ptr(),
		SpaceID:           in.SpaceID,
		Created:           in.Created,
		Updated:           in.Updated,
		LastHeartbeat:     in.LastHeartbeat.Ptr(),
		ActiveTimeEnded:   in.ActiveTimeEnded.Ptr(),
		ActiveTimeStarted: in.ActiveTimeStarted.Ptr(),
		HasGitChanges:     in.HasGitChanges.Ptr(),
		ErrorMessage:      in.ErrorMessage.Ptr(),
	}
	return res
}
// mapToGitspaceConfigs converts a slice of DB rows to API types, failing
// fast on the first conversion error.
func (s gitspaceConfigStore) mapToGitspaceConfigs(
	ctx context.Context,
	configs []*gitspaceConfig,
) ([]*types.GitspaceConfig, error) {
	out := make([]*types.GitspaceConfig, len(configs))
	for i, config := range configs {
		mapped, err := s.mapDBToGitspaceConfig(ctx, config)
		if err != nil {
			return nil, err
		}
		out[i] = mapped
	}
	return out, nil
}
// ToGitspaceConfigs converts joined config+instance rows to API types,
// failing fast on the first conversion error.
func (s gitspaceConfigStore) ToGitspaceConfigs(
	ctx context.Context,
	configs []*gitspaceConfigWithLatestInstance,
) ([]*types.GitspaceConfig, error) {
	out := make([]*types.GitspaceConfig, len(configs))
	for i, config := range configs {
		mapped, err := s.ToGitspaceConfig(ctx, config)
		if err != nil {
			return nil, err
		}
		out[i] = mapped
	}
	return out, nil
}
// addGitspaceQueryFilter applies the free-text query as a partial match
// against the config identifier and display name; a no-op when empty.
func addGitspaceQueryFilter(stmt squirrel.SelectBuilder, filter types.ListQueryFilter) squirrel.SelectBuilder {
	if filter.Query == "" {
		return stmt
	}
	return stmt.Where(squirrel.Or{
		squirrel.Expr(PartialMatch("gconf_uid", filter.Query)),
		squirrel.Expr(PartialMatch("gconf_display_name", filter.Query)),
	})
}
// sortBy orders the given configs to match the order of idsInOrder (the
// identifiers originally requested by the caller).
//
// Identifiers that matched no config are skipped. The previous positional
// placement panicked with an index-out-of-range when fewer configs were
// found than identifiers requested (the output slice was sized by
// len(configs) while placement indices came from the larger identifier
// list), and silently placed configs with unknown identifiers at index 0.
func sortBy(configs []*types.GitspaceConfig, idsInOrder []string) []types.GitspaceConfig {
	configByID := make(map[string]*types.GitspaceConfig, len(configs))
	for _, config := range configs {
		configByID[config.Identifier] = config
	}
	orderedConfigs := make([]types.GitspaceConfig, 0, len(configs))
	for _, id := range idsInOrder {
		if config, ok := configByID[id]; ok {
			orderedConfigs = append(orderedConfigs, *config)
		}
	}
	return orderedConfigs
}
// aiAgentsToStringPtr converts a slice of AIAgent to a comma-delimited string.
// It returns nil for an empty (or nil) slice.
func aiAgentsToStringPtr(aiAgents []enum.AIAgent) *string {
	if len(aiAgents) == 0 {
		return nil
	}
	var sb strings.Builder
	for i, agent := range aiAgents {
		if i > 0 {
			sb.WriteString(",")
		}
		sb.WriteString(string(agent))
	}
	joined := sb.String()
	return &joined
}
// stringToAIAgents converts a comma-delimited string to a slice of AIAgent.
// Entries that are blank after whitespace trimming are dropped; a nil input
// yields a nil result.
func stringToAIAgents(aiAgentsStrPtr *string) []enum.AIAgent {
	if aiAgentsStrPtr == nil {
		return nil
	}
	parts := strings.Split(*aiAgentsStrPtr, ",")
	agents := make([]enum.AIAgent, 0, len(parts))
	for _, part := range parts {
		if trimmed := strings.TrimSpace(part); trimmed != "" {
			agents = append(agents, enum.AIAgent(trimmed))
		}
	}
	return agents
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/webhook_execution.go | app/store/database/webhook_execution.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/guregu/null"
"github.com/jmoiron/sqlx"
)
// Compile-time assertion that WebhookExecutionStore satisfies the store interface.
var _ store.WebhookExecutionStore = (*WebhookExecutionStore)(nil)

// NewWebhookExecutionStore returns a new WebhookExecutionStore.
func NewWebhookExecutionStore(db *sqlx.DB) *WebhookExecutionStore {
	return &WebhookExecutionStore{
		db: db,
	}
}

// WebhookExecutionStore implements store.WebhookExecution backed by a relational database.
type WebhookExecutionStore struct {
	db *sqlx.DB
}
// webhookExecution is used to store executions of webhooks
// The object should be later re-packed into a different struct to return it as an API response.
type webhookExecution struct {
	ID                 int64                       `db:"webhook_execution_id"`
	RetriggerOf        null.Int                    `db:"webhook_execution_retrigger_of"`
	Retriggerable      bool                        `db:"webhook_execution_retriggerable"`
	WebhookID          int64                       `db:"webhook_execution_webhook_id"`
	TriggerType        enum.WebhookTrigger         `db:"webhook_execution_trigger_type"`
	TriggerID          string                      `db:"webhook_execution_trigger_id"`
	Result             enum.WebhookExecutionResult `db:"webhook_execution_result"`
	Created            int64                       `db:"webhook_execution_created"`
	Duration           int64                       `db:"webhook_execution_duration"`
	Error              string                      `db:"webhook_execution_error"`
	RequestURL         string                      `db:"webhook_execution_request_url"`
	RequestHeaders     string                      `db:"webhook_execution_request_headers"`
	RequestBody        string                      `db:"webhook_execution_request_body"`
	ResponseStatusCode int                         `db:"webhook_execution_response_status_code"`
	ResponseStatus     string                      `db:"webhook_execution_response_status"`
	ResponseHeaders    string                      `db:"webhook_execution_response_headers"`
	ResponseBody       string                      `db:"webhook_execution_response_body"`
}

const (
	// webhookExecutionColumns lists every column of the webhook_executions
	// table; keep it in sync with the db tags on webhookExecution above.
	webhookExecutionColumns = `
	webhook_execution_id
	,webhook_execution_retrigger_of
	,webhook_execution_retriggerable
	,webhook_execution_webhook_id
	,webhook_execution_trigger_type
	,webhook_execution_trigger_id
	,webhook_execution_result
	,webhook_execution_created
	,webhook_execution_duration
	,webhook_execution_error
	,webhook_execution_request_url
	,webhook_execution_request_headers
	,webhook_execution_request_body
	,webhook_execution_response_status_code
	,webhook_execution_response_status
	,webhook_execution_response_headers
	,webhook_execution_response_body`

	// webhookExecutionSelectBase is the shared SELECT prefix used by the
	// read queries in this store.
	webhookExecutionSelectBase = `
	SELECT` + webhookExecutionColumns + `
	FROM webhook_executions`
)
// Find finds the webhook execution by id.
func (s *WebhookExecutionStore) Find(ctx context.Context, id int64) (*types.WebhookExecution, error) {
	const sqlQuery = webhookExecutionSelectBase + `
	WHERE webhook_execution_id = $1`

	record := new(webhookExecution)
	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.GetContext(ctx, record, sqlQuery, id); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Select query failed")
	}
	return mapToWebhookExecution(record), nil
}
// Create creates a new webhook execution entry.
// On success execution.ID is populated from the generated primary key.
func (s *WebhookExecutionStore) Create(ctx context.Context, execution *types.WebhookExecution) error {
	const sqlQuery = `
	INSERT INTO webhook_executions (
		webhook_execution_retrigger_of
		,webhook_execution_retriggerable
		,webhook_execution_webhook_id
		,webhook_execution_trigger_type
		,webhook_execution_trigger_id
		,webhook_execution_result
		,webhook_execution_created
		,webhook_execution_duration
		,webhook_execution_error
		,webhook_execution_request_url
		,webhook_execution_request_headers
		,webhook_execution_request_body
		,webhook_execution_response_status_code
		,webhook_execution_response_status
		,webhook_execution_response_headers
		,webhook_execution_response_body
	) values (
		:webhook_execution_retrigger_of
		,:webhook_execution_retriggerable
		,:webhook_execution_webhook_id
		,:webhook_execution_trigger_type
		,:webhook_execution_trigger_id
		,:webhook_execution_result
		,:webhook_execution_created
		,:webhook_execution_duration
		,:webhook_execution_error
		,:webhook_execution_request_url
		,:webhook_execution_request_headers
		,:webhook_execution_request_body
		,:webhook_execution_response_status_code
		,:webhook_execution_response_status
		,:webhook_execution_response_headers
		,:webhook_execution_response_body
	) RETURNING webhook_execution_id`

	db := dbtx.GetAccessor(ctx, s.db)

	// Bind named parameters from the internal DB representation of the execution.
	query, arg, err := db.BindNamed(sqlQuery, mapToInternalWebhookExecution(execution))
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind webhook execution object")
	}

	// Scan the returned primary key straight into the caller's struct.
	if err = db.QueryRowContext(ctx, query, arg...).Scan(&execution.ID); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Insert query failed")
	}

	return nil
}
// DeleteOld removes all executions that are older than the provided time.
// It returns the number of rows deleted.
func (s *WebhookExecutionStore) DeleteOld(ctx context.Context, olderThan time.Time) (int64, error) {
	deleteStmt := database.Builder.
		Delete("webhook_executions").
		Where("webhook_execution_created < ?", olderThan.UnixMilli())
	sqlQuery, params, err := deleteStmt.ToSql()
	if err != nil {
		return 0, fmt.Errorf("failed to convert delete executions query to sql: %w", err)
	}

	res, err := dbtx.GetAccessor(ctx, s.db).ExecContext(ctx, sqlQuery, params...)
	if err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "failed to execute delete executions query")
	}

	deleted, err := res.RowsAffected()
	if err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "failed to get number of deleted executions")
	}
	return deleted, nil
}
// ListForWebhook lists the webhook executions for a given webhook id.
func (s *WebhookExecutionStore) ListForWebhook(ctx context.Context, webhookID int64,
	opts *types.WebhookExecutionFilter) ([]*types.WebhookExecution, error) {
	listStmt := database.Builder.
		Select(webhookExecutionColumns).
		From("webhook_executions").
		Where("webhook_execution_webhook_id = ?", webhookID).
		Limit(database.Limit(opts.Size)).
		Offset(database.Offset(opts.Page, opts.Size)).
		// fixed ordering by desc id (new ones first) - add customized ordering if deemed necessary.
		OrderBy("webhook_execution_id DESC")

	sqlQuery, params, err := listStmt.ToSql()
	if err != nil {
		return nil, fmt.Errorf("failed to convert query to sql: %w", err)
	}

	executions := []*webhookExecution{}
	db := dbtx.GetAccessor(ctx, s.db)
	if err = db.SelectContext(ctx, &executions, sqlQuery, params...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Select query failed")
	}
	return mapToWebhookExecutions(executions), nil
}
// CountForWebhook counts the total number of webhook executions for a given webhook ID.
func (s *WebhookExecutionStore) CountForWebhook(
	ctx context.Context,
	webhookID int64,
) (int64, error) {
	countStmt := database.Builder.
		Select("COUNT(*)").
		From("webhook_executions").
		Where("webhook_execution_webhook_id = ?", webhookID)

	sqlQuery, params, err := countStmt.ToSql()
	if err != nil {
		return 0, fmt.Errorf("failed to convert query to sql: %w", err)
	}

	var total int64
	db := dbtx.GetAccessor(ctx, s.db)
	if err = db.GetContext(ctx, &total, sqlQuery, params...); err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "Count query failed")
	}
	return total, nil
}
// ListForTrigger lists the webhook executions for a given trigger id.
func (s *WebhookExecutionStore) ListForTrigger(ctx context.Context,
	triggerID string) ([]*types.WebhookExecution, error) {
	const sqlQuery = webhookExecutionSelectBase + `
	WHERE webhook_execution_trigger_id = $1`

	executions := []*webhookExecution{}
	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.SelectContext(ctx, &executions, sqlQuery, triggerID); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Select query failed")
	}
	return mapToWebhookExecutions(executions), nil
}
// mapToWebhookExecution converts the internal DB representation of a webhook
// execution into the API type, grouping the flat request/response columns
// into their nested structs.
func mapToWebhookExecution(execution *webhookExecution) *types.WebhookExecution {
	return &types.WebhookExecution{
		ID:            execution.ID,
		RetriggerOf:   execution.RetriggerOf.Ptr(),
		Retriggerable: execution.Retriggerable,
		Created:       execution.Created,
		WebhookID:     execution.WebhookID,
		TriggerType:   execution.TriggerType,
		TriggerID:     execution.TriggerID,
		Result:        execution.Result,
		Error:         execution.Error,
		Duration:      execution.Duration,
		Request: types.WebhookExecutionRequest{
			URL:     execution.RequestURL,
			Headers: execution.RequestHeaders,
			Body:    execution.RequestBody,
		},
		Response: types.WebhookExecutionResponse{
			StatusCode: execution.ResponseStatusCode,
			Status:     execution.ResponseStatus,
			Headers:    execution.ResponseHeaders,
			Body:       execution.ResponseBody,
		},
	}
}
// mapToInternalWebhookExecution converts the API webhook execution into the
// internal DB representation, flattening the nested request/response structs
// and wrapping the nullable retrigger reference.
func mapToInternalWebhookExecution(execution *types.WebhookExecution) *webhookExecution {
	return &webhookExecution{
		ID:                 execution.ID,
		RetriggerOf:        null.IntFromPtr(execution.RetriggerOf),
		Retriggerable:      execution.Retriggerable,
		Created:            execution.Created,
		WebhookID:          execution.WebhookID,
		TriggerType:        execution.TriggerType,
		TriggerID:          execution.TriggerID,
		Result:             execution.Result,
		Error:              execution.Error,
		Duration:           execution.Duration,
		RequestURL:         execution.Request.URL,
		RequestHeaders:     execution.Request.Headers,
		RequestBody:        execution.Request.Body,
		ResponseStatusCode: execution.Response.StatusCode,
		ResponseStatus:     execution.Response.Status,
		ResponseHeaders:    execution.Response.Headers,
		ResponseBody:       execution.Response.Body,
	}
}
// mapToWebhookExecutions converts a slice of internal DB executions to the
// API type, preserving order.
func mapToWebhookExecutions(executions []*webhookExecution) []*types.WebhookExecution {
	res := make([]*types.WebhookExecution, len(executions))
	for i := range executions {
		res[i] = mapToWebhookExecution(executions[i])
	}
	return res
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/webhook.go | app/store/database/webhook.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"encoding/json"
"fmt"
"strings"
"time"
"github.com/harness/gitness/app/store"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/Masterminds/squirrel"
"github.com/guregu/null"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
)
// Compile-time assertion that WebhookStore satisfies the store interface.
var _ store.WebhookStore = (*WebhookStore)(nil)

// NewWebhookStore returns a new WebhookStore.
func NewWebhookStore(db *sqlx.DB) *WebhookStore {
	return &WebhookStore{
		db: db,
	}
}

// WebhookStore implements store.Webhook backed by a relational database.
type WebhookStore struct {
	db *sqlx.DB
}
// webhook is an internal representation used to store webhook data in the database.
type webhook struct {
	ID      int64    `db:"webhook_id"`
	Version int64    `db:"webhook_version"` // used for optimistic locking (see Update)
	RepoID  null.Int `db:"webhook_repo_id"` // exactly one of repo/space id is expected to be set
	SpaceID null.Int `db:"webhook_space_id"`

	CreatedBy int64            `db:"webhook_created_by"`
	Created   int64            `db:"webhook_created"`
	Updated   int64            `db:"webhook_updated"`
	Type      enum.WebhookType `db:"webhook_type"`
	Scope     int64            `db:"webhook_scope"`

	Identifier string `db:"webhook_uid"`
	// TODO [CODE-1364]: Remove once UID/Identifier migration is completed.
	DisplayName           string      `db:"webhook_display_name"`
	Description           string      `db:"webhook_description"`
	URL                   string      `db:"webhook_url"`
	Secret                string      `db:"webhook_secret"`
	Enabled               bool        `db:"webhook_enabled"`
	Insecure              bool        `db:"webhook_insecure"`
	Triggers              string      `db:"webhook_triggers"`
	LatestExecutionResult null.String `db:"webhook_latest_execution_result"`
	ExtraHeaders          null.String `db:"webhook_extra_headers"`
}

const (
	// webhookColumns lists every column of the webhooks table; keep it in
	// sync with the db tags on the webhook struct above.
	webhookColumns = `
	webhook_id
	,webhook_version
	,webhook_repo_id
	,webhook_space_id
	,webhook_created_by
	,webhook_created
	,webhook_updated
	,webhook_uid
	,webhook_display_name
	,webhook_description
	,webhook_url
	,webhook_secret
	,webhook_enabled
	,webhook_insecure
	,webhook_triggers
	,webhook_latest_execution_result
	,webhook_type
	,webhook_scope
	,webhook_extra_headers`

	// webhookSelectBase is the shared SELECT prefix used by the read
	// queries in this store.
	webhookSelectBase = `
	SELECT` + webhookColumns + `
	FROM webhooks`
)
// Find finds the webhook by id.
func (s *WebhookStore) Find(ctx context.Context, id int64) (*types.Webhook, error) {
	const sqlQuery = webhookSelectBase + `
	WHERE webhook_id = $1`

	record := &webhook{}
	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.GetContext(ctx, record, sqlQuery, id); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Select query failed")
	}

	mapped, err := mapToWebhook(record)
	if err != nil {
		return nil, fmt.Errorf("failed to map webhook to external type: %w", err)
	}
	return mapped, nil
}
// FindByIdentifier finds the webhook with the given Identifier for the given parent.
func (s *WebhookStore) FindByIdentifier(
	ctx context.Context,
	parentType enum.WebhookParent,
	parentID int64,
	identifier string,
) (*types.Webhook, error) {
	stmt := database.Builder.
		Select(webhookColumns).
		From("webhooks").
		Where("LOWER(webhook_uid) = ?", strings.ToLower(identifier))
	// Scope the lookup to the parent entity.
	switch parentType {
	case enum.WebhookParentRepo:
		stmt = stmt.Where("webhook_repo_id = ?", parentID)
	case enum.WebhookParentSpace:
		stmt = stmt.Where("webhook_space_id = ?", parentID)
	case enum.WebhookParentRegistry:
		// NOTE(review): no parent scoping is applied for registry webhooks —
		// the lookup matches on identifier alone. Confirm registry webhook
		// identifiers are unique without a parent filter.
	default:
		return nil, fmt.Errorf("webhook parent type '%s' is not supported", parentType)
	}
	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, fmt.Errorf("failed to convert query to sql: %w", err)
	}
	db := dbtx.GetAccessor(ctx, s.db)
	dst := &webhook{}
	if err := db.GetContext(ctx, dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Select query failed")
	}
	res, err := mapToWebhook(dst)
	if err != nil {
		return nil, fmt.Errorf("failed to map webhook to external type: %w", err)
	}
	return res, nil
}
// Create creates a new webhook.
// On success hook.ID is populated from the generated primary key.
func (s *WebhookStore) Create(ctx context.Context, hook *types.Webhook) error {
	const sqlQuery = `
	INSERT INTO webhooks (
		webhook_repo_id
		,webhook_space_id
		,webhook_created_by
		,webhook_created
		,webhook_updated
		,webhook_uid
		,webhook_display_name
		,webhook_description
		,webhook_url
		,webhook_secret
		,webhook_enabled
		,webhook_insecure
		,webhook_triggers
		,webhook_latest_execution_result
		,webhook_type
		,webhook_scope
		,webhook_extra_headers
	) values (
		:webhook_repo_id
		,:webhook_space_id
		,:webhook_created_by
		,:webhook_created
		,:webhook_updated
		,:webhook_uid
		,:webhook_display_name
		,:webhook_description
		,:webhook_url
		,:webhook_secret
		,:webhook_enabled
		,:webhook_insecure
		,:webhook_triggers
		,:webhook_latest_execution_result
		,:webhook_type
		,:webhook_scope
		,:webhook_extra_headers
	) RETURNING webhook_id`

	db := dbtx.GetAccessor(ctx, s.db)

	// Convert to the internal DB representation (serializes triggers,
	// extra headers, etc.); may fail on serialization.
	dbHook, err := mapToInternalWebhook(hook)
	if err != nil {
		return fmt.Errorf("failed to map webhook to internal db type: %w", err)
	}

	query, arg, err := db.BindNamed(sqlQuery, dbHook)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind webhook object")
	}

	// Scan the returned primary key straight into the caller's struct.
	if err = db.QueryRowContext(ctx, query, arg...).Scan(&hook.ID); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Insert query failed")
	}

	return nil
}
// Update updates an existing webhook using optimistic locking: the row is
// only updated if its stored version equals hook.Version, and the version
// is incremented in the process. Returns gitness_store.ErrVersionConflict
// when the row was modified concurrently (or no longer exists).
func (s *WebhookStore) Update(ctx context.Context, hook *types.Webhook) error {
	const sqlQuery = `
	UPDATE webhooks
	SET
		webhook_version = :webhook_version
		,webhook_updated = :webhook_updated
		,webhook_uid = :webhook_uid
		,webhook_display_name = :webhook_display_name
		,webhook_description = :webhook_description
		,webhook_url = :webhook_url
		,webhook_secret = :webhook_secret
		,webhook_enabled = :webhook_enabled
		,webhook_insecure = :webhook_insecure
		,webhook_triggers = :webhook_triggers
		,webhook_latest_execution_result = :webhook_latest_execution_result
		,webhook_extra_headers = :webhook_extra_headers
	WHERE webhook_id = :webhook_id and webhook_version = :webhook_version - 1`

	db := dbtx.GetAccessor(ctx, s.db)

	dbHook, err := mapToInternalWebhook(hook)
	if err != nil {
		return fmt.Errorf("failed to map webhook to internal db type: %w", err)
	}

	// update Version (used for optimistic locking) and Updated time
	dbHook.Version++
	dbHook.Updated = time.Now().UnixMilli()

	query, arg, err := db.BindNamed(sqlQuery, dbHook)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind webhook object")
	}

	result, err := db.ExecContext(ctx, query, arg...)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "failed to update webhook")
	}

	count, err := result.RowsAffected()
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to get number of updated rows")
	}

	// Zero affected rows means the version check failed — concurrent update.
	if count == 0 {
		return gitness_store.ErrVersionConflict
	}

	// Reflect the committed version/timestamp back to the caller's struct.
	hook.Version = dbHook.Version
	hook.Updated = dbHook.Updated

	return nil
}
// UpdateOptLock updates the webhook using the optimistic locking mechanism.
// It repeatedly applies mutateFn to a copy of the latest webhook and attempts
// the versioned update, re-reading the webhook on version conflicts until the
// update succeeds or a non-conflict error occurs.
func (s *WebhookStore) UpdateOptLock(
	ctx context.Context, hook *types.Webhook,
	mutateFn func(hook *types.Webhook) error,
) (*types.Webhook, error) {
	for {
		// mutate a copy so the caller's object stays untouched on failure.
		dup := *hook
		err := mutateFn(&dup)
		if err != nil {
			return nil, fmt.Errorf("failed to mutate the webhook: %w", err)
		}
		err = s.Update(ctx, &dup)
		if err == nil {
			return &dup, nil
		}
		if !errors.Is(err, gitness_store.ErrVersionConflict) {
			return nil, fmt.Errorf("failed to update the webhook: %w", err)
		}
		// version conflict: fetch the latest state and retry.
		hook, err = s.Find(ctx, hook.ID)
		if err != nil {
			return nil, fmt.Errorf("failed to find the latest version of the webhook: %w", err)
		}
	}
}
// Delete deletes the webhook for the given id.
// Deleting an id that doesn't exist is not reported as an error.
func (s *WebhookStore) Delete(ctx context.Context, id int64) error {
	const deleteQuery = `
	DELETE FROM webhooks
	WHERE webhook_id = $1`

	accessor := dbtx.GetAccessor(ctx, s.db)

	_, err := accessor.ExecContext(ctx, deleteQuery, id)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "The delete query failed")
	}

	return nil
}
// DeleteByIdentifier deletes the webhook with the given Identifier for the given parent.
// The identifier comparison is case-insensitive.
func (s *WebhookStore) DeleteByIdentifier(
	ctx context.Context,
	parentType enum.WebhookParent,
	parentID int64,
	identifier string,
) error {
	deleteStmt := database.Builder.
		Delete("webhooks").
		Where("LOWER(webhook_uid) = ?", strings.ToLower(identifier))

	// scope the delete to the webhook's parent entity.
	switch parentType {
	case enum.WebhookParentRepo:
		deleteStmt = deleteStmt.Where("webhook_repo_id = ?", parentID)
	case enum.WebhookParentSpace:
		deleteStmt = deleteStmt.Where("webhook_space_id = ?", parentID)
	case enum.WebhookParentRegistry:
		// registry-parented webhooks get no additional parent filter here.
	default:
		return fmt.Errorf("webhook parent type '%s' is not supported", parentType)
	}

	sql, args, err := deleteStmt.ToSql()
	if err != nil {
		return fmt.Errorf("failed to convert query to sql: %w", err)
	}

	accessor := dbtx.GetAccessor(ctx, s.db)

	if _, err := accessor.ExecContext(ctx, sql, args...); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "The delete query failed")
	}

	return nil
}
// Count counts the webhooks for a given parent type and id.
func (s *WebhookStore) Count(
	ctx context.Context,
	parents []types.WebhookParentInfo,
	opts *types.WebhookFilter,
) (int64, error) {
	countStmt := database.Builder.
		Select("count(*)").
		From("webhooks")

	if err := selectWebhookParents(parents, &countStmt); err != nil {
		return 0, fmt.Errorf("failed to select webhook parents: %w", err)
	}

	countStmt = applyWebhookFilter(opts, countStmt)

	sql, args, err := countStmt.ToSql()
	if err != nil {
		return 0, fmt.Errorf("failed to convert query to sql: %w", err)
	}

	var count int64
	accessor := dbtx.GetAccessor(ctx, s.db)
	if err := accessor.QueryRowContext(ctx, sql, args...).Scan(&count); err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "Failed executing count query")
	}

	return count, nil
}
// List returns the webhooks for the given parents, applying the provided
// filter, pagination, and ordering options.
func (s *WebhookStore) List(
	ctx context.Context,
	parents []types.WebhookParentInfo,
	opts *types.WebhookFilter,
) ([]*types.Webhook, error) {
	stmt := database.Builder.
		Select(webhookColumns).
		From("webhooks")
	err := selectWebhookParents(parents, &stmt)
	if err != nil {
		return nil, fmt.Errorf("failed to select webhook parents: %w", err)
	}
	stmt = applyWebhookFilter(opts, stmt)
	stmt = stmt.Limit(database.Limit(opts.Size))
	stmt = stmt.Offset(database.Offset(opts.Page, opts.Size))
	switch opts.Sort {
	// TODO [CODE-1364]: Remove once UID/Identifier migration is completed
	case enum.WebhookAttrID, enum.WebhookAttrNone:
		// NOTE: string concatenation is safe because the
		// order attribute is an enum and is not user-defined,
		// and is therefore not subject to injection attacks.
		stmt = stmt.OrderBy("webhook_id " + opts.Order.String())
	// TODO [CODE-1363]: remove after identifier migration.
	case enum.WebhookAttrUID, enum.WebhookAttrIdentifier:
		stmt = stmt.OrderBy("LOWER(webhook_uid) " + opts.Order.String())
	// TODO [CODE-1364]: Remove once UID/Identifier migration is completed
	case enum.WebhookAttrDisplayName:
		stmt = stmt.OrderBy("webhook_display_name " + opts.Order.String())
	//TODO: Postgres does not support COLLATE NOCASE for UTF8
	case enum.WebhookAttrCreated:
		stmt = stmt.OrderBy("webhook_created " + opts.Order.String())
	case enum.WebhookAttrUpdated:
		stmt = stmt.OrderBy("webhook_updated " + opts.Order.String())
	}
	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, fmt.Errorf("failed to convert query to sql: %w", err)
	}
	db := dbtx.GetAccessor(ctx, s.db)
	dst := []*webhook{}
	if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Select query failed")
	}
	// convert the internal DB rows to the external webhook type.
	res, err := mapToWebhooks(dst)
	if err != nil {
		return nil, fmt.Errorf("failed to map webhooks to external type: %w", err)
	}
	return res, nil
}
// UpdateParentSpace moves all webhooks from one parent space to another and
// returns the number of webhooks that were moved.
func (s *WebhookStore) UpdateParentSpace(
	ctx context.Context,
	srcParentSpaceID int64,
	targetParentSpaceID int64,
) (int64, error) {
	updateStmt := database.Builder.
		Update("webhooks").
		Set("webhook_space_id", targetParentSpaceID).
		Where("webhook_space_id = ?", srcParentSpaceID)

	sql, args, err := updateStmt.ToSql()
	if err != nil {
		return 0, fmt.Errorf("failed to convert query to sql: %w", err)
	}

	accessor := dbtx.GetAccessor(ctx, s.db)

	result, err := accessor.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "the update query failed")
	}

	affected, err := result.RowsAffected()
	if err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "failed to get number of updated rows")
	}

	return affected, nil
}
// mapToWebhook converts the internal DB representation to the external
// webhook type. Exactly one of RepoID/SpaceID must be set on the row;
// otherwise an error is returned.
func mapToWebhook(hook *webhook) (*types.Webhook, error) {
	res := &types.Webhook{
		ID:         hook.ID,
		Version:    hook.Version,
		CreatedBy:  hook.CreatedBy,
		Created:    hook.Created,
		Updated:    hook.Updated,
		Identifier: hook.Identifier,
		Scope:      hook.Scope,
		// TODO [CODE-1364]: Remove once UID/Identifier migration is completed
		DisplayName:           hook.DisplayName,
		Description:           hook.Description,
		URL:                   hook.URL,
		Secret:                hook.Secret,
		Enabled:               hook.Enabled,
		Insecure:              hook.Insecure,
		Triggers:              triggersFromString(hook.Triggers),
		LatestExecutionResult: (*enum.WebhookExecutionResult)(hook.LatestExecutionResult.Ptr()),
		Type:                  hook.Type,
		ExtraHeaders:          extraHeadersFromString(hook.ExtraHeaders.String),
	}
	// derive parent type/id from whichever nullable FK column is set.
	switch {
	case hook.RepoID.Valid && hook.SpaceID.Valid:
		return nil, fmt.Errorf("both repoID and spaceID are set for hook %d", hook.ID)
	case hook.RepoID.Valid:
		res.ParentType = enum.WebhookParentRepo
		res.ParentID = hook.RepoID.Int64
	case hook.SpaceID.Valid:
		res.ParentType = enum.WebhookParentSpace
		res.ParentID = hook.SpaceID.Int64
	default:
		return nil, fmt.Errorf("neither repoID nor spaceID are set for hook %d", hook.ID)
	}
	return res, nil
}

// mapToInternalWebhook converts the external webhook type to the internal DB
// representation, serializing triggers and extra headers for storage.
func mapToInternalWebhook(hook *types.Webhook) (*webhook, error) {
	res := &webhook{
		ID:         hook.ID,
		Version:    hook.Version,
		CreatedBy:  hook.CreatedBy,
		Created:    hook.Created,
		Updated:    hook.Updated,
		Identifier: hook.Identifier,
		Scope:      hook.Scope,
		// TODO [CODE-1364]: Remove once UID/Identifier migration is completed
		DisplayName:           hook.DisplayName,
		Description:           hook.Description,
		URL:                   hook.URL,
		Secret:                hook.Secret,
		Enabled:               hook.Enabled,
		Insecure:              hook.Insecure,
		Triggers:              triggersToString(hook.Triggers),
		LatestExecutionResult: null.StringFromPtr((*string)(hook.LatestExecutionResult)),
		Type:                  hook.Type,
		ExtraHeaders:          extraHeadersToNullString(hook.ExtraHeaders),
	}
	// store the parent id in the FK column matching the parent type.
	switch hook.ParentType {
	case enum.WebhookParentRepo:
		res.RepoID = null.IntFrom(hook.ParentID)
	case enum.WebhookParentSpace:
		res.SpaceID = null.IntFrom(hook.ParentID)
	case enum.WebhookParentRegistry:
		// NOTE(review): registry parents set neither FK column — presumably
		// handled elsewhere; confirm against the registry webhook flow.
	default:
		return nil, fmt.Errorf("webhook parent type %q is not supported", hook.ParentType)
	}
	return res, nil
}
// mapToWebhooks converts a slice of internal DB webhooks to the external type,
// failing on the first row that cannot be mapped.
func mapToWebhooks(hooks []*webhook) ([]*types.Webhook, error) {
	res := make([]*types.Webhook, len(hooks))
	for i, dbHook := range hooks {
		mapped, err := mapToWebhook(dbHook)
		if err != nil {
			return nil, err
		}
		res[i] = mapped
	}
	return res, nil
}
// triggersSeparator defines the character that's used to join triggers for storing them in the DB
// ASSUMPTION: triggers are defined in an enum and don't contain ",".
const triggersSeparator = ","

// triggersFromString splits the comma-separated DB representation back into
// a slice of trigger enums. An empty string yields an empty (non-nil) slice.
func triggersFromString(triggersString string) []enum.WebhookTrigger {
	if triggersString == "" {
		return []enum.WebhookTrigger{}
	}
	rawTriggers := strings.Split(triggersString, triggersSeparator)
	triggers := make([]enum.WebhookTrigger, len(rawTriggers))
	for i, rawTrigger := range rawTriggers {
		// ASSUMPTION: trigger is valid value (as we wrote it to DB)
		triggers[i] = enum.WebhookTrigger(rawTrigger)
	}
	return triggers
}
// triggersToString serializes the triggers into a single separator-joined
// string for DB storage (the inverse of triggersFromString).
func triggersToString(triggers []enum.WebhookTrigger) string {
	raw := make([]string, len(triggers))
	for i, trigger := range triggers {
		raw[i] = string(trigger)
	}
	return strings.Join(raw, triggersSeparator)
}
// extraHeadersToNullString converts a slice of ExtraHeader to a null.String.
// Returns an invalid null.String (NULL) when headers is empty to avoid
// inserting an empty string into PostgreSQL JSONB columns.
func extraHeadersToNullString(headers []types.ExtraHeader) null.String {
	if len(headers) == 0 {
		return null.String{}
	}

	raw, err := json.Marshal(headers)
	if err != nil {
		// a marshalling failure degrades to NULL rather than failing the write.
		return null.String{}
	}

	return null.StringFrom(string(raw))
}

// extraHeadersFromString converts a JSON string back to a slice of ExtraHeader.
// Empty or malformed input yields nil.
func extraHeadersFromString(jsonStr string) []types.ExtraHeader {
	if jsonStr == "" {
		return nil
	}

	var parsed []types.ExtraHeader
	if err := json.Unmarshal([]byte(jsonStr), &parsed); err != nil {
		return nil
	}

	return parsed
}
// applyWebhookFilter applies the query and internal-webhook filter options
// to the given select statement and returns the extended statement.
func applyWebhookFilter(
	opts *types.WebhookFilter,
	stmt squirrel.SelectBuilder,
) squirrel.SelectBuilder {
	if query := opts.Query; query != "" {
		stmt = stmt.Where(PartialMatch("webhook_uid", query))
	}

	if opts.SkipInternal {
		stmt = stmt.Where("webhook_type = ?", enum.WebhookTypeExternal)
	}

	return stmt
}
// selectWebhookParents adds a WHERE clause matching any of the given parents
// (OR-combined) to the statement. Returns an error for unsupported parent types.
// NOTE(review): an empty parents slice leaves parentSelector nil — presumably
// callers always pass at least one parent; verify against call sites.
func selectWebhookParents(
	parents []types.WebhookParentInfo,
	stmt *squirrel.SelectBuilder,
) error {
	var parentSelector squirrel.Or
	for _, parent := range parents {
		switch parent.Type {
		case enum.WebhookParentRepo:
			parentSelector = append(parentSelector, squirrel.Eq{
				"webhook_repo_id": parent.ID,
			})
		case enum.WebhookParentSpace:
			parentSelector = append(parentSelector, squirrel.Eq{
				"webhook_space_id": parent.ID,
			})
		case enum.WebhookParentRegistry:
			// registry parents contribute no repo/space condition here.
		default:
			return fmt.Errorf("webhook parent type '%s' is not supported", parent.Type)
		}
	}
	*stmt = stmt.Where(parentSelector)
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/principal_user.go | app/store/database/principal_user.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"fmt"
"strings"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
)
// user is a DB representation of a user principal.
// It is required to allow storing transformed UIDs used for uniqueness constraints and searching.
type user struct {
	types.User
	// UIDUnique holds the transformed (e.g. lowercased) UID used for the
	// DB uniqueness constraint and case-insensitive lookups.
	UIDUnique string `db:"principal_uid_unique"`
}

// userColumns extends the shared principal columns with the user password column.
const userColumns = principalCommonColumns + `
	,principal_user_password`

// userSelectBase is the shared SELECT prefix for all user queries.
const userSelectBase = `
	SELECT` + userColumns + `
	FROM principals`
// FindUser finds the user by id.
func (s *PrincipalStore) FindUser(ctx context.Context, id int64) (*types.User, error) {
	const sqlQuery = userSelectBase + `
	WHERE principal_type = 'user' AND principal_id = $1`

	dst := new(user)
	accessor := dbtx.GetAccessor(ctx, s.db)
	if err := accessor.GetContext(ctx, dst, sqlQuery, id); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Select by id query failed")
	}

	return s.mapDBUser(dst), nil
}
// FindUserByUID finds the user by uid.
// The lookup goes through the transformed unique UID column, so it follows
// whatever case/normalization rules the store's uidTransformation applies.
func (s *PrincipalStore) FindUserByUID(ctx context.Context, uid string) (*types.User, error) {
	const sqlQuery = userSelectBase + `
	WHERE principal_type = 'user' AND principal_uid_unique = $1`
	// map the UID to unique UID before searching!
	uidUnique, err := s.uidTransformation(uid)
	if err != nil {
		// in case we fail to transform, return a not found (as it can't exist in the first place)
		log.Ctx(ctx).Debug().Msgf("failed to transform uid '%s': %s", uid, err.Error())
		return nil, gitness_store.ErrResourceNotFound
	}
	db := dbtx.GetAccessor(ctx, s.db)
	dst := new(user)
	if err = db.GetContext(ctx, dst, sqlQuery, uidUnique); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Select by uid query failed")
	}
	return s.mapDBUser(dst), nil
}
// FindUserByEmail finds the user by email (case-insensitive).
func (s *PrincipalStore) FindUserByEmail(ctx context.Context, email string) (*types.User, error) {
	const sqlQuery = userSelectBase + `
	WHERE principal_type = 'user' AND LOWER(principal_email) = $1`

	dst := new(user)
	accessor := dbtx.GetAccessor(ctx, s.db)
	if err := accessor.GetContext(ctx, dst, sqlQuery, strings.ToLower(email)); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Select by email query failed")
	}

	return s.mapDBUser(dst), nil
}
// CreateUser saves the user details.
// On success the generated principal id is written back to user.ID.
func (s *PrincipalStore) CreateUser(ctx context.Context, user *types.User) error {
	const sqlQuery = `
	INSERT INTO principals (
		principal_type
		,principal_uid
		,principal_uid_unique
		,principal_email
		,principal_display_name
		,principal_admin
		,principal_blocked
		,principal_salt
		,principal_created
		,principal_updated
		,principal_user_password
	) values (
		'user'
		,:principal_uid
		,:principal_uid_unique
		,:principal_email
		,:principal_display_name
		,:principal_admin
		,:principal_blocked
		,:principal_salt
		,:principal_created
		,:principal_updated
		,:principal_user_password
	) RETURNING principal_id`
	// derive the internal representation (computes the unique UID).
	dbUser, err := s.mapToDBUser(user)
	if err != nil {
		return fmt.Errorf("failed to map db user: %w", err)
	}
	db := dbtx.GetAccessor(ctx, s.db)
	query, arg, err := db.BindNamed(sqlQuery, dbUser)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind user object")
	}
	if err = db.QueryRowContext(ctx, query, arg...).Scan(&user.ID); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Insert query failed")
	}
	return nil
}
// UpdateUser updates an existing user.
// All mutable principal columns are overwritten from the given object;
// the update is keyed by principal_id and restricted to user principals.
func (s *PrincipalStore) UpdateUser(ctx context.Context, user *types.User) error {
	const sqlQuery = `
	UPDATE principals
	SET
	 principal_uid            = :principal_uid
	,principal_uid_unique  	  = :principal_uid_unique
	,principal_email          = :principal_email
	,principal_display_name   = :principal_display_name
	,principal_admin          = :principal_admin
	,principal_blocked        = :principal_blocked
	,principal_salt           = :principal_salt
	,principal_updated        = :principal_updated
	,principal_user_password  = :principal_user_password
	WHERE principal_type = 'user' AND principal_id = :principal_id`
	// derive the internal representation (computes the unique UID).
	dbUser, err := s.mapToDBUser(user)
	if err != nil {
		return fmt.Errorf("failed to map db user: %w", err)
	}
	db := dbtx.GetAccessor(ctx, s.db)
	query, arg, err := db.BindNamed(sqlQuery, dbUser)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind user object")
	}
	if _, err = db.ExecContext(ctx, query, arg...); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Update query failed")
	}
	// err is necessarily nil here; return nil explicitly for clarity
	// (the previous `return err` relied on that implicitly).
	return nil
}
// DeleteUser deletes the user.
// Deleting an id that doesn't exist (or isn't a user) is not an error.
func (s *PrincipalStore) DeleteUser(ctx context.Context, id int64) error {
	const deleteQuery = `
	DELETE FROM principals
	WHERE principal_type = 'user' AND principal_id = $1`

	accessor := dbtx.GetAccessor(ctx, s.db)

	_, err := accessor.ExecContext(ctx, deleteQuery, id)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "The delete query failed")
	}

	return nil
}
// ListUsers returns a list of users.
// Pagination and ordering come from opts; ordering defaults to ascending.
// NOTE(review): unlike CountUsers, the Admin filter from opts is not applied
// here — confirm whether that asymmetry is intentional.
func (s *PrincipalStore) ListUsers(ctx context.Context, opts *types.UserFilter) ([]*types.User, error) {
	db := dbtx.GetAccessor(ctx, s.db)
	dst := []*user{}
	stmt := database.Builder.
		Select(userColumns).
		From("principals").
		Where("principal_type = 'user'")
	stmt = stmt.Limit(database.Limit(opts.Size))
	stmt = stmt.Offset(database.Offset(opts.Page, opts.Size))
	order := opts.Order
	if order == enum.OrderDefault {
		order = enum.OrderAsc
	}
	switch opts.Sort {
	case enum.UserAttrName, enum.UserAttrNone:
		// NOTE: string concatenation is safe because the
		// order attribute is an enum and is not user-defined,
		// and is therefore not subject to injection attacks.
		stmt = stmt.OrderBy("principal_display_name " + order.String())
	case enum.UserAttrCreated:
		stmt = stmt.OrderBy("principal_created " + order.String())
	case enum.UserAttrUpdated:
		stmt = stmt.OrderBy("principal_updated " + order.String())
	case enum.UserAttrEmail:
		stmt = stmt.OrderBy("LOWER(principal_email) " + order.String())
	case enum.UserAttrUID:
		stmt = stmt.OrderBy("principal_uid " + order.String())
	case enum.UserAttrAdmin:
		stmt = stmt.OrderBy("principal_admin " + order.String())
	}
	// the query has no bound parameters (limit/offset are inlined), so the
	// args returned by ToSql are discarded.
	sql, _, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert query to sql")
	}
	if err = db.SelectContext(ctx, &dst, sql); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing custom list query")
	}
	return s.mapDBUsers(dst), nil
}
// CountUsers returns a count of users matching the given filter.
func (s *PrincipalStore) CountUsers(ctx context.Context, opts *types.UserFilter) (int64, error) {
	countStmt := database.Builder.
		Select("count(*)").
		From("principals").
		Where("principal_type = 'user'")

	if opts.Admin {
		countStmt = countStmt.Where("principal_admin = ?", opts.Admin)
	}

	sql, args, err := countStmt.ToSql()
	if err != nil {
		return 0, errors.Wrap(err, "Failed to convert query to sql")
	}

	var count int64
	accessor := dbtx.GetAccessor(ctx, s.db)
	if err := accessor.QueryRowContext(ctx, sql, args...).Scan(&count); err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "Failed executing count query")
	}

	return count, nil
}
// mapDBUser converts the internal DB user representation to the external type.
func (s *PrincipalStore) mapDBUser(dbUser *user) *types.User {
	return &dbUser.User
}

// mapDBUsers converts a slice of internal DB users to the external type.
func (s *PrincipalStore) mapDBUsers(dbUsers []*user) []*types.User {
	users := make([]*types.User, len(dbUsers))
	for i, dbUser := range dbUsers {
		users[i] = s.mapDBUser(dbUser)
	}
	return users
}
// mapToDBUser converts the external user type to the internal DB
// representation, computing the transformed unique UID on the way.
func (s *PrincipalStore) mapToDBUser(usr *types.User) (*user, error) {
	// user comes from outside.
	if usr == nil {
		return nil, fmt.Errorf("user is nil")
	}

	uidUnique, err := s.uidTransformation(usr.UID)
	if err != nil {
		return nil, fmt.Errorf("failed to transform user UID: %w", err)
	}

	return &user{
		User:      *usr,
		UIDUnique: uidUnique,
	}, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/trigger.go | app/store/database/trigger.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/harness/gitness/app/store"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/jmoiron/sqlx"
sqlxtypes "github.com/jmoiron/sqlx/types"
"github.com/pkg/errors"
)
// compile-time check that triggerStore satisfies store.TriggerStore.
var _ store.TriggerStore = (*triggerStore)(nil)

// trigger is the internal DB representation of a pipeline trigger.
// Actions are stored as a JSON-encoded array in trigger_actions.
type trigger struct {
	ID          int64              `db:"trigger_id"`
	Identifier  string             `db:"trigger_uid"`
	Description string             `db:"trigger_description"`
	Type        string             `db:"trigger_type"`
	Secret      string             `db:"trigger_secret"`
	PipelineID  int64              `db:"trigger_pipeline_id"`
	RepoID      int64              `db:"trigger_repo_id"`
	CreatedBy   int64              `db:"trigger_created_by"`
	Disabled    bool               `db:"trigger_disabled"`
	Actions     sqlxtypes.JSONText `db:"trigger_actions"`
	Created     int64              `db:"trigger_created"`
	Updated     int64              `db:"trigger_updated"`
	Version     int64              `db:"trigger_version"`
}
// mapInternalToTrigger converts the internal DB representation to the
// external trigger type, decoding the JSON-encoded actions.
func mapInternalToTrigger(trigger *trigger) (*types.Trigger, error) {
	var actions []enum.TriggerAction
	if err := json.Unmarshal(trigger.Actions, &actions); err != nil {
		return nil, errors.Wrap(err, "could not unmarshal trigger.actions")
	}

	result := &types.Trigger{
		ID:          trigger.ID,
		Identifier:  trigger.Identifier,
		Description: trigger.Description,
		Type:        trigger.Type,
		Secret:      trigger.Secret,
		PipelineID:  trigger.PipelineID,
		RepoID:      trigger.RepoID,
		CreatedBy:   trigger.CreatedBy,
		Disabled:    trigger.Disabled,
		Actions:     actions,
		Created:     trigger.Created,
		Updated:     trigger.Updated,
		Version:     trigger.Version,
	}
	return result, nil
}

// mapInternalToTriggerList converts a slice of internal DB triggers to the
// external type, failing on the first row that cannot be mapped.
func mapInternalToTriggerList(triggers []*trigger) ([]*types.Trigger, error) {
	ret := make([]*types.Trigger, len(triggers))
	for i, dbTrigger := range triggers {
		mapped, err := mapInternalToTrigger(dbTrigger)
		if err != nil {
			return nil, err
		}
		ret[i] = mapped
	}
	return ret, nil
}

// mapTriggerToInternal converts the external trigger type to the internal DB
// representation, JSON-encoding the actions.
func mapTriggerToInternal(t *types.Trigger) *trigger {
	return &trigger{
		ID:          t.ID,
		Identifier:  t.Identifier,
		Description: t.Description,
		Type:        t.Type,
		Secret:      t.Secret,
		PipelineID:  t.PipelineID,
		RepoID:      t.RepoID,
		CreatedBy:   t.CreatedBy,
		Disabled:    t.Disabled,
		Actions:     EncodeToSQLXJSON(t.Actions),
		Created:     t.Created,
		Updated:     t.Updated,
		Version:     t.Version,
	}
}
// NewTriggerStore returns a new TriggerStore.
func NewTriggerStore(db *sqlx.DB) store.TriggerStore {
	return &triggerStore{
		db: db,
	}
}

// triggerStore implements store.TriggerStore backed by a relational database.
type triggerStore struct {
	db *sqlx.DB
}

const (
	// triggerColumns is the column list used by read queries.
	// NOTE(review): it omits trigger_type, trigger_secret, trigger_repo_id,
	// and trigger_created_by — rows read with it carry zero values for those
	// fields; confirm this is intentional (e.g. to avoid exposing the secret).
	triggerColumns = `
	trigger_id
	,trigger_uid
	,trigger_disabled
	,trigger_actions
	,trigger_description
	,trigger_pipeline_id
	,trigger_created
	,trigger_updated
	,trigger_version
	`
)
// FindByIdentifier returns a trigger given a pipeline ID and a trigger identifier.
// NOTE(review): the query selects only triggerColumns, so Type/Secret/RepoID/
// CreatedBy on the returned trigger are zero values — confirm callers don't
// rely on them.
func (s *triggerStore) FindByIdentifier(
	ctx context.Context,
	pipelineID int64,
	identifier string,
) (*types.Trigger, error) {
	const findQueryStmt = `
		SELECT` + triggerColumns + `
		FROM triggers
		WHERE trigger_pipeline_id = $1 AND trigger_uid = $2`
	db := dbtx.GetAccessor(ctx, s.db)

	dst := new(trigger)
	if err := db.GetContext(ctx, dst, findQueryStmt, pipelineID, identifier); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find trigger")
	}
	return mapInternalToTrigger(dst)
}
// Create creates a new trigger in the datastore.
// On success the generated ID is written back to t.ID.
func (s *triggerStore) Create(ctx context.Context, t *types.Trigger) error {
	const triggerInsertStmt = `
	INSERT INTO triggers (
		trigger_uid
		,trigger_description
		,trigger_actions
		,trigger_disabled
		,trigger_type
		,trigger_secret
		,trigger_created_by
		,trigger_pipeline_id
		,trigger_repo_id
		,trigger_created
		,trigger_updated
		,trigger_version
	) VALUES (
		:trigger_uid
		,:trigger_description
		,:trigger_actions
		,:trigger_disabled
		,:trigger_type
		,:trigger_secret
		,:trigger_created_by
		,:trigger_pipeline_id
		,:trigger_repo_id
		,:trigger_created
		,:trigger_updated
		,:trigger_version
	) RETURNING trigger_id`
	db := dbtx.GetAccessor(ctx, s.db)

	trigger := mapTriggerToInternal(t)

	query, arg, err := db.BindNamed(triggerInsertStmt, trigger)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind trigger object")
	}

	// BUGFIX: scan the generated ID into the caller's object instead of the
	// internal copy (which is discarded) — otherwise the caller never sees
	// the assigned primary key. This matches the other stores in this file,
	// which scan RETURNING ids into the caller's object.
	if err = db.QueryRowContext(ctx, query, arg...).Scan(&t.ID); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Trigger query failed")
	}

	return nil
}
// Update tries to update a trigger in the datastore with optimistic locking:
// the row is only updated when the stored version equals t.Version; when no
// row matches, ErrVersionConflict is returned. On success t.Version and
// t.Updated are refreshed in place.
func (s *triggerStore) Update(ctx context.Context, t *types.Trigger) error {
	const triggerUpdateStmt = `
	UPDATE triggers
	SET
		trigger_uid = :trigger_uid
		,trigger_description = :trigger_description
		,trigger_disabled = :trigger_disabled
		,trigger_updated = :trigger_updated
		,trigger_actions = :trigger_actions
		,trigger_version = :trigger_version
	WHERE trigger_id = :trigger_id AND trigger_version = :trigger_version - 1`
	updatedAt := time.Now()

	trigger := mapTriggerToInternal(t)

	// bump version (optimistic locking) and refresh the updated timestamp.
	trigger.Version++
	trigger.Updated = updatedAt.UnixMilli()

	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(triggerUpdateStmt, trigger)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind trigger object")
	}

	result, err := db.ExecContext(ctx, query, arg...)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to update trigger")
	}

	count, err := result.RowsAffected()
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to get number of updated rows")
	}

	// zero affected rows means the version predicate failed -> concurrent update won.
	if count == 0 {
		return gitness_store.ErrVersionConflict
	}

	t.Version = trigger.Version
	t.Updated = trigger.Updated
	return nil
}
// UpdateOptLock updates the trigger using the optimistic locking mechanism:
// it applies mutateFn to a copy of the latest trigger and retries on version
// conflicts until the update succeeds or a non-conflict error occurs.
func (s *triggerStore) UpdateOptLock(ctx context.Context,
	trigger *types.Trigger,
	mutateFn func(trigger *types.Trigger) error) (*types.Trigger, error) {
	for {
		// mutate a copy so the caller's object stays untouched on failure.
		dup := *trigger
		if err := mutateFn(&dup); err != nil {
			return nil, err
		}

		err := s.Update(ctx, &dup)
		if err == nil {
			return &dup, nil
		}
		if !errors.Is(err, gitness_store.ErrVersionConflict) {
			return nil, err
		}

		// version conflict: re-read the latest state and try again.
		trigger, err = s.FindByIdentifier(ctx, trigger.PipelineID, trigger.Identifier)
		if err != nil {
			return nil, err
		}
	}
}
// List lists the triggers for a given pipeline ID, applying the query filter
// and pagination options.
func (s *triggerStore) List(
	ctx context.Context,
	pipelineID int64,
	filter types.ListQueryFilter,
) ([]*types.Trigger, error) {
	stmt := database.Builder.
		Select(triggerColumns).
		From("triggers").
		// bind the ID as int64 directly (previously stringified via
		// fmt.Sprint, inconsistent with Count which binds the raw int64).
		Where("trigger_pipeline_id = ?", pipelineID)

	stmt = stmt.Limit(database.Limit(filter.Size))
	stmt = stmt.Offset(database.Offset(filter.Page, filter.Size))

	if filter.Query != "" {
		stmt = stmt.Where(PartialMatch("trigger_uid", filter.Query))
	}

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, fmt.Errorf("failed to convert query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	dst := []*trigger{}
	if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing custom list query")
	}

	return mapInternalToTriggerList(dst)
}
// ListAllEnabled lists all enabled triggers for a given repo without pagination.
func (s *triggerStore) ListAllEnabled(
	ctx context.Context,
	repoID int64,
) ([]*types.Trigger, error) {
	stmt := database.Builder.
		Select(triggerColumns).
		From("triggers").
		// bind the ID as int64 directly (previously stringified via
		// fmt.Sprint, inconsistent with Count which binds the raw int64).
		Where("trigger_repo_id = ? AND trigger_disabled = false", repoID)

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, fmt.Errorf("failed to convert query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	dst := []*trigger{}
	if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing custom list query")
	}

	return mapInternalToTriggerList(dst)
}
// Count of triggers under a given pipeline, honoring the query filter.
func (s *triggerStore) Count(ctx context.Context, pipelineID int64, filter types.ListQueryFilter) (int64, error) {
	countStmt := database.Builder.
		Select("count(*)").
		From("triggers").
		Where("trigger_pipeline_id = ?", pipelineID)

	if q := filter.Query; q != "" {
		countStmt = countStmt.Where(PartialMatch("trigger_uid", q))
	}

	sql, args, err := countStmt.ToSql()
	if err != nil {
		return 0, errors.Wrap(err, "Failed to convert query to sql")
	}

	var count int64
	accessor := dbtx.GetAccessor(ctx, s.db)
	if err := accessor.QueryRowContext(ctx, sql, args...).Scan(&count); err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "Failed executing count query")
	}

	return count, nil
}
// DeleteByIdentifier deletes a trigger given a pipeline ID and a trigger identifier.
// Deleting a trigger that doesn't exist is not an error.
func (s *triggerStore) DeleteByIdentifier(ctx context.Context, pipelineID int64, identifier string) error {
	const triggerDeleteStmt = `
		DELETE FROM triggers
		WHERE trigger_pipeline_id = $1 AND trigger_uid = $2`

	accessor := dbtx.GetAccessor(ctx, s.db)

	_, err := accessor.ExecContext(ctx, triggerDeleteStmt, pipelineID, identifier)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Could not delete trigger")
	}

	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/publickey.go | app/store/database/publickey.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/errors"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/Masterminds/squirrel"
"github.com/guregu/null"
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
)
// compile-time check that PublicKeyStore satisfies store.PublicKeyStore.
var _ store.PublicKeyStore = PublicKeyStore{}

// NewPublicKeyStore returns a new PublicKeyStore.
func NewPublicKeyStore(db *sqlx.DB) PublicKeyStore {
	return PublicKeyStore{
		db: db,
	}
}

// PublicKeyStore implements a store.PublicKeyStore backed by a relational database.
type PublicKeyStore struct {
	db *sqlx.DB
}
// publicKey is the internal DB representation of a public key.
// Nullable columns use null.* / json.RawMessage wrappers.
type publicKey struct {
	ID               int64           `db:"public_key_id"`
	PrincipalID      int64           `db:"public_key_principal_id"`
	Created          int64           `db:"public_key_created"`
	Verified         null.Int        `db:"public_key_verified"`
	Identifier       string          `db:"public_key_identifier"`
	Usage            string          `db:"public_key_usage"`
	Fingerprint      string          `db:"public_key_fingerprint"`
	Content          string          `db:"public_key_content"`
	Comment          string          `db:"public_key_comment"`
	Type             string          `db:"public_key_type"`
	Scheme           string          `db:"public_key_scheme"`
	ValidFrom        null.Int        `db:"public_key_valid_from"`
	ValidTo          null.Int        `db:"public_key_valid_to"`
	RevocationReason null.String     `db:"public_key_revocation_reason"`
	Metadata         json.RawMessage `db:"public_key_metadata"`
}

const (
	// publicKeyColumns is the full column list shared by all public key queries.
	publicKeyColumns = `
		 public_key_id
		,public_key_principal_id
		,public_key_created
		,public_key_verified
		,public_key_identifier
		,public_key_usage
		,public_key_fingerprint
		,public_key_content
		,public_key_comment
		,public_key_type
		,public_key_scheme
		,public_key_valid_from
		,public_key_valid_to
		,public_key_revocation_reason
		,public_key_metadata`

	// publicKeySelectBase is the shared SELECT prefix for public key reads.
	publicKeySelectBase = `
	SELECT` + publicKeyColumns + `
	FROM public_keys`
)
// Find fetches a public key by its internal ID.
// (The previous comment incorrectly said "job".)
func (s PublicKeyStore) Find(ctx context.Context, id int64) (*types.PublicKey, error) {
	const sqlQuery = publicKeySelectBase + `
	WHERE public_key_id = $1`

	db := dbtx.GetAccessor(ctx, s.db)

	result := &publicKey{}
	if err := db.GetContext(ctx, result, sqlQuery, id); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find public key by id")
	}

	key := mapToPublicKey(result)

	return &key, nil
}
// FindByIdentifier returns a public key given a principal ID and an identifier.
// The identifier comparison is case-insensitive.
func (s PublicKeyStore) FindByIdentifier(
	ctx context.Context,
	principalID int64,
	identifier string,
) (*types.PublicKey, error) {
	const sqlQuery = publicKeySelectBase + `
	WHERE public_key_principal_id = $1 and LOWER(public_key_identifier) = $2`

	dst := &publicKey{}
	accessor := dbtx.GetAccessor(ctx, s.db)
	if err := accessor.GetContext(ctx, dst, sqlQuery, principalID, strings.ToLower(identifier)); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find public key by principal and identifier")
	}

	key := mapToPublicKey(dst)
	return &key, nil
}
// Create creates a new public key and stores the generated row ID back on key.
func (s PublicKeyStore) Create(ctx context.Context, key *types.PublicKey) error {
	const sqlQuery = `
	INSERT INTO public_keys (
		 public_key_principal_id
		,public_key_created
		,public_key_verified
		,public_key_identifier
		,public_key_usage
		,public_key_fingerprint
		,public_key_content
		,public_key_comment
		,public_key_type
		,public_key_scheme
		,public_key_valid_from
		,public_key_valid_to
		,public_key_revocation_reason
		,public_key_metadata
	) values (
		 :public_key_principal_id
		,:public_key_created
		,:public_key_verified
		,:public_key_identifier
		,:public_key_usage
		,:public_key_fingerprint
		,:public_key_content
		,:public_key_comment
		,:public_key_type
		,:public_key_scheme
		,:public_key_valid_from
		,:public_key_valid_to
		,:public_key_revocation_reason
		,:public_key_metadata
	) RETURNING public_key_id`

	db := dbtx.GetAccessor(ctx, s.db)

	// Convert to the internal row representation and bind its fields to the
	// named placeholders in the INSERT statement.
	dbKey := mapToInternalPublicKey(key)

	query, arg, err := db.BindNamed(sqlQuery, &dbKey)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind public key object")
	}

	if err = db.QueryRowContext(ctx, query, arg...).Scan(&dbKey.ID); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Insert public key query failed")
	}

	// Propagate the generated ID to the caller's object.
	key.ID = dbKey.ID

	return nil
}
// Update updates a public key. Only the validity window (valid_from/valid_to)
// and the revocation reason columns are written; all other fields are immutable
// after creation.
func (s PublicKeyStore) Update(ctx context.Context, publicKey *types.PublicKey) error {
	const sqlQuery = `
	UPDATE public_keys
	SET
		 public_key_valid_from = :public_key_valid_from
		,public_key_valid_to = :public_key_valid_to
		,public_key_revocation_reason = :public_key_revocation_reason
	WHERE public_key_id = :public_key_id`

	dbPublicKey := mapToInternalPublicKey(publicKey)

	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(sqlQuery, dbPublicKey)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind public key object")
	}

	_, err = db.ExecContext(ctx, query, arg...)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to update public key")
	}

	return nil
}
// DeleteByIdentifier deletes a public key identified by principal ID and a
// case-insensitive identifier. Returns a NotFound error when no row matched.
func (s PublicKeyStore) DeleteByIdentifier(ctx context.Context, principalID int64, identifier string) error {
	const sqlQuery = `DELETE FROM public_keys WHERE public_key_principal_id = $1 and LOWER(public_key_identifier) = $2`

	db := dbtx.GetAccessor(ctx, s.db)

	result, err := db.ExecContext(ctx, sqlQuery, principalID, strings.ToLower(identifier))
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Delete public key query failed")
	}

	count, err := result.RowsAffected()
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "RowsAffected after delete of public key failed")
	}

	// Surface a NotFound so callers can distinguish "nothing deleted" from success.
	if count == 0 {
		return errors.NotFound("Key not found")
	}

	return nil
}
// MarkAsVerified updates the public key to mark it as verified.
// The verified parameter is the verification timestamp stored in
// public_key_verified.
func (s PublicKeyStore) MarkAsVerified(ctx context.Context, id int64, verified int64) error {
	const sqlQuery = `
	UPDATE public_keys
	SET public_key_verified = $1
	WHERE public_key_id = $2`

	// Fixed typo in the error message ("varified" -> "verified").
	if _, err := dbtx.GetAccessor(ctx, s.db).ExecContext(ctx, sqlQuery, verified, id); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to mark public key as verified")
	}

	return nil
}
// Count returns the number of public keys matching the filter, optionally
// restricted to a single principal when principalID is non-nil.
func (s PublicKeyStore) Count(
	ctx context.Context,
	principalID *int64,
	filter *types.PublicKeyFilter,
) (int, error) {
	stmt := database.Builder.
		Select("count(*)").
		From("public_keys")

	if principalID != nil {
		stmt = stmt.Where("public_key_principal_id = ?", *principalID)
	}

	// Apply query/usage/scheme filters; pagination and sorting are irrelevant
	// for a COUNT query and deliberately omitted.
	stmt = s.applyQueryFilter(stmt, filter)

	sql, args, err := stmt.ToSql()
	if err != nil {
		return 0, fmt.Errorf("failed to convert query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	var count int
	if err := db.QueryRowContext(ctx, sql, args...).Scan(&count); err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "failed to execute count public keys query")
	}

	return count, nil
}
// List returns the public keys matching the filter, optionally restricted to a
// single principal when principalID is non-nil. Pagination and sorting come
// from the filter.
func (s PublicKeyStore) List(
	ctx context.Context,
	principalID *int64,
	filter *types.PublicKeyFilter,
) ([]types.PublicKey, error) {
	return s.list(ctx, func(stmt squirrel.SelectBuilder) squirrel.SelectBuilder {
		if principalID != nil {
			stmt = stmt.Where("public_key_principal_id = ?", *principalID)
		}
		stmt = s.applyQueryFilter(stmt, filter)
		stmt = s.applyPagination(stmt, filter.Pagination)
		stmt = s.applySortFilter(stmt, filter.Sort, filter.Order)
		return stmt
	})
}
// ListByFingerprint returns public keys given a fingerprint, optionally
// restricted by principal, key usage, and scheme. Results are ordered by
// creation time, newest first.
func (s PublicKeyStore) ListByFingerprint(
	ctx context.Context,
	fingerprint string,
	principalID *int64,
	usages []enum.PublicKeyUsage,
	schemes []enum.PublicKeyScheme,
) ([]types.PublicKey, error) {
	return s.list(ctx, func(stmt squirrel.SelectBuilder) squirrel.SelectBuilder {
		stmt = stmt.Where("public_key_fingerprint = ?", fingerprint)
		if principalID != nil {
			stmt = stmt.Where("public_key_principal_id = ?", *principalID)
		}
		stmt = s.applyUsages(stmt, usages)
		stmt = s.applySchemes(stmt, schemes)
		stmt = s.applySortFilter(stmt, enum.PublicKeySortCreated, enum.OrderDesc)
		return stmt
	})
}
// ListBySubKeyID returns public keys that own the given sub key (joined via
// the public_key_sub_keys table), optionally restricted by principal, usage,
// and scheme. Results are ordered by creation time, newest first.
func (s PublicKeyStore) ListBySubKeyID(
	ctx context.Context,
	subKeyID string,
	principalID *int64,
	usages []enum.PublicKeyUsage,
	schemes []enum.PublicKeyScheme,
) ([]types.PublicKey, error) {
	return s.list(ctx, func(stmt squirrel.SelectBuilder) squirrel.SelectBuilder {
		stmt = stmt.Join("public_key_sub_keys ON public_key_sub_key_public_key_id = public_key_id").
			Where("public_key_sub_key_id = ?", subKeyID)
		if principalID != nil {
			stmt = stmt.Where("public_key_principal_id = ?", *principalID)
		}
		stmt = s.applyUsages(stmt, usages)
		stmt = s.applySchemes(stmt, schemes)
		stmt = s.applySortFilter(stmt, enum.PublicKeySortCreated, enum.OrderDesc)
		return stmt
	})
}
// list executes a SELECT over the public_keys table. The builder callback
// customizes the statement (filters, joins, ordering, pagination) before it
// is rendered and executed.
func (s PublicKeyStore) list(
	ctx context.Context,
	builder func(squirrel.SelectBuilder) squirrel.SelectBuilder,
) ([]types.PublicKey, error) {
	stmt := database.Builder.
		Select(publicKeyColumns).
		From("public_keys")

	stmt = builder(stmt)

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, fmt.Errorf("failed to convert query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	keys := make([]publicKey, 0)
	if err = db.SelectContext(ctx, &keys, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to execute list public keys query")
	}

	return mapToPublicKeys(keys), nil
}
// applyQueryFilter narrows stmt by the filter's free-text query (partial match
// on the identifier) and by the usage and scheme lists.
func (s PublicKeyStore) applyQueryFilter(
	stmt squirrel.SelectBuilder,
	filter *types.PublicKeyFilter,
) squirrel.SelectBuilder {
	if filter.Query != "" {
		stmt = stmt.Where(PartialMatch("public_key_identifier", filter.Query))
	}

	stmt = s.applyUsages(stmt, filter.Usages)
	stmt = s.applySchemes(stmt, filter.Schemes)

	return stmt
}
// applyUsages narrows stmt to rows whose public_key_usage matches one of the
// given usages. A single usage uses plain equality; multiple usages need a
// driver-specific construct: squirrel's IN expansion for SQLite, and
// ANY over a pq.Array for Postgres.
func (s PublicKeyStore) applyUsages(
	stmt squirrel.SelectBuilder,
	usages []enum.PublicKeyUsage,
) squirrel.SelectBuilder {
	if len(usages) == 1 {
		stmt = stmt.Where("public_key_usage = ?", usages[0])
	} else if len(usages) > 1 {
		switch s.db.DriverName() {
		case SqliteDriverName:
			stmt = stmt.Where(squirrel.Eq{"public_key_usage": usages})
		case PostgresDriverName:
			stmt = stmt.Where("public_key_usage = ANY(?)", pq.Array(usages))
		}
	}

	return stmt
}
// applySchemes narrows stmt to rows whose public_key_scheme matches one of the
// given schemes. A single scheme uses plain equality; multiple schemes use a
// driver-specific construct: squirrel's IN expansion for SQLite, and ANY over
// a pq.Array for Postgres.
func (s PublicKeyStore) applySchemes(
	stmt squirrel.SelectBuilder,
	schemes []enum.PublicKeyScheme,
) squirrel.SelectBuilder {
	switch {
	case len(schemes) == 1:
		return stmt.Where("public_key_scheme = ?", schemes[0])
	case len(schemes) > 1:
		switch s.db.DriverName() {
		case SqliteDriverName:
			return stmt.Where(squirrel.Eq{"public_key_scheme": schemes})
		case PostgresDriverName:
			return stmt.Where("public_key_scheme = ANY(?)", pq.Array(schemes))
		}
	}

	return stmt
}
// applyPagination adds LIMIT/OFFSET clauses derived from the pagination filter.
func (PublicKeyStore) applyPagination(
	stmt squirrel.SelectBuilder,
	filter types.Pagination,
) squirrel.SelectBuilder {
	return stmt.
		Limit(database.Limit(filter.Size)).
		Offset(database.Offset(filter.Page, filter.Size))
}
// applySortFilter adds an ORDER BY clause for the requested sort column and
// direction. The default order resolves to ascending; an unrecognized sort
// leaves the statement unordered.
func (PublicKeyStore) applySortFilter(
	stmt squirrel.SelectBuilder,
	sort enum.PublicKeySort,
	order enum.Order,
) squirrel.SelectBuilder {
	if order == enum.OrderDefault {
		order = enum.OrderAsc
	}

	var column string
	switch sort {
	case enum.PublicKeySortIdentifier:
		column = "public_key_identifier"
	case enum.PublicKeySortCreated:
		column = "public_key_created"
	default:
		return stmt
	}

	return stmt.OrderBy(column + " " + order.String())
}
// mapToInternalPublicKey converts the API-facing types.PublicKey into the
// internal database row representation, translating nil pointers into
// SQL NULL via the guregu/null wrappers.
func mapToInternalPublicKey(in *types.PublicKey) publicKey {
	var revocationReason null.String
	if in.RevocationReason != nil {
		revocationReason = null.StringFrom(string(*in.RevocationReason))
	}

	return publicKey{
		ID:               in.ID,
		PrincipalID:      in.PrincipalID,
		Created:          in.Created,
		Identifier:       in.Identifier,
		Verified:         null.IntFromPtr(in.Verified),
		Usage:            string(in.Usage),
		Type:             in.Type,
		Scheme:           string(in.Scheme),
		Fingerprint:      in.Fingerprint,
		Content:          in.Content,
		Comment:          in.Comment,
		ValidFrom:        null.IntFromPtr(in.ValidFrom),
		ValidTo:          null.IntFromPtr(in.ValidTo),
		RevocationReason: revocationReason,
		Metadata:         in.Metadata,
	}
}
// mapToPublicKey converts the internal database row representation into the
// API-facing types.PublicKey, translating SQL NULL into nil pointers.
func mapToPublicKey(in *publicKey) types.PublicKey {
	return types.PublicKey{
		ID:          in.ID,
		PrincipalID: in.PrincipalID,
		Created:     in.Created,
		Verified:    in.Verified.Ptr(),
		Identifier:  in.Identifier,
		Usage:       enum.PublicKeyUsage(in.Usage),
		Fingerprint: in.Fingerprint,
		Content:     in.Content,
		Comment:     in.Comment,
		Type:        in.Type,
		Scheme:      enum.PublicKeyScheme(in.Scheme),
		ValidFrom:   in.ValidFrom.Ptr(),
		ValidTo:     in.ValidTo.Ptr(),
		// Converts *string (nil for non-revoked keys) to *enum.RevocationReason.
		RevocationReason: (*enum.RevocationReason)(in.RevocationReason.Ptr()),
		Metadata:         in.Metadata,
	}
}
// mapToPublicKeys converts a slice of internal rows into API-facing keys.
func mapToPublicKeys(
	keys []publicKey,
) []types.PublicKey {
	res := make([]types.PublicKey, 0, len(keys))
	for i := range keys {
		// Index the slice directly so the element isn't copied before mapping.
		res = append(res, mapToPublicKey(&keys[i]))
	}
	return res
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/stage.go | app/store/database/stage.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/app/store"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/jmoiron/sqlx"
sqlxtypes "github.com/jmoiron/sqlx/types"
)
// Compile-time assertion that stageStore satisfies store.StageStore.
var _ store.StageStore = (*stageStore)(nil)

const (
	// stageColumns is the shared column list for SELECTs on the stages table.
	// NOTE(review): stage_parent_group_id is present in the struct and in the
	// INSERT, but absent here — SELECTs leave ParentGroupID at its zero value.
	// Looks intentional (column added later?); confirm before relying on it.
	stageColumns = `
	stage_id
	,stage_execution_id
	,stage_repo_id
	,stage_number
	,stage_name
	,stage_kind
	,stage_type
	,stage_status
	,stage_error
	,stage_errignore
	,stage_exit_code
	,stage_machine
	,stage_os
	,stage_arch
	,stage_variant
	,stage_kernel
	,stage_limit
	,stage_limit_repo
	,stage_started
	,stage_stopped
	,stage_created
	,stage_updated
	,stage_version
	,stage_on_success
	,stage_on_failure
	,stage_depends_on
	,stage_labels
	`
)
// stage is the internal row representation of a pipeline stage.
// DependsOn and Labels are stored as JSON text columns.
type stage struct {
	ID            int64         `db:"stage_id"`
	ExecutionID   int64         `db:"stage_execution_id"`
	RepoID        int64         `db:"stage_repo_id"`
	Number        int64         `db:"stage_number"`
	Name          string        `db:"stage_name"`
	Kind          string        `db:"stage_kind"`
	Type          string        `db:"stage_type"`
	Status        enum.CIStatus `db:"stage_status"`
	Error         string        `db:"stage_error"`
	ParentGroupID int64         `db:"stage_parent_group_id"`
	ErrIgnore     bool          `db:"stage_errignore"`
	ExitCode      int           `db:"stage_exit_code"`
	Machine       string        `db:"stage_machine"`
	OS            string        `db:"stage_os"`
	Arch          string        `db:"stage_arch"`
	Variant       string        `db:"stage_variant"`
	Kernel        string        `db:"stage_kernel"`
	Limit         int           `db:"stage_limit"`
	LimitRepo     int           `db:"stage_limit_repo"`
	Started       int64         `db:"stage_started"`
	Stopped       int64         `db:"stage_stopped"`
	Created       int64         `db:"stage_created"`
	Updated       int64         `db:"stage_updated"`
	// Version implements optimistic locking; see Update.
	Version   int64              `db:"stage_version"`
	OnSuccess bool               `db:"stage_on_success"`
	OnFailure bool               `db:"stage_on_failure"`
	DependsOn sqlxtypes.JSONText `db:"stage_depends_on"`
	Labels    sqlxtypes.JSONText `db:"stage_labels"`
}
// NewStageStore returns a new StageStore.
func NewStageStore(db *sqlx.DB) store.StageStore {
	return &stageStore{
		db: db,
	}
}

// stageStore implements store.StageStore backed by a relational database.
type stageStore struct {
	db *sqlx.DB
}
// FindByNumber returns a stage given an execution ID and a stage number.
func (s *stageStore) FindByNumber(ctx context.Context, executionID int64, stageNum int) (*types.Stage, error) {
	const findQueryStmt = `
	SELECT` + stageColumns + `
	FROM stages
	WHERE stage_execution_id = $1 AND stage_number = $2`

	db := dbtx.GetAccessor(ctx, s.db)

	dst := new(stage)
	if err := db.GetContext(ctx, dst, findQueryStmt, executionID, stageNum); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find stage")
	}
	return mapInternalToStage(dst)
}
// Create adds a stage in the database and stores the generated row ID back
// on st, mirroring the behavior of the other stores' Create methods.
func (s *stageStore) Create(ctx context.Context, st *types.Stage) error {
	const stageInsertStmt = `
	INSERT INTO stages (
		 stage_execution_id
		,stage_repo_id
		,stage_number
		,stage_name
		,stage_kind
		,stage_type
		,stage_status
		,stage_error
		,stage_errignore
		,stage_exit_code
		,stage_machine
		,stage_parent_group_id
		,stage_os
		,stage_arch
		,stage_variant
		,stage_kernel
		,stage_limit
		,stage_limit_repo
		,stage_started
		,stage_stopped
		,stage_created
		,stage_updated
		,stage_version
		,stage_on_success
		,stage_on_failure
		,stage_depends_on
		,stage_labels
	) VALUES (
		 :stage_execution_id
		,:stage_repo_id
		,:stage_number
		,:stage_name
		,:stage_kind
		,:stage_type
		,:stage_status
		,:stage_error
		,:stage_errignore
		,:stage_exit_code
		,:stage_machine
		,:stage_parent_group_id
		,:stage_os
		,:stage_arch
		,:stage_variant
		,:stage_kernel
		,:stage_limit
		,:stage_limit_repo
		,:stage_started
		,:stage_stopped
		,:stage_created
		,:stage_updated
		,:stage_version
		,:stage_on_success
		,:stage_on_failure
		,:stage_depends_on
		,:stage_labels
	) RETURNING stage_id`

	db := dbtx.GetAccessor(ctx, s.db)

	// Convert to the internal row representation for named-parameter binding.
	// Named dbStage to avoid shadowing the stage type.
	dbStage := mapStageToInternal(st)

	query, arg, err := db.BindNamed(stageInsertStmt, dbStage)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind stage object")
	}

	if err = db.QueryRowContext(ctx, query, arg...).Scan(&dbStage.ID); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Stage query failed")
	}

	// Fix: propagate the generated ID to the caller's object. Previously the
	// returned stage_id was scanned into the internal struct and discarded,
	// leaving st.ID at zero (inconsistent with PublicKeyStore.Create and
	// LFSObjectStore.Create).
	st.ID = dbStage.ID

	return nil
}
// ListWithSteps returns the stages of an execution together with all their
// contained steps (LEFT JOIN, so stages without steps are included). Rows are
// ordered by stage then step ID so the scanner can group steps under stages.
func (s *stageStore) ListWithSteps(ctx context.Context, executionID int64) ([]*types.Stage, error) {
	const queryNumberWithSteps = `
	SELECT` + stageColumns + "," + stepColumns + `
	FROM stages
	LEFT JOIN steps
		ON stages.stage_id=steps.step_stage_id
	WHERE stages.stage_execution_id = $1
	ORDER BY
	 stage_id ASC
	,step_id ASC
	`

	db := dbtx.GetAccessor(ctx, s.db)

	rows, err := db.QueryContext(ctx, queryNumberWithSteps, executionID)
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to query stages and steps")
	}
	// NOTE(review): rows are handed off to scanRowsWithSteps — presumably it
	// closes them and checks rows.Err(); confirm in its definition.
	return scanRowsWithSteps(rows)
}
// Find returns a stage given the stage ID.
func (s *stageStore) Find(ctx context.Context, stageID int64) (*types.Stage, error) {
	const queryFind = `
	SELECT` + stageColumns + `
	FROM stages
	WHERE stage_id = $1
	`

	db := dbtx.GetAccessor(ctx, s.db)

	dst := new(stage)
	if err := db.GetContext(ctx, dst, queryFind, stageID); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find stage")
	}
	return mapInternalToStage(dst)
}
// ListIncomplete returns all stages across executions that are still in a
// 'pending' or 'running' state, ordered by stage ID.
func (s *stageStore) ListIncomplete(ctx context.Context) ([]*types.Stage, error) {
	const queryListIncomplete = `
	SELECT` + stageColumns + `
	FROM stages
	WHERE stage_status IN ('pending','running')
	ORDER BY stage_id ASC
	`

	db := dbtx.GetAccessor(ctx, s.db)

	dst := []*stage{}
	if err := db.SelectContext(ctx, &dst, queryListIncomplete); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find incomplete stages")
	}

	// map stages list
	return mapInternalToStageList(dst)
}
// List returns a list of stages corresponding to an execution ID, ordered by
// stage number.
func (s *stageStore) List(ctx context.Context, executionID int64) ([]*types.Stage, error) {
	const queryList = `
	SELECT` + stageColumns + `
	FROM stages
	WHERE stage_execution_id = $1
	ORDER BY stage_number ASC
	`

	db := dbtx.GetAccessor(ctx, s.db)

	dst := []*stage{}
	if err := db.SelectContext(ctx, &dst, queryList, executionID); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find stages")
	}

	// map stages list
	return mapInternalToStageList(dst)
}
// Update tries to update a stage in the datastore and returns a locking error
// if it was unable to do so. Optimistic locking: the UPDATE only matches when
// the stored version equals the incoming version (the WHERE compares against
// the already-incremented value minus one); zero affected rows means another
// writer won and ErrVersionConflict is returned.
func (s *stageStore) Update(ctx context.Context, st *types.Stage) error {
	const stageUpdateStmt = `
	UPDATE stages
	SET
		 stage_status = :stage_status
		,stage_machine = :stage_machine
		,stage_started = :stage_started
		,stage_stopped = :stage_stopped
		,stage_exit_code = :stage_exit_code
		,stage_updated = :stage_updated
		,stage_version = :stage_version
		,stage_error = :stage_error
		,stage_on_success = :stage_on_success
		,stage_on_failure = :stage_on_failure
		,stage_errignore = :stage_errignore
		,stage_depends_on = :stage_depends_on
		,stage_labels = :stage_labels
	WHERE stage_id = :stage_id AND stage_version = :stage_version - 1`

	updatedAt := time.Now()
	// Steps are not persisted by this statement; keep them aside so they can
	// be restored on the caller's object after the round-trip below.
	steps := st.Steps

	stage := mapStageToInternal(st)

	stage.Version++
	stage.Updated = updatedAt.UnixMilli()

	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(stageUpdateStmt, stage)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind stage object")
	}

	result, err := db.ExecContext(ctx, query, arg...)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to update stage")
	}

	count, err := result.RowsAffected()
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to get number of updated rows")
	}

	if count == 0 {
		return gitness_store.ErrVersionConflict
	}

	// Reflect the persisted state (bumped version, new timestamp) back onto
	// the caller's object.
	m, err := mapInternalToStage(stage)
	if err != nil {
		return fmt.Errorf("could not map stage object: %w", err)
	}
	*st = *m
	st.Version = stage.Version
	st.Updated = stage.Updated
	st.Steps = steps // steps is not mapped in database.
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/lfs_objects.go | app/store/database/lfs_objects.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"fmt"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/Masterminds/squirrel"
"github.com/jmoiron/sqlx"
)
// Compile-time assertion that LFSObjectStore satisfies store.LFSObjectStore.
var _ store.LFSObjectStore = (*LFSObjectStore)(nil)

// NewLFSObjectStore returns a new LFSObjectStore.
func NewLFSObjectStore(db *sqlx.DB) *LFSObjectStore {
	return &LFSObjectStore{
		db: db,
	}
}

// LFSObjectStore implements store.LFSObjectStore backed by a relational database.
type LFSObjectStore struct {
	db *sqlx.DB
}
// lfsObject is the internal row representation of a Git LFS object.
type lfsObject struct {
	ID        int64  `db:"lfs_object_id"`
	OID       string `db:"lfs_object_oid"`
	Size      int64  `db:"lfs_object_size"`
	Created   int64  `db:"lfs_object_created"`
	CreatedBy int64  `db:"lfs_object_created_by"`
	RepoID    int64  `db:"lfs_object_repo_id"`
}

const (
	// lfsObjectColumns is the shared column list for SELECTs on lfs_objects.
	lfsObjectColumns = `
	lfs_object_id
	,lfs_object_oid
	,lfs_object_size
	,lfs_object_created
	,lfs_object_created_by
	,lfs_object_repo_id`
)
// Find returns the LFS object for the given repo and OID.
func (s *LFSObjectStore) Find(
	ctx context.Context,
	repoID int64,
	oid string,
) (*types.LFSObject, error) {
	stmt := database.Builder.
		Select(lfsObjectColumns).
		From("lfs_objects").
		Where("lfs_object_repo_id = ? AND lfs_object_oid = ?", repoID, oid)

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, fmt.Errorf("failed to convert query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	dst := &lfsObject{}
	if err := db.GetContext(ctx, dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Select query failed")
	}

	return mapLFSObject(dst), nil
}
// FindMany returns the LFS objects of a repo matching any of the given OIDs.
// OIDs with no matching row are silently absent from the result.
func (s *LFSObjectStore) FindMany(
	ctx context.Context,
	repoID int64,
	oids []string,
) ([]*types.LFSObject, error) {
	stmt := database.Builder.
		Select(lfsObjectColumns).
		From("lfs_objects").
		Where("lfs_object_repo_id = ?", repoID).
		Where(squirrel.Eq{"lfs_object_oid": oids})

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, fmt.Errorf("failed to convert query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	var dst []*lfsObject
	if err := db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Select query failed")
	}

	return mapLFSObjects(dst), nil
}
// Create inserts a new LFS object and stores the generated row ID back on obj.
func (s *LFSObjectStore) Create(ctx context.Context, obj *types.LFSObject) error {
	const sqlQuery = `
	INSERT INTO lfs_objects (
		 lfs_object_oid
		,lfs_object_size
		,lfs_object_created
		,lfs_object_created_by
		,lfs_object_repo_id
	) VALUES (
		 :lfs_object_oid
		,:lfs_object_size
		,:lfs_object_created
		,:lfs_object_created_by
		,:lfs_object_repo_id
	) RETURNING lfs_object_id`

	db := dbtx.GetAccessor(ctx, s.db)

	query, args, err := db.BindNamed(sqlQuery, mapInternalLFSObject(obj))
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind query")
	}

	if err = db.QueryRowContext(ctx, query, args...).Scan(&obj.ID); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to create LFS object")
	}

	return nil
}
// GetSizeInKBByRepoID returns the total size of LFS objects in KiB for a
// specified repo. Integer division truncates towards zero; COALESCE yields 0
// for a repo with no LFS objects.
func (s *LFSObjectStore) GetSizeInKBByRepoID(ctx context.Context, repoID int64) (int64, error) {
	stmt := database.Builder.
		Select("CAST(COALESCE(SUM(lfs_object_size) / 1024, 0) AS BIGINT)").
		From("lfs_objects").
		Where("lfs_object_repo_id = ?", repoID)

	sql, args, err := stmt.ToSql()
	if err != nil {
		return 0, fmt.Errorf("failed to convert query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	var size int64
	if err := db.GetContext(ctx, &size, sql, args...); err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "Select query failed")
	}

	return size, nil
}
// mapInternalLFSObject converts an API-facing LFS object into its database
// row representation.
func mapInternalLFSObject(obj *types.LFSObject) *lfsObject {
	row := lfsObject{
		ID:        obj.ID,
		RepoID:    obj.RepoID,
		OID:       obj.OID,
		Size:      obj.Size,
		Created:   obj.Created,
		CreatedBy: obj.CreatedBy,
	}
	return &row
}
// mapLFSObject converts a database row into the API-facing LFS object.
func mapLFSObject(obj *lfsObject) *types.LFSObject {
	out := types.LFSObject{
		ID:        obj.ID,
		RepoID:    obj.RepoID,
		OID:       obj.OID,
		Size:      obj.Size,
		Created:   obj.Created,
		CreatedBy: obj.CreatedBy,
	}
	return &out
}
// mapLFSObjects converts a slice of database rows into API-facing LFS objects.
func mapLFSObjects(objs []*lfsObject) []*types.LFSObject {
	res := make([]*types.LFSObject, 0, len(objs))
	for _, obj := range objs {
		res = append(res, mapLFSObject(obj))
	}
	return res
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/infra_provider_config.go | app/store/database/infra_provider_config.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/Masterminds/squirrel"
"github.com/guregu/null"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
)
const (
	infraProviderConfigIDColumn = `ipconf_id`
	// infraProviderConfigInsertColumns lists every column except the ID;
	// the select list prepends ipconf_id below.
	infraProviderConfigInsertColumns = `
		ipconf_uid,
		ipconf_display_name,
		ipconf_type,
		ipconf_space_id,
		ipconf_created,
		ipconf_updated,
		ipconf_metadata,
		ipconf_is_deleted,
		ipconf_deleted
	`
	infraProviderConfigSelectColumns = "ipconf_id," + infraProviderConfigInsertColumns
	infraProviderConfigTable         = `infra_provider_configs`
)

// infraProviderConfig is the internal row representation of an infra provider
// config. Rows are soft-deleted: ipconf_is_deleted flags deletion and
// ipconf_deleted carries the deletion timestamp (NULL while live).
type infraProviderConfig struct {
	ID         int64                  `db:"ipconf_id"`
	Identifier string                 `db:"ipconf_uid"`
	Name       string                 `db:"ipconf_display_name"`
	Type       enum.InfraProviderType `db:"ipconf_type"`
	// Metadata holds the config's settings serialized as JSON.
	Metadata  []byte   `db:"ipconf_metadata"`
	SpaceID   int64    `db:"ipconf_space_id"`
	Created   int64    `db:"ipconf_created"`
	Updated   int64    `db:"ipconf_updated"`
	IsDeleted bool     `db:"ipconf_is_deleted"`
	Deleted   null.Int `db:"ipconf_deleted"`
}
// Compile-time assertion that infraProviderConfigStore satisfies
// store.InfraProviderConfigStore.
var _ store.InfraProviderConfigStore = (*infraProviderConfigStore)(nil)

// NewInfraProviderConfigStore returns a new InfraProviderConfigStore.
func NewInfraProviderConfigStore(
	db *sqlx.DB,
	spaceIDCache store.SpaceIDCache,
) store.InfraProviderConfigStore {
	return &infraProviderConfigStore{
		db:           db,
		spaceIDCache: spaceIDCache,
	}
}

// infraProviderConfigStore implements store.InfraProviderConfigStore backed by
// a relational database. spaceIDCache resolves space IDs to space paths when
// mapping rows to API types.
type infraProviderConfigStore struct {
	db           *sqlx.DB
	spaceIDCache store.SpaceIDCache
}
// FindByType returns the infra provider config of the given type in a space.
// When includeDeleted is false only live (non-soft-deleted) configs match;
// when true, soft-deleted configs are included as well.
func (i infraProviderConfigStore) FindByType(
	ctx context.Context,
	spaceID int64,
	infraType enum.InfraProviderType,
	includeDeleted bool,
) (*types.InfraProviderConfig, error) {
	stmt := database.Builder.
		Select(infraProviderConfigSelectColumns).
		From(infraProviderConfigTable).
		Where("ipconf_type = ?", string(infraType)).
		Where("ipconf_space_id = ?", spaceID)

	// Fix: previously the includeDeleted flag was bound directly as the
	// ipconf_is_deleted value, so includeDeleted=true returned ONLY deleted
	// rows instead of including them. Align with Find() in this file:
	// the flag widens the result set.
	if !includeDeleted {
		stmt = stmt.Where("ipconf_is_deleted = ?", false)
	}

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}

	dst := new(infraProviderConfig)
	db := dbtx.GetAccessor(ctx, i.db)
	if err := db.GetContext(ctx, dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find infraprovider config %v", infraType)
	}

	return i.mapToInfraProviderConfig(ctx, dst)
}
// Update updates an infra provider config. Only the display name, updated
// timestamp, and metadata columns are written.
func (i infraProviderConfigStore) Update(ctx context.Context, infraProviderConfig *types.InfraProviderConfig) error {
	dbinfraProviderConfig, err := i.mapToInternalInfraProviderConfig(infraProviderConfig)
	if err != nil {
		return err
	}
	stmt := database.Builder.
		Update(infraProviderConfigTable).
		Set("ipconf_display_name", dbinfraProviderConfig.Name).
		Set("ipconf_updated", dbinfraProviderConfig.Updated).
		Set("ipconf_metadata", dbinfraProviderConfig.Metadata).
		Where("ipconf_id = ?", infraProviderConfig.ID)
	sql, args, err := stmt.ToSql()
	if err != nil {
		return errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	db := dbtx.GetAccessor(ctx, i.db)
	if _, err := db.ExecContext(ctx, sql, args...); err != nil {
		return database.ProcessSQLErrorf(
			ctx, err, "Failed to update infra provider config %s", infraProviderConfig.Identifier)
	}
	return nil
}
// Find returns the infra provider config for the given ID. When
// includeDeleted is false only live (non-soft-deleted) configs match.
func (i infraProviderConfigStore) Find(
	ctx context.Context,
	id int64,
	includeDeleted bool,
) (*types.InfraProviderConfig, error) {
	stmt := database.Builder.
		Select(infraProviderConfigSelectColumns).
		From(infraProviderConfigTable).
		Where(infraProviderConfigIDColumn+" = ?", id) //nolint:goconst
	if !includeDeleted {
		stmt = stmt.Where("ipconf_is_deleted = false")
	}
	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	dst := new(infraProviderConfig)
	db := dbtx.GetAccessor(ctx, i.db)
	if err := db.GetContext(ctx, dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find infraprovider config %d", id)
	}
	return i.mapToInfraProviderConfig(ctx, dst)
}
// List returns live (non-soft-deleted) infra provider configs, optionally
// restricted by space IDs and provider type via the filter (nil filter means
// no restriction).
func (i infraProviderConfigStore) List(
	ctx context.Context,
	filter *types.InfraProviderConfigFilter,
) ([]*types.InfraProviderConfig, error) {
	stmt := database.Builder.
		Select(infraProviderConfigSelectColumns).
		From(infraProviderConfigTable).
		Where("ipconf_is_deleted = false")

	if filter != nil && len(filter.SpaceIDs) > 0 {
		stmt = stmt.Where(squirrel.Eq{"ipconf_space_id": filter.SpaceIDs})
	}

	if filter != nil && filter.Type != "" {
		stmt = stmt.Where("ipconf_type = ?", filter.Type)
	}

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	db := dbtx.GetAccessor(ctx, i.db)
	dst := new([]*infraProviderConfig)
	if err := db.SelectContext(ctx, dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to list infraprovider configs")
	}
	return i.mapToInfraProviderConfigs(ctx, *dst)
}
// FindByIdentifier returns the live (non-soft-deleted) infra provider config
// with the given identifier in a space.
func (i infraProviderConfigStore) FindByIdentifier(
	ctx context.Context,
	spaceID int64,
	identifier string,
) (*types.InfraProviderConfig, error) {
	stmt := database.Builder.
		Select(infraProviderConfigSelectColumns).
		From(infraProviderConfigTable).
		Where("ipconf_is_deleted = false").
		Where("ipconf_uid = ?", identifier). //nolint:goconst
		Where("ipconf_space_id = ?", spaceID)
	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	dst := new(infraProviderConfig)
	db := dbtx.GetAccessor(ctx, i.db)
	if err := db.GetContext(ctx, dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find infraprovider config %s", identifier)
	}
	return i.mapToInfraProviderConfig(ctx, dst)
}
// Create inserts a new infra provider config and stores the generated row ID
// back on infraProviderConfig.
func (i infraProviderConfigStore) Create(ctx context.Context, infraProviderConfig *types.InfraProviderConfig) error {
	dbinfraProviderConfig, err := i.mapToInternalInfraProviderConfig(infraProviderConfig)
	if err != nil {
		return err
	}
	// Value order must match infraProviderConfigInsertColumns.
	stmt := database.Builder.
		Insert(infraProviderConfigTable).
		Columns(infraProviderConfigInsertColumns).
		Values(
			dbinfraProviderConfig.Identifier,
			dbinfraProviderConfig.Name,
			dbinfraProviderConfig.Type,
			dbinfraProviderConfig.SpaceID,
			dbinfraProviderConfig.Created,
			dbinfraProviderConfig.Updated,
			dbinfraProviderConfig.Metadata,
			dbinfraProviderConfig.IsDeleted,
			dbinfraProviderConfig.Deleted,
		).
		Suffix(ReturningClause + infraProviderConfigIDColumn)
	sql, args, err := stmt.ToSql()
	if err != nil {
		return errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	db := dbtx.GetAccessor(ctx, i.db)
	if err = db.QueryRowContext(ctx, sql, args...).Scan(&dbinfraProviderConfig.ID); err != nil {
		return database.ProcessSQLErrorf(
			ctx, err, "infraprovider config create query failed for %s", infraProviderConfig.Identifier)
	}
	infraProviderConfig.ID = dbinfraProviderConfig.ID
	return nil
}
// Delete soft-deletes an infra provider config: the row is kept but flagged
// via ipconf_is_deleted with the deletion timestamp in ipconf_deleted.
func (i infraProviderConfigStore) Delete(ctx context.Context, id int64) error {
	now := time.Now().UnixMilli()
	stmt := database.Builder.
		Update(infraProviderConfigTable).
		Set("ipconf_updated", now).
		Set("ipconf_deleted", now).
		Set("ipconf_is_deleted", true).
		Where(infraProviderConfigIDColumn+" = ?", id)
	sql, args, err := stmt.ToSql()
	if err != nil {
		return errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	db := dbtx.GetAccessor(ctx, i.db)
	if _, err := db.ExecContext(ctx, sql, args...); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to update infraprovider config %d", id)
	}
	return nil
}
// mapToInfraProviderConfig converts a DB row into the API type: it decodes the
// JSON metadata blob into a map and resolves the owning space's path via the
// space ID cache.
func (i infraProviderConfigStore) mapToInfraProviderConfig(
	ctx context.Context,
	in *infraProviderConfig,
) (*types.InfraProviderConfig, error) {
	metadataMap := make(map[string]any)
	if len(in.Metadata) > 0 {
		if err := json.Unmarshal(in.Metadata, &metadataMap); err != nil {
			return nil, err
		}
	}
	infraProviderConfigEntity := &types.InfraProviderConfig{
		ID:         in.ID,
		Identifier: in.Identifier,
		Name:       in.Name,
		Type:       in.Type,
		Metadata:   metadataMap,
		SpaceID:    in.SpaceID,
		Created:    in.Created,
		Updated:    in.Updated,
		IsDeleted:  in.IsDeleted,
		Deleted:    in.Deleted.Ptr(),
	}
	spaceCore, err := i.spaceIDCache.Get(ctx, infraProviderConfigEntity.SpaceID)
	if err != nil {
		// previously the underlying error was dropped; wrap it so the root
		// cause (cache miss vs. DB failure) is preserved for callers.
		return nil, fmt.Errorf("couldn't set space path to the infra config in DB: %d: %w",
			infraProviderConfigEntity.SpaceID, err)
	}
	infraProviderConfigEntity.SpacePath = spaceCore.Path
	return infraProviderConfigEntity, nil
}
// mapToInfraProviderConfigs converts a slice of DB rows into API types,
// failing fast on the first conversion error.
func (i infraProviderConfigStore) mapToInfraProviderConfigs(
	ctx context.Context,
	in []*infraProviderConfig,
) ([]*types.InfraProviderConfig, error) {
	out := make([]*types.InfraProviderConfig, len(in))
	for idx, entity := range in {
		mapped, mapErr := i.mapToInfraProviderConfig(ctx, entity)
		if mapErr != nil {
			return nil, mapErr
		}
		out[idx] = mapped
	}
	return out, nil
}
// mapToInternalInfraProviderConfig converts the API type into its DB row
// representation, serializing the metadata map to JSON (nil when empty).
func (i infraProviderConfigStore) mapToInternalInfraProviderConfig(
	in *types.InfraProviderConfig,
) (*infraProviderConfig, error) {
	var metadataJSON []byte
	if len(in.Metadata) > 0 {
		var err error
		metadataJSON, err = json.Marshal(in.Metadata)
		if err != nil {
			return nil, err
		}
	}
	return &infraProviderConfig{
		Identifier: in.Identifier,
		Name:       in.Name,
		Type:       in.Type,
		SpaceID:    in.SpaceID,
		Created:    in.Created,
		Updated:    in.Updated,
		Metadata:   metadataJSON,
		IsDeleted:  in.IsDeleted,
		Deleted:    null.IntFromPtr(in.Deleted),
	}, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/membership.go | app/store/database/membership.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/Masterminds/squirrel"
"github.com/jmoiron/sqlx"
)
var _ store.MembershipStore = (*MembershipStore)(nil)
// NewMembershipStore returns a new MembershipStore.
func NewMembershipStore(
	db *sqlx.DB,
	pCache store.PrincipalInfoCache,
	spacePathStore store.SpacePathStore,
	spaceStore store.SpaceStore,
) *MembershipStore {
	return &MembershipStore{
		db:             db,
		pCache:         pCache,
		spacePathStore: spacePathStore,
		spaceStore:     spaceStore,
	}
}

// MembershipStore implements store.MembershipStore backed by a relational database.
type MembershipStore struct {
	db             *sqlx.DB
	pCache         store.PrincipalInfoCache // resolves principal infos (member and creator)
	spacePathStore store.SpacePathStore
	spaceStore     store.SpaceStore
}

// membership is the DB representation of a membership row.
type membership struct {
	SpaceID     int64               `db:"membership_space_id"`
	PrincipalID int64               `db:"membership_principal_id"`
	CreatedBy   int64               `db:"membership_created_by"`
	Created     int64               `db:"membership_created"`
	Updated     int64               `db:"membership_updated"`
	Role        enum.MembershipRole `db:"membership_role"`
}

// membershipPrincipal is a join row of a membership and the member's principal info.
type membershipPrincipal struct {
	membership
	principalInfo
}

// membershipSpace is a join row of a membership and its space.
type membershipSpace struct {
	membership
	space
}

const (
	membershipColumns = `
		 membership_space_id
		,membership_principal_id
		,membership_created_by
		,membership_created
		,membership_updated
		,membership_role`

	membershipSelectBase = `
	SELECT` + membershipColumns + `
	FROM memberships`
)
// Find finds the membership by space id and principal id.
func (s *MembershipStore) Find(ctx context.Context, key types.MembershipKey) (*types.Membership, error) {
	const sqlQuery = membershipSelectBase + `
	WHERE membership_space_id = $1 AND membership_principal_id = $2`

	db := dbtx.GetAccessor(ctx, s.db)

	dst := &membership{}
	if err := db.GetContext(ctx, dst, sqlQuery, key.SpaceID, key.PrincipalID); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find membership")
	}

	result := mapToMembership(dst)

	return &result, nil
}

// FindUser finds the membership by space id and principal id and enriches
// it with the member's and the creator's principal infos.
func (s *MembershipStore) FindUser(ctx context.Context, key types.MembershipKey) (*types.MembershipUser, error) {
	m, err := s.Find(ctx, key)
	if err != nil {
		return nil, err
	}

	result, err := s.addPrincipalInfos(ctx, m)
	if err != nil {
		return nil, err
	}

	return &result, nil
}
// Create creates a new membership.
func (s *MembershipStore) Create(ctx context.Context, membership *types.Membership) error {
	const sqlQuery = `
	INSERT INTO memberships (
		 membership_space_id
		,membership_principal_id
		,membership_created_by
		,membership_created
		,membership_updated
		,membership_role
	) values (
		 :membership_space_id
		,:membership_principal_id
		,:membership_created_by
		,:membership_created
		,:membership_updated
		,:membership_role
	)`

	db := dbtx.GetAccessor(ctx, s.db)

	// bind the named parameters from the internal row representation.
	query, arg, err := db.BindNamed(sqlQuery, mapToInternalMembership(membership))
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind membership object")
	}

	if _, err = db.ExecContext(ctx, query, arg...); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to insert membership")
	}

	return nil
}
// Update updates the role of a member of a space.
// On success the input object's Updated timestamp is refreshed in place.
func (s *MembershipStore) Update(ctx context.Context, membership *types.Membership) error {
	const sqlQuery = `
	UPDATE memberships
	SET
		 membership_updated = :membership_updated
		,membership_role = :membership_role
	WHERE membership_space_id = :membership_space_id AND
	      membership_principal_id = :membership_principal_id`

	db := dbtx.GetAccessor(ctx, s.db)

	dbMembership := mapToInternalMembership(membership)
	// stamp the update time server-side rather than trusting the caller's value.
	dbMembership.Updated = time.Now().UnixMilli()

	query, arg, err := db.BindNamed(sqlQuery, dbMembership)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind membership object")
	}

	_, err = db.ExecContext(ctx, query, arg...)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to update membership role")
	}

	// reflect the new timestamp back on the caller's object.
	membership.Updated = dbMembership.Updated

	return nil
}
// Delete deletes the membership. This is a hard delete - the row is removed.
func (s *MembershipStore) Delete(ctx context.Context, key types.MembershipKey) error {
	const sqlQuery = `
	DELETE from memberships
	WHERE membership_space_id = $1 AND
	      membership_principal_id = $2`

	db := dbtx.GetAccessor(ctx, s.db)

	if _, err := db.ExecContext(ctx, sqlQuery, key.SpaceID, key.PrincipalID); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "delete membership query failed")
	}
	return nil
}
// CountUsers returns a number of users memberships that matches the provided filter.
func (s *MembershipStore) CountUsers(ctx context.Context,
	spaceID int64,
	filter types.MembershipUserFilter,
) (int64, error) {
	stmt := database.Builder.
		Select("count(*)").
		From("memberships").
		InnerJoin("principals ON membership_principal_id = principal_id").
		Where("membership_space_id = ?", spaceID)

	// apply the same query filter as ListUsers so count and page agree.
	stmt = applyMembershipUserFilter(stmt, filter)

	sql, args, err := stmt.ToSql()
	if err != nil {
		return 0, fmt.Errorf("failed to convert membership users count query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	var count int64
	err = db.QueryRowContext(ctx, sql, args...).Scan(&count)
	if err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "Failed executing membership users count query")
	}

	return count, nil
}
// ListUsers returns a list of memberships for a space or a user.
func (s *MembershipStore) ListUsers(ctx context.Context,
	spaceID int64,
	filter types.MembershipUserFilter,
) ([]types.MembershipUser, error) {
	const columns = membershipColumns + "," + principalInfoCommonColumns

	stmt := database.Builder.
		Select(columns).
		From("memberships").
		InnerJoin("principals ON membership_principal_id = principal_id").
		Where("membership_space_id = ?", spaceID)

	stmt = applyMembershipUserFilter(stmt, filter)
	stmt = stmt.Limit(database.Limit(filter.Size))
	stmt = stmt.Offset(database.Offset(filter.Page, filter.Size))

	// ascending order is the default.
	order := filter.Order
	if order == enum.OrderDefault {
		order = enum.OrderAsc
	}

	// no ORDER BY is added for unrecognized sort values.
	switch filter.Sort {
	case enum.MembershipUserSortName:
		stmt = stmt.OrderBy("principal_display_name " + order.String())
	case enum.MembershipUserSortCreated:
		stmt = stmt.OrderBy("membership_created " + order.String())
	}

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, fmt.Errorf("failed to convert membership users list query to sql: %w", err)
	}

	dst := make([]*membershipPrincipal, 0)

	db := dbtx.GetAccessor(ctx, s.db)
	if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing membership users list query")
	}

	// attach creator principal infos from the cache.
	result, err := s.mapToMembershipUsers(ctx, dst)
	if err != nil {
		return nil, fmt.Errorf("failed to map memberships users to external type: %w", err)
	}

	return result, nil
}
// applyMembershipUserFilter narrows the statement by the optional search
// query, matching against the principal's display name.
func applyMembershipUserFilter(
	stmt squirrel.SelectBuilder,
	opts types.MembershipUserFilter,
) squirrel.SelectBuilder {
	if opts.Query == "" {
		return stmt
	}
	return stmt.Where(PartialMatch("principal_display_name", opts.Query))
}
// CountSpaces returns the number of spaces the user is a member of,
// matching the provided filter. Deleted spaces are excluded.
func (s *MembershipStore) CountSpaces(ctx context.Context,
	userID int64,
	filter types.MembershipSpaceFilter,
) (int64, error) {
	stmt := database.Builder.
		Select("count(*)").
		From("memberships").
		InnerJoin("spaces ON spaces.space_id = membership_space_id").
		Where("membership_principal_id = ? AND spaces.space_deleted IS NULL", userID)

	// apply the same query filter as ListSpaces so count and page agree.
	stmt = applyMembershipSpaceFilter(stmt, filter)

	sql, args, err := stmt.ToSql()
	if err != nil {
		return 0, fmt.Errorf("failed to convert membership spaces count query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	var count int64
	err = db.QueryRowContext(ctx, sql, args...).Scan(&count)
	if err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "Failed executing membership spaces count query")
	}

	return count, nil
}
// ListSpaces returns a list of spaces in which the provided user is a member.
// Deleted spaces are excluded.
func (s *MembershipStore) ListSpaces(ctx context.Context,
	userID int64,
	filter types.MembershipSpaceFilter,
) ([]types.MembershipSpace, error) {
	const columns = membershipColumns + "," + spaceColumns

	stmt := database.Builder.
		Select(columns).
		From("memberships").
		InnerJoin("spaces ON spaces.space_id = membership_space_id").
		Where("membership_principal_id = ? AND spaces.space_deleted IS NULL", userID)

	stmt = applyMembershipSpaceFilter(stmt, filter)
	stmt = stmt.Limit(database.Limit(filter.Size))
	stmt = stmt.Offset(database.Offset(filter.Page, filter.Size))

	// ascending order is the default.
	order := filter.Order
	if order == enum.OrderDefault {
		order = enum.OrderAsc
	}

	switch filter.Sort {
	// TODO [CODE-1363]: remove after identifier migration.
	case enum.MembershipSpaceSortUID, enum.MembershipSpaceSortIdentifier:
		stmt = stmt.OrderBy("space_uid " + order.String())
	case enum.MembershipSpaceSortCreated:
		stmt = stmt.OrderBy("membership_created " + order.String())
	}

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, fmt.Errorf("failed to convert membership spaces list query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	dst := make([]*membershipSpace, 0)
	if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing custom list query")
	}

	// attach space data and creator principal infos.
	result, err := s.mapToMembershipSpaces(ctx, dst)
	if err != nil {
		return nil, fmt.Errorf("failed to map memberships spaces to external type: %w", err)
	}

	return result, nil
}
// applyMembershipSpaceFilter narrows the statement by the optional search
// query, matching against the space UID.
func applyMembershipSpaceFilter(
	stmt squirrel.SelectBuilder,
	opts types.MembershipSpaceFilter,
) squirrel.SelectBuilder {
	if opts.Query == "" {
		return stmt
	}
	return stmt.Where(PartialMatch("space_uid", opts.Query))
}
// mapToMembership converts the DB row representation into the API type.
func mapToMembership(m *membership) types.Membership {
	result := types.Membership{
		CreatedBy: m.CreatedBy,
		Created:   m.Created,
		Updated:   m.Updated,
		Role:      m.Role,
	}
	result.MembershipKey = types.MembershipKey{
		SpaceID:     m.SpaceID,
		PrincipalID: m.PrincipalID,
	}
	return result
}

// mapToInternalMembership converts the API type into the DB row representation.
func mapToInternalMembership(m *types.Membership) membership {
	var row membership
	row.SpaceID = m.SpaceID
	row.PrincipalID = m.PrincipalID
	row.CreatedBy = m.CreatedBy
	row.Created = m.Created
	row.Updated = m.Updated
	row.Role = m.Role
	return row
}
// addPrincipalInfos enriches a membership with the member's principal info
// (required) and the creating principal's info (attached only if still resolvable).
func (s *MembershipStore) addPrincipalInfos(ctx context.Context, m *types.Membership) (types.MembershipUser, error) {
	var result types.MembershipUser

	// pull principal infos from cache
	infoMap, err := s.pCache.Map(ctx, []int64{m.CreatedBy, m.PrincipalID})
	if err != nil {
		return result, fmt.Errorf("failed to load membership principal infos: %w", err)
	}

	user, ok := infoMap[m.PrincipalID]
	if !ok {
		// NOTE: the previous code wrapped err here with %w, but err is always
		// nil on this path - report the missing principal explicitly instead.
		return result, fmt.Errorf("failed to find membership principal info for principal %d", m.PrincipalID)
	}
	result.Principal = *user

	// AddedBy stays empty if the creator's info is no longer available.
	if addedBy, ok := infoMap[m.CreatedBy]; ok {
		result.AddedBy = *addedBy
	}

	result.Membership = *m

	return result, nil
}
// mapToMembershipUsers converts membership+principal join rows into API types,
// resolving each membership's creator via the principal info cache.
func (s *MembershipStore) mapToMembershipUsers(ctx context.Context,
	ms []*membershipPrincipal,
) ([]types.MembershipUser, error) {
	// gather the IDs of the principals that created the memberships.
	creatorIDs := make([]int64, 0, len(ms))
	for _, m := range ms {
		creatorIDs = append(creatorIDs, m.membership.CreatedBy)
	}

	// resolve the creator principal infos through the cache.
	creatorInfos, err := s.pCache.Map(ctx, creatorIDs)
	if err != nil {
		return nil, fmt.Errorf("failed to load membership principal infos: %w", err)
	}

	// assemble the result, attaching creator info where available.
	result := make([]types.MembershipUser, len(ms))
	for idx, m := range ms {
		result[idx] = types.MembershipUser{
			Membership: mapToMembership(&m.membership),
			Principal:  mapToPrincipalInfo(&m.principalInfo),
		}
		if creator, ok := creatorInfos[m.membership.CreatedBy]; ok {
			result[idx].AddedBy = *creator
		}
	}

	return result, nil
}
// mapToMembershipSpaces converts membership+space join rows into API types,
// mapping each space and resolving each membership's creator via the cache.
func (s *MembershipStore) mapToMembershipSpaces(ctx context.Context,
	ms []*membershipSpace,
) ([]types.MembershipSpace, error) {
	// gather the IDs of the principals that created the memberships.
	creatorIDs := make([]int64, 0, len(ms))
	for _, m := range ms {
		creatorIDs = append(creatorIDs, m.membership.CreatedBy)
	}

	// resolve the creator principal infos through the cache.
	creatorInfos, err := s.pCache.Map(ctx, creatorIDs)
	if err != nil {
		return nil, fmt.Errorf("failed to load membership principal infos: %w", err)
	}

	// assemble the result, attaching spaces and creator info where available.
	result := make([]types.MembershipSpace, len(ms))
	for idx, m := range ms {
		result[idx].Membership = mapToMembership(&m.membership)

		mappedSpace, mapErr := mapToSpace(ctx, s.db, s.spacePathStore, &m.space)
		if mapErr != nil {
			return nil, fmt.Errorf("failed to map space %d: %w", m.space.ID, mapErr)
		}
		result[idx].Space = *mappedSpace

		if creator, ok := creatorInfos[m.membership.CreatedBy]; ok {
			result[idx].AddedBy = *creator
		}
	}

	return result, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/token.go | app/store/database/token.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"fmt"
"strings"
"time"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/Masterminds/squirrel"
"github.com/jmoiron/sqlx"
)
var _ store.TokenStore = (*TokenStore)(nil)
// NewTokenStore returns a new TokenStore.
func NewTokenStore(db *sqlx.DB) *TokenStore {
	return &TokenStore{db}
}

// TokenStore implements a TokenStore backed by a relational database.
type TokenStore struct {
	db *sqlx.DB
}
// Find finds the token by id.
func (s *TokenStore) Find(ctx context.Context, id int64) (*types.Token, error) {
	db := dbtx.GetAccessor(ctx, s.db)

	dst := new(types.Token)
	if err := db.GetContext(ctx, dst, TokenSelectByID, id); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find token")
	}
	return dst, nil
}

// FindByIdentifier finds the token by principalId and token identifier.
// The match on the identifier is case-insensitive (the query compares
// against LOWER(token_uid)).
func (s *TokenStore) FindByIdentifier(ctx context.Context, principalID int64, identifier string) (*types.Token, error) {
	db := dbtx.GetAccessor(ctx, s.db)

	dst := new(types.Token)
	if err := db.GetContext(
		ctx,
		dst,
		TokenSelectByPrincipalIDAndIdentifier,
		principalID,
		strings.ToLower(identifier),
	); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find token by identifier")
	}
	return dst, nil
}
// Create saves the token details.
// On success the token's ID field is populated from the RETURNING clause.
func (s *TokenStore) Create(ctx context.Context, token *types.Token) error {
	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(tokenInsert, token)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind token object")
	}

	if err = db.QueryRowContext(ctx, query, arg...).Scan(&token.ID); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Insert query failed")
	}

	return nil
}

// Delete deletes the token with the given id.
func (s *TokenStore) Delete(ctx context.Context, id int64) error {
	db := dbtx.GetAccessor(ctx, s.db)

	if _, err := db.ExecContext(ctx, tokenDelete, id); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "The delete query failed")
	}

	return nil
}
// DeleteExpiredBefore deletes all tokens that expired before the provided time.
// If tokenTypes are provided, then only tokens of that type are deleted.
// It returns the number of deleted tokens.
func (s *TokenStore) DeleteExpiredBefore(
	ctx context.Context,
	before time.Time,
	tknTypes []enum.TokenType,
) (int64, error) {
	stmt := database.Builder.
		Delete("tokens").
		Where("token_expires_at < ?", before.UnixMilli())

	// optionally restrict the deletion to the given token types.
	if len(tknTypes) > 0 {
		stmt = stmt.Where(squirrel.Eq{"token_type": tknTypes})
	}

	sql, args, err := stmt.ToSql()
	if err != nil {
		return 0, fmt.Errorf("failed to convert delete token query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	result, err := db.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "failed to execute delete token query")
	}

	n, err := result.RowsAffected()
	if err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "failed to get number of deleted tokens")
	}

	return n, nil
}
// Count returns a count of tokens of a specific type for a specific principal.
func (s *TokenStore) Count(ctx context.Context,
	principalID int64, tokenType enum.TokenType) (int64, error) {
	db := dbtx.GetAccessor(ctx, s.db)

	var count int64
	err := db.QueryRowContext(ctx, tokenCountForPrincipalIDOfType, principalID, tokenType).Scan(&count)
	if err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "Failed executing count query")
	}
	return count, nil
}

// List returns a list of tokens of a specific type for a specific principal,
// ordered by issue time (newest first).
func (s *TokenStore) List(ctx context.Context,
	principalID int64, tokenType enum.TokenType) ([]*types.Token, error) {
	db := dbtx.GetAccessor(ctx, s.db)

	dst := []*types.Token{}

	// TODO: custom filters / sorting for tokens.
	err := db.SelectContext(ctx, &dst, tokenSelectForPrincipalIDOfType, principalID, tokenType)
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing token list query")
	}

	return dst, nil
}
// tokenSelectBase selects all token columns; specific queries append WHERE clauses.
const tokenSelectBase = `
	SELECT
		 token_id
		,token_type
		,token_uid
		,token_principal_id
		,token_expires_at
		,token_issued_at
		,token_created_by
	FROM tokens
	` //#nosec G101

// tokenSelectForPrincipalIDOfType lists a principal's tokens of one type, newest first.
const tokenSelectForPrincipalIDOfType = tokenSelectBase + `
	WHERE token_principal_id = $1 AND token_type = $2
	ORDER BY token_issued_at DESC
	` //#nosec G101

// tokenCountForPrincipalIDOfType counts a principal's tokens of one type.
const tokenCountForPrincipalIDOfType = `
	SELECT count(*)
	FROM tokens
	WHERE token_principal_id = $1 AND token_type = $2
	` //#nosec G101

// TokenSelectByID fetches a single token by its ID.
const TokenSelectByID = tokenSelectBase + `
	WHERE token_id = $1
	`

// TokenSelectByPrincipalIDAndIdentifier fetches a token by principal and
// case-insensitive identifier (caller must pass the identifier lowercased).
const TokenSelectByPrincipalIDAndIdentifier = tokenSelectBase + `
	WHERE token_principal_id = $1 AND LOWER(token_uid) = $2
	`

// tokenDelete removes a token by its ID.
const tokenDelete = `
	DELETE FROM tokens
	WHERE token_id = $1
	`

// tokenInsert inserts a token and returns the generated ID.
const tokenInsert = `
	INSERT INTO tokens (
		 token_type
		,token_uid
		,token_principal_id
		,token_expires_at
		,token_issued_at
		,token_created_by
	) values (
		 :token_type
		,:token_uid
		,:token_principal_id
		,:token_expires_at
		,:token_issued_at
		,:token_created_by
	) RETURNING token_id
	`
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/linked_repo.go | app/store/database/linked_repo.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/errors"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/jmoiron/sqlx"
)
// compile-time check that LinkedRepoStore satisfies the interface.
var _ store.LinkedRepoStore = (*LinkedRepoStore)(nil)

// NewLinkedRepoStore returns a new LinkedRepoStore.
func NewLinkedRepoStore(db *sqlx.DB) *LinkedRepoStore {
	return &LinkedRepoStore{
		db: db,
	}
}

// LinkedRepoStore implements store.LinkedRepoStore backed by a relational database.
type LinkedRepoStore struct {
	db *sqlx.DB
}

// linkedRepo is the DB representation of a linked repository row.
// Its field layout matches types.LinkedRepo so the two convert directly.
type linkedRepo struct {
	RepoID              int64  `db:"linked_repo_id"`
	Version             int64  `db:"linked_repo_version"` // optimistic-lock version counter
	Created             int64  `db:"linked_repo_created"`
	Updated             int64  `db:"linked_repo_updated"`
	LastFullSync        int64  `db:"linked_repo_last_full_sync"`
	ConnectorPath       string `db:"linked_repo_connector_path"`
	ConnectorIdentifier string `db:"linked_repo_connector_identifier"`
	ConnectorRepo       string `db:"linked_repo_connector_repo"`
}

const (
	linkedRepoColumns = `
		 linked_repo_id
		,linked_repo_version
		,linked_repo_created
		,linked_repo_updated
		,linked_repo_last_full_sync
		,linked_repo_connector_path
		,linked_repo_connector_identifier
		,linked_repo_connector_repo`

	linkedRepoSelectBase = `
	SELECT` + linkedRepoColumns + `
	FROM linked_repositories`
)
// Find returns the linked repository data for the given repository ID.
func (s *LinkedRepoStore) Find(ctx context.Context, repoID int64) (*types.LinkedRepo, error) {
	const sqlQuery = linkedRepoSelectBase + `
	WHERE linked_repo_id = $1`

	db := dbtx.GetAccessor(ctx, s.db)

	dst := &linkedRepo{}
	if err := db.GetContext(ctx, dst, sqlQuery, repoID); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find linked repo")
	}

	// linkedRepo and types.LinkedRepo share an identical layout - direct conversion.
	return (*types.LinkedRepo)(dst), nil
}

// Create inserts a new linked repository row.
func (s *LinkedRepoStore) Create(ctx context.Context, v *types.LinkedRepo) error {
	const sqlQuery = `
	INSERT INTO linked_repositories (
		 linked_repo_id
		,linked_repo_version
		,linked_repo_created
		,linked_repo_updated
		,linked_repo_last_full_sync
		,linked_repo_connector_path
		,linked_repo_connector_identifier
		,linked_repo_connector_repo
	) values (
		 :linked_repo_id
		,:linked_repo_version
		,:linked_repo_created
		,:linked_repo_updated
		,:linked_repo_last_full_sync
		,:linked_repo_connector_path
		,:linked_repo_connector_identifier
		,:linked_repo_connector_repo
	)`

	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(sqlQuery, (*linkedRepo)(v))
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind linked repo object")
	}

	if _, err = db.ExecContext(ctx, query, arg...); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to insert linked repo")
	}

	return nil
}
// Update updates a linked repository row using optimistic locking:
// the row is only written if its stored version matches the caller's version.
// On success the caller's Version and Updated fields are refreshed in place;
// on a version mismatch gitness_store.ErrVersionConflict is returned.
func (s *LinkedRepoStore) Update(ctx context.Context, linked *types.LinkedRepo) error {
	const sqlQuery = `
	UPDATE linked_repositories
	SET
		 linked_repo_version = :linked_repo_version
		,linked_repo_updated = :linked_repo_updated
		,linked_repo_last_full_sync = :linked_repo_last_full_sync
	WHERE linked_repo_id = :linked_repo_id AND linked_repo_version = :linked_repo_version - 1`

	// bump the version and stamp the update time on a copy;
	// the WHERE clause compares against version - 1 (the caller's version).
	dbLinked := linkedRepo(*linked)
	dbLinked.Version++
	dbLinked.Updated = time.Now().UnixMilli()

	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(sqlQuery, dbLinked)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind linked repository object")
	}

	result, err := db.ExecContext(ctx, query, arg...)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to update linked repository")
	}

	count, err := result.RowsAffected()
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to get number of updated linked repository rows")
	}

	// zero affected rows means another writer got there first.
	if count == 0 {
		return gitness_store.ErrVersionConflict
	}

	linked.Version = dbLinked.Version
	linked.Updated = dbLinked.Updated

	return nil
}
// UpdateOptLock applies mutateFn to a copy of the linked repo and persists it,
// retrying on optimistic-lock conflicts: on ErrVersionConflict it re-reads the
// current row and re-applies mutateFn until the update succeeds or fails with
// a different error. mutateFn may therefore be invoked multiple times.
func (s *LinkedRepoStore) UpdateOptLock(
	ctx context.Context,
	r *types.LinkedRepo,
	mutateFn func(*types.LinkedRepo) error,
) (*types.LinkedRepo, error) {
	for {
		// mutate a copy so the caller's object stays untouched on failure.
		dup := *r

		err := mutateFn(&dup)
		if err != nil {
			return nil, err
		}

		err = s.Update(ctx, &dup)
		if err == nil {
			return &dup, nil
		}
		if !errors.Is(err, gitness_store.ErrVersionConflict) {
			return nil, err
		}

		// conflicting concurrent update - reload the latest row and retry.
		r, err = s.Find(ctx, r.RepoID)
		if err != nil {
			return nil, err
		}
	}
}
// List returns up to limit linked repositories whose repository is not deleted,
// ordered by last full sync ascending (least-recently synced first).
func (s *LinkedRepoStore) List(ctx context.Context, limit int) ([]types.LinkedRepo, error) {
	stmt := database.Builder.
		Select(linkedRepoColumns).
		From("linked_repositories").
		InnerJoin("repositories ON repo_id = linked_repo_id").
		Where("repo_deleted IS NULL").
		OrderBy("linked_repo_last_full_sync ASC").
		// NOTE(review): a negative limit would wrap to a huge uint64 here -
		// callers are presumably expected to pass limit >= 0; confirm.
		Limit(uint64(limit)) //nolint:gosec

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, fmt.Errorf("failed to convert linked repo list query to sql: %w", err)
	}

	dst := make([]linkedRepo, 0)

	db := dbtx.GetAccessor(ctx, s.db)
	if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing list linked repos query")
	}

	// convert the DB rows to the API type (identical layout).
	result := make([]types.LinkedRepo, len(dst))
	for i, r := range dst {
		result[i] = types.LinkedRepo(r)
	}

	return result, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/infra_provider_template.go | app/store/database/infra_provider_template.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
)
const (
	infraProviderTemplateIDColumn = `iptemp_id`
	// infraProviderTemplateColumns are the insertable columns (excludes the ID).
	infraProviderTemplateColumns = `
		iptemp_uid,
		iptemp_infra_provider_config_id,
		iptemp_description,
		iptemp_space_id,
		iptemp_data,
		iptemp_created,
		iptemp_updated,
		iptemp_version
	`
	// infraProviderTemplateSelectColumns additionally includes the ID for reads.
	infraProviderTemplateSelectColumns = infraProviderTemplateIDColumn + `,
		` + infraProviderTemplateColumns
	infraProviderTemplateTable = `infra_provider_templates`
)

// compile-time check that infraProviderTemplateStore satisfies the interface.
var _ store.InfraProviderTemplateStore = (*infraProviderTemplateStore)(nil)

// infraProviderTemplateStore implements store.InfraProviderTemplateStore
// backed by a relational database.
type infraProviderTemplateStore struct {
	db *sqlx.DB
}

// infraProviderTemplate is the DB representation of a template row.
type infraProviderTemplate struct {
	ID                    int64  `db:"iptemp_id"`
	Identifier            string `db:"iptemp_uid"`
	InfraProviderConfigID int64  `db:"iptemp_infra_provider_config_id"`
	Description           string `db:"iptemp_description"`
	SpaceID               int64  `db:"iptemp_space_id"`
	Data                  string `db:"iptemp_data"`
	Created               int64  `db:"iptemp_created"`
	Updated               int64  `db:"iptemp_updated"`
	Version               int64  `db:"iptemp_version"`
}

// NewInfraProviderTemplateStore returns a new InfraProviderTemplateStore.
func NewInfraProviderTemplateStore(db *sqlx.DB) store.InfraProviderTemplateStore {
	return &infraProviderTemplateStore{
		db: db,
	}
}
// FindByIdentifier returns the infraprovider template with the given
// identifier inside the given space.
func (i infraProviderTemplateStore) FindByIdentifier(
	ctx context.Context,
	spaceID int64,
	identifier string,
) (*types.InfraProviderTemplate, error) {
	// Use squirrel "?" placeholders so the builder's configured placeholder
	// format performs the rewriting (consistent with Update in this store);
	// embedding literal $1/$2 bypasses the builder's placeholder handling.
	stmt := database.Builder.
		Select(infraProviderTemplateSelectColumns).
		From(infraProviderTemplateTable).
		Where("iptemp_uid = ?", identifier).
		Where("iptemp_space_id = ?", spaceID)

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}

	infraProviderTemplateEntity := new(infraProviderTemplate)
	db := dbtx.GetAccessor(ctx, i.db)
	if err := db.GetContext(ctx, infraProviderTemplateEntity, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find infraprovider template %s", identifier)
	}

	return infraProviderTemplateEntity.mapToDTO(), nil
}
// Find returns the infraprovider template with the given ID.
func (i infraProviderTemplateStore) Find(
	ctx context.Context,
	id int64,
) (*types.InfraProviderTemplate,
	error) {
	// Use a squirrel "?" placeholder so the builder's configured placeholder
	// format performs the rewriting (consistent with Update in this store);
	// embedding a literal $1 bypasses the builder's placeholder handling.
	stmt := database.Builder.
		Select(infraProviderTemplateSelectColumns).
		From(infraProviderTemplateTable).
		Where(infraProviderTemplateIDColumn+" = ?", id)

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}

	infraProviderTemplateEntity := new(infraProviderTemplate)
	db := dbtx.GetAccessor(ctx, i.db)
	if err := db.GetContext(ctx, infraProviderTemplateEntity, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find infraprovider template %d", id)
	}

	return infraProviderTemplateEntity.mapToDTO(), nil
}
// Create inserts a new infraprovider template row and populates the caller's
// ID field with the database-generated identifier.
func (i infraProviderTemplateStore) Create(
	ctx context.Context,
	infraProviderTemplate *types.InfraProviderTemplate,
) error {
	stmt := database.Builder.
		Insert(infraProviderTemplateTable).
		Columns(infraProviderTemplateColumns).
		Values(infraProviderTemplate.Identifier,
			infraProviderTemplate.InfraProviderConfigID,
			infraProviderTemplate.Description,
			infraProviderTemplate.SpaceID,
			infraProviderTemplate.Data,
			infraProviderTemplate.Created,
			infraProviderTemplate.Updated,
			infraProviderTemplate.Version,
		).
		Suffix(ReturningClause + infraProviderTemplateIDColumn) // return the generated ID
	sql, args, err := stmt.ToSql()
	if err != nil {
		return errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	db := dbtx.GetAccessor(ctx, i.db)
	// scan the new ID straight from the RETURNING clause.
	if err = db.QueryRowContext(ctx, sql, args...).Scan(&infraProviderTemplate.ID); err != nil {
		return database.ProcessSQLErrorf(
			ctx, err, "infraprovider template create failed %s", infraProviderTemplate.Identifier)
	}
	return nil
}
// Update updates the mutable fields (description, data, update time) of an
// infraprovider template by ID and increments its version counter.
// Note: the WHERE clause matches on ID only - the version is bumped but not
// used as an optimistic-lock guard here.
func (i infraProviderTemplateStore) Update(
	ctx context.Context,
	infraProviderTemplate *types.InfraProviderTemplate,
) error {
	dbinfraProviderTemplate := i.mapToInternalInfraProviderTemplate(infraProviderTemplate)
	stmt := database.Builder.
		Update(infraProviderTemplateTable).
		Set("iptemp_description", dbinfraProviderTemplate.Description).
		Set("iptemp_updated", dbinfraProviderTemplate.Updated).
		Set("iptemp_data", dbinfraProviderTemplate.Data).
		Set("iptemp_version", dbinfraProviderTemplate.Version+1).
		Where("iptemp_id = ?", infraProviderTemplate.ID)
	sql, args, err := stmt.ToSql()
	if err != nil {
		return errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	db := dbtx.GetAccessor(ctx, i.db)
	if _, err := db.ExecContext(ctx, sql, args...); err != nil {
		return database.ProcessSQLErrorf(
			ctx, err, "Failed to update infraprovider template %s", infraProviderTemplate.Identifier)
	}
	return nil
}
// Delete removes the infraprovider template with the given id.
// No error is returned if no row matches the id.
func (i infraProviderTemplateStore) Delete(ctx context.Context, id int64) error {
	stmt := database.Builder.
		Delete(infraProviderTemplateTable).
		// Use the "?" placeholder so squirrel renders it for the configured
		// dialect, matching Update in this store. The previous hard-coded
		// "$1" bypassed the builder's placeholder handling and only worked
		// because it happened to be the first (and only) bind argument.
		Where(infraProviderTemplateIDColumn+" = ?", id)
	sql, args, err := stmt.ToSql()
	if err != nil {
		return errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	db := dbtx.GetAccessor(ctx, i.db)
	if _, err := db.ExecContext(ctx, sql, args...); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to delete infraprovider template")
	}
	return nil
}
// mapToInternalInfraProviderTemplate converts the API-level template into its
// database row representation. The ID is intentionally left at its zero value:
// it is assigned by the database on insert (see Create).
func (i infraProviderTemplateStore) mapToInternalInfraProviderTemplate(
	template *types.InfraProviderTemplate) infraProviderTemplate {
	var row infraProviderTemplate
	row.Identifier = template.Identifier
	row.InfraProviderConfigID = template.InfraProviderConfigID
	row.Description = template.Description
	row.Data = template.Data
	row.Version = template.Version
	row.SpaceID = template.SpaceID
	row.Created = template.Created
	row.Updated = template.Updated
	return row
}
// mapToDTO converts the database row into the API-level template type.
func (entity infraProviderTemplate) mapToDTO() *types.InfraProviderTemplate {
	dto := new(types.InfraProviderTemplate)
	dto.ID = entity.ID
	dto.Identifier = entity.Identifier
	dto.InfraProviderConfigID = entity.InfraProviderConfigID
	dto.Description = entity.Description
	dto.Data = entity.Data
	dto.Version = entity.Version
	dto.SpaceID = entity.SpaceID
	dto.Created = entity.Created
	dto.Updated = entity.Updated
	return dto
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/execution.go | app/store/database/execution.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"database/sql"
"fmt"
"time"
"github.com/harness/gitness/app/store"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/Masterminds/squirrel"
"github.com/jmoiron/sqlx"
sqlxtypes "github.com/jmoiron/sqlx/types"
"github.com/pkg/errors"
)
var _ store.ExecutionStore = (*executionStore)(nil)
// NewExecutionStore returns a new ExecutionStore backed by the given database.
func NewExecutionStore(db *sqlx.DB) store.ExecutionStore {
	s := executionStore{db: db}
	return &s
}
// executionStore implements store.ExecutionStore backed by a relational database.
type executionStore struct {
	db *sqlx.DB
}
// execution represents an execution object stored in the database.
// Field tags map to the columns listed in executionColumns; timestamps are
// stored as int64 (see mapExecutionToInternal / mapInternalToExecution for
// the conversion to/from the API type).
type execution struct {
	ID           int64              `db:"execution_id"`
	PipelineID   int64              `db:"execution_pipeline_id"`
	CreatedBy    int64              `db:"execution_created_by"`
	RepoID       int64              `db:"execution_repo_id"`
	Trigger      string             `db:"execution_trigger"`
	Number       int64              `db:"execution_number"`
	Parent       int64              `db:"execution_parent"`
	Status       enum.CIStatus      `db:"execution_status"`
	Error        string             `db:"execution_error"`
	Event        enum.TriggerEvent  `db:"execution_event"`
	Action       enum.TriggerAction `db:"execution_action"`
	Link         string             `db:"execution_link"`
	Timestamp    int64              `db:"execution_timestamp"`
	Title        string             `db:"execution_title"`
	Message      string             `db:"execution_message"`
	Before       string             `db:"execution_before"`
	After        string             `db:"execution_after"`
	Ref          string             `db:"execution_ref"`
	Fork         string             `db:"execution_source_repo"`
	Source       string             `db:"execution_source"`
	Target       string             `db:"execution_target"`
	Author       string             `db:"execution_author"`
	AuthorName   string             `db:"execution_author_name"`
	AuthorEmail  string             `db:"execution_author_email"`
	AuthorAvatar string             `db:"execution_author_avatar"`
	Sender       string             `db:"execution_sender"`
	Params       sqlxtypes.JSONText `db:"execution_params"`
	Cron         string             `db:"execution_cron"`
	Deploy       string             `db:"execution_deploy"`
	DeployID     int64              `db:"execution_deploy_id"`
	Debug        bool               `db:"execution_debug"`
	Started      int64              `db:"execution_started"`
	Finished     int64              `db:"execution_finished"`
	Created      int64              `db:"execution_created"`
	Updated      int64              `db:"execution_updated"`
	// Version is used for optimistic locking in Update.
	Version int64 `db:"execution_version"`
}
// executionPipelineRepoJoin carries an execution row joined with the
// identifiers of its pipeline and repository (used by ListInSpace).
type executionPipelineRepoJoin struct {
	execution
	PipelineUID sql.NullString `db:"pipeline_uid"`
	RepoUID     sql.NullString `db:"repo_uid"`
}
const (
	// executionColumns is the full column list of the executions table,
	// matching the fields of the execution struct.
	executionColumns = `
	execution_id
	,execution_pipeline_id
	,execution_created_by
	,execution_repo_id
	,execution_trigger
	,execution_number
	,execution_parent
	,execution_status
	,execution_error
	,execution_event
	,execution_action
	,execution_link
	,execution_timestamp
	,execution_title
	,execution_message
	,execution_before
	,execution_after
	,execution_ref
	,execution_source_repo
	,execution_source
	,execution_target
	,execution_author
	,execution_author_name
	,execution_author_email
	,execution_author_avatar
	,execution_sender
	,execution_params
	,execution_cron
	,execution_deploy
	,execution_deploy_id
	,execution_debug
	,execution_started
	,execution_finished
	,execution_created
	,execution_updated
	,execution_version
	`

	// executionInfoColumns is the reduced column set used for lightweight
	// execution summaries (see ListByPipelineIDs).
	executionInfoColumns = `
	execution_number
	,execution_pipeline_id
	,execution_status
	,execution_created_by
	,execution_trigger
	,execution_event
	,execution_started
	,execution_finished
	`
)
// Find returns an execution given an execution ID.
func (s *executionStore) Find(ctx context.Context, id int64) (*types.Execution, error) {
	//nolint:goconst
	const findQueryStmt = `
	SELECT` + executionColumns + `
	FROM executions
	WHERE execution_id = $1`

	result := new(execution)

	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.GetContext(ctx, result, findQueryStmt, id); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find execution")
	}

	return mapInternalToExecution(result)
}
// FindByNumber returns an execution given a pipeline ID and an execution number.
func (s *executionStore) FindByNumber(
	ctx context.Context,
	pipelineID int64,
	executionNum int64,
) (*types.Execution, error) {
	const findQueryStmt = `
	SELECT` + executionColumns + `
	FROM executions
	WHERE execution_pipeline_id = $1 AND execution_number = $2`

	result := new(execution)

	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.GetContext(ctx, result, findQueryStmt, pipelineID, executionNum); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find execution")
	}

	return mapInternalToExecution(result)
}
// Create creates a new execution in the datastore.
// On success the execution's ID is populated from the database-generated
// primary key returned by the RETURNING clause.
func (s *executionStore) Create(ctx context.Context, execution *types.Execution) error {
	const executionInsertStmt = `
	INSERT INTO executions (
		execution_pipeline_id
		,execution_repo_id
		,execution_created_by
		,execution_trigger
		,execution_number
		,execution_parent
		,execution_status
		,execution_error
		,execution_event
		,execution_action
		,execution_link
		,execution_timestamp
		,execution_title
		,execution_message
		,execution_before
		,execution_after
		,execution_ref
		,execution_source_repo
		,execution_source
		,execution_target
		,execution_author
		,execution_author_name
		,execution_author_email
		,execution_author_avatar
		,execution_sender
		,execution_params
		,execution_cron
		,execution_deploy
		,execution_deploy_id
		,execution_debug
		,execution_started
		,execution_finished
		,execution_created
		,execution_updated
		,execution_version
	) VALUES (
		:execution_pipeline_id
		,:execution_repo_id
		,:execution_created_by
		,:execution_trigger
		,:execution_number
		,:execution_parent
		,:execution_status
		,:execution_error
		,:execution_event
		,:execution_action
		,:execution_link
		,:execution_timestamp
		,:execution_title
		,:execution_message
		,:execution_before
		,:execution_after
		,:execution_ref
		,:execution_source_repo
		,:execution_source
		,:execution_target
		,:execution_author
		,:execution_author_name
		,:execution_author_email
		,:execution_author_avatar
		,:execution_sender
		,:execution_params
		,:execution_cron
		,:execution_deploy
		,:execution_deploy_id
		,:execution_debug
		,:execution_started
		,:execution_finished
		,:execution_created
		,:execution_updated
		,:execution_version
	) RETURNING execution_id`
	db := dbtx.GetAccessor(ctx, s.db)
	// BindNamed maps the :named placeholders to the db-tagged fields of the
	// internal execution struct.
	query, arg, err := db.BindNamed(executionInsertStmt, mapExecutionToInternal(execution))
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind execution object")
	}
	if err = db.QueryRowContext(ctx, query, arg...).Scan(&execution.ID); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Execution query failed")
	}
	return nil
}
// Update tries to update an execution in the datastore with optimistic locking.
// The WHERE clause matches the row only if its stored version equals the
// caller's version; if no row is updated, gitness_store.ErrVersionConflict is
// returned. On success the caller's execution is refreshed with the persisted
// state (new version and updated timestamp).
func (s *executionStore) Update(ctx context.Context, e *types.Execution) error {
	const executionUpdateStmt = `
	UPDATE executions
	SET
		execution_status = :execution_status
		,execution_error = :execution_error
		,execution_event = :execution_event
		,execution_started = :execution_started
		,execution_finished = :execution_finished
		,execution_updated = :execution_updated
		,execution_version = :execution_version
	WHERE execution_id = :execution_id AND execution_version = :execution_version - 1`
	updatedAt := time.Now()
	// Stages are kept aside: they are not stored on the executions row and
	// would otherwise be lost when e is overwritten below.
	stages := e.Stages

	execution := mapExecutionToInternal(e)

	execution.Version++
	execution.Updated = updatedAt.UnixMilli()

	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(executionUpdateStmt, execution)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind execution object")
	}

	result, err := db.ExecContext(ctx, query, arg...)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to update execution")
	}

	count, err := result.RowsAffected()
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to get number of updated rows")
	}

	// Zero affected rows means the version precondition failed.
	if count == 0 {
		return gitness_store.ErrVersionConflict
	}

	m, err := mapInternalToExecution(execution)
	if err != nil {
		return fmt.Errorf("could not map execution object: %w", err)
	}
	*e = *m
	e.Version = execution.Version
	e.Updated = execution.Updated
	e.Stages = stages // stages are not mapped in database.
	return nil
}
// List lists the executions for a given pipeline ID.
// It orders them in descending order of execution number.
func (s *executionStore) List(
	ctx context.Context,
	pipelineID int64,
	pagination types.Pagination,
) ([]*types.Execution, error) {
	stmt := database.Builder.
		Select(executionColumns).
		From("executions").
		// Bind the int64 directly; previously the value was stringified via
		// fmt.Sprint, forcing the driver to compare an integer column against
		// a text parameter.
		Where("execution_pipeline_id = ?", pipelineID).
		OrderBy("execution_number " + enum.OrderDesc.String())

	stmt = stmt.Limit(database.Limit(pagination.Size))
	stmt = stmt.Offset(database.Offset(pagination.Page, pagination.Size))

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert query to sql")
	}

	db := dbtx.GetAccessor(ctx, s.db)

	dst := []*execution{}
	if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing custom list query")
	}

	return mapInternalToExecutionList(dst)
}
// ListInSpace lists the executions in a given space.
// It orders them in descending order of execution id.
// The result rows are joined with pipelines and repositories so each
// execution carries its pipeline and repository identifiers.
func (s *executionStore) ListInSpace(
	ctx context.Context,
	spaceID int64,
	filter types.ListExecutionsFilter,
) ([]*types.Execution, error) {
	const executionWithPipelineRepoColumn = executionColumns + `
	,pipeline_uid
	,repo_uid`
	stmt := database.Builder.
		Select(executionWithPipelineRepoColumn).
		From("executions").
		InnerJoin("pipelines ON execution_pipeline_id = pipeline_id").
		InnerJoin("repositories ON execution_repo_id = repo_id").
		Where("repo_parent_id = ?", spaceID).
		// Sort column/order come from the caller-provided filter.
		OrderBy("execution_" + string(filter.Sort) + " " + filter.Order.String())

	stmt = stmt.Limit(database.Limit(filter.Size))
	stmt = stmt.Offset(database.Offset(filter.Page, filter.Size))

	// Optional narrowing: partial match on pipeline identifier, or an exact
	// pipeline identifier.
	if filter.Query != "" {
		stmt = stmt.Where(PartialMatch("pipeline_uid", filter.Query))
	}
	if filter.PipelineIdentifier != "" {
		stmt = stmt.Where("pipeline_uid = ?", filter.PipelineIdentifier)
	}

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert query to sql")
	}

	db := dbtx.GetAccessor(ctx, s.db)

	dst := []*executionPipelineRepoJoin{}
	if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing custom list query")
	}

	return convertExecutionPipelineRepoJoins(dst)
}
// ListByPipelineIDs returns up to maxRows of the most recent executions
// (ranked by execution number, descending) for each of the given pipeline
// IDs, keyed by pipeline ID.
//
// The receiver is a pointer for consistency with every other method of
// executionStore (previously this was the only value receiver on the type).
func (s *executionStore) ListByPipelineIDs(
	ctx context.Context,
	pipelineIDs []int64,
	maxRows int64,
) (map[int64][]*types.ExecutionInfo, error) {
	// Rank executions per pipeline with a window function, then keep only
	// the top maxRows rows of each partition.
	stmt := database.Builder.
		Select(executionInfoColumns).
		FromSelect(
			database.Builder.
				Select(executionInfoColumns+`,
				ROW_NUMBER() OVER (
					PARTITION BY execution_pipeline_id
					ORDER BY execution_number DESC
				) AS row_num
				`).
				From("executions").
				Where(squirrel.Eq{"execution_pipeline_id": pipelineIDs}),
			"ranked",
		).
		Where("row_num <= ?", maxRows)

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert query to sql")
	}

	db := dbtx.GetAccessor(ctx, s.db)

	var dst []*types.ExecutionInfo
	if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to list executions by pipeline IDs")
	}

	// Group the flat result set by pipeline ID.
	executionInfosMap := make(map[int64][]*types.ExecutionInfo)
	for _, info := range dst {
		executionInfosMap[info.PipelineID] = append(
			executionInfosMap[info.PipelineID],
			info,
		)
	}

	return executionInfosMap, nil
}
// Count of executions in a pipeline, if pipelineID is 0 then return total number of executions.
func (s *executionStore) Count(ctx context.Context, pipelineID int64) (int64, error) {
	builder := database.Builder.
		Select("count(*)").
		From("executions")
	// A non-positive pipelineID means "count across all pipelines".
	if pipelineID > 0 {
		builder = builder.Where("execution_pipeline_id = ?", pipelineID)
	}

	sql, args, err := builder.ToSql()
	if err != nil {
		return 0, errors.Wrap(err, "Failed to convert query to sql")
	}

	var count int64
	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.QueryRowContext(ctx, sql, args...).Scan(&count); err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "Failed executing count query")
	}
	return count, nil
}
// CountInSpace counts the number of executions in a given space.
// The filter's Query (partial pipeline identifier match) and
// PipelineIdentifier (exact match) further narrow the count, mirroring the
// conditions applied by ListInSpace.
func (s *executionStore) CountInSpace(
	ctx context.Context,
	spaceID int64,
	filter types.ListExecutionsFilter,
) (int64, error) {
	stmt := database.Builder.
		Select("count(*)").
		From("executions").
		InnerJoin("pipelines ON execution_pipeline_id = pipeline_id").
		InnerJoin("repositories ON execution_repo_id = repo_id").
		Where("repo_parent_id = ?", spaceID)

	if filter.Query != "" {
		stmt = stmt.Where(PartialMatch("pipeline_uid", filter.Query))
	}
	if filter.PipelineIdentifier != "" {
		stmt = stmt.Where("pipeline_uid = ?", filter.PipelineIdentifier)
	}

	sql, args, err := stmt.ToSql()
	if err != nil {
		return 0, errors.Wrap(err, "Failed to convert query to sql")
	}

	db := dbtx.GetAccessor(ctx, s.db)

	var count int64
	err = db.QueryRowContext(ctx, sql, args...).Scan(&count)
	if err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "Failed executing count query")
	}

	return count, nil
}
// Delete deletes an execution given a pipeline ID and an execution number.
func (s *executionStore) Delete(ctx context.Context, pipelineID int64, executionNum int64) error {
	const executionDeleteStmt = `
	DELETE FROM executions
	WHERE execution_pipeline_id = $1 AND execution_number = $2`

	accessor := dbtx.GetAccessor(ctx, s.db)
	_, err := accessor.ExecContext(ctx, executionDeleteStmt, pipelineID, executionNum)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Could not delete execution")
	}

	return nil
}
// convertExecutionPipelineRepoJoins maps a slice of joined rows into
// executions, failing on the first row that cannot be converted.
func convertExecutionPipelineRepoJoins(rows []*executionPipelineRepoJoin) ([]*types.Execution, error) {
	executions := make([]*types.Execution, 0, len(rows))
	for _, row := range rows {
		e, err := convertExecutionPipelineRepoJoin(row)
		if err != nil {
			return nil, err
		}
		executions = append(executions, e)
	}
	return executions, nil
}
// convertExecutionPipelineRepoJoin maps one joined row into an execution,
// attaching the repository and pipeline identifiers carried by the join.
func convertExecutionPipelineRepoJoin(join *executionPipelineRepoJoin) (*types.Execution, error) {
	result, err := mapInternalToExecution(&join.execution)
	if err != nil {
		return nil, err
	}

	result.RepoUID = join.RepoUID.String
	result.PipelineUID = join.PipelineUID.String

	return result, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/check.go | app/store/database/check.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"encoding/json"
"fmt"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/git/sha"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/Masterminds/squirrel"
"github.com/jmoiron/sqlx"
)
var _ store.CheckStore = (*CheckStore)(nil)
// NewCheckStore returns a new CheckStore.
func NewCheckStore(
	db *sqlx.DB,
	pCache store.PrincipalInfoCache,
) *CheckStore {
	s := CheckStore{
		db:     db,
		pCache: pCache,
	}
	return &s
}
// CheckStore implements store.CheckStore backed by a relational database.
type CheckStore struct {
	db *sqlx.DB
	// pCache resolves principal IDs to principal info when listing checks
	// (see mapSliceCheck).
	pCache store.PrincipalInfoCache
}
const (
	// checkColumns is the full column list of the checks table, matching the
	// fields of the check struct.
	checkColumns = `
		 check_id
		,check_created_by
		,check_created
		,check_updated
		,check_repo_id
		,check_commit_sha
		,check_uid
		,check_status
		,check_summary
		,check_link
		,check_payload
		,check_metadata
		,check_payload_kind
		,check_payload_version
		,check_started
		,check_ended`

	// checkSelectBase is the shared SELECT prefix used by single-row queries.
	//nolint:goconst
	checkSelectBase = `
	SELECT` + checkColumns + `
	FROM checks`
)
// check represents a status check result row stored in the database.
// Timestamps are stored as int64.
type check struct {
	ID             int64                 `db:"check_id"`
	CreatedBy      int64                 `db:"check_created_by"`
	Created        int64                 `db:"check_created"`
	Updated        int64                 `db:"check_updated"`
	RepoID         int64                 `db:"check_repo_id"`
	CommitSHA      string                `db:"check_commit_sha"`
	Identifier     string                `db:"check_uid"`
	Status         enum.CheckStatus      `db:"check_status"`
	Summary        string                `db:"check_summary"`
	Link           string                `db:"check_link"`
	Payload        json.RawMessage       `db:"check_payload"`
	Metadata       json.RawMessage       `db:"check_metadata"`
	PayloadKind    enum.CheckPayloadKind `db:"check_payload_kind"`
	PayloadVersion string                `db:"check_payload_version"`
	Started        int64                 `db:"check_started"`
	Ended          int64                 `db:"check_ended"`
}
// FindByIdentifier returns status check result for given unique key.
func (s *CheckStore) FindByIdentifier(
	ctx context.Context,
	repoID int64,
	commitSHA string,
	identifier string,
) (types.Check, error) {
	const sqlQuery = checkSelectBase + `
	WHERE check_repo_id = $1 AND check_uid = $2 AND check_commit_sha = $3`

	result := new(check)

	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.GetContext(ctx, result, sqlQuery, repoID, identifier, commitSHA); err != nil {
		return types.Check{}, database.ProcessSQLErrorf(ctx, err, "Failed to find check")
	}

	return mapCheck(result), nil
}
// Upsert creates new or updates an existing status check result.
// Uniqueness is on (repo, commit SHA, check identifier); on conflict the
// result fields are overwritten while the original creator and creation time
// are preserved. The persisted ID, CreatedBy and Created values are scanned
// back into the caller's check.
func (s *CheckStore) Upsert(ctx context.Context, check *types.Check) error {
	const sqlQuery = `
	INSERT INTO checks (
		 check_created_by
		,check_created
		,check_updated
		,check_repo_id
		,check_commit_sha
		,check_uid
		,check_status
		,check_summary
		,check_link
		,check_payload
		,check_metadata
		,check_payload_kind
		,check_payload_version
		,check_started
		,check_ended
	) VALUES (
		 :check_created_by
		,:check_created
		,:check_updated
		,:check_repo_id
		,:check_commit_sha
		,:check_uid
		,:check_status
		,:check_summary
		,:check_link
		,:check_payload
		,:check_metadata
		,:check_payload_kind
		,:check_payload_version
		,:check_started
		,:check_ended
	)
	ON CONFLICT (check_repo_id, check_commit_sha, check_uid) DO
	UPDATE SET
		 check_updated = :check_updated
		,check_status = :check_status
		,check_summary = :check_summary
		,check_link = :check_link
		,check_payload = :check_payload
		,check_metadata = :check_metadata
		,check_payload_kind = :check_payload_kind
		,check_payload_version = :check_payload_version
		,check_started = :check_started
		,check_ended = :check_ended
	RETURNING check_id, check_created_by, check_created`

	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(sqlQuery, mapInternalCheck(check))
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind status check object")
	}

	if err = db.QueryRowContext(ctx, query, arg...).Scan(&check.ID, &check.CreatedBy, &check.Created); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Upsert query failed")
	}

	return nil
}
// Count counts status check results for a specific commit in a repo.
func (s *CheckStore) Count(ctx context.Context,
	repoID int64,
	commitSHA string,
	opts types.CheckListOptions,
) (int, error) {
	builder := database.Builder.
		Select("count(*)").
		From("checks").
		Where("check_repo_id = ?", repoID).
		Where("check_commit_sha = ?", commitSHA)
	builder = s.applyOpts(builder, opts.Query)

	sql, args, err := builder.ToSql()
	if err != nil {
		return 0, fmt.Errorf("failed to convert query to sql: %w", err)
	}

	var count int
	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.QueryRowContext(ctx, sql, args...).Scan(&count); err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "Failed to execute count status checks query")
	}

	return count, nil
}
// List returns a list of status check results for a specific commit in a repo.
// Results are paginated per opts and ordered by last update, newest first;
// each check has its reporting principal attached (via the principal cache).
func (s *CheckStore) List(ctx context.Context,
	repoID int64,
	commitSHA string,
	opts types.CheckListOptions,
) ([]types.Check, error) {
	stmt := database.Builder.
		Select(checkColumns).
		From("checks").
		Where("check_repo_id = ?", repoID).
		Where("check_commit_sha = ?", commitSHA)

	// Optional partial match on the check identifier.
	stmt = s.applyOpts(stmt, opts.Query)

	stmt = stmt.
		Limit(database.Limit(opts.Size)).
		Offset(database.Offset(opts.Page, opts.Size)).
		OrderBy("check_updated desc")

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, fmt.Errorf("failed to convert query to sql: %w", err)
	}

	dst := make([]*check, 0)

	db := dbtx.GetAccessor(ctx, s.db)

	if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to execute list status checks query")
	}

	result, err := s.mapSliceCheck(ctx, dst)
	if err != nil {
		return nil, err
	}

	return result, nil
}
// ListRecent returns a list of recently executed status checks in a repository.
func (s *CheckStore) ListRecent(
	ctx context.Context,
	repoID int64,
	opts types.CheckRecentOptions,
) ([]string, error) {
	builder := database.Builder.
		Select("distinct check_uid").
		From("checks").
		Where("check_created > ?", opts.Since).
		Where("check_repo_id = ?", repoID)

	return s.listRecent(ctx, builder, opts)
}
// ListRecentSpace returns a list of recently executed status checks in
// repositories in spaces with specified space IDs.
func (s *CheckStore) ListRecentSpace(
	ctx context.Context,
	spaceIDs []int64,
	opts types.CheckRecentOptions,
) ([]string, error) {
	builder := database.Builder.
		Select("distinct check_uid").
		From("checks").
		Join("repositories ON checks.check_repo_id = repositories.repo_id").
		Where("check_created > ?", opts.Since).
		Where(squirrel.Eq{"repositories.repo_parent_id": spaceIDs})

	return s.listRecent(ctx, builder, opts)
}
// listRecent finalizes and runs a "recent check identifiers" query built by
// ListRecent / ListRecentSpace: it applies the optional partial identifier
// match from opts.Query, orders identifiers lexicographically and returns them.
// NOTE(review): no limit/offset is applied here — the result size is bounded
// only by the Since filter of the callers; confirm this is intended.
func (s *CheckStore) listRecent(
	ctx context.Context,
	stmt squirrel.SelectBuilder,
	opts types.CheckRecentOptions,
) ([]string, error) {
	stmt = s.applyOpts(stmt, opts.Query)

	stmt = stmt.OrderBy("check_uid")

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, fmt.Errorf("failed to convert list recent status checks query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	dst := make([]string, 0)

	if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to execute list recent status checks query")
	}

	return dst, nil
}
// ListResults returns a list of status check results for a specific commit in a repo.
func (s *CheckStore) ListResults(ctx context.Context,
	repoID int64,
	commitSHA string,
) ([]types.CheckResult, error) {
	// Only the identifier and status are needed for result listings.
	const resultColumns = "check_uid, check_status"
	builder := database.Builder.
		Select(resultColumns).
		From("checks").
		Where("check_repo_id = ?", repoID).
		Where("check_commit_sha = ?", commitSHA)

	sql, args, err := builder.ToSql()
	if err != nil {
		return nil, fmt.Errorf("failed to convert query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)
	result := make([]types.CheckResult, 0)
	if err := db.SelectContext(ctx, &result, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to execute list status checks results query")
	}

	return result, nil
}
// ResultSummary returns a list of status check result summaries for the provided list of commits in a repo.
// For each commit SHA it returns the number of checks in each status
// (pending/running/success/failure/error), keyed by the commit SHA.
func (s *CheckStore) ResultSummary(ctx context.Context,
	repoID int64,
	commitSHAs []string,
) (map[sha.SHA]types.CheckCountSummary, error) {
	const selectColumns = `
		 check_commit_sha,
		 COUNT(*) FILTER (WHERE check_status = 'pending') as "count_pending",
		 COUNT(*) FILTER (WHERE check_status = 'running') as "count_running",
		 COUNT(*) FILTER (WHERE check_status = 'success') as "count_success",
		 COUNT(*) FILTER (WHERE check_status = 'failure') as "count_failure",
		 COUNT(*) FILTER (WHERE check_status = 'error') as "count_error"`

	stmt := database.Builder.
		Select(selectColumns).
		From("checks").
		Where("check_repo_id = ?", repoID).
		Where(squirrel.Eq{"check_commit_sha": commitSHAs}).
		GroupBy("check_commit_sha")

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, fmt.Errorf("failed to convert query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	rows, err := db.QueryxContext(ctx, sql, args...)
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to execute status check summary query")
	}
	defer func() {
		_ = rows.Close()
	}()

	result := make(map[sha.SHA]types.CheckCountSummary)
	for rows.Next() {
		var commitSHAStr string
		var countPending int
		var countRunning int
		var countSuccess int
		var countFailure int
		var countError int
		err := rows.Scan(&commitSHAStr, &countPending, &countRunning, &countSuccess, &countFailure, &countError)
		if err != nil {
			return nil, database.ProcessSQLErrorf(ctx, err, "Failed to scan values of status check summary query")
		}

		commitSHA, err := sha.New(commitSHAStr)
		if err != nil {
			return nil, fmt.Errorf("invalid commit SHA read from DB: %s", commitSHAStr)
		}

		result[commitSHA] = types.CheckCountSummary{
			Pending: countPending,
			Running: countRunning,
			Success: countSuccess,
			Failure: countFailure,
			Error:   countError,
		}
	}
	// Fixed typo in the error message ("chek" -> "check").
	if err := rows.Err(); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to read status check summary")
	}

	return result, nil
}
// applyOpts narrows the statement to check identifiers partially matching the
// given query string; an empty query leaves the statement unchanged.
func (*CheckStore) applyOpts(stmt squirrel.SelectBuilder, query string) squirrel.SelectBuilder {
	if query == "" {
		return stmt
	}
	return stmt.Where(PartialMatch("check_uid", query))
}
// mapInternalCheck converts an API check into its database row
// representation, flattening the nested payload fields.
func mapInternalCheck(c *types.Check) *check {
	return &check{
		ID:             c.ID,
		CreatedBy:      c.CreatedBy,
		Created:        c.Created,
		Updated:        c.Updated,
		RepoID:         c.RepoID,
		CommitSHA:      c.CommitSHA,
		Identifier:     c.Identifier,
		Status:         c.Status,
		Summary:        c.Summary,
		Link:           c.Link,
		Payload:        c.Payload.Data,
		Metadata:       c.Metadata,
		PayloadKind:    c.Payload.Kind,
		PayloadVersion: c.Payload.Version,
		Started:        c.Started,
		Ended:          c.Ended,
	}
}
// mapCheck converts a database row into the API check type.
// ReportedBy is left nil here; callers attach the principal info separately
// (see mapSliceCheck).
func mapCheck(c *check) types.Check {
	payload := types.CheckPayload{
		Version: c.PayloadVersion,
		Kind:    c.PayloadKind,
		Data:    c.Payload,
	}
	return types.Check{
		ID:         c.ID,
		CreatedBy:  c.CreatedBy,
		Created:    c.Created,
		Updated:    c.Updated,
		RepoID:     c.RepoID,
		CommitSHA:  c.CommitSHA,
		Identifier: c.Identifier,
		Status:     c.Status,
		Summary:    c.Summary,
		Link:       c.Link,
		Metadata:   c.Metadata,
		Payload:    payload,
		ReportedBy: nil,
		Started:    c.Started,
		Ended:      c.Ended,
	}
}
// mapSliceCheck converts database rows into API checks and attaches the
// reporting principal info resolved through the principal info cache.
// Rows whose creator is missing from the cache map keep a nil ReportedBy.
func (s *CheckStore) mapSliceCheck(ctx context.Context, checks []*check) ([]types.Check, error) {
	// collect all principal IDs
	ids := make([]int64, len(checks))
	for i, req := range checks {
		ids[i] = req.CreatedBy
	}

	// pull principal infos from cache
	infoMap, err := s.pCache.Map(ctx, ids)
	if err != nil {
		return nil, fmt.Errorf("failed to load status check principal reporters: %w", err)
	}

	// attach the principal infos back to the slice items
	m := make([]types.Check, len(checks))
	for i, c := range checks {
		m[i] = mapCheck(c)
		if reportedBy, ok := infoMap[c.CreatedBy]; ok {
			m[i].ReportedBy = reportedBy
		}
	}

	return m, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/database.go | app/store/database/database.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"strings"
)
const (
PostgresDriverName = "postgres"
SqliteDriverName = "sqlite3"
)
// PartialMatch builds a string pair that can be passed as a parameter to squirrel's Where() function
// for a SQL "LIKE" expression. Besides surrounding the input value with '%' wildcard characters for a partial match,
// this function also escapes the '_' and '%' metacharacters supported in SQL "LIKE" expressions.
// The "ESCAPE" clause isn't needed for Postgres, but is necessary for SQLite.
// It will be used only if '_' and '%' are present in the value string.
//
// See:
// https://www.postgresql.org/docs/current/functions-matching.html#FUNCTIONS-LIKE
// https://www.sqlite.org/lang_expr.html#the_like_glob_regexp_match_and_extract_operators
// PartialMatch builds a string pair that can be passed as a parameter to squirrel's Where() function
// for a SQL "LIKE" expression. Besides surrounding the input value with '%' wildcard characters for a partial match,
// this function also escapes the '_' and '%' metacharacters supported in SQL "LIKE" expressions.
// The "ESCAPE" clause isn't needed for Postgres, but is necessary for SQLite.
// It will be used only if '_' and '%' are present in the value string.
//
// See:
// https://www.postgresql.org/docs/current/functions-matching.html#FUNCTIONS-LIKE
// https://www.sqlite.org/lang_expr.html#the_like_glob_regexp_match_and_extract_operators
func PartialMatch(column, value string) (string, string) {
	// If the value contains any LIKE metacharacter (or the escape character
	// itself), it will be escaped and the expression needs an ESCAPE clause.
	needsEscape := strings.ContainsAny(value, `\_%`)

	value = strings.ReplaceAll(value, `\`, `\\`)
	value = strings.ReplaceAll(value, "_", `\_`)
	value = strings.ReplaceAll(value, "%", `\%`)

	expr := "LOWER(" + column + ") LIKE '%' || LOWER(?) || '%'"
	if needsEscape {
		expr += ` ESCAPE '\'`
	}

	return expr, value
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/gitspace_settings.go | app/store/database/gitspace_settings.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"encoding/json"
"fmt"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
)
var _ store.GitspaceSettingsStore = (*gitspaceSettingsStore)(nil)
const (
	// gitspaceSettingsIDColumn is the primary key column of the
	// gitspace_settings table.
	gitspaceSettingsIDColumn = `gsett_id`
	// gitspaceSettingsColumns lists all non-ID columns, used for inserts.
	gitspaceSettingsColumns  = `
		gsett_space_id,
		gsett_settings_data,
		gsett_settings_type,
		gsett_criteria_key,
		gsett_created,
		gsett_updated
	`
	// gitspaceSettingsColumnsWithID is the full column list, used for selects.
	gitspaceSettingsColumnsWithID = gitspaceSettingsIDColumn + `,
		` + gitspaceSettingsColumns
	gitspaceSettingsTable = `gitspace_settings`
)
type gitspaceSettingsStore struct {
db *sqlx.DB
}
// List returns the gitspace settings stored for the given space, ordered by
// last update (newest first) and paginated according to the filter.
func (g gitspaceSettingsStore) List(
	ctx context.Context,
	spaceID int64,
	filter *types.GitspaceSettingsFilter,
) ([]*types.GitspaceSettings, error) {
	// Use squirrel's "?" placeholder (rewritten for the active driver by the
	// builder) instead of a hard-coded "$1", consistent with the other stores.
	stmt := database.Builder.
		Select(gitspaceSettingsColumnsWithID).
		From(gitspaceSettingsTable).
		Where("gsett_space_id = ?", spaceID).
		OrderBy("gsett_updated DESC")
	stmt = stmt.Limit(database.Limit(filter.Size))
	stmt = stmt.Offset(database.Offset(filter.Page, filter.Size))
	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, fmt.Errorf("failed to convert squirrel builder to sql: %w", err)
	}
	db := dbtx.GetAccessor(ctx, g.db)
	var gitspaceSettingsEntity []*gitspaceSettings
	if err = db.SelectContext(ctx, &gitspaceSettingsEntity, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(
			ctx, err, "Failed to find gitspace settings for space: %d", spaceID)
	}
	return g.mapToGitspaceSettings(gitspaceSettingsEntity)
}
// Upsert inserts the gitspace settings or, when a row for the same
// (space, settings type, criteria key) combination already exists,
// updates its settings data and updated timestamp in place.
func (g gitspaceSettingsStore) Upsert(ctx context.Context, in *types.GitspaceSettings) error {
	// Convert to the DB representation first; this also JSON-encodes the payload.
	dbgitspaceSettings, err := g.mapToInternalGitspaceSettings(in)
	if err != nil {
		return err
	}
	stmt := database.Builder.
		Insert(gitspaceSettingsTable).
		Columns(gitspaceSettingsColumns).
		Values(
			dbgitspaceSettings.SpaceID,
			dbgitspaceSettings.SettingsData,
			dbgitspaceSettings.SettingsType,
			dbgitspaceSettings.CriteriaKey,
			dbgitspaceSettings.Created,
			dbgitspaceSettings.Updated).
		// On conflict only the payload and updated timestamp change;
		// created timestamp and identity columns stay as originally inserted.
		Suffix(`
		ON CONFLICT (gsett_space_id, gsett_settings_type, gsett_criteria_key)
		DO UPDATE
		SET
		gsett_settings_data = EXCLUDED.gsett_settings_data,
		gsett_updated = EXCLUDED.gsett_updated`)
	sql, args, err := stmt.ToSql()
	if err != nil {
		return errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	db := dbtx.GetAccessor(ctx, g.db)
	if _, err = db.ExecContext(ctx, sql, args...); err != nil {
		return database.ProcessSQLErrorf(
			ctx, err, "gitspace settings upsert create query failed for %v %v", in.Settings, in.SpaceID)
	}
	return nil
}
// FindByType returns the gitspace settings of the given type for a space,
// scoped to the provided criteria. Returns a not-found error (via
// ProcessSQLErrorf) when no matching row exists.
func (g gitspaceSettingsStore) FindByType(
	ctx context.Context,
	spaceID int64,
	settingsType enum.GitspaceSettingsType,
	criteria *types.GitspaceSettingsCriteria,
) (*types.GitspaceSettings, error) {
	criteriaKey, err := criteria.ToKey()
	if err != nil {
		return nil, fmt.Errorf("failed to convert criteria to key: %w", err)
	}
	// Use squirrel's "?" placeholders (rewritten for the active driver by the
	// builder) instead of hard-coded "$1..$3", consistent with the other stores.
	stmt := database.Builder.
		Select(gitspaceSettingsColumnsWithID).
		From(gitspaceSettingsTable).
		Where("gsett_settings_type = ?", settingsType).
		Where("gsett_space_id = ?", spaceID).
		Where("gsett_criteria_key = ?", criteriaKey).
		OrderBy("gsett_updated DESC")
	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, fmt.Errorf("failed to convert squirrel builder to sql: %w", err)
	}
	db := dbtx.GetAccessor(ctx, g.db)
	gitspaceSettingsEntity := new(gitspaceSettings)
	if err = db.GetContext(ctx, gitspaceSettingsEntity, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(
			ctx, err, "Failed to find gitspace settings for type:%v space: %d", settingsType, spaceID)
	}
	return g.mapGitspaceSettings(gitspaceSettingsEntity)
}
// NewGitspaceSettingsStore returns a new gitspace settings store backed by the given database.
func NewGitspaceSettingsStore(db *sqlx.DB) store.GitspaceSettingsStore {
	return &gitspaceSettingsStore{
		db: db,
	}
}

// gitspaceSettings is the internal database representation of a gitspace settings row.
type gitspaceSettings struct {
	ID      int64 `db:"gsett_id"`
	SpaceID int64 `db:"gsett_space_id"`
	// SettingsData holds the JSON-encoded types.SettingsData payload.
	SettingsData []byte                    `db:"gsett_settings_data"`
	SettingsType enum.GitspaceSettingsType `db:"gsett_settings_type"`
	CriteriaKey  types.CriteriaKey         `db:"gsett_criteria_key"`
	Created      int64                     `db:"gsett_created"`
	Updated      int64                     `db:"gsett_updated"`
}
// mapGitspaceSettings converts a database row into the API gitspace settings
// type, JSON-decoding the stored payload. An empty payload yields the zero
// value of types.SettingsData.
func (g gitspaceSettingsStore) mapGitspaceSettings(in *gitspaceSettings) (*types.GitspaceSettings, error) {
	var data types.SettingsData
	if len(in.SettingsData) != 0 {
		if err := json.Unmarshal(in.SettingsData, &data); err != nil {
			return nil, err
		}
	}

	out := &types.GitspaceSettings{
		ID:           in.ID,
		SpaceID:      in.SpaceID,
		Settings:     data,
		SettingsType: in.SettingsType,
		CriteriaKey:  in.CriteriaKey,
		Created:      in.Created,
		Updated:      in.Updated,
	}
	return out, nil
}
// mapToGitspaceSettings converts a slice of database rows into API types,
// failing fast on the first row that cannot be decoded.
func (g gitspaceSettingsStore) mapToGitspaceSettings(
	in []*gitspaceSettings,
) ([]*types.GitspaceSettings, error) {
	res := make([]*types.GitspaceSettings, len(in))
	for i, entity := range in {
		mapped, err := g.mapGitspaceSettings(entity)
		if err != nil {
			return nil, err
		}
		res[i] = mapped
	}
	return res, nil
}
// mapToInternalGitspaceSettings converts the API gitspace settings object into
// its database representation, JSON-encoding the settings payload.
func (g gitspaceSettingsStore) mapToInternalGitspaceSettings(
	in *types.GitspaceSettings,
) (*gitspaceSettings, error) {
	// Idiomatic short declaration instead of the previous separate var
	// declarations plus assignment; behavior is unchanged.
	settingsBytes, err := json.Marshal(in.Settings)
	if err != nil {
		return nil, err
	}
	gitspaceSettingsEntity := &gitspaceSettings{
		ID:           in.ID,
		SpaceID:      in.SpaceID,
		SettingsData: settingsBytes,
		SettingsType: in.SettingsType,
		CriteriaKey:  in.CriteriaKey,
		Created:      in.Created,
		Updated:      in.Updated,
	}
	return gitspaceSettingsEntity, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/settings.go | app/store/database/settings.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types/enum"
"github.com/Masterminds/squirrel"
"github.com/guregu/null"
"github.com/jmoiron/sqlx"
)
// Compile-time check: SettingsStore must implement store.SettingsStore.
var _ store.SettingsStore = (*SettingsStore)(nil)

// NewSettingsStore returns a new SettingsStore.
func NewSettingsStore(db *sqlx.DB) *SettingsStore {
	return &SettingsStore{
		db: db,
	}
}

// SettingsStore implements store.SettingsStore backed by a relational database.
type SettingsStore struct {
	db *sqlx.DB
}

// setting is an internal representation used to store setting data in the database.
type setting struct {
	ID int64 `db:"setting_id"`
	// SpaceID and RepoID are mutually exclusive; both NULL means a system-scoped setting.
	SpaceID null.Int        `db:"setting_space_id"`
	RepoID  null.Int        `db:"setting_repo_id"`
	Key     string          `db:"setting_key"`
	Value   json.RawMessage `db:"setting_value"`
}

const (
	// settingsColumns is the shared column list for SELECT queries on the settings table.
	settingsColumns = `
		 setting_id
		,setting_space_id
		,setting_repo_id
		,setting_key
		,setting_value`
)
// Find returns the raw JSON value of the setting with the given key in the
// requested scope. The key comparison is case-insensitive. Scope semantics:
// space/repo scopes match the given scopeID, system scope matches rows with
// neither a space nor a repo set.
func (s *SettingsStore) Find(
	ctx context.Context,
	scope enum.SettingsScope,
	scopeID int64,
	key string,
) (json.RawMessage, error) {
	stmt := database.Builder.
		Select(settingsColumns).
		From("settings").
		Where("LOWER(setting_key) = ?", strings.ToLower(key))

	switch scope {
	case enum.SettingsScopeSpace:
		stmt = stmt.Where("setting_space_id = ?", scopeID)
	case enum.SettingsScopeRepo:
		stmt = stmt.Where("setting_repo_id = ?", scopeID)
	case enum.SettingsScopeSystem:
		// System scope: rows bound to neither a repo nor a space.
		stmt = stmt.Where("setting_repo_id IS NULL AND setting_space_id IS NULL")
	default:
		return nil, fmt.Errorf("setting scope %q is not supported", scope)
	}

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, fmt.Errorf("failed to convert query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	dst := &setting{}
	if err := db.GetContext(ctx, dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Select query failed")
	}

	return dst.Value, nil
}
// FindMany returns the raw JSON values for the given keys in the requested
// scope, as a map keyed by setting key. Keys with no stored value are simply
// absent from the result. Key matching is case-insensitive.
//
// NOTE(review): the result map uses the key as stored in the DB (d.Key), not
// the caller-supplied casing — callers looking up with a different case may
// miss entries; confirm keys are stored lowercase.
func (s *SettingsStore) FindMany(
	ctx context.Context,
	scope enum.SettingsScope,
	scopeID int64,
	keys ...string,
) (map[string]json.RawMessage, error) {
	// Fast path: nothing requested, nothing to query.
	if len(keys) == 0 {
		return map[string]json.RawMessage{}, nil
	}

	// Lowercase all keys so the IN clause matches the LOWER(setting_key) index.
	keysLower := make([]string, len(keys))
	for i, k := range keys {
		keysLower[i] = strings.ToLower(k)
	}

	stmt := database.Builder.
		Select(settingsColumns).
		From("settings").
		Where(squirrel.Eq{"LOWER(setting_key)": keysLower})

	switch scope {
	case enum.SettingsScopeSpace:
		stmt = stmt.Where("setting_space_id = ?", scopeID)
	case enum.SettingsScopeRepo:
		stmt = stmt.Where("setting_repo_id = ?", scopeID)
	case enum.SettingsScopeSystem:
		stmt = stmt.Where("setting_repo_id IS NULL AND setting_space_id IS NULL")
	default:
		return nil, fmt.Errorf("setting scope %q is not supported", scope)
	}

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, fmt.Errorf("failed to convert query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	dst := []*setting{}
	if err := db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Select query failed")
	}

	out := map[string]json.RawMessage{}
	for _, d := range dst {
		out[d.Key] = d.Value
	}

	return out, nil
}
// Upsert inserts the setting or updates its value if a row with the same key
// already exists in the given scope. The UPDATE is conditional: it only fires
// when the stored value actually differs, avoiding no-op writes.
func (s *SettingsStore) Upsert(ctx context.Context,
	scope enum.SettingsScope,
	scopeID int64,
	key string,
	value json.RawMessage,
) error {
	stmt := database.Builder.
		Insert("").
		Into("settings").
		Columns(
			"setting_space_id",
			"setting_repo_id",
			"setting_key",
			"setting_value",
		)

	// Each scope has its own partial unique index, so the conflict target
	// differs per scope. The non-applicable FK column is inserted as NULL.
	switch scope {
	case enum.SettingsScopeSpace:
		stmt = stmt.Values(null.IntFrom(scopeID), null.Int{}, key, value)
		stmt = stmt.Suffix(`ON CONFLICT (setting_space_id, LOWER(setting_key)) WHERE setting_space_id IS NOT NULL DO`)
	case enum.SettingsScopeRepo:
		stmt = stmt.Values(null.Int{}, null.IntFrom(scopeID), key, value)
		stmt = stmt.Suffix(`ON CONFLICT (setting_repo_id, LOWER(setting_key)) WHERE setting_repo_id IS NOT NULL DO`)
	case enum.SettingsScopeSystem:
		stmt = stmt.Values(null.Int{}, null.Int{}, key, value)
		stmt = stmt.Suffix(`ON CONFLICT (LOWER(setting_key))
			WHERE setting_repo_id IS NULL AND setting_space_id IS NULL DO`)
	default:
		return fmt.Errorf("setting scope %q is not supported", scope)
	}

	stmt = stmt.Suffix(`
		UPDATE SET
		setting_value = EXCLUDED.setting_value
		WHERE
		`)

	// Postgres needs an explicit ::text cast to compare json/jsonb values;
	// sqlite compares the raw value directly.
	if strings.HasPrefix(s.db.DriverName(), "sqlite") {
		stmt = stmt.Suffix(`settings.setting_value <> EXCLUDED.setting_value`)
	} else {
		stmt = stmt.Suffix(`settings.setting_value::text <> EXCLUDED.setting_value::text`)
	}

	sql, args, err := stmt.ToSql()
	if err != nil {
		return fmt.Errorf("failed to convert query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	if _, err := db.ExecContext(ctx, sql, args...); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Upsert query failed")
	}

	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/publickey_subkey.go | app/store/database/publickey_subkey.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/jmoiron/sqlx"
)
// Compile-time check: PublicKeySubKeyStore must implement store.PublicKeySubKeyStore.
var _ store.PublicKeySubKeyStore = PublicKeySubKeyStore{}

// NewPublicKeySubKeyStore returns a new PublicKeySubKeyStore.
func NewPublicKeySubKeyStore(db *sqlx.DB) PublicKeySubKeyStore {
	return PublicKeySubKeyStore{
		db: db,
	}
}

// PublicKeySubKeyStore implements a store.PublicKeySubKeyStore backed by a relational database.
type PublicKeySubKeyStore struct {
	db *sqlx.DB
}
// Create stores the PGP subkey IDs that belong to the given public key.
// A call with no subkey IDs is a no-op.
func (s PublicKeySubKeyStore) Create(ctx context.Context, publicKeyID int64, pgpKeyIDs []string) error {
	if len(pgpKeyIDs) == 0 {
		return nil
	}

	const sqlQuery = `
	INSERT INTO public_key_sub_keys(public_key_sub_key_public_key_id, public_key_sub_key_id)
	VALUES ($1, $2)`

	db := dbtx.GetAccessor(ctx, s.db)

	// Prepare once and execute per subkey so the statement is parsed a single time.
	insert, err := db.PrepareContext(ctx, sqlQuery)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to prepare insert public key subkey statement")
	}
	defer insert.Close()

	for _, subKeyID := range pgpKeyIDs {
		if _, err := insert.ExecContext(ctx, publicKeyID, subKeyID); err != nil {
			return database.ProcessSQLErrorf(ctx, err, "Insert public key subkey query failed")
		}
	}

	return nil
}
// List returns all subkey IDs registered for the given public key.
func (s PublicKeySubKeyStore) List(ctx context.Context, publicKeyID int64) ([]string, error) {
	const sqlQuery = `
	SELECT public_key_sub_key_id
	FROM public_key_sub_keys
	WHERE public_key_sub_key_public_key_id = $1`

	db := dbtx.GetAccessor(ctx, s.db)

	rows, err := db.QueryContext(ctx, sqlQuery, publicKeyID)
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to query for public key subkeys")
	}
	defer rows.Close()

	var subKeyIDs []string
	for rows.Next() {
		var id string
		if err := rows.Scan(&id); err != nil {
			return nil, database.ProcessSQLErrorf(ctx, err, "Failed to scan subkey ID")
		}
		subKeyIDs = append(subKeyIDs, id)
	}

	// Surface any error hit during iteration (not reported by Next itself).
	if err := rows.Err(); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to list subkeys")
	}

	return subKeyIDs, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/code_comment.go | app/store/database/code_comment.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"time"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
)
// Compile-time check: CodeCommentView must implement store.CodeCommentView.
var _ store.CodeCommentView = (*CodeCommentView)(nil)

// NewCodeCommentView returns a new CodeCommentView.
func NewCodeCommentView(db *sqlx.DB) *CodeCommentView {
	return &CodeCommentView{
		db: db,
	}
}

// CodeCommentView implements store.CodeCommentView backed by a relational database.
type CodeCommentView struct {
	db *sqlx.DB
}
// ListNotAtSourceSHA lists all code comments not already at the provided source SHA.
func (s *CodeCommentView) ListNotAtSourceSHA(ctx context.Context,
	prID int64, sourceSHA string,
) ([]*types.CodeComment, error) {
	// Empty mergeBaseSHA selects the source-SHA branch of the shared query.
	return s.list(ctx, prID, "", sourceSHA)
}

// ListNotAtMergeBaseSHA lists all code comments not already at the provided merge base SHA.
func (s *CodeCommentView) ListNotAtMergeBaseSHA(ctx context.Context,
	prID int64, mergeBaseSHA string,
) ([]*types.CodeComment, error) {
	// Non-empty mergeBaseSHA selects the merge-base branch of the shared query.
	return s.list(ctx, prID, mergeBaseSHA, "")
}
// list is used by internal service that updates line numbers of code comments after
// branch updates and requires either mergeBaseSHA or sourceSHA but not both.
// Resulting list is ordered by the file name and the relevant line number.
func (s *CodeCommentView) list(ctx context.Context,
	prID int64, mergeBaseSHA, sourceSHA string,
) ([]*types.CodeComment, error) {
	// Columns are coalesced so NULLs in optional code-comment fields scan
	// cleanly into the non-pointer fields of types.CodeComment.
	const codeCommentColumns = `
		 pullreq_activity_id
		,pullreq_activity_version
		,pullreq_activity_updated
		,coalesce(pullreq_activity_outdated, false) as "pullreq_activity_outdated"
		,coalesce(pullreq_activity_code_comment_merge_base_sha, '') as "pullreq_activity_code_comment_merge_base_sha"
		,coalesce(pullreq_activity_code_comment_source_sha, '') as "pullreq_activity_code_comment_source_sha"
		,coalesce(pullreq_activity_code_comment_path, '') as "pullreq_activity_code_comment_path"
		,coalesce(pullreq_activity_code_comment_line_new, 1) as "pullreq_activity_code_comment_line_new"
		,coalesce(pullreq_activity_code_comment_span_new, 0) as "pullreq_activity_code_comment_span_new"
		,coalesce(pullreq_activity_code_comment_line_old, 1) as "pullreq_activity_code_comment_line_old"
		,coalesce(pullreq_activity_code_comment_span_old, 0) as "pullreq_activity_code_comment_span_old"`

	// Only live (not deleted), top-level, non-outdated code comments qualify.
	stmt := database.Builder.
		Select(codeCommentColumns).
		From("pullreq_activities").
		Where("pullreq_activity_pullreq_id = ?", prID).
		Where("not pullreq_activity_outdated").
		Where("pullreq_activity_type = ?", enum.PullReqActivityTypeCodeComment).
		Where("pullreq_activity_kind = ?", enum.PullReqActivityKindChangeComment).
		Where("pullreq_activity_deleted is null and pullreq_activity_parent_id is null")

	// Exactly one of the two SHA filters applies per call (see callers).
	if mergeBaseSHA != "" {
		stmt = stmt.
			Where("pullreq_activity_code_comment_merge_base_sha <> ?", mergeBaseSHA)
	} else {
		stmt = stmt.
			Where("pullreq_activity_code_comment_source_sha <> ?", sourceSHA)
	}

	stmt = stmt.OrderBy("pullreq_activity_code_comment_path asc",
		"pullreq_activity_code_comment_line_new asc")

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert pull request activity query to sql")
	}

	result := make([]*types.CodeComment, 0)

	db := dbtx.GetAccessor(ctx, s.db)

	if err = db.SelectContext(ctx, &result, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing code comment list query")
	}

	return result, nil
}
// UpdateAll updates all code comments provided in the slice.
// Each update is guarded by optimistic locking on the activity version;
// a version conflict is logged and skipped rather than failing the batch.
func (s *CodeCommentView) UpdateAll(ctx context.Context, codeComments []*types.CodeComment) error {
	if len(codeComments) == 0 {
		return nil
	}

	const sqlQuery = `
	UPDATE pullreq_activities
	SET
		 pullreq_activity_version = :pullreq_activity_version
		,pullreq_activity_updated = :pullreq_activity_updated
		,pullreq_activity_outdated = :pullreq_activity_outdated
		,pullreq_activity_code_comment_merge_base_sha = :pullreq_activity_code_comment_merge_base_sha
		,pullreq_activity_code_comment_source_sha = :pullreq_activity_code_comment_source_sha
		,pullreq_activity_code_comment_path = :pullreq_activity_code_comment_path
		,pullreq_activity_code_comment_line_new = :pullreq_activity_code_comment_line_new
		,pullreq_activity_code_comment_span_new = :pullreq_activity_code_comment_span_new
		,pullreq_activity_code_comment_line_old = :pullreq_activity_code_comment_line_old
		,pullreq_activity_code_comment_span_old = :pullreq_activity_code_comment_span_old
	WHERE pullreq_activity_id = :pullreq_activity_id AND pullreq_activity_version = :pullreq_activity_version - 1`

	db := dbtx.GetAccessor(ctx, s.db)

	stmt, err := db.PrepareNamedContext(ctx, sqlQuery)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to prepare update statement for update code comments")
	}
	// Close the prepared statement to release the underlying DB resources
	// (the previous //nolint:sqlclosecheck suppressed a genuine leak).
	defer stmt.Close()

	updatedAt := time.Now()

	for _, codeComment := range codeComments {
		// Bump version/timestamp before binding so the named parameters carry
		// the new values; the WHERE clause compares against version-1.
		codeComment.Version++
		codeComment.Updated = updatedAt.UnixMilli()

		result, err := stmt.ExecContext(ctx, codeComment)
		if err != nil {
			return database.ProcessSQLErrorf(ctx, err, "Failed to update code comment=%d", codeComment.ID)
		}

		count, err := result.RowsAffected()
		if err != nil {
			return database.ProcessSQLErrorf(
				ctx,
				err,
				"Failed to get number of updated rows for code comment=%d",
				codeComment.ID,
			)
		}

		if count == 0 {
			// Optimistic-lock conflict: somebody else updated the activity; skip.
			log.Ctx(ctx).Warn().Msgf("Version conflict when trying to update code comment=%d", codeComment.ID)
			continue
		}
	}

	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/git_signature_result.go | app/store/database/git_signature_result.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/git/sha"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/Masterminds/squirrel"
"github.com/jmoiron/sqlx"
)
// Compile-time check: GitSignatureResultStore must implement store.GitSignatureResultStore.
var _ store.GitSignatureResultStore = GitSignatureResultStore{}

// NewGitSignatureResultStore returns a new GitSignatureResultStore.
func NewGitSignatureResultStore(db *sqlx.DB) GitSignatureResultStore {
	return GitSignatureResultStore{
		db: db,
	}
}

// GitSignatureResultStore implements a store.GitSignatureResultStore backed by a relational database.
type GitSignatureResultStore struct {
	db *sqlx.DB
}

const (
	// gitSignatureResultColumns is the shared column list for queries on git_signature_results.
	gitSignatureResultColumns = `
		 git_signature_result_repo_id
		,git_signature_result_object_sha
		,git_signature_result_object_time
		,git_signature_result_created
		,git_signature_result_updated
		,git_signature_result_result
		,git_signature_result_principal_id
		,git_signature_result_key_scheme
		,git_signature_result_key_id
		,git_signature_result_key_fingerprint`

	// gitSignatureResultInsertQuery is a named-parameter insert used by both
	// Create and TryCreateAll (the latter appends ON CONFLICT DO NOTHING).
	gitSignatureResultInsertQuery = `
	INSERT INTO git_signature_results (` + gitSignatureResultColumns + `
	) values (
		 :git_signature_result_repo_id
		,:git_signature_result_object_sha
		,:git_signature_result_object_time
		,:git_signature_result_created
		,:git_signature_result_updated
		,:git_signature_result_result
		,:git_signature_result_principal_id
		,:git_signature_result_key_scheme
		,:git_signature_result_key_id
		,:git_signature_result_key_fingerprint
	)`
)

// gitSignatureResult is the internal database representation of a signature verification result row.
type gitSignatureResult struct {
	RepoID         int64  `db:"git_signature_result_repo_id"`
	ObjectSHA      string `db:"git_signature_result_object_sha"`
	ObjectTime     int64  `db:"git_signature_result_object_time"`
	Created        int64  `db:"git_signature_result_created"`
	Updated        int64  `db:"git_signature_result_updated"`
	Result         string `db:"git_signature_result_result"`
	PrincipalID    int64  `db:"git_signature_result_principal_id"`
	KeyScheme      string `db:"git_signature_result_key_scheme"`
	KeyID          string `db:"git_signature_result_key_id"`
	KeyFingerprint string `db:"git_signature_result_key_fingerprint"`
}
// Map returns the stored signature verification results for the given object
// SHAs in a repository, keyed by object SHA. SHAs without a stored result are
// simply absent from the returned map.
func (s GitSignatureResultStore) Map(
	ctx context.Context,
	repoID int64,
	objectSHAs []sha.SHA,
) (map[sha.SHA]types.GitSignatureResult, error) {
	stmt := database.Builder.
		Select(gitSignatureResultColumns).
		From("git_signature_results").
		Where("git_signature_result_repo_id = ?", repoID).
		Where(squirrel.Eq{"git_signature_result_object_sha": objectSHAs})

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, fmt.Errorf("failed to convert query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	sigVers := make([]gitSignatureResult, 0)
	if err = db.SelectContext(ctx, &sigVers, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err,
			"failed to execute list git signature verification results query")
	}

	sigVerMap := map[sha.SHA]types.GitSignatureResult{}
	for _, sigVer := range sigVers {
		o := mapToGitSignatureResult(sigVer)
		sigVerMap[o.ObjectSHA] = o
	}

	return sigVerMap, nil
}
// Create inserts a single git signature verification result.
// Unlike TryCreateAll, a conflicting (already existing) row is an error.
func (s GitSignatureResultStore) Create(
	ctx context.Context,
	sigResult types.GitSignatureResult,
) error {
	db := dbtx.GetAccessor(ctx, s.db)

	sigResultInternal := mapToInternalGitSignatureResult(sigResult)

	// BindNamed expands the :named parameters into driver placeholders + args.
	query, arg, err := db.BindNamed(gitSignatureResultInsertQuery, &sigResultInternal)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err,
			"Failed to bind git signature verification result object")
	}

	if _, err = db.ExecContext(ctx, query, arg...); err != nil {
		return database.ProcessSQLErrorf(ctx, err,
			"Insert git signature verification result query failed")
	}

	return nil
}
// TryCreateAll inserts the provided signature verification results,
// silently skipping entries that already exist (ON CONFLICT DO NOTHING).
func (s GitSignatureResultStore) TryCreateAll(
	ctx context.Context,
	sigResults []*types.GitSignatureResult,
) error {
	if len(sigResults) == 0 {
		return nil
	}

	db := dbtx.GetAccessor(ctx, s.db)

	const sql = gitSignatureResultInsertQuery + `
	ON CONFLICT DO NOTHING`

	stmt, err := db.PrepareNamedContext(ctx, sql)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err,
			"Failed to prepare git signature verification result statement")
	}
	defer stmt.Close()

	for _, sigResult := range sigResults {
		// ExecContext (rather than Exec) so each insert honors context
		// cancellation/deadline, consistent with the rest of this store.
		_, err = stmt.ExecContext(ctx, mapToInternalGitSignatureResult(*sigResult))
		if err != nil {
			return database.ProcessSQLErrorf(ctx, err,
				"Failed to insert git signature verification result")
		}
	}

	return nil
}
// UpdateAll sets the verification result for all stored rows belonging to the
// given principal, optionally narrowed to specific key IDs and/or fingerprints.
// Empty keyIDs/keyFingerprints slices mean "no filter on that column".
func (s GitSignatureResultStore) UpdateAll(
	ctx context.Context,
	result enum.GitSignatureResult,
	principalID int64,
	keyIDs, keyFingerprints []string,
) error {
	query := database.Builder.
		Update("git_signature_results").
		Set("git_signature_result_result", result).
		Set("git_signature_result_updated", time.Now().UnixMilli()).
		Where("git_signature_result_principal_id = ?", principalID)

	if len(keyIDs) > 0 {
		query = query.Where(squirrel.Eq{"git_signature_result_key_id": keyIDs})
	}

	if len(keyFingerprints) > 0 {
		query = query.Where(squirrel.Eq{"git_signature_result_key_fingerprint": keyFingerprints})
	}

	sql, args, err := query.ToSql()
	if err != nil {
		return fmt.Errorf("failed to convert query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	_, err = db.ExecContext(ctx, sql, args...)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "failed to update git signatures")
	}

	return nil
}
// mapToInternalGitSignatureResult converts the API signature verification
// result into its database representation (enums and SHA become strings).
func mapToInternalGitSignatureResult(in types.GitSignatureResult) gitSignatureResult {
	out := gitSignatureResult{
		RepoID:         in.RepoID,
		ObjectSHA:      in.ObjectSHA.String(),
		ObjectTime:     in.ObjectTime,
		Created:        in.Created,
		Updated:        in.Updated,
		Result:         string(in.Result),
		PrincipalID:    in.PrincipalID,
		KeyScheme:      string(in.KeyScheme),
		KeyID:          in.KeyID,
		KeyFingerprint: in.KeyFingerprint,
	}
	return out
}

// mapToGitSignatureResult converts a database row into the API signature
// verification result type.
func mapToGitSignatureResult(in gitSignatureResult) types.GitSignatureResult {
	// NOTE(review): the sha.New error is deliberately discarded here, matching
	// the original behavior — a malformed stored SHA yields a zero-value SHA.
	objectSHA, _ := sha.New(in.ObjectSHA)
	return types.GitSignatureResult{
		RepoID:         in.RepoID,
		ObjectSHA:      objectSHA,
		ObjectTime:     in.ObjectTime,
		Created:        in.Created,
		Updated:        in.Updated,
		Result:         enum.GitSignatureResult(in.Result),
		PrincipalID:    in.PrincipalID,
		KeyScheme:      enum.PublicKeyScheme(in.KeyScheme),
		KeyID:          in.KeyID,
		KeyFingerprint: in.KeyFingerprint,
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/usage_metrics_test.go | app/store/database/usage_metrics_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database_test
import (
"context"
"testing"
"time"
"github.com/harness/gitness/app/store/database"
"github.com/harness/gitness/types"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
)
// TestUsageMetricsStore_Upsert verifies that bandwidth and push counters
// accumulate across upserts for the same space/day, while a second write that
// omits storage values leaves the previously stored storage totals unchanged.
func TestUsageMetricsStore_Upsert(t *testing.T) {
	db, teardown := setupDB(t)
	defer teardown()
	principalStore, spaceStore, spacePathStore, _ := setupStores(t, db)

	ctx := context.Background()

	createUser(ctx, t, principalStore)
	createSpace(ctx, t, spaceStore, spacePathStore, userID, 1, 0)

	metricsStore := database.NewUsageMetricsStore(db)

	// First write will set bandwidth and storage to 100
	err := metricsStore.Upsert(ctx, &types.UsageMetric{
		RootSpaceID:     1,
		BandwidthOut:    100,
		BandwidthIn:     100,
		StorageTotal:    100,
		LFSStorageTotal: 100,
		Pushes:          21,
	})
	require.NoError(t, err)

	// second write will increase bandwidth for 100 and storage remains the same
	err = metricsStore.Upsert(ctx, &types.UsageMetric{
		RootSpaceID:  1,
		BandwidthOut: 100,
		BandwidthIn:  100,
		Pushes:       3,
	})
	require.NoError(t, err)

	// Read the row back directly to assert the raw stored values.
	row := db.QueryRowContext(
		ctx,
		`SELECT
			usage_metric_space_id,
			usage_metric_date,
			usage_metric_bandwidth_out,
			usage_metric_bandwidth_in,
			usage_metric_storage_total,
			usage_metric_lfs_storage_total,
			usage_metric_pushes
		FROM usage_metrics
		WHERE usage_metric_space_id = ?
		LIMIT 1`,
		1,
	)
	metric := types.UsageMetric{}
	var date int64
	err = row.Scan(
		&metric.RootSpaceID,
		&date,
		&metric.BandwidthOut,
		&metric.BandwidthIn,
		&metric.StorageTotal,
		&metric.LFSStorageTotal,
		&metric.Pushes,
	)
	require.NoError(t, err)
	require.Equal(t, int64(1), metric.RootSpaceID)
	require.Equal(t, metricsStore.Date(time.Now()), date)
	// Bandwidth and pushes are summed; storage keeps the first write's values.
	require.Equal(t, int64(200), metric.BandwidthOut)
	require.Equal(t, int64(200), metric.BandwidthIn)
	require.Equal(t, int64(100), metric.StorageTotal)
	require.Equal(t, int64(100), metric.LFSStorageTotal)
	require.Equal(t, int64(24), metric.Pushes)
}
// TestUsageMetricsStore_UpsertOptimistic verifies that 100 concurrent
// optimistic upserts all succeed and that their counters sum correctly,
// i.e. no increments are lost under contention.
func TestUsageMetricsStore_UpsertOptimistic(t *testing.T) {
	db, teardown := setupDB(t)
	defer teardown()
	principalStore, spaceStore, spacePathStore, _ := setupStores(t, db)

	ctx := context.Background()

	createUser(ctx, t, principalStore)
	createSpace(ctx, t, spaceStore, spacePathStore, userID, 1, 0)

	metricsStore := database.NewUsageMetricsStore(db)

	// Fire 100 concurrent upserts against the same space/day.
	g, _ := errgroup.WithContext(ctx)
	for range 100 {
		g.Go(func() error {
			return metricsStore.UpsertOptimistic(ctx, &types.UsageMetric{
				RootSpaceID:  1,
				BandwidthOut: 100,
				BandwidthIn:  100,
				Pushes:       21,
			})
		})
	}
	err := g.Wait()
	require.NoError(t, err)

	now := time.Now().UnixMilli()
	metric, err := metricsStore.GetMetrics(ctx, 1, now, now)
	require.NoError(t, err)
	// Every increment must be accounted for; storage was never written.
	require.Equal(t, int64(100*100), metric.BandwidthOut)
	require.Equal(t, int64(100*100), metric.BandwidthIn)
	require.Equal(t, int64(0), metric.StorageTotal)
	require.Equal(t, int64(0), metric.LFSStorageTotal)
	require.Equal(t, int64(21*100), metric.Pushes)
}
// TestUsageMetricsStore_GetMetrics verifies that GetMetrics returns the
// values of a single upsert for a space over a date range containing today.
func TestUsageMetricsStore_GetMetrics(t *testing.T) {
	db, teardown := setupDB(t)
	defer teardown()
	principalStore, spaceStore, spacePathStore, _ := setupStores(t, db)

	ctx := context.Background()

	createUser(ctx, t, principalStore)
	createSpace(ctx, t, spaceStore, spacePathStore, userID, 1, 0)

	metricsStore := database.NewUsageMetricsStore(db)

	// First write will set bandwidth and storage to 100
	err := metricsStore.Upsert(ctx, &types.UsageMetric{
		RootSpaceID:     1,
		BandwidthOut:    100,
		BandwidthIn:     100,
		StorageTotal:    100,
		LFSStorageTotal: 100,
		Pushes:          21,
	})
	require.NoError(t, err)

	now := time.Now().UnixMilli()
	metric, err := metricsStore.GetMetrics(ctx, 1, now, now)
	require.NoError(t, err)
	require.Equal(t, int64(1), metric.RootSpaceID, "expected spaceID = %d, got %d", 1, metric.RootSpaceID)
	require.Equal(t, int64(100), metric.BandwidthOut, "expected bandwidth out = %d, got %d", 100, metric.BandwidthOut)
	require.Equal(t, int64(100), metric.BandwidthIn, "expected bandwidth in = %d, got %d", 100, metric.BandwidthIn)
	require.Equal(t, int64(100), metric.StorageTotal, "expected storage = %d, got %d", 100, metric.StorageTotal)
	require.Equal(t, int64(100), metric.LFSStorageTotal, "expected lfs storage = %d, got %d", 100, metric.LFSStorageTotal)
	require.Equal(t, int64(21), metric.Pushes, "expected pushes = %d, got %d", 21, metric.Pushes)
}
// TestUsageMetricsStore_List verifies that List aggregates per root space
// (bandwidth and pushes accumulate, storage reflects the latest write, as
// pinned by the expected values below) and returns rows in descending order.
func TestUsageMetricsStore_List(t *testing.T) {
	db, teardown := setupDB(t)
	defer teardown()

	principalStore, spaceStore, spacePathStore, _ := setupStores(t, db)

	ctx := context.Background()
	createUser(ctx, t, principalStore)
	createSpace(ctx, t, spaceStore, spacePathStore, userID, 1, 0)
	createSpace(ctx, t, spaceStore, spacePathStore, userID, 2, 0)

	store := database.NewUsageMetricsStore(db)

	// Two writes for space 1, one for space 2.
	for _, m := range []types.UsageMetric{
		{RootSpaceID: 1, BandwidthOut: 100, BandwidthIn: 100, StorageTotal: 100, LFSStorageTotal: 100, Pushes: 21},
		{RootSpaceID: 1, BandwidthOut: 50, BandwidthIn: 50, StorageTotal: 50, LFSStorageTotal: 50, Pushes: 21},
		{RootSpaceID: 2, BandwidthOut: 200, BandwidthIn: 200, StorageTotal: 200, LFSStorageTotal: 200, Pushes: 21},
	} {
		require.NoError(t, store.Upsert(ctx, &m))
	}

	ts := time.Now().UnixMilli()
	rows, err := store.List(ctx, ts, ts)
	require.NoError(t, err)
	require.Equal(t, 2, len(rows))

	// list use desc order so first row should be spaceID = 2
	require.Equal(t, int64(2), rows[0].RootSpaceID)
	require.Equal(t, int64(200), rows[0].BandwidthOut)
	require.Equal(t, int64(200), rows[0].BandwidthIn)
	require.Equal(t, int64(200), rows[0].StorageTotal)
	require.Equal(t, int64(200), rows[0].LFSStorageTotal)
	require.Equal(t, int64(21), rows[0].Pushes)

	// second row should be spaceID = 1
	require.Equal(t, int64(1), rows[1].RootSpaceID)
	require.Equal(t, int64(150), rows[1].BandwidthOut)
	require.Equal(t, int64(150), rows[1].BandwidthIn)
	require.Equal(t, int64(50), rows[1].StorageTotal)
	require.Equal(t, int64(50), rows[1].LFSStorageTotal)
	require.Equal(t, int64(42), rows[1].Pushes)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/encode.go | app/store/database/encode.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"encoding/json"
sqlx "github.com/jmoiron/sqlx/types"
)
// EncodeToSQLXJSON accepts a generic parameter and returns a sqlx.JSONText
// object which is used to store arbitrary data in the DB. The marshalling
// error is deliberately absorbed: in case of UnsupportedValueError or
// UnsupportedTypeError the raw bytes are wrapped in sqlx.JSONText as-is.
func EncodeToSQLXJSON(v any) sqlx.JSONText {
	encoded, _ := json.Marshal(v) // error absorbed on purpose, see doc comment
	return sqlx.JSONText(encoded)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/pullreq.go | app/store/database/pullreq.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"fmt"
"strings"
"time"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/errors"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/Masterminds/squirrel"
"github.com/guregu/null"
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
"github.com/rs/zerolog/log"
)
// Compile-time check that PullReqStore satisfies store.PullReqStore.
var _ store.PullReqStore = (*PullReqStore)(nil)

// NewPullReqStore returns a new PullReqStore.
func NewPullReqStore(db *sqlx.DB,
	pCache store.PrincipalInfoCache) *PullReqStore {
	s := &PullReqStore{}
	s.db = db
	s.pCache = pCache
	return s
}
// PullReqStore implements store.PullReqStore backed by a relational database.
type PullReqStore struct {
	db     *sqlx.DB
	pCache store.PrincipalInfoCache // resolves author/merger principal infos when mapping rows
}
// pullReq is used to fetch pull request data from the database.
// The object should be later re-packed into a different struct to return it as an API response.
// Fields typed with the null package map to nullable columns; they are
// converted to pointers (or slices) by mapPullReq.
type pullReq struct {
	ID              int64             `db:"pullreq_id"`
	Version         int64             `db:"pullreq_version"` // incremented on every update; used for optimistic locking
	Number          int64             `db:"pullreq_number"`
	CreatedBy       int64             `db:"pullreq_created_by"`
	Created         int64             `db:"pullreq_created"`
	Updated         int64             `db:"pullreq_updated"`
	Edited          int64             `db:"pullreq_edited"` // TODO: Remove
	Closed          null.Int          `db:"pullreq_closed"`
	State           enum.PullReqState `db:"pullreq_state"`
	IsDraft         bool              `db:"pullreq_is_draft"`
	CommentCount    int               `db:"pullreq_comment_count"`
	UnresolvedCount int               `db:"pullreq_unresolved_count"`
	Title           string            `db:"pullreq_title"`
	Description     string            `db:"pullreq_description"`

	SourceRepoID null.Int `db:"pullreq_source_repo_id"`
	SourceBranch string   `db:"pullreq_source_branch"`
	SourceSHA    string   `db:"pullreq_source_sha"`
	TargetRepoID int64    `db:"pullreq_target_repo_id"`
	TargetBranch string   `db:"pullreq_target_branch"`

	ActivitySeq int64 `db:"pullreq_activity_seq"`

	MergedBy                null.Int    `db:"pullreq_merged_by"`
	Merged                  null.Int    `db:"pullreq_merged"`
	MergeMethod             null.String `db:"pullreq_merge_method"`
	MergeTargetSHA          null.String `db:"pullreq_merge_target_sha"`
	MergeBaseSHA            string      `db:"pullreq_merge_base_sha"`
	MergeSHA                null.String `db:"pullreq_merge_sha"`
	MergeViolationsBypassed null.Bool   `db:"pullreq_merge_violations_bypassed"`

	MergeCheckStatus enum.MergeCheckStatus `db:"pullreq_merge_check_status"`
	// Conflicting file paths are stored newline-separated in a single column
	// (see mapPullReq / mapInternalPullReq).
	MergeConflicts    null.String           `db:"pullreq_merge_conflicts"`
	RebaseCheckStatus enum.MergeCheckStatus `db:"pullreq_rebase_check_status"`
	RebaseConflicts   null.String           `db:"pullreq_rebase_conflicts,omitempty"`

	CommitCount null.Int `db:"pullreq_commit_count"`
	FileCount   null.Int `db:"pullreq_file_count"`
	Additions   null.Int `db:"pullreq_additions"`
	Deletions   null.Int `db:"pullreq_deletions"`
}
const (
	// pullReqColumnsNoDescription lists all pullreqs columns except the
	// (potentially large) description; used by queries that opt out of
	// fetching descriptions.
	pullReqColumnsNoDescription = `
		 pullreq_id
		,pullreq_version
		,pullreq_number
		,pullreq_created_by
		,pullreq_created
		,pullreq_updated
		,pullreq_edited
		,pullreq_closed
		,pullreq_state
		,pullreq_is_draft
		,pullreq_comment_count
		,pullreq_unresolved_count
		,pullreq_title
		,pullreq_source_repo_id
		,pullreq_source_branch
		,pullreq_source_sha
		,pullreq_target_repo_id
		,pullreq_target_branch
		,pullreq_activity_seq
		,pullreq_merged_by
		,pullreq_merged
		,pullreq_merge_method
		,pullreq_merge_violations_bypassed
		,pullreq_merge_target_sha
		,pullreq_merge_base_sha
		,pullreq_merge_sha
		,pullreq_merge_check_status
		,pullreq_merge_conflicts
		,pullreq_rebase_check_status
		,pullreq_rebase_conflicts
		,pullreq_commit_count
		,pullreq_file_count
		,pullreq_additions
		,pullreq_deletions`

	// pullReqColumns is the full column set, including the description.
	pullReqColumns = pullReqColumnsNoDescription + `
		,pullreq_description`

	// pullReqSelectBase is the shared SELECT prefix for single-row lookups.
	pullReqSelectBase = `
	SELECT` + pullReqColumns + `
	FROM pullreqs`
)
// Find finds the pull request by id.
func (s *PullReqStore) Find(ctx context.Context, id int64) (*types.PullReq, error) {
	const sqlQuery = pullReqSelectBase + `
	WHERE pullreq_id = $1`

	var dst pullReq
	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.GetContext(ctx, &dst, sqlQuery, id); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find pull request")
	}

	return s.mapPullReq(ctx, &dst), nil
}
// findByNumberInternal fetches a pull request by target repo and number,
// optionally appending a row lock clause (skipped on sqlite, which has no
// row-level locking).
func (s *PullReqStore) findByNumberInternal(
	ctx context.Context,
	repoID,
	number int64,
	lock bool,
) (*types.PullReq, error) {
	sqlQuery := pullReqSelectBase + `
	WHERE pullreq_target_repo_id = $1 AND pullreq_number = $2`

	if lock && !strings.HasPrefix(s.db.DriverName(), "sqlite") {
		sqlQuery += "\n" + database.SQLForUpdate
	}

	var dst pullReq
	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.GetContext(ctx, &dst, sqlQuery, repoID, number); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find pull request by number")
	}

	return s.mapPullReq(ctx, &dst), nil
}
// FindByNumberWithLock finds the pull request by repo ID and pull request
// number and locks the pull request for the duration of the transaction.
func (s *PullReqStore) FindByNumberWithLock(
	ctx context.Context,
	repoID,
	number int64,
) (*types.PullReq, error) {
	const lockRow = true
	return s.findByNumberInternal(ctx, repoID, number, lockRow)
}
// FindByNumber finds the pull request by repo ID and pull request number.
func (s *PullReqStore) FindByNumber(ctx context.Context, repoID, number int64) (*types.PullReq, error) {
	const lockRow = false
	return s.findByNumberInternal(ctx, repoID, number, lockRow)
}
// Create creates a new pull request.
// It inserts the full row via a named query and writes the generated
// pullreq_id back into pr. The caller is expected to have populated all
// fields (including Created/Updated timestamps and Version).
func (s *PullReqStore) Create(ctx context.Context, pr *types.PullReq) error {
	const sqlQuery = `
	INSERT INTO pullreqs (
		 pullreq_version
		,pullreq_number
		,pullreq_created_by
		,pullreq_created
		,pullreq_updated
		,pullreq_edited
		,pullreq_closed
		,pullreq_state
		,pullreq_is_draft
		,pullreq_comment_count
		,pullreq_unresolved_count
		,pullreq_title
		,pullreq_description
		,pullreq_source_repo_id
		,pullreq_source_branch
		,pullreq_source_sha
		,pullreq_target_repo_id
		,pullreq_target_branch
		,pullreq_activity_seq
		,pullreq_merged_by
		,pullreq_merged
		,pullreq_merge_method
		,pullreq_merge_violations_bypassed
		,pullreq_merge_target_sha
		,pullreq_merge_base_sha
		,pullreq_merge_sha
		,pullreq_merge_check_status
		,pullreq_merge_conflicts
		,pullreq_rebase_check_status
		,pullreq_rebase_conflicts
		,pullreq_commit_count
		,pullreq_file_count
		,pullreq_additions
		,pullreq_deletions
	) values (
		 :pullreq_version
		,:pullreq_number
		,:pullreq_created_by
		,:pullreq_created
		,:pullreq_updated
		,:pullreq_edited
		,:pullreq_closed
		,:pullreq_state
		,:pullreq_is_draft
		,:pullreq_comment_count
		,:pullreq_unresolved_count
		,:pullreq_title
		,:pullreq_description
		,:pullreq_source_repo_id
		,:pullreq_source_branch
		,:pullreq_source_sha
		,:pullreq_target_repo_id
		,:pullreq_target_branch
		,:pullreq_activity_seq
		,:pullreq_merged_by
		,:pullreq_merged
		,:pullreq_merge_method
		,:pullreq_merge_violations_bypassed
		,:pullreq_merge_target_sha
		,:pullreq_merge_base_sha
		,:pullreq_merge_sha
		,:pullreq_merge_check_status
		,:pullreq_merge_conflicts
		,:pullreq_rebase_check_status
		,:pullreq_rebase_conflicts
		,:pullreq_commit_count
		,:pullreq_file_count
		,:pullreq_additions
		,:pullreq_deletions
	) RETURNING pullreq_id`

	db := dbtx.GetAccessor(ctx, s.db)

	// Bind the named parameters against the internal DB representation
	// (null-wrapped optionals, newline-joined conflict lists).
	query, arg, err := db.BindNamed(sqlQuery, mapInternalPullReq(pr))
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind pullReq object")
	}

	// Scan the generated ID straight back into the caller's object.
	if err = db.QueryRowContext(ctx, query, arg...).Scan(&pr.ID); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Insert query failed")
	}

	return nil
}
// Update updates the pull request.
// Optimistic locking: the row is only updated when its stored version equals
// pr.Version; the version is then incremented. On a version mismatch (zero
// rows affected) gitness_store.ErrVersionConflict is returned and pr is left
// untouched. On success pr is overwritten with the updated state.
func (s *PullReqStore) Update(ctx context.Context, pr *types.PullReq) error {
	const sqlQuery = `
	UPDATE pullreqs
	SET
		 pullreq_version = :pullreq_version
		,pullreq_updated = :pullreq_updated
		,pullreq_edited = :pullreq_edited
		,pullreq_closed = :pullreq_closed
		,pullreq_state = :pullreq_state
		,pullreq_is_draft = :pullreq_is_draft
		,pullreq_comment_count = :pullreq_comment_count
		,pullreq_unresolved_count = :pullreq_unresolved_count
		,pullreq_title = :pullreq_title
		,pullreq_description = :pullreq_description
		,pullreq_activity_seq = :pullreq_activity_seq
		,pullreq_source_sha = :pullreq_source_sha
		,pullreq_target_branch = :pullreq_target_branch
		,pullreq_merged_by = :pullreq_merged_by
		,pullreq_merged = :pullreq_merged
		,pullreq_merge_method = :pullreq_merge_method
		,pullreq_merge_target_sha = :pullreq_merge_target_sha
		,pullreq_merge_base_sha = :pullreq_merge_base_sha
		,pullreq_merge_sha = :pullreq_merge_sha
		,pullreq_merge_check_status = :pullreq_merge_check_status
		,pullreq_merge_conflicts = :pullreq_merge_conflicts
		,pullreq_merge_violations_bypassed = :pullreq_merge_violations_bypassed
		,pullreq_rebase_check_status = :pullreq_rebase_check_status
		,pullreq_rebase_conflicts = :pullreq_rebase_conflicts
		,pullreq_commit_count = :pullreq_commit_count
		,pullreq_file_count = :pullreq_file_count
		,pullreq_additions = :pullreq_additions
		,pullreq_deletions = :pullreq_deletions
	WHERE pullreq_id = :pullreq_id AND pullreq_version = :pullreq_version - 1`

	db := dbtx.GetAccessor(ctx, s.db)

	updatedAt := time.Now().UnixMilli()

	// Bump the version and refresh both timestamps (Edited mirrors Updated
	// until the column is removed — see the pullReq struct TODO).
	dbPR := mapInternalPullReq(pr)
	dbPR.Version++
	dbPR.Updated = updatedAt
	dbPR.Edited = updatedAt

	query, arg, err := db.BindNamed(sqlQuery, dbPR)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind pull request object")
	}

	result, err := db.ExecContext(ctx, query, arg...)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to update pull request")
	}

	count, err := result.RowsAffected()
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to get number of updated rows")
	}

	// Zero affected rows means the WHERE version check failed.
	if count == 0 {
		return gitness_store.ErrVersionConflict
	}

	// Reflect the new version/timestamps back into the caller's object.
	*pr = *s.mapPullReq(ctx, dbPR)

	return nil
}
// updateMergeCheckMetadata updates the pull request merge check metadata only without updating updated time stamp.
// Like Update it uses optimistic locking on pullreq_version and returns
// gitness_store.ErrVersionConflict when another writer got there first;
// unlike Update it deliberately leaves pullreq_updated/pullreq_edited alone
// so background merge checks don't make PRs look recently modified.
func (s *PullReqStore) updateMergeCheckMetadata(ctx context.Context, pr *types.PullReq) error {
	const sqlQuery = `
	UPDATE pullreqs
	SET
		 pullreq_version = :pullreq_version
		,pullreq_merge_target_sha = :pullreq_merge_target_sha
		,pullreq_merge_base_sha = :pullreq_merge_base_sha
		,pullreq_merge_sha = :pullreq_merge_sha
		,pullreq_merge_check_status = :pullreq_merge_check_status
		,pullreq_merge_conflicts = :pullreq_merge_conflicts
		,pullreq_rebase_check_status = :pullreq_rebase_check_status
		,pullreq_rebase_conflicts = :pullreq_rebase_conflicts
		,pullreq_commit_count = :pullreq_commit_count
		,pullreq_file_count = :pullreq_file_count
		,pullreq_additions = :pullreq_additions
		,pullreq_deletions = :pullreq_deletions
	WHERE pullreq_id = :pullreq_id AND pullreq_version = :pullreq_version - 1`

	db := dbtx.GetAccessor(ctx, s.db)

	dbPR := mapInternalPullReq(pr)
	dbPR.Version++

	query, arg, err := db.BindNamed(sqlQuery, dbPR)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind pull request object")
	}

	result, err := db.ExecContext(ctx, query, arg...)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to update pull request")
	}

	count, err := result.RowsAffected()
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to get number of updated rows")
	}

	// Zero affected rows means the WHERE version check failed.
	if count == 0 {
		return gitness_store.ErrVersionConflict
	}

	// Reflect the new version back into the caller's object.
	*pr = *s.mapPullReq(ctx, dbPR)

	return nil
}
// UpdateOptLock the pull request details using the optimistic locking mechanism.
// It applies mutateFn to a copy of pr and attempts to persist it; on a
// version conflict the latest revision is re-read and the cycle repeats.
func (s *PullReqStore) UpdateOptLock(ctx context.Context, pr *types.PullReq,
	mutateFn func(pr *types.PullReq) error,
) (*types.PullReq, error) {
	for {
		updated := *pr
		if err := mutateFn(&updated); err != nil {
			return nil, err
		}

		switch err := s.Update(ctx, &updated); {
		case err == nil:
			return &updated, nil
		case !errors.Is(err, gitness_store.ErrVersionConflict):
			return nil, err
		}

		// Somebody else modified the PR concurrently: reload and retry.
		latest, err := s.Find(ctx, pr.ID)
		if err != nil {
			return nil, err
		}
		pr = latest
	}
}
// UpdateMergeCheckMetadataOptLock updates the pull request merge check metadata using the optimistic locking mechanism.
// It applies mutateFn to a copy of pr and attempts to persist it; on a
// version conflict the latest revision is re-read and the cycle repeats.
func (s *PullReqStore) UpdateMergeCheckMetadataOptLock(ctx context.Context, pr *types.PullReq,
	mutateFn func(pr *types.PullReq) error,
) (*types.PullReq, error) {
	for {
		updated := *pr
		if err := mutateFn(&updated); err != nil {
			return nil, err
		}

		switch err := s.updateMergeCheckMetadata(ctx, &updated); {
		case err == nil:
			return &updated, nil
		case !errors.Is(err, gitness_store.ErrVersionConflict):
			return nil, err
		}

		// Somebody else modified the PR concurrently: reload and retry.
		latest, err := s.Find(ctx, pr.ID)
		if err != nil {
			return nil, err
		}
		pr = latest
	}
}
// UpdateActivitySeq updates the pull request's activity sequence.
// The increment is retried under optimistic locking until it sticks.
func (s *PullReqStore) UpdateActivitySeq(ctx context.Context, pr *types.PullReq) (*types.PullReq, error) {
	increment := func(pr *types.PullReq) error {
		pr.ActivitySeq++
		return nil
	}
	return s.UpdateOptLock(ctx, pr, increment)
}
// ResetMergeCheckStatus resets the pull request's mergeability status to unchecked
// for all pr which target branch points to targetBranch.
// Closed and merged PRs are excluded; all cached merge-check artifacts
// (merge/rebase SHAs, conflicts, diff stats) are cleared in one statement.
func (s *PullReqStore) ResetMergeCheckStatus(
	ctx context.Context,
	targetRepo int64,
	targetBranch string,
) error {
	// NOTE: Keep pullreq_merge_base_sha on old value as it's a required field.
	// NOTE: Deliberately skip update of "repo_updated" field because we update many PRs at once.
	const query = `
	UPDATE pullreqs
	SET
		 pullreq_version = pullreq_version + 1
		,pullreq_merge_target_sha = NULL
		,pullreq_merge_sha = NULL
		,pullreq_merge_check_status = $1
		,pullreq_merge_conflicts = NULL
		,pullreq_rebase_check_status = $1
		,pullreq_rebase_conflicts = NULL
		,pullreq_commit_count = NULL
		,pullreq_file_count = NULL
		,pullreq_additions = NULL
		,pullreq_deletions = NULL
	WHERE pullreq_target_repo_id = $2 AND
		  pullreq_target_branch = $3 AND
		  pullreq_state not in ($4, $5)`

	db := dbtx.GetAccessor(ctx, s.db)

	_, err := db.ExecContext(ctx, query, enum.MergeCheckStatusUnchecked, targetRepo, targetBranch,
		enum.PullReqStateClosed, enum.PullReqStateMerged)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to reset mergeable status check in pull requests")
	}

	return nil
}
// Delete the pull request.
func (s *PullReqStore) Delete(ctx context.Context, id int64) error {
	const sqlQuery = `DELETE FROM pullreqs WHERE pullreq_id = $1`

	db := dbtx.GetAccessor(ctx, s.db)
	if _, err := db.ExecContext(ctx, sqlQuery, id); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "the delete query failed")
	}

	return nil
}
// Count of pull requests for a repo.
// Label filters make applyFilter group by pullreq_id, so in that case the
// grouped result must be counted by an outer wrapping query.
func (s *PullReqStore) Count(ctx context.Context, opts *types.PullReqFilter) (int64, error) {
	hasLabelFilter := len(opts.LabelID) > 0 || len(opts.ValueID) > 0

	var stmt squirrel.SelectBuilder
	if hasLabelFilter {
		stmt = database.Builder.Select("1")
	} else {
		stmt = database.Builder.Select("COUNT(*)")
	}
	stmt = stmt.From("pullreqs")

	s.applyFilter(&stmt, opts)

	if hasLabelFilter {
		stmt = database.Builder.Select("COUNT(*)").FromSelect(stmt, "subquery")
	}

	sql, args, err := stmt.ToSql()
	if err != nil {
		return 0, fmt.Errorf("failed to convert query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	var count int64
	if err := db.QueryRowContext(ctx, sql, args...).Scan(&count); err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "Failed executing count query")
	}

	return count, nil
}
// List returns a list of pull requests for a repo.
// Pagination and ordering come from opts; rows are mapped to API objects
// with author/merger infos attached.
func (s *PullReqStore) List(ctx context.Context, opts *types.PullReqFilter) ([]*types.PullReq, error) {
	stmt := s.listQuery(opts).
		Limit(database.Limit(opts.Size)).
		Offset(database.Offset(opts.Page, opts.Size))

	// NOTE: string concatenation is safe because the
	// order attribute is an enum and is not user-defined,
	// and is therefore not subject to injection attacks.
	opts.Sort, _ = opts.Sort.Sanitize()
	stmt = stmt.OrderBy("pullreq_" + string(opts.Sort) + " " + opts.Order.String())

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, fmt.Errorf("failed to convert query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	dst := make([]*pullReq, 0)
	if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing custom list query")
	}

	return s.mapSlicePullReq(ctx, dst)
}
// Stream returns a list of pull requests for a repo.
// Results are produced on the returned PR channel ordered by last update
// (newest first); a failure is delivered on the error channel. Both channels
// are closed when the producing goroutine finishes.
func (s *PullReqStore) Stream(ctx context.Context, opts *types.PullReqFilter) (<-chan *types.PullReq, <-chan error) {
	stmt := s.listQuery(opts)

	stmt = stmt.OrderBy("pullreq_updated desc")

	chPRs := make(chan *types.PullReq)
	chErr := make(chan error, 1) // buffered so the goroutine never blocks delivering a single error

	go func() {
		defer close(chPRs)
		defer close(chErr)

		sql, args, err := stmt.ToSql()
		if err != nil {
			chErr <- fmt.Errorf("failed to convert query to sql: %w", err)
			return
		}

		db := dbtx.GetAccessor(ctx, s.db)

		rows, err := db.QueryxContext(ctx, sql, args...)
		if err != nil {
			chErr <- database.ProcessSQLErrorf(ctx, err, "Failed to execute stream query")
			return
		}
		defer func() { _ = rows.Close() }()

		for rows.Next() {
			var prData pullReq
			err = rows.StructScan(&prData)
			if err != nil {
				chErr <- fmt.Errorf("failed to scan pull request: %w", err)
				return
			}

			// NOTE(review): this send does not select on ctx.Done(); if a
			// consumer stops receiving without cancelling ctx, the goroutine
			// blocks here — confirm all callers drain or cancel.
			chPRs <- s.mapPullReq(ctx, &prData)
		}

		// rows.Err reports errors encountered during iteration.
		if err := rows.Err(); err != nil {
			chErr <- fmt.Errorf("failed to scan pull request: %w", err)
		}
	}()

	return chPRs, chErr
}
// ListOpenByBranchName returns all open pull requests originating in repoID
// from any of the given source branches, keyed by source branch name and
// ordered newest-updated first within the overall result.
func (s *PullReqStore) ListOpenByBranchName(
	ctx context.Context,
	repoID int64,
	branchNames []string,
) (map[string][]*types.PullReq, error) {
	stmt := database.Builder.
		Select(pullReqColumnsNoDescription).
		From("pullreqs").
		Where("pullreq_source_repo_id = ?", repoID).
		Where("pullreq_state = ?", enum.PullReqStateOpen).
		Where(squirrel.Eq{"pullreq_source_branch": branchNames}).
		OrderBy("pullreq_updated desc")

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, fmt.Errorf("failed to convert query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	dst := make([]*pullReq, 0)
	if err := db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to fetch list of PRs by branch")
	}

	prMap := make(map[string][]*types.PullReq)
	for _, prDB := range dst {
		prMap[prDB.SourceBranch] = append(prMap[prDB.SourceBranch], s.mapPullReq(ctx, prDB))
	}

	return prMap, nil
}
// listQuery builds the shared SELECT used by List and Stream, applying all
// filters from opts.
func (s *PullReqStore) listQuery(opts *types.PullReqFilter) squirrel.SelectBuilder {
	columns := pullReqColumns
	if opts.ExcludeDescription {
		columns = pullReqColumnsNoDescription
	}

	// Joins introduced by these filters can yield duplicate PR rows,
	// hence DISTINCT.
	needsDistinct := len(opts.LabelID) > 0 || len(opts.ValueID) > 0 ||
		opts.CommenterID > 0 || opts.MentionedID > 0

	var stmt squirrel.SelectBuilder
	if needsDistinct {
		stmt = database.Builder.Select("DISTINCT " + columns)
	} else {
		stmt = database.Builder.Select(columns)
	}
	stmt = stmt.From("pullreqs")

	s.applyFilter(&stmt, opts)

	return stmt
}
// applyFilter translates a types.PullReqFilter into WHERE/JOIN/GROUP BY
// clauses on the given builder, mutating it in place. Some list filters are
// driver-specific (sqlite vs. postgres) — see the switch statements below.
//
//nolint:cyclop,gocognit,gocyclo
func (s *PullReqStore) applyFilter(stmt *squirrel.SelectBuilder, opts *types.PullReqFilter) {
	// state filter
	if len(opts.States) == 1 {
		*stmt = stmt.Where("pullreq_state = ?", opts.States[0])
	} else if len(opts.States) > 1 {
		*stmt = stmt.Where(squirrel.Eq{"pullreq_state": opts.States})
	}

	// source/target repo and branch filters
	if opts.SourceRepoID != 0 {
		*stmt = stmt.Where("pullreq_source_repo_id = ?", opts.SourceRepoID)
	}

	if opts.SourceBranch != "" {
		*stmt = stmt.Where("pullreq_source_branch = ?", opts.SourceBranch)
	}

	if opts.TargetRepoID != 0 {
		*stmt = stmt.Where("pullreq_target_repo_id = ?", opts.TargetRepoID)
	}

	if opts.TargetBranch != "" {
		*stmt = stmt.Where("pullreq_target_branch = ?", opts.TargetBranch)
	}

	// free-text query matches the title only
	if opts.Query != "" {
		*stmt = stmt.Where(PartialMatch("pullreq_title", opts.Query))
	}

	// created/updated/edited timestamp windows (millisecond epochs)
	if opts.CreatedLt > 0 {
		*stmt = stmt.Where("pullreq_created < ?", opts.CreatedLt)
	}

	if opts.CreatedGt > 0 {
		*stmt = stmt.Where("pullreq_created > ?", opts.CreatedGt)
	}

	if opts.UpdatedLt > 0 {
		*stmt = stmt.Where("pullreq_updated < ?", opts.UpdatedLt)
	}

	if opts.UpdatedGt > 0 {
		*stmt = stmt.Where("pullreq_updated > ?", opts.UpdatedGt)
	}

	if opts.EditedLt > 0 {
		*stmt = stmt.Where("pullreq_edited < ?", opts.EditedLt)
	}

	if opts.EditedGt > 0 {
		*stmt = stmt.Where("pullreq_edited > ?", opts.EditedGt)
	}

	// space filter requires joining the target repo to reach its parent space
	if len(opts.SpaceIDs) == 1 {
		*stmt = stmt.InnerJoin("repositories ON repo_id = pullreq_target_repo_id")
		*stmt = stmt.Where("repo_parent_id = ?", opts.SpaceIDs[0])
	} else if len(opts.SpaceIDs) > 1 {
		*stmt = stmt.InnerJoin("repositories ON repo_id = pullreq_target_repo_id")
		switch s.db.DriverName() {
		case SqliteDriverName:
			*stmt = stmt.Where(squirrel.Eq{"repo_parent_id": opts.SpaceIDs})
		case PostgresDriverName:
			// postgres: array binding avoids a long IN list
			*stmt = stmt.Where("repo_parent_id = ANY(?)", pq.Array(opts.SpaceIDs))
		}
	}

	// repo blacklist
	if len(opts.RepoIDBlacklist) == 1 {
		*stmt = stmt.Where("pullreq_target_repo_id <> ?", opts.RepoIDBlacklist[0])
	} else if len(opts.RepoIDBlacklist) > 1 {
		switch s.db.DriverName() {
		case SqliteDriverName:
			*stmt = stmt.Where(squirrel.NotEq{"pullreq_target_repo_id": opts.RepoIDBlacklist})
		case PostgresDriverName:
			*stmt = stmt.Where("pullreq_target_repo_id <> ALL(?)", pq.Array(opts.RepoIDBlacklist))
		}
	}

	// author filter
	if len(opts.CreatedBy) == 1 {
		*stmt = stmt.Where("pullreq_created_by = ?", opts.CreatedBy[0])
	} else if len(opts.CreatedBy) > 1 {
		switch s.db.DriverName() {
		case SqliteDriverName:
			*stmt = stmt.Where(squirrel.Eq{"pullreq_created_by": opts.CreatedBy})
		case PostgresDriverName:
			*stmt = stmt.Where("pullreq_created_by = ANY(?)", pq.Array(opts.CreatedBy))
		}
	}

	// commenter filter: PRs with a live (not deleted) comment by the principal;
	// string concatenation below is safe - the values are enum constants.
	if opts.CommenterID > 0 {
		*stmt = stmt.InnerJoin("pullreq_activities act_com ON act_com.pullreq_activity_pullreq_id = pullreq_id")
		*stmt = stmt.Where("act_com.pullreq_activity_deleted IS NULL")
		*stmt = stmt.Where("(" +
			"act_com.pullreq_activity_kind = '" + string(enum.PullReqActivityKindComment) + "' OR " +
			"act_com.pullreq_activity_kind = '" + string(enum.PullReqActivityKindChangeComment) + "')")
		*stmt = stmt.Where("act_com.pullreq_activity_created_by = ?", opts.CommenterID)
	}

	// reviewer filter; the Sprintf is safe - ReviewerID is an int64.
	if opts.ReviewerID > 0 {
		*stmt = stmt.InnerJoin(
			fmt.Sprintf("pullreq_reviewers ON "+
				"pullreq_reviewer_pullreq_id = pullreq_id AND pullreq_reviewer_principal_id = %d", opts.ReviewerID))
		if len(opts.ReviewDecisions) > 0 {
			*stmt = stmt.Where(squirrel.Eq{"pullreq_reviewer_review_decision": opts.ReviewDecisions})
		}
	}

	// mentioned filter: the principal's ID appears in the activity metadata's
	// mentions.ids JSON array; extraction is driver-specific.
	if opts.MentionedID > 0 {
		*stmt = stmt.InnerJoin("pullreq_activities act_ment ON act_ment.pullreq_activity_pullreq_id = pullreq_id")
		*stmt = stmt.Where("act_ment.pullreq_activity_deleted IS NULL")
		*stmt = stmt.Where("(" +
			"act_ment.pullreq_activity_kind = '" + string(enum.PullReqActivityKindComment) + "' OR " +
			"act_ment.pullreq_activity_kind = '" + string(enum.PullReqActivityKindChangeComment) + "')")

		switch s.db.DriverName() {
		case SqliteDriverName:
			*stmt = stmt.InnerJoin(
				"json_each(json_extract(act_ment.pullreq_activity_metadata, '$.mentions.ids')) as mentions")
			*stmt = stmt.Where("mentions.value = ?", opts.MentionedID)
		case PostgresDriverName:
			// safe - MentionedID is an int64
			*stmt = stmt.Where(fmt.Sprintf(
				"act_ment.pullreq_activity_metadata->'mentions'->'ids' @> ('[%d]')::jsonb",
				opts.MentionedID))
		}
	}

	// labels
	if len(opts.LabelID) == 0 && len(opts.ValueID) == 0 {
		return
	}

	// label filters group by PR and require a full match count (HAVING below)
	// so that a PR must carry every requested label/value.
	*stmt = stmt.InnerJoin("pullreq_labels ON pullreq_label_pullreq_id = pullreq_id").
		GroupBy("pullreq_id")

	switch {
	case len(opts.LabelID) > 0 && len(opts.ValueID) == 0:
		*stmt = stmt.Where(
			squirrel.Eq{"pullreq_label_label_id": opts.LabelID},
		)
	case len(opts.LabelID) == 0 && len(opts.ValueID) > 0:
		*stmt = stmt.Where(
			squirrel.Eq{"pullreq_label_label_value_id": opts.ValueID},
		)
	default:
		*stmt = stmt.Where(squirrel.Or{
			squirrel.Eq{"pullreq_label_label_id": opts.LabelID},
			squirrel.Eq{"pullreq_label_label_value_id": opts.ValueID},
		})
	}

	*stmt = stmt.Having("COUNT(pullreq_label_pullreq_id) = ?", len(opts.LabelID)+len(opts.ValueID))
}
// mapPullReq converts the internal DB representation into the API type.
// Nullable columns become pointers; the newline-separated conflict columns
// become string slices. Author is left zero-valued and Merger nil — the
// caller (PullReqStore.mapPullReq / mapSlicePullReq) fills them in.
func mapPullReq(pr *pullReq) *types.PullReq {
	var mergeConflicts, rebaseConflicts []string
	// Conflicting file paths are stored newline-separated in one column.
	if pr.MergeConflicts.Valid {
		mergeConflicts = strings.Split(pr.MergeConflicts.String, "\n")
	}
	if pr.RebaseConflicts.Valid {
		rebaseConflicts = strings.Split(pr.RebaseConflicts.String, "\n")
	}
	return &types.PullReq{
		ID:              pr.ID,
		Version:         pr.Version,
		Number:          pr.Number,
		CreatedBy:       pr.CreatedBy,
		Created:         pr.Created,
		Updated:         pr.Updated,
		Edited:          pr.Edited, // TODO: When we remove the DB column, make Edited equal to Updated
		Closed:          pr.Closed.Ptr(),
		State:           pr.State,
		IsDraft:         pr.IsDraft,
		CommentCount:    pr.CommentCount,
		UnresolvedCount: pr.UnresolvedCount,
		Title:           pr.Title,
		Description:     pr.Description,
		SourceRepoID:    pr.SourceRepoID.Ptr(),
		SourceBranch:    pr.SourceBranch,
		SourceSHA:       pr.SourceSHA,
		TargetRepoID:    pr.TargetRepoID,
		TargetBranch:    pr.TargetBranch,
		ActivitySeq:     pr.ActivitySeq,
		MergedBy:        pr.MergedBy.Ptr(),
		Merged:          pr.Merged.Ptr(),
		MergeMethod:     (*enum.MergeMethod)(pr.MergeMethod.Ptr()),

		MergeCheckStatus:        pr.MergeCheckStatus,
		MergeTargetSHA:          pr.MergeTargetSHA.Ptr(),
		MergeBaseSHA:            pr.MergeBaseSHA,
		MergeSHA:                pr.MergeSHA.Ptr(),
		MergeConflicts:          mergeConflicts,
		MergeViolationsBypassed: pr.MergeViolationsBypassed.Ptr(),

		RebaseCheckStatus: pr.RebaseCheckStatus,
		RebaseConflicts:   rebaseConflicts,

		// Principal infos are attached by the caller.
		Author: types.PrincipalInfo{},
		Merger: nil,
		Stats: types.PullReqStats{
			Conversations:   pr.CommentCount,
			UnresolvedCount: pr.UnresolvedCount,
			DiffStats: types.DiffStats{
				Commits:      pr.CommitCount.Ptr(),
				FilesChanged: pr.FileCount.Ptr(),
				Additions:    pr.Additions.Ptr(),
				Deletions:    pr.Deletions.Ptr(),
			},
		},
	}
}
// mapInternalPullReq converts the API type into the internal DB
// representation: pointers become null wrappers and the conflict slices are
// joined with newlines into single (nullable) columns.
func mapInternalPullReq(pr *types.PullReq) *pullReq {
	// Empty conflict lists are stored as NULL, not as an empty string.
	mergeConflicts := strings.Join(pr.MergeConflicts, "\n")
	rebaseConflicts := strings.Join(pr.RebaseConflicts, "\n")
	m := &pullReq{
		ID:              pr.ID,
		Version:         pr.Version,
		Number:          pr.Number,
		CreatedBy:       pr.CreatedBy,
		Created:         pr.Created,
		Updated:         pr.Updated,
		Edited:          pr.Edited, // TODO: When we remove the DB column, make Edited equal to Updated
		Closed:          null.IntFromPtr(pr.Closed),
		State:           pr.State,
		IsDraft:         pr.IsDraft,
		CommentCount:    pr.CommentCount,
		UnresolvedCount: pr.UnresolvedCount,
		Title:           pr.Title,
		Description:     pr.Description,
		SourceRepoID:    null.IntFromPtr(pr.SourceRepoID),
		SourceBranch:    pr.SourceBranch,
		SourceSHA:       pr.SourceSHA,
		TargetRepoID:    pr.TargetRepoID,
		TargetBranch:    pr.TargetBranch,
		ActivitySeq:     pr.ActivitySeq,
		MergedBy:        null.IntFromPtr(pr.MergedBy),
		Merged:          null.IntFromPtr(pr.Merged),
		MergeMethod:     null.StringFromPtr((*string)(pr.MergeMethod)),

		MergeCheckStatus:        pr.MergeCheckStatus,
		MergeTargetSHA:          null.StringFromPtr(pr.MergeTargetSHA),
		MergeBaseSHA:            pr.MergeBaseSHA,
		MergeSHA:                null.StringFromPtr(pr.MergeSHA),
		MergeConflicts:          null.NewString(mergeConflicts, mergeConflicts != ""),
		MergeViolationsBypassed: null.BoolFromPtr(pr.MergeViolationsBypassed),

		RebaseCheckStatus: pr.RebaseCheckStatus,
		RebaseConflicts:   null.NewString(rebaseConflicts, rebaseConflicts != ""),

		CommitCount: null.IntFromPtr(pr.Stats.Commits),
		FileCount:   null.IntFromPtr(pr.Stats.FilesChanged),
		Additions:   null.IntFromPtr(pr.Stats.Additions),
		Deletions:   null.IntFromPtr(pr.Stats.Deletions),
	}

	return m
}
// mapPullReq converts a DB row into the API type and resolves the author and
// (when present) merger through the principal info cache. Cache failures are
// logged but do not fail the mapping.
func (s *PullReqStore) mapPullReq(ctx context.Context, pr *pullReq) *types.PullReq {
	result := mapPullReq(pr)

	author, err := s.pCache.Get(ctx, pr.CreatedBy)
	if err != nil {
		log.Ctx(ctx).Err(err).Msg("failed to load PR author")
	}
	if author != nil {
		result.Author = *author
	}

	if pr.MergedBy.Valid {
		merger, mergerErr := s.pCache.Get(ctx, pr.MergedBy.Int64)
		if mergerErr != nil {
			log.Ctx(ctx).Err(mergerErr).Msg("failed to load PR merger")
		}
		result.Merger = merger
	}

	return result
}
// mapSlicePullReq converts DB rows into API pull request objects and attaches
// author/merger principal infos resolved through a single batched cache lookup.
func (s *PullReqStore) mapSlicePullReq(ctx context.Context, prs []*pullReq) ([]*types.PullReq, error) {
	// collect all principal IDs (author always, merger when present)
	principalIDs := make([]int64, 0, 2*len(prs))
	for _, pr := range prs {
		principalIDs = append(principalIDs, pr.CreatedBy)
		if pr.MergedBy.Valid {
			principalIDs = append(principalIDs, pr.MergedBy.Int64)
		}
	}

	// pull principal infos from cache
	infoMap, err := s.pCache.Map(ctx, principalIDs)
	if err != nil {
		return nil, fmt.Errorf("failed to load PR principal infos: %w", err)
	}

	// attach the principal infos back to the slice items
	result := make([]*types.PullReq, len(prs))
	for i, pr := range prs {
		item := mapPullReq(pr)
		if author, ok := infoMap[pr.CreatedBy]; ok {
			item.Author = *author
		}
		if pr.MergedBy.Valid {
			if merger, ok := infoMap[pr.MergedBy.Int64]; ok {
				item.Merger = merger
			}
		}
		result[i] = item
	}

	return result, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/space.go | app/store/database/space.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"fmt"
"strconv"
"strings"
"time"
"github.com/harness/gitness/app/paths"
"github.com/harness/gitness/app/store"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/Masterminds/squirrel"
"github.com/guregu/null"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
)
// Compile-time check that SpaceStore satisfies the store.SpaceStore interface.
var _ store.SpaceStore = (*SpaceStore)(nil)
// NewSpaceStore returns a new SpaceStore backed by the given database and
// space path cache/store.
func NewSpaceStore(
	db *sqlx.DB,
	spacePathCache store.SpacePathCache,
	spacePathStore store.SpacePathStore,
) *SpaceStore {
	s := &SpaceStore{
		db:             db,
		spacePathCache: spacePathCache,
		spacePathStore: spacePathStore,
	}
	return s
}
// SpaceStore implements a SpaceStore backed by a relational database.
type SpaceStore struct {
	db             *sqlx.DB             // underlying database handle
	spacePathCache store.SpacePathCache // resolves space paths to space IDs
	spacePathStore store.SpacePathStore // looks up primary paths of spaces
}
// space is an internal representation used to store space data in DB.
type space struct {
	ID      int64 `db:"space_id"`
	Version int64 `db:"space_version"` // used for optimistic locking
	// IMPORTANT: We need to make parentID optional for spaces to allow it to be a foreign key.
	ParentID    null.Int `db:"space_parent_id"`
	Identifier  string   `db:"space_uid"`
	Description string   `db:"space_description"`
	CreatedBy   int64    `db:"space_created_by"`
	Created     int64    `db:"space_created"` // creation timestamp (unix millis)
	Updated     int64    `db:"space_updated"` // last update timestamp (unix millis)
	Deleted     null.Int `db:"space_deleted"` // soft-delete timestamp; NULL for live spaces
}
const (
	// spaceColumns lists every column of the spaces table in the order the
	// internal space struct expects them.
	spaceColumns = `
	space_id
	,space_version
	,space_parent_id
	,space_uid
	,space_description
	,space_created_by
	,space_created
	,space_updated
	,space_deleted`

	// spaceSelectBase is the shared SELECT prefix for single-table space queries.
	spaceSelectBase = `
	SELECT` + spaceColumns + `
	FROM spaces`
)
// Find the space by id.
func (s *SpaceStore) Find(ctx context.Context, id int64) (*types.Space, error) {
	// a nil deletedAt restricts the lookup to non-deleted spaces
	return s.find(ctx, id, nil)
}
// FindByIDs finds all spaces by ids.
func (s *SpaceStore) FindByIDs(ctx context.Context, ids ...int64) ([]*types.Space, error) {
	sqlQuery, args, err := database.Builder.
		Select(spaceColumns).
		From("spaces").
		Where(squirrel.Eq{"space_id": ids}).
		ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert query to sql")
	}

	var dst []*space
	db := dbtx.GetAccessor(ctx, s.db)
	if err = db.SelectContext(ctx, &dst, sqlQuery, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing custom list query")
	}

	return s.mapToSpaces(ctx, s.db, dst)
}
// find fetches a single space by id. A nil deletedAt matches only live
// spaces; a non-nil value matches a space soft deleted at that timestamp.
func (s *SpaceStore) find(ctx context.Context, id int64, deletedAt *int64) (*types.Space, error) {
	stmt := database.Builder.
		Select(spaceColumns).
		From("spaces").
		Where("space_id = ?", id)

	if deletedAt == nil {
		stmt = stmt.Where("space_deleted IS NULL")
	} else {
		stmt = stmt.Where("space_deleted = ?", *deletedAt)
	}

	sqlQuery, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert query to sql")
	}

	dst := new(space)
	db := dbtx.GetAccessor(ctx, s.db)
	if err = db.GetContext(ctx, dst, sqlQuery, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find space")
	}

	return mapToSpace(ctx, s.db, s.spacePathStore, dst)
}
// FindByRef finds the space using the spaceRef as either the id or the space path.
func (s *SpaceStore) FindByRef(ctx context.Context, spaceRef string) (*types.Space, error) {
	// nil deletedAt limits the lookup to non-deleted spaces
	return s.findByRef(ctx, spaceRef, nil)
}
// FindByRefCaseInsensitive finds the space using the spaceRef.
func (s *SpaceStore) FindByRefCaseInsensitive(ctx context.Context, spaceRef string) (int64, error) {
	segments := paths.Segments(spaceRef)
	if len(segments) == 0 {
		return -1, fmt.Errorf("invalid space reference provided")
	}

	var stmt squirrel.SelectBuilder
	if len(segments) == 1 {
		// single segment: match any space by identifier, ignoring case
		stmt = database.Builder.
			Select("space_id").
			From("spaces").
			Where("LOWER(space_uid) = ? ", strings.ToLower(segments[0])).
			Limit(1)
	} else {
		// multiple segments: resolve the full path via recursive joins
		stmt = buildRecursiveSelectQueryUsingCaseInsensitivePath(segments)
	}

	sqlQuery, args, err := stmt.ToSql()
	if err != nil {
		return -1, fmt.Errorf("failed to create sql query: %w", err)
	}

	var spaceID int64
	db := dbtx.GetAccessor(ctx, s.db)
	if err = db.GetContext(ctx, &spaceID, sqlQuery, args...); err != nil {
		return -1, database.ProcessSQLErrorf(ctx, err, "Failed executing custom select query")
	}

	return spaceID, nil
}
// FindByRefAndDeletedAt finds the space using the spaceRef as either the id or the space path and deleted timestamp.
func (s *SpaceStore) FindByRefAndDeletedAt(
	ctx context.Context,
	spaceRef string,
	deletedAt int64,
) (*types.Space, error) {
	// ASSUMPTION: digits only is not a valid space path
	if id, err := strconv.ParseInt(spaceRef, 10, 64); err == nil {
		return s.find(ctx, id, &deletedAt)
	}

	return s.findByPathAndDeletedAt(ctx, spaceRef, deletedAt)
}
// findByRef resolves spaceRef (numeric id or space path) to a space,
// optionally restricted to a specific soft-delete timestamp.
func (s *SpaceStore) findByRef(ctx context.Context, spaceRef string, deletedAt *int64) (*types.Space, error) {
	// ASSUMPTION: digits only is not a valid space path
	id, err := strconv.ParseInt(spaceRef, 10, 64)
	if err != nil {
		// not a numeric id - resolve the path through the cache instead
		spacePath, pathErr := s.spacePathCache.Get(ctx, spaceRef)
		if pathErr != nil {
			return nil, fmt.Errorf("failed to get path: %w", pathErr)
		}
		id = spacePath.SpaceID
	}

	return s.find(ctx, id, deletedAt)
}
// findByPathAndDeletedAt looks up a soft deleted space by its full path and
// deletion timestamp.
func (s *SpaceStore) findByPathAndDeletedAt(
	ctx context.Context,
	spaceRef string,
	deletedAt int64,
) (*types.Space, error) {
	segments := paths.Segments(spaceRef)
	if len(segments) == 0 {
		return nil, fmt.Errorf("invalid space reference provided")
	}

	var stmt squirrel.SelectBuilder
	if len(segments) == 1 {
		// root space: match identifier, deletion timestamp, and absence of parent
		stmt = database.Builder.
			Select("space_id").
			From("spaces").
			Where("space_uid = ? AND space_deleted = ? AND space_parent_id IS NULL", segments[0], deletedAt)
	} else {
		// nested space: resolve the full path via recursive joins
		stmt = buildRecursiveSelectQueryUsingPath(segments, deletedAt)
	}

	sqlQuery, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to create sql query")
	}

	var spaceID int64
	db := dbtx.GetAccessor(ctx, s.db)
	if err = db.GetContext(ctx, &spaceID, sqlQuery, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing custom select query")
	}

	return s.find(ctx, spaceID, &deletedAt)
}
// spaceAncestorsQuery is a recursive CTE that yields the space given as $1
// and all of its ancestors up to the root (one row per space: id, uid,
// parent id).
const spaceAncestorsQuery = `
WITH RECURSIVE space_ancestors(space_ancestor_id, space_ancestor_uid, space_ancestor_parent_id) AS (
SELECT space_id, space_uid, space_parent_id
FROM spaces
WHERE space_id = $1
UNION
SELECT space_id, space_uid, space_parent_id
FROM spaces
JOIN space_ancestors ON space_id = space_ancestor_parent_id
)
`

// spaceDescendantsQuery is a recursive CTE that yields the space given as $1
// and all of its descendants (one row per space: id, uid, parent id).
const spaceDescendantsQuery = `
WITH RECURSIVE space_descendants(space_descendant_id, space_descendant_uid, space_descendant_parent_id) AS (
SELECT space_id, space_uid, space_parent_id
FROM spaces
WHERE space_id = $1
UNION
SELECT space_id, space_uid, space_parent_id
FROM spaces
JOIN space_descendants ON space_descendant_id = space_parent_id
)
`
// GetRootSpace returns a space where space_parent_id is NULL.
func (s *SpaceStore) GetRootSpace(ctx context.Context, spaceID int64) (*types.Space, error) {
	// walk the ancestor chain and keep only the entry without a parent
	query := spaceAncestorsQuery + `
	SELECT space_ancestor_id
	FROM space_ancestors
	WHERE space_ancestor_parent_id IS NULL`

	var rootID int64
	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.GetContext(ctx, &rootID, query, spaceID); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to get root space_id")
	}

	return s.Find(ctx, rootID)
}
// GetAllRootSpaces returns all spaces where space_parent_id is NULL.
func (s *SpaceStore) GetAllRootSpaces(ctx context.Context, opts *types.SpaceFilter) ([]*types.Space, error) {
	stmt := database.Builder.
		Select(spaceColumns).
		From("spaces").
		Where(squirrel.Expr("space_parent_id IS NULL"))
	stmt = s.applySortFilter(s.applyQueryFilter(stmt, opts), opts)

	sqlQuery, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert query to sql")
	}

	var dst []*space
	db := dbtx.GetAccessor(ctx, s.db)
	if err = db.SelectContext(ctx, &dst, sqlQuery, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing custom list query")
	}

	return s.mapToSpaces(ctx, s.db, dst)
}
// GetAncestorIDs returns a list of all space IDs along the recursive path to the root space.
func (s *SpaceStore) GetAncestorIDs(ctx context.Context, spaceID int64) ([]int64, error) {
	query := spaceAncestorsQuery + `
	SELECT space_ancestor_id FROM space_ancestors`

	var ancestorIDs []int64
	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.SelectContext(ctx, &ancestorIDs, query, spaceID); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to get space ancestors IDs")
	}

	return ancestorIDs, nil
}
// GetTreeLevel returns the level of a space in a space tree, counted as the
// number of spaces on the path from the space to the root (inclusive).
func (s *SpaceStore) GetTreeLevel(ctx context.Context, spaceID int64) (int64, error) {
	query := spaceAncestorsQuery + `
	SELECT COUNT(space_ancestor_id) FROM space_ancestors`

	db := dbtx.GetAccessor(ctx, s.db)

	var level int64
	if err := db.GetContext(ctx, &level, query, spaceID); err != nil {
		// error message corrected: was a copy-paste of the ancestor-IDs query message
		return 0, database.ProcessSQLErrorf(ctx, err, "failed to get space tree level")
	}

	return level, nil
}
// GetAncestors returns the space and all of its ancestors as full space objects.
func (s *SpaceStore) GetAncestors(
	ctx context.Context,
	spaceID int64,
) ([]*types.Space, error) {
	query := spaceAncestorsQuery + `
	SELECT ` + spaceColumns + `
	FROM spaces INNER JOIN space_ancestors ON space_id = space_ancestor_id`

	var dst []*space
	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.SelectContext(ctx, &dst, query, spaceID); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing get space ancestors query")
	}

	return s.mapToSpaces(ctx, s.db, dst)
}
// GetAncestorsData returns a list of space parent data for spaces that are ancestors of the space.
func (s *SpaceStore) GetAncestorsData(ctx context.Context, spaceID int64) ([]types.SpaceParentData, error) {
	query := spaceAncestorsQuery + `
	SELECT space_ancestor_id, space_ancestor_uid, space_ancestor_parent_id FROM space_ancestors`

	return s.readParentsData(ctx, query, spaceID)
}
// GetDescendantsData returns a list of space parent data for spaces that are descendants of the space.
func (s *SpaceStore) GetDescendantsData(ctx context.Context, spaceID int64) ([]types.SpaceParentData, error) {
	query := spaceDescendantsQuery + `
	SELECT space_descendant_id, space_descendant_uid, space_descendant_parent_id FROM space_descendants`

	return s.readParentsData(ctx, query, spaceID)
}
// GetDescendantsIDs returns a list of space ids for spaces that are descendants of the specified space.
func (s *SpaceStore) GetDescendantsIDs(ctx context.Context, spaceID int64) ([]int64, error) {
	db := dbtx.GetAccessor(ctx, s.db)
	return getSpaceDescendantsIDs(ctx, db, spaceID)
}
// getSpaceDescendantsIDs returns the ids of the given space and all of its
// descendants (the CTE is seeded with the space itself).
func getSpaceDescendantsIDs(ctx context.Context, db dbtx.Accessor, spaceID int64) ([]int64, error) {
	query := spaceDescendantsQuery + `
	SELECT space_descendant_id
	FROM space_descendants`

	var descendantIDs []int64
	if err := db.SelectContext(ctx, &descendantIDs, query, spaceID); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to retrieve spaces")
	}

	return descendantIDs, nil
}
// readParentsData executes the given query (expected to yield id, uid, and
// parent-id columns, parameterized by spaceID) and scans the rows into
// SpaceParentData values.
func (s *SpaceStore) readParentsData(
	ctx context.Context,
	query string,
	spaceID int64,
) ([]types.SpaceParentData, error) {
	db := dbtx.GetAccessor(ctx, s.db)

	rows, err := db.QueryContext(ctx, query, spaceID)
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to run space parent data query")
	}
	defer func() { _ = rows.Close() }()

	var entries []types.SpaceParentData
	for rows.Next() {
		var (
			id       int64
			uid      string
			parentID null.Int
		)
		if err := rows.Scan(&id, &uid, &parentID); err != nil {
			return nil, database.ProcessSQLErrorf(ctx, err, "failed to scan space parent data")
		}

		entries = append(entries, types.SpaceParentData{
			ID:         id,
			Identifier: uid,
			ParentID:   parentID.Int64, // zero for root spaces (NULL parent)
		})
	}

	if err := rows.Err(); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to read space parent data")
	}

	return entries, nil
}
// Create a new space.
// On success the generated space_id is written back into space.ID.
func (s *SpaceStore) Create(ctx context.Context, space *types.Space) error {
	if space == nil {
		return errors.New("space is nil")
	}

	// named-parameter insert; RETURNING feeds the generated id back to the caller
	const sqlQuery = `
	INSERT INTO spaces (
	space_version
	,space_parent_id
	,space_uid
	,space_description
	,space_created_by
	,space_created
	,space_updated
	,space_deleted
	) values (
	:space_version
	,:space_parent_id
	,:space_uid
	,:space_description
	,:space_created_by
	,:space_created
	,:space_updated
	,:space_deleted
	) RETURNING space_id`

	db := dbtx.GetAccessor(ctx, s.db)

	// mapToInternalSpace translates ParentID==0 to NULL for the foreign key
	query, args, err := db.BindNamed(sqlQuery, mapToInternalSpace(space))
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind space object")
	}

	if err = db.QueryRowContext(ctx, query, args...).Scan(&space.ID); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Insert query failed")
	}

	return nil
}
// Update updates the space details.
// It uses optimistic locking: the row is only updated when the stored version
// equals space.Version; otherwise gitness_store.ErrVersionConflict is
// returned. On success the passed space is mutated to carry the incremented
// version, the new updated timestamp, and the (possibly changed) path.
func (s *SpaceStore) Update(ctx context.Context, space *types.Space) error {
	if space == nil {
		return errors.New("space is nil")
	}

	const sqlQuery = `
	UPDATE spaces
	SET
	space_version		= :space_version
	,space_updated		= :space_updated
	,space_parent_id	= :space_parent_id
	,space_uid			= :space_uid
	,space_description	= :space_description
	,space_deleted		= :space_deleted
	WHERE space_id = :space_id AND space_version = :space_version - 1`

	dbSpace := mapToInternalSpace(space)

	// update Version (used for optimistic locking) and Updated time
	dbSpace.Version++
	dbSpace.Updated = time.Now().UnixMilli()

	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(sqlQuery, dbSpace)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind space object")
	}

	result, err := db.ExecContext(ctx, query, arg...)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Update query failed")
	}

	count, err := result.RowsAffected()
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to get number of updated rows")
	}

	// zero affected rows means the version check failed (concurrent update)
	if count == 0 {
		return gitness_store.ErrVersionConflict
	}

	space.Version = dbSpace.Version
	space.Updated = dbSpace.Updated

	// update path in case parent/identifier changed
	space.Path, err = getSpacePath(ctx, s.db, s.spacePathStore, space.ID)
	if err != nil {
		return err
	}

	return nil
}
// updateOptLock updates the space using the optimistic locking mechanism,
// reloading the space and retrying whenever a version conflict occurs.
func (s *SpaceStore) updateOptLock(
	ctx context.Context,
	space *types.Space,
	mutateFn func(space *types.Space) error,
) (*types.Space, error) {
	for {
		// mutate a copy so the caller's space stays untouched on failure
		working := *space
		if err := mutateFn(&working); err != nil {
			return nil, err
		}

		err := s.Update(ctx, &working)
		if err == nil {
			return &working, nil
		}
		if !errors.Is(err, gitness_store.ErrVersionConflict) {
			return nil, err
		}

		// somebody else updated the row in between - reload and retry
		space, err = s.find(ctx, space.ID, space.Deleted)
		if err != nil {
			return nil, err
		}
	}
}
// UpdateOptLock updates the space using the optimistic locking mechanism.
// It rejects spaces that are soft deleted.
func (s *SpaceStore) UpdateOptLock(
	ctx context.Context,
	space *types.Space,
	mutateFn func(space *types.Space) error,
) (*types.Space, error) {
	guarded := func(r *types.Space) error {
		// soft deleted spaces must not be updated through this entry point
		if space.Deleted != nil {
			return gitness_store.ErrResourceNotFound
		}
		return mutateFn(r)
	}
	return s.updateOptLock(ctx, space, guarded)
}
// updateDeletedOptLock updates a soft deleted space using the optimistic
// locking mechanism. It rejects spaces that are not soft deleted.
func (s *SpaceStore) updateDeletedOptLock(
	ctx context.Context,
	space *types.Space,
	mutateFn func(space *types.Space) error,
) (*types.Space, error) {
	guarded := func(r *types.Space) error {
		// only soft deleted spaces may be updated through this entry point
		if space.Deleted == nil {
			return gitness_store.ErrResourceNotFound
		}
		return mutateFn(r)
	}
	return s.updateOptLock(ctx, space, guarded)
}
// FindForUpdate finds the space and locks it for an update (should be called in a tx).
func (s *SpaceStore) FindForUpdate(ctx context.Context, id int64) (*types.Space, error) {
	// sqlite allows at most one write to proceed (no need to lock)
	if strings.HasPrefix(s.db.DriverName(), "sqlite") {
		return s.find(ctx, id, nil)
	}

	sqlQuery, params, err := database.Builder.
		Select("space_id").
		From("spaces").
		Where("space_id = ? AND space_deleted IS NULL", id).
		Suffix("FOR UPDATE").
		ToSql()
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to generate lock on spaces")
	}

	dst := new(space)
	db := dbtx.GetAccessor(ctx, s.db)
	if err = db.GetContext(ctx, dst, sqlQuery, params...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find space")
	}

	return mapToSpace(ctx, s.db, s.spacePathStore, dst)
}
// SoftDelete deletes a space softly by stamping it with the given deletion time.
func (s *SpaceStore) SoftDelete(
	ctx context.Context,
	space *types.Space,
	deletedAt int64,
) error {
	_, err := s.UpdateOptLock(ctx, space, func(s *types.Space) error {
		s.Deleted = &deletedAt
		return nil
	})
	return err
}
// Purge deletes a space permanently.
// A nil deletedAt targets a live space; a non-nil value targets a space
// soft deleted at exactly that timestamp.
func (s *SpaceStore) Purge(ctx context.Context, id int64, deletedAt *int64) error {
	stmt := database.Builder.
		Delete("spaces").
		Where("space_id = ?", id)

	if deletedAt == nil {
		stmt = stmt.Where("space_deleted IS NULL")
	} else {
		stmt = stmt.Where("space_deleted = ?", *deletedAt)
	}

	sqlQuery, args, err := stmt.ToSql()
	if err != nil {
		return fmt.Errorf("failed to convert purge space query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)
	if _, err = db.ExecContext(ctx, sqlQuery, args...); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "the delete query failed")
	}

	return nil
}
// Restore restores a soft deleted space, optionally moving it under a new
// parent and/or renaming it.
func (s *SpaceStore) Restore(
	ctx context.Context,
	space *types.Space,
	newIdentifier *string,
	newParentID *int64,
) (*types.Space, error) {
	restored, err := s.updateDeletedOptLock(ctx, space, func(s *types.Space) error {
		// clear the deletion marker and apply the optional overrides
		s.Deleted = nil
		if newParentID != nil {
			s.ParentID = *newParentID
		}
		if newIdentifier != nil {
			s.Identifier = *newIdentifier
		}
		return nil
	})
	if err != nil {
		return nil, err
	}

	return restored, nil
}
// Count the child spaces of a space, recursively when opts.Recursive is set.
func (s *SpaceStore) Count(ctx context.Context, id int64, opts *types.SpaceFilter) (int64, error) {
	if !opts.Recursive {
		return s.count(ctx, id, opts)
	}
	return s.countAll(ctx, id, opts)
}
// count returns the number of direct child spaces of the space,
// applying the filter options.
func (s *SpaceStore) count(
	ctx context.Context,
	id int64,
	opts *types.SpaceFilter,
) (int64, error) {
	stmt := database.Builder.
		Select("count(*)").
		From("spaces").
		Where("space_parent_id = ?", id)

	// applyQueryFilter already adds the PartialMatch on space_uid for a
	// non-empty opts.Query; the redundant explicit duplicate was removed.
	stmt = s.applyQueryFilter(stmt, opts)

	sql, args, err := stmt.ToSql()
	if err != nil {
		return 0, errors.Wrap(err, "Failed to convert query to sql")
	}

	db := dbtx.GetAccessor(ctx, s.db)

	var count int64
	if err = db.QueryRowContext(ctx, sql, args...).Scan(&count); err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "Failed executing count query")
	}

	return count, nil
}
// countAll returns the number of all descendant spaces of the space
// (excluding the space itself), applying the filter options.
func (s *SpaceStore) countAll(
	ctx context.Context,
	id int64,
	opts *types.SpaceFilter,
) (int64, error) {
	ctePrefix := `WITH RECURSIVE SpaceHierarchy AS (
	SELECT space_id, space_parent_id, space_deleted, space_uid
	FROM spaces
	WHERE space_id = ?
	UNION
	SELECT s.space_id, s.space_parent_id, s.space_deleted, s.space_uid
	FROM spaces s
	JOIN SpaceHierarchy h ON s.space_parent_id = h.space_id
	)`

	stmt := database.Builder.
		Select("COUNT(*)").
		Prefix(ctePrefix, id).
		From("SpaceHierarchy h1").
		Where("h1.space_id <> ?", id) // the CTE seed is the space itself - exclude it
	stmt = s.applyQueryFilter(stmt, opts)

	sqlQuery, args, err := stmt.ToSql()
	if err != nil {
		return 0, errors.Wrap(err, "Failed to convert query to sql")
	}

	var count int64
	db := dbtx.GetAccessor(ctx, s.db)
	if err = db.GetContext(ctx, &count, sqlQuery, args...); err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "failed to count sub spaces")
	}

	return count, nil
}
// List returns a list of spaces under the parent space, recursively when
// opts.Recursive is set.
func (s *SpaceStore) List(
	ctx context.Context,
	id int64,
	opts *types.SpaceFilter,
) ([]*types.Space, error) {
	if !opts.Recursive {
		return s.list(ctx, id, opts)
	}
	return s.listRecursive(ctx, id, opts)
}
// list returns the direct child spaces of the space, applying filter,
// sorting, and pagination options.
func (s *SpaceStore) list(
	ctx context.Context,
	id int64,
	opts *types.SpaceFilter,
) ([]*types.Space, error) {
	stmt := database.Builder.
		Select(spaceColumns).
		From("spaces").
		// bind the parent id as an integer, consistent with count/countAll;
		// previously it was bound as a string via fmt.Sprint.
		Where("space_parent_id = ?", id)

	stmt = s.applyQueryFilter(stmt, opts)
	stmt = s.applySortFilter(stmt, opts)

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert query to sql")
	}

	db := dbtx.GetAccessor(ctx, s.db)

	var dst []*space
	if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing custom list query")
	}

	return s.mapToSpaces(ctx, s.db, dst)
}
// listRecursive returns all descendant spaces of the space (excluding the
// space itself), applying filter, sorting, and pagination options.
func (s *SpaceStore) listRecursive(
	ctx context.Context,
	id int64,
	opts *types.SpaceFilter,
) ([]*types.Space, error) {
	ctePrefix := `WITH RECURSIVE SpaceHierarchy AS (
	SELECT *
	FROM spaces
	WHERE space_id = ?
	UNION
	SELECT s.*
	FROM spaces s
	JOIN SpaceHierarchy h ON s.space_parent_id = h.space_id
	)`

	stmt := database.Builder.
		Select(spaceColumns).
		Prefix(ctePrefix, id).
		From("SpaceHierarchy h1").
		Where("h1.space_id <> ?", id) // the CTE seed is the space itself - exclude it
	stmt = s.applySortFilter(s.applyQueryFilter(stmt, opts), opts)

	sqlQuery, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert query to sql")
	}

	var dst []*space
	db := dbtx.GetAccessor(ctx, s.db)
	if err = db.SelectContext(ctx, &dst, sqlQuery, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing custom list query")
	}

	return s.mapToSpaces(ctx, s.db, dst)
}
// applyQueryFilter narrows stmt by the identifier query and soft-delete filters.
func (s *SpaceStore) applyQueryFilter(
	stmt squirrel.SelectBuilder,
	opts *types.SpaceFilter,
) squirrel.SelectBuilder {
	if opts.Query != "" {
		stmt = stmt.Where(PartialMatch("space_uid", opts.Query))
	}

	//nolint:gocritic
	switch {
	case opts.DeletedAt != nil:
		// exact deletion timestamp match
		stmt = stmt.Where("space_deleted = ?", opts.DeletedAt)
	case opts.DeletedBeforeOrAt != nil:
		// anything deleted up to (and including) the given time
		stmt = stmt.Where("space_deleted <= ?", opts.DeletedBeforeOrAt)
	default:
		// live spaces only
		stmt = stmt.Where("space_deleted IS NULL")
	}

	return stmt
}
// getPathForDeletedSpace reconstructs the full path of a (soft deleted) space
// by walking up the parent chain, since deleted spaces have no stored path.
func getPathForDeletedSpace(
	ctx context.Context,
	sqlxdb *sqlx.DB,
	id int64,
) (string, error) {
	sqlQuery := spaceSelectBase + `
	where space_id = $1`

	db := dbtx.GetAccessor(ctx, sqlxdb)
	dst := new(space)

	spacePath := ""
	for next := null.IntFrom(id); next.Valid; next = dst.ParentID {
		if err := db.GetContext(ctx, dst, sqlQuery, next.Int64); err != nil {
			return "", fmt.Errorf("failed to find the space %d: %w", id, err)
		}
		// prepend each ancestor's identifier as we walk towards the root
		spacePath = paths.Concatenate(dst.Identifier, spacePath)
	}

	return spacePath, nil
}
// applySortFilter applies pagination and ordering to the statement.
func (s *SpaceStore) applySortFilter(
	stmt squirrel.SelectBuilder,
	opts *types.SpaceFilter,
) squirrel.SelectBuilder {
	stmt = stmt.
		Limit(database.Limit(opts.Size)).
		Offset(database.Offset(opts.Page, opts.Size))

	order := opts.Order.String()
	switch opts.Sort {
	case enum.SpaceAttrUID, enum.SpaceAttrIdentifier, enum.SpaceAttrNone:
		// NOTE: string concatenation is safe because the
		// order attribute is an enum and is not user-defined,
		// and is therefore not subject to injection attacks.
		stmt = stmt.OrderBy("space_uid " + order)
		//TODO: Postgres does not support COLLATE NOCASE for UTF8
		// stmt = stmt.OrderBy("space_uid COLLATE NOCASE " + opts.Order.String())
	case enum.SpaceAttrCreated:
		stmt = stmt.OrderBy("space_created " + order)
	case enum.SpaceAttrUpdated:
		stmt = stmt.OrderBy("space_updated " + order)
	case enum.SpaceAttrDeleted:
		stmt = stmt.OrderBy("space_deleted " + order)
	}

	return stmt
}
// mapToSpace converts a DB space row to the API type and backfills its path.
func mapToSpace(
	ctx context.Context,
	sqlxdb *sqlx.DB,
	spacePathStore store.SpacePathStore,
	in *space,
) (*types.Space, error) {
	res := &types.Space{
		ID:          in.ID,
		Version:     in.Version,
		Identifier:  in.Identifier,
		Description: in.Description,
		Created:     in.Created,
		CreatedBy:   in.CreatedBy,
		Updated:     in.Updated,
		Deleted:     in.Deleted.Ptr(),
	}

	// Only overwrite ParentID if it's not a root space
	if in.ParentID.Valid {
		res.ParentID = in.ParentID.Int64
	}

	// backfill path
	spacePath, err := getSpacePath(ctx, sqlxdb, spacePathStore, in.ID)
	if err != nil {
		return nil, fmt.Errorf("failed to get primary path for space %d: %w", in.ID, err)
	}
	res.Path = spacePath

	return res, nil
}
// getSpacePath returns the primary path of the space. For soft deleted spaces
// (whose paths have been removed) the path is reconstructed from the parent
// chain instead.
func getSpacePath(
	ctx context.Context,
	sqlxdb *sqlx.DB,
	spacePathStore store.SpacePathStore,
	spaceID int64,
) (string, error) {
	spacePath, err := spacePathStore.FindPrimaryBySpaceID(ctx, spaceID)
	switch {
	case errors.Is(err, gitness_store.ErrResourceNotFound):
		// delete space will delete paths; generate the path if space is soft deleted.
		return getPathForDeletedSpace(ctx, sqlxdb, spaceID)
	case err != nil:
		return "", fmt.Errorf("failed to get primary path for space %d: %w", spaceID, err)
	default:
		return spacePath.Value, nil
	}
}
// mapToSpaces converts a slice of DB space rows to API types.
func (s *SpaceStore) mapToSpaces(
	ctx context.Context,
	sqlxdb *sqlx.DB,
	spaces []*space,
) ([]*types.Space, error) {
	res := make([]*types.Space, len(spaces))
	for i, dbSpace := range spaces {
		mapped, err := mapToSpace(ctx, sqlxdb, s.spacePathStore, dbSpace)
		if err != nil {
			return nil, err
		}
		res[i] = mapped
	}
	return res, nil
}
// mapToInternalSpace converts the API space type to its DB representation.
func mapToInternalSpace(s *types.Space) *space {
	res := new(space)
	res.ID = s.ID
	res.Version = s.Version
	res.Identifier = s.Identifier
	res.Description = s.Description
	res.Created = s.Created
	res.CreatedBy = s.CreatedBy
	res.Updated = s.Updated
	res.Deleted = null.IntFromPtr(s.Deleted)

	// Only overwrite ParentID if it's not a root space
	// IMPORTANT: s.ParentID==0 has to be translated to nil as otherwise the foreign key fails
	if s.ParentID > 0 {
		res.ParentID = null.IntFrom(s.ParentID)
	}

	return res
}
// buildRecursiveSelectQueryUsingPath builds the recursive select query using path among active or soft deleted spaces.
func buildRecursiveSelectQueryUsingPath(segments []string, deletedAt int64) squirrel.SelectBuilder {
	leafIdx := len(segments) - 1
	leaf := "s" + strconv.Itoa(leafIdx)

	// add the current space (leaf)
	stmt := database.Builder.
		Select(leaf+".space_id").
		From("spaces "+leaf).
		Where(leaf+".space_uid = ? AND "+leaf+".space_deleted = ?", segments[leafIdx], deletedAt)

	// join every ancestor segment onto its child, walking towards the root
	for i := leafIdx - 1; i >= 0; i-- {
		parentAlias := "s" + strconv.Itoa(i)
		childAlias := "s" + strconv.Itoa(i+1)
		stmt = stmt.InnerJoin(fmt.Sprintf("spaces %s ON %s.space_id = %s.space_parent_id", parentAlias, parentAlias,
			childAlias)).
			Where(parentAlias+".space_uid = ?", segments[i])
	}

	// add parent check for root
	stmt = stmt.Where("s0.space_parent_id IS NULL")

	return stmt
}
// buildRecursiveSelectQueryUsingCaseInsensitivePath builds the recursive select query using path among active or soft
// deleted spaces, matching every path segment case-insensitively.
func buildRecursiveSelectQueryUsingCaseInsensitivePath(segments []string) squirrel.SelectBuilder {
	leaf := "s" + strconv.Itoa(len(segments)-1)

	// add the current space (leaf)
	stmt := database.Builder.
		Select(leaf+".space_id").
		From("spaces "+leaf).
		Where("LOWER("+leaf+".space_uid) = LOWER(?)", segments[len(segments)-1])

	for i := len(segments) - 2; i >= 0; i-- {
		parentAlias := "s" + strconv.Itoa(i)
		alias := "s" + strconv.Itoa(i+1)

		stmt = stmt.InnerJoin(fmt.Sprintf("spaces %s ON %s.space_id = %s.space_parent_id", parentAlias, parentAlias,
			alias)).
			// BUGFIX: parent segments were previously compared case-sensitively,
			// contradicting this function's case-insensitive contract (the leaf
			// segment already used LOWER on both sides).
			Where("LOWER("+parentAlias+".space_uid) = LOWER(?)", segments[i])
	}

	// add parent check for root
	stmt = stmt.Where("s0.space_parent_id IS NULL")

	return stmt
}
// GetRootSpacesSize returns, for every root space, the aggregated repository
// and LFS storage size across the root space and all of its descendants.
// Soft deleted repositories are excluded from the totals.
func (s *SpaceStore) GetRootSpacesSize(ctx context.Context) ([]types.SpaceStorage, error) {
	// BUGFIX: the repo soft-delete filter must live in the LEFT JOIN condition.
	// When it was in the WHERE clause, root spaces whose repositories are all
	// soft deleted were dropped entirely instead of being reported with zero
	// sizes (spaces with no repositories at all were unaffected, since their
	// NULL-extended rows satisfied "repo_deleted IS NULL").
	const query = `
	WITH RECURSIVE SpaceHierarchy AS (
	SELECT space_id, space_id AS root_space_id, space_uid AS root_space_uid
	FROM spaces
	WHERE space_parent_id IS NULL
	UNION ALL
	SELECT s.space_id, sh.root_space_id, sh.root_space_uid
	FROM spaces s
	JOIN SpaceHierarchy sh ON s.space_parent_id = sh.space_id
	)
	SELECT
	sh.root_space_id,
	sh.root_space_uid,
	COALESCE(SUM(r.repo_size), 0) AS total_repository_size,
	COALESCE(SUM(r.repo_lfs_size), 0) AS total_lfs_size
	FROM SpaceHierarchy sh
	LEFT JOIN repositories r ON r.repo_parent_id = sh.space_id AND r.repo_deleted IS NULL
	GROUP BY sh.root_space_id, sh.root_space_uid
	`

	db := dbtx.GetAccessor(ctx, s.db)

	rows, err := db.QueryContext(ctx, query)
	if err != nil {
		return nil, fmt.Errorf("failed to get root spaces storage size: %w", err)
	}
	defer func() { _ = rows.Close() }()

	spaces := make([]types.SpaceStorage, 0, 32)
	for rows.Next() {
		var space types.SpaceStorage
		if err := rows.Scan(
			&space.ID,
			&space.Identifier,
			&space.Size,
			&space.LFSSize,
		); err != nil {
			return nil, fmt.Errorf("failed to scan root space storage: %w", err)
		}
		spaces = append(spaces, space)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("failed to scan rows for root spaces storage size: %w", err)
	}

	return spaces, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/principal_service_account.go | app/store/database/principal_service_account.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"fmt"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/Masterminds/squirrel"
"github.com/rs/zerolog/log"
)
// serviceAccount is a DB representation of a service account principal.
// It is required to allow storing transformed UIDs used for uniqueness constraints and searching.
type serviceAccount struct {
	types.ServiceAccount
	// UIDUnique holds the transformed (normalized) UID used for lookups
	// and the uniqueness constraint.
	UIDUnique string `db:"principal_uid_unique"`
}
// serviceAccountColumns extends the common principal columns with the
// service-account-specific parent reference columns.
const serviceAccountColumns = principalCommonColumns + `
,principal_sa_parent_type
,principal_sa_parent_id`

// serviceAccountSelectBase is the shared SELECT prefix for service account queries.
const serviceAccountSelectBase = `
SELECT` + serviceAccountColumns + `
FROM principals`
// FindServiceAccount finds the service account by id.
func (s *PrincipalStore) FindServiceAccount(ctx context.Context, id int64) (*types.ServiceAccount, error) {
	const sqlQuery = serviceAccountSelectBase + `
	WHERE principal_type = 'serviceaccount' AND principal_id = $1`

	dst := new(serviceAccount)
	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.GetContext(ctx, dst, sqlQuery, id); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Select by id query failed")
	}

	return s.mapDBServiceAccount(dst), nil
}
// FindServiceAccountByUID finds the service account by uid.
func (s *PrincipalStore) FindServiceAccountByUID(ctx context.Context, uid string) (*types.ServiceAccount, error) {
	const sqlQuery = serviceAccountSelectBase + `
	WHERE principal_type = 'serviceaccount' AND principal_uid_unique = $1`

	// map the UID to unique UID before searching!
	uidUnique, err := s.uidTransformation(uid)
	if err != nil {
		// in case we fail to transform, return a not found (as it can't exist in the first place)
		log.Ctx(ctx).Debug().Msgf("failed to transform uid '%s': %s", uid, err.Error())
		return nil, gitness_store.ErrResourceNotFound
	}

	dst := new(serviceAccount)
	db := dbtx.GetAccessor(ctx, s.db)
	if err = db.GetContext(ctx, dst, sqlQuery, uidUnique); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Select by uid query failed")
	}

	return s.mapDBServiceAccount(dst), nil
}
// FindManyServiceAccountByUID finds all service accounts matching the given
// uids (each transformed to its unique form before lookup).
func (s *PrincipalStore) FindManyServiceAccountByUID(
	ctx context.Context,
	uids []string,
) ([]*types.ServiceAccount, error) {
	uniqueUIDs := make([]string, len(uids))
	for i, uid := range uids {
		transformed, err := s.uidTransformation(uid)
		if err != nil {
			// a uid that can't be transformed can't exist - report not found
			log.Ctx(ctx).Debug().Msgf("failed to transform uid '%s': %s", uid, err.Error())
			return nil, gitness_store.ErrResourceNotFound
		}
		uniqueUIDs[i] = transformed
	}

	sqlQuery, params, err := database.Builder.
		Select(serviceAccountColumns).
		From("principals").
		Where("principal_type = ?", enum.PrincipalTypeServiceAccount).
		Where(squirrel.Eq{"principal_uid_unique": uniqueUIDs}).
		ToSql()
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to generate find many service accounts query")
	}

	dst := []*serviceAccount{}
	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.SelectContext(ctx, &dst, sqlQuery, params...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "find many service accounts failed")
	}

	return s.mapDBServiceAccounts(dst), nil
}
// CreateServiceAccount saves the service account.
// On success the generated principal_id is written back into sa.ID.
func (s *PrincipalStore) CreateServiceAccount(ctx context.Context, sa *types.ServiceAccount) error {
	// named-parameter insert; type is fixed to 'serviceaccount' and admin is
	// hardcoded to false - service accounts can never be admins.
	const sqlQuery = `
	INSERT INTO principals (
	principal_type
	,principal_uid
	,principal_uid_unique
	,principal_email
	,principal_display_name
	,principal_admin
	,principal_blocked
	,principal_salt
	,principal_created
	,principal_updated
	,principal_sa_parent_type
	,principal_sa_parent_id
	) values (
	'serviceaccount'
	,:principal_uid
	,:principal_uid_unique
	,:principal_email
	,:principal_display_name
	,false
	,:principal_blocked
	,:principal_salt
	,:principal_created
	,:principal_updated
	,:principal_sa_parent_type
	,:principal_sa_parent_id
	) RETURNING principal_id`

	// mapping computes the transformed unique UID used for the uniqueness constraint
	dbSA, err := s.mapToDBserviceAccount(sa)
	if err != nil {
		return fmt.Errorf("failed to map db service account: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(sqlQuery, dbSA)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind service account object")
	}

	if err = db.QueryRowContext(ctx, query, arg...).Scan(&sa.ID); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Insert query failed")
	}

	return nil
}
// UpdateServiceAccount updates the service account details.
// The unique UID is recomputed from sa.UID before the update; only rows with
// principal_type 'serviceaccount' and a matching principal_id are touched.
func (s *PrincipalStore) UpdateServiceAccount(ctx context.Context, sa *types.ServiceAccount) error {
	const sqlQuery = `
	UPDATE principals
	SET
		principal_uid = :principal_uid
		,principal_uid_unique = :principal_uid_unique
		,principal_email = :principal_email
		,principal_display_name = :principal_display_name
		,principal_blocked = :principal_blocked
		,principal_salt = :principal_salt
		,principal_updated = :principal_updated
	WHERE principal_type = 'serviceaccount' AND principal_id = :principal_id`

	dbSA, err := s.mapToDBserviceAccount(sa)
	if err != nil {
		return fmt.Errorf("failed to map db service account: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(sqlQuery, dbSA)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind service account object")
	}

	if _, err = db.ExecContext(ctx, query, arg...); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Update query failed")
	}

	// err is guaranteed nil here - return nil explicitly (the original
	// `return err` relied on that invariant and read as a possible bug).
	return nil
}
// DeleteServiceAccount deletes the service account with the given id.
// Deleting a non-existing id is not an error (the DELETE simply affects no rows).
func (s *PrincipalStore) DeleteServiceAccount(ctx context.Context, id int64) error {
	const sqlQuery = `
	DELETE FROM principals
	WHERE principal_type = 'serviceaccount' AND principal_id = $1`

	db := dbtx.GetAccessor(ctx, s.db)

	_, err := db.ExecContext(ctx, sqlQuery, id)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "The delete query failed")
	}

	return nil
}
// ListServiceAccounts returns a list of service accounts for a specific parent.
// Pagination (opts.Page/opts.Size) is always applied; opts.Query, when set,
// partially matches against the display name.
func (s *PrincipalStore) ListServiceAccounts(
	ctx context.Context,
	parentInfos []*types.ServiceAccountParentInfo,
	opts *types.PrincipalFilter,
) ([]*types.ServiceAccount, error) {
	stmt := database.Builder.
		Select(serviceAccountColumns).
		From("principals").
		Where("principal_type = ?", enum.PrincipalTypeServiceAccount)

	// restrict to the requested repo/space parents.
	stmt, err := selectServiceAccountParents(parentInfos, stmt)
	if err != nil {
		return nil, fmt.Errorf("failed to select service account parents: %w", err)
	}

	stmt = stmt.Limit(database.Limit(opts.Size))
	stmt = stmt.Offset(database.Offset(opts.Page, opts.Size))

	if opts.Query != "" {
		stmt = stmt.Where(PartialMatch("principal_display_name", opts.Query))
	}

	sqlQuery, params, err := stmt.ToSql()
	if err != nil {
		return nil, database.ProcessSQLErrorf(
			ctx, err, "failed to generate list service accounts query",
		)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	dst := []*serviceAccount{}
	if err := db.SelectContext(ctx, &dst, sqlQuery, params...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing default list query")
	}

	return s.mapDBServiceAccounts(dst), nil
}
// CountServiceAccounts returns a count of service accounts for a specific parent.
// Applies the same opts.Query display-name filter as ListServiceAccounts so the
// count matches a (non-paginated) listing.
func (s *PrincipalStore) CountServiceAccounts(
	ctx context.Context,
	parentInfos []*types.ServiceAccountParentInfo,
	opts *types.PrincipalFilter,
) (int64, error) {
	stmt := database.Builder.
		Select("count(*)").
		From("principals").
		Where("principal_type = ?", enum.PrincipalTypeServiceAccount)

	if opts.Query != "" {
		stmt = stmt.Where(PartialMatch("principal_display_name", opts.Query))
	}

	// restrict to the requested repo/space parents.
	stmt, err := selectServiceAccountParents(parentInfos, stmt)
	if err != nil {
		return 0, fmt.Errorf("failed to select service account parents: %w", err)
	}

	sqlQuery, params, err := stmt.ToSql()
	if err != nil {
		return 0, database.ProcessSQLErrorf(
			ctx, err, "failed to generate count service accounts query",
		)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	var count int64
	if err = db.QueryRowContext(ctx, sqlQuery, params...).Scan(&count); err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "Failed executing count query")
	}

	return count, nil
}
// mapDBServiceAccount converts a DB-layer service account to the API type.
// The DB type embeds types.ServiceAccount, so this is a plain field reference.
func (s *PrincipalStore) mapDBServiceAccount(dbSA *serviceAccount) *types.ServiceAccount {
	res := &dbSA.ServiceAccount
	return res
}
// mapDBServiceAccounts converts a slice of DB-layer service accounts to the API type.
func (s *PrincipalStore) mapDBServiceAccounts(dbSAs []*serviceAccount) []*types.ServiceAccount {
	res := make([]*types.ServiceAccount, 0, len(dbSAs))
	for _, dbSA := range dbSAs {
		res = append(res, s.mapDBServiceAccount(dbSA))
	}
	return res
}
// mapToDBserviceAccount converts an API service account to the DB-layer type,
// computing the unique UID from sa.UID in the process.
func (s *PrincipalStore) mapToDBserviceAccount(sa *types.ServiceAccount) (*serviceAccount, error) {
	// service account comes from outside.
	if sa == nil {
		return nil, fmt.Errorf("service account is nil")
	}

	uidUnique, err := s.uidTransformation(sa.UID)
	if err != nil {
		return nil, fmt.Errorf("failed to transform service account UID: %w", err)
	}

	return &serviceAccount{
		ServiceAccount: *sa,
		UIDUnique:      uidUnique,
	}, nil
}
// selectServiceAccountParents narrows stmt to service accounts owned by any of
// the given parents (repos or spaces), OR-ing one equality pair per parent.
// An unsupported parent type yields an error.
func selectServiceAccountParents(
	parents []*types.ServiceAccountParentInfo,
	stmt squirrel.SelectBuilder,
) (squirrel.SelectBuilder, error) {
	var parentFilter squirrel.Or
	for _, parent := range parents {
		switch parent.Type {
		case enum.ParentResourceTypeRepo, enum.ParentResourceTypeSpace:
			// both supported types store the parent type + id the same way.
			parentFilter = append(parentFilter, squirrel.Eq{
				"principal_sa_parent_type": parent.Type,
				"principal_sa_parent_id":   parent.ID,
			})
		default:
			return squirrel.SelectBuilder{}, fmt.Errorf("service account parent type '%s' is not supported", parent.Type)
		}
	}
	return stmt.Where(parentFilter), nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/favorite.go | app/store/database/favorite.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/Masterminds/squirrel"
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
)
var _ store.FavoriteStore = (*FavoriteStore)(nil)

// NewFavoriteStore returns a new FavoriteStore.
func NewFavoriteStore(db *sqlx.DB) *FavoriteStore {
	return &FavoriteStore{
		db: db,
	}
}
// FavoriteStore implements a store backed by a relational database.
type FavoriteStore struct {
	db *sqlx.DB
}

// favorite is the DB representation of a single favorite row.
// The same column layout is shared by all per-resource favorite tables
// (see getTableAndColumnName).
type favorite struct {
	ResourceID  int64 `db:"favorite_resource_id"`
	PrincipalID int64 `db:"favorite_principal_id"`
	Created     int64 `db:"favorite_created"`
}
// Create marks the resource as favorite.
// The target table and resource column are resolved from in.Type; the creation
// timestamp is set to now (unix millis).
func (s *FavoriteStore) Create(ctx context.Context, principalID int64, in *types.FavoriteResource) error {
	tableName, resourceColumnName, err := getTableAndColumnName(in.Type)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "failed to fetch table and column name for favorite resource")
	}

	// splice the resolved table/column into the shared insert template.
	favoriteResourceInsert := fmt.Sprintf(favoriteInsert, tableName, resourceColumnName)

	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(favoriteResourceInsert, favorite{
		ResourceID:  in.ID,
		PrincipalID: principalID,
		Created:     time.Now().UnixMilli(),
	})
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "failed to bind favorite object")
	}

	if _, err := db.ExecContext(ctx, query, arg...); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "failed to insert in %s", tableName)
	}

	return nil
}
// Map returns a map for the given resourceIDs and checks if the entity has been marked favorite or not.
// Only IDs that are favorited appear in the result (with value true); callers should
// treat missing keys as "not favorited".
func (s *FavoriteStore) Map(
	ctx context.Context,
	principalID int64,
	resourceType enum.ResourceType,
	resourceIDs []int64,
) (map[int64]bool, error) {
	tableName, resourceColumnName, err := getTableAndColumnName(resourceType)
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to resolve table/column for resourceType %v", resourceType)
	}

	stmt := database.Builder.
		Select(resourceColumnName).
		From(tableName).
		Where("favorite_principal_id = ?", principalID)

	switch s.db.DriverName() {
	case PostgresDriverName:
		// postgres: a single array parameter with ANY avoids a huge IN list.
		query := fmt.Sprintf("%s = ANY(?)", resourceColumnName)
		stmt = stmt.Where(query, pq.Array(resourceIDs))
	default:
		// sqlite and any other driver: expand into an IN (...) clause.
		// NOTE: previously an unrecognized driver fell through both cases and
		// silently dropped the ID filter, returning every favorite of the
		// principal - the default branch closes that hole.
		stmt = stmt.Where(squirrel.Eq{resourceColumnName: resourceIDs})
	}

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to convert query to sql")
	}

	db := dbtx.GetAccessor(ctx, s.db)

	var foundIDs []int64
	if err := db.SelectContext(ctx, &foundIDs, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(
			ctx, err, "failed to fetch %s favorites for principal %d", resourceType, principalID)
	}

	result := make(map[int64]bool, len(resourceIDs))
	for _, id := range foundIDs {
		result[id] = true
	}

	return result, nil
}
// Delete unfavorites the resource.
// Deleting a favorite that doesn't exist is not an error (no rows affected).
func (s *FavoriteStore) Delete(ctx context.Context, principalID int64, in *types.FavoriteResource) error {
	tableName, resourceColumnName, err := getTableAndColumnName(in.Type)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "failed to fetch table and column name for favorite resource")
	}

	// build the delete against the resolved per-resource table.
	favoriteResourceDelete := fmt.Sprintf(
		`DELETE FROM %s WHERE %s = $1 AND favorite_principal_id = $2`,
		tableName,
		resourceColumnName,
	)

	db := dbtx.GetAccessor(ctx, s.db)

	if _, err := db.ExecContext(ctx, favoriteResourceDelete, in.ID, principalID); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "delete query failed for %s", tableName)
	}

	return nil
}
// getTableAndColumnName resolves the favorites table and its resource-ID column
// for the given resource type. Types not onboarded to favorites yield an error.
func getTableAndColumnName(resourceType enum.ResourceType) (string, string, error) {
	if resourceType == enum.ResourceTypeRepo {
		return "favorite_repos", "favorite_repo_id", nil
	}
	return "", "", fmt.Errorf("resource type %v not onboarded to favorites", resourceType)
}
// favoriteInsert is the shared insert template for all favorite tables.
// The two %s verbs are the table name and the resource-ID column name
// (resolved via getTableAndColumnName); the named parameters are bound
// from the favorite struct.
const favoriteInsert = `
INSERT INTO %s (
%s,
favorite_principal_id,
favorite_created
) VALUES (
:favorite_resource_id,
:favorite_principal_id,
:favorite_created
)
`
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/repo.go | app/store/database/repo.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"encoding/json"
"fmt"
"strings"
"time"
"github.com/harness/gitness/app/paths"
"github.com/harness/gitness/app/store"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/Masterminds/squirrel"
"github.com/guregu/null"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
)
// compile-time check that RepoStore satisfies store.RepoStore.
var _ store.RepoStore = (*RepoStore)(nil)

// NewRepoStore returns a new RepoStore.
func NewRepoStore(
	db *sqlx.DB,
	spacePathCache store.SpacePathCache,
	spacePathStore store.SpacePathStore,
	spaceStore store.SpaceStore,
) *RepoStore {
	return &RepoStore{
		db:             db,
		spacePathCache: spacePathCache,
		spacePathStore: spacePathStore,
		spaceStore:     spaceStore,
	}
}
// RepoStore implements a store.RepoStore backed by a relational database.
type RepoStore struct {
	db             *sqlx.DB
	spacePathCache store.SpacePathCache
	spacePathStore store.SpacePathStore
	spaceStore     store.SpaceStore
}

// repository is the DB representation of a repository row.
type repository struct {
	// TODO: int64 ID doesn't match DB
	ID          int64    `db:"repo_id"`
	Version     int64    `db:"repo_version"`
	ParentID    int64    `db:"repo_parent_id"`
	Identifier  string   `db:"repo_uid"`
	Description string   `db:"repo_description"`
	CreatedBy   int64    `db:"repo_created_by"`
	Created     int64    `db:"repo_created"`
	Updated     int64    `db:"repo_updated"`
	// Deleted is NULL for active repos; a soft-deleted repo carries its deletion timestamp.
	Deleted     null.Int `db:"repo_deleted"`
	LastGITPush int64    `db:"repo_last_git_push"`

	Size        int64 `db:"repo_size"`
	SizeLFS     int64 `db:"repo_lfs_size"`
	SizeUpdated int64 `db:"repo_size_updated"`

	GitUID        string `db:"repo_git_uid"`
	DefaultBranch string `db:"repo_default_branch"`
	ForkID        int64  `db:"repo_fork_id"`
	PullReqSeq    int64  `db:"repo_pullreq_seq"`

	NumForks       int `db:"repo_num_forks"`
	NumPulls       int `db:"repo_num_pulls"`
	NumClosedPulls int `db:"repo_num_closed_pulls"`
	NumOpenPulls   int `db:"repo_num_open_pulls"`
	NumMergedPulls int `db:"repo_num_merged_pulls"`

	State   enum.RepoState `db:"repo_state"`
	IsEmpty bool           `db:"repo_is_empty"`

	// default sqlite '[]' requires []byte, fails with json.RawMessage
	Tags []byte      `db:"repo_tags"`
	Type null.String `db:"repo_type"`
}
const (
	// repoColumnsForJoin lists every repository column selected by the
	// store's queries; keep in sync with the repository struct tags.
	repoColumnsForJoin = `
		repo_id
		,repo_version
		,repo_parent_id
		,repo_uid
		,repo_description
		,repo_created_by
		,repo_created
		,repo_updated
		,repo_deleted
		,repo_last_git_push
		,repo_size
		,repo_lfs_size
		,repo_size_updated
		,repo_git_uid
		,repo_default_branch
		,repo_pullreq_seq
		,repo_fork_id
		,repo_num_forks
		,repo_num_pulls
		,repo_num_closed_pulls
		,repo_num_open_pulls
		,repo_num_merged_pulls
		,repo_state
		,repo_is_empty
		,repo_tags
		,repo_type`
)
// Find finds the active (non-deleted) repo by id.
// Delegates to FindDeleted with a nil deletion timestamp.
func (s *RepoStore) Find(ctx context.Context, id int64) (*types.Repository, error) {
	return s.FindDeleted(ctx, id, nil)
}
// FindDeleted finds a repo by id and deleted timestamp.
// A nil deletedAt matches only active repos (repo_deleted IS NULL);
// otherwise the repo must have been deleted at exactly that timestamp.
func (s *RepoStore) FindDeleted(ctx context.Context, id int64, deletedAt *int64) (*types.Repository, error) {
	stmt := database.Builder.
		Select(repoColumnsForJoin).
		From("repositories").
		Where("repo_id = ?", id)

	if deletedAt != nil {
		stmt = stmt.Where("repo_deleted = ?", *deletedAt)
	} else {
		stmt = stmt.Where("repo_deleted IS NULL")
	}

	db := dbtx.GetAccessor(ctx, s.db)

	dst := new(repository)
	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert query to sql")
	}

	if err = db.GetContext(ctx, dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find repo")
	}

	return s.mapToRepo(ctx, dst)
}
// FindActiveByUID finds the active (non-deleted) repo by UID.
// The UID comparison is case-insensitive (LOWER on both sides).
func (s *RepoStore) FindActiveByUID(
	ctx context.Context,
	parentID int64,
	uid string,
) (*types.Repository, error) {
	stmt := database.Builder.
		Select(repoColumnsForJoin).
		From("repositories").
		Where("repo_parent_id = ?", parentID).
		Where("LOWER(repo_uid) = LOWER(?)", uid).
		Where("repo_deleted IS NULL")

	db := dbtx.GetAccessor(ctx, s.db)

	dst := new(repository)
	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert query to sql")
	}

	if err = db.GetContext(ctx, dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find repo by UID")
	}

	return s.mapToRepo(ctx, dst)
}
// FindDeletedByUID finds the soft-deleted repo by UID and exact deletion timestamp.
// The UID comparison is case-insensitive (LOWER on both sides).
func (s *RepoStore) FindDeletedByUID(
	ctx context.Context,
	parentID int64,
	uid string,
	deletedAt int64,
) (*types.Repository, error) {
	stmt := database.Builder.
		Select(repoColumnsForJoin).
		From("repositories").
		Where("repo_parent_id = ?", parentID).
		Where("LOWER(repo_uid) = LOWER(?)", uid).
		Where("repo_deleted = ?", deletedAt)

	db := dbtx.GetAccessor(ctx, s.db)

	dst := new(repository)
	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert query to sql")
	}

	if err = db.GetContext(ctx, dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find repo by UID")
	}

	return s.mapToRepo(ctx, dst)
}
// Create creates a new repository.
// On success the generated repo_id is written back into repo.ID and repo.Path
// is resolved from the parent space path and the repo identifier.
func (s *RepoStore) Create(ctx context.Context, repo *types.Repository) error {
	const sqlQuery = `
	INSERT INTO repositories (
		repo_version
		,repo_parent_id
		,repo_uid
		,repo_description
		,repo_created_by
		,repo_created
		,repo_updated
		,repo_deleted
		,repo_last_git_push
		,repo_size
		,repo_lfs_size
		,repo_size_updated
		,repo_git_uid
		,repo_default_branch
		,repo_fork_id
		,repo_pullreq_seq
		,repo_num_forks
		,repo_num_pulls
		,repo_num_closed_pulls
		,repo_num_open_pulls
		,repo_num_merged_pulls
		,repo_state
		,repo_is_empty
		,repo_tags
		,repo_type
	) values (
		:repo_version
		,:repo_parent_id
		,:repo_uid
		,:repo_description
		,:repo_created_by
		,:repo_created
		,:repo_updated
		,:repo_deleted
		,:repo_last_git_push
		,:repo_size
		,:repo_lfs_size
		,:repo_size_updated
		,:repo_git_uid
		,:repo_default_branch
		,:repo_fork_id
		,:repo_pullreq_seq
		,:repo_num_forks
		,:repo_num_pulls
		,:repo_num_closed_pulls
		,:repo_num_open_pulls
		,:repo_num_merged_pulls
		,:repo_state
		,:repo_is_empty
		,:repo_tags
		,:repo_type
	) RETURNING repo_id`

	db := dbtx.GetAccessor(ctx, s.db)

	// insert repo first so we get id
	query, arg, err := db.BindNamed(sqlQuery, mapToInternalRepo(repo))
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind repo object")
	}

	if err = db.QueryRowContext(ctx, query, arg...).Scan(&repo.ID); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Insert query failed")
	}

	repo.Path, err = s.getRepoPath(ctx, repo.ParentID, repo.Identifier)
	if err != nil {
		return err
	}

	return nil
}
// Update updates the repo details using optimistic locking:
// the row is only updated when its stored version equals repo.Version,
// and the version is incremented in the process. Returns
// gitness_store.ErrVersionConflict when no row matched.
func (s *RepoStore) Update(ctx context.Context, repo *types.Repository) error {
	const sqlQuery = `
	UPDATE repositories
	SET
		repo_version = :repo_version
		,repo_updated = :repo_updated
		,repo_deleted = :repo_deleted
		,repo_last_git_push = :repo_last_git_push
		,repo_parent_id = :repo_parent_id
		,repo_uid = :repo_uid
		,repo_git_uid = :repo_git_uid
		,repo_description = :repo_description
		,repo_default_branch = :repo_default_branch
		,repo_pullreq_seq = :repo_pullreq_seq
		,repo_num_pulls = :repo_num_pulls
		,repo_num_closed_pulls = :repo_num_closed_pulls
		,repo_num_open_pulls = :repo_num_open_pulls
		,repo_num_merged_pulls = :repo_num_merged_pulls
		,repo_state = :repo_state
		,repo_is_empty = :repo_is_empty
		,repo_tags = :repo_tags
	WHERE repo_id = :repo_id AND repo_version = :repo_version - 1`

	db := dbtx.GetAccessor(ctx, s.db)

	dbRepo := mapToInternalRepo(repo)

	// update Version (used for optimistic locking) and Updated time
	dbRepo.Version++
	dbRepo.Updated = time.Now().UnixMilli()

	query, arg, err := db.BindNamed(sqlQuery, dbRepo)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind repo object")
	}

	result, err := db.ExecContext(ctx, query, arg...)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to update repository")
	}

	count, err := result.RowsAffected()
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to get number of updated rows")
	}

	if count == 0 {
		return gitness_store.ErrVersionConflict
	}

	// reflect the bumped version/timestamp back on the caller's object.
	repo.Version = dbRepo.Version
	repo.Updated = dbRepo.Updated

	// update path in case parent/identifier changed (its most likely cached anyway)
	repo.Path, err = s.getRepoPath(ctx, repo.ParentID, repo.Identifier)
	if err != nil {
		return err
	}

	return nil
}
// UpdateSize updates the size of a specific repository in the database (size is in KiB).
// Only active (non-deleted) repos are updated; updating a missing/deleted repo
// returns ErrResourceNotFound.
func (s *RepoStore) UpdateSize(ctx context.Context, id int64, sizeInKiB, lfsSizeInKiB int64) error {
	stmt := database.Builder.
		Update("repositories").
		Set("repo_size", sizeInKiB).
		Set("repo_lfs_size", lfsSizeInKiB).
		Set("repo_size_updated", time.Now().UnixMilli()).
		Where("repo_id = ? AND repo_deleted IS NULL", id)

	sqlQuery, args, err := stmt.ToSql()
	if err != nil {
		return errors.Wrap(err, "Failed to create sql query")
	}

	db := dbtx.GetAccessor(ctx, s.db)

	result, err := db.ExecContext(ctx, sqlQuery, args...)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to update repo size")
	}

	count, err := result.RowsAffected()
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to get number of updated rows")
	}

	if count == 0 {
		return fmt.Errorf("repo %d size not updated: %w", id, gitness_store.ErrResourceNotFound)
	}

	return nil
}
// GetSize returns the repo size (repo_size column) of an active repo.
func (s *RepoStore) GetSize(ctx context.Context, id int64) (int64, error) {
	query := `
	SELECT
		repo_size
	FROM repositories
	WHERE
		repo_id = $1 AND repo_deleted IS NULL
	`
	db := dbtx.GetAccessor(ctx, s.db)

	var size int64
	if err := db.QueryRowContext(ctx, query, id).Scan(&size); err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "failed to get repo size")
	}
	return size, nil
}
// GetLFSSize returns the repo LFS size (repo_lfs_size column) of an active repo.
func (s *RepoStore) GetLFSSize(ctx context.Context, id int64) (int64, error) {
	query := `
	SELECT
		repo_lfs_size
	FROM repositories
	WHERE
		repo_id = $1 AND repo_deleted IS NULL
	`
	db := dbtx.GetAccessor(ctx, s.db)

	var size int64
	if err := db.QueryRowContext(ctx, query, id).Scan(&size); err != nil {
		// fixed copy-paste error message: this is the LFS size, not the repo size.
		return 0, database.ProcessSQLErrorf(ctx, err, "failed to get repo LFS size")
	}
	return size, nil
}
// UpdateOptLock updates the active repository using the optimistic locking mechanism.
// Rejects already soft-deleted repos with ErrResourceNotFound before applying mutateFn.
func (s *RepoStore) UpdateOptLock(
	ctx context.Context,
	repo *types.Repository,
	mutateFn func(repository *types.Repository) error,
) (*types.Repository, error) {
	return s.updateOptLock(
		ctx,
		repo,
		func(r *types.Repository) error {
			// only active repos may be updated through this entry point.
			if repo.Deleted != nil {
				return gitness_store.ErrResourceNotFound
			}
			return mutateFn(r)
		},
	)
}
// updateDeletedOptLock updates a deleted repository using the optimistic locking mechanism.
// Rejects active (non-deleted) repos with ErrResourceNotFound before applying mutateFn.
func (s *RepoStore) updateDeletedOptLock(ctx context.Context,
	repo *types.Repository,
	mutateFn func(repository *types.Repository) error,
) (*types.Repository, error) {
	return s.updateOptLock(
		ctx,
		repo,
		func(r *types.Repository) error {
			// only soft-deleted repos may be updated through this entry point.
			if repo.Deleted == nil {
				return gitness_store.ErrResourceNotFound
			}
			return mutateFn(r)
		},
	)
}
// updateOptLock updates the repository using the optimistic locking mechanism.
// mutateFn is applied to a copy of repo and the update is attempted; on a
// version conflict the current row is reloaded and the whole cycle retries
// until it succeeds or fails with a different error.
func (s *RepoStore) updateOptLock(
	ctx context.Context,
	repo *types.Repository,
	mutateFn func(repository *types.Repository) error,
) (*types.Repository, error) {
	for {
		// mutate a copy so a failed attempt doesn't dirty the caller's object.
		dup := *repo

		err := mutateFn(&dup)
		if err != nil {
			return nil, err
		}

		err = s.Update(ctx, &dup)
		if err == nil {
			return &dup, nil
		}
		if !errors.Is(err, gitness_store.ErrVersionConflict) {
			return nil, err
		}

		// replaced leftover debug text ("ABOUT TO FIND DELETED") with a proper message.
		log.Ctx(ctx).Warn().
			Int64("repo.id", repo.ID).
			Err(err).
			Msg("optimistic lock conflict - reloading repo and retrying update")

		// reload the repo at its current version (respecting its deleted state) and retry.
		repo, err = s.FindDeleted(ctx, repo.ID, repo.Deleted)
		if err != nil {
			return nil, err
		}
	}
}
// SoftDelete deletes a repo softly by setting the deleted timestamp.
// The update goes through the optimistic-locking path, so concurrent
// modifications are retried automatically.
func (s *RepoStore) SoftDelete(ctx context.Context, repo *types.Repository, deletedAt int64) error {
	markDeleted := func(r *types.Repository) error {
		r.Deleted = &deletedAt
		return nil
	}

	if _, err := s.UpdateOptLock(ctx, repo, markDeleted); err != nil {
		return fmt.Errorf("failed to soft delete repo: %w", err)
	}

	return nil
}
// Purge deletes the repo permanently.
// A nil deletedAt targets an active repo; otherwise the repo must be
// soft-deleted with exactly that timestamp.
func (s *RepoStore) Purge(ctx context.Context, id int64, deletedAt *int64) error {
	stmt := database.Builder.
		Delete("repositories").
		Where("repo_id = ?", id)

	if deletedAt != nil {
		stmt = stmt.Where("repo_deleted = ?", *deletedAt)
	} else {
		stmt = stmt.Where("repo_deleted IS NULL")
	}

	sql, args, err := stmt.ToSql()
	if err != nil {
		return fmt.Errorf("failed to convert purge repo query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	_, err = db.ExecContext(ctx, sql, args...)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "the delete query failed")
	}

	return nil
}
// Restore restores a deleted repo by clearing its deleted timestamp,
// optionally assigning a new identifier and/or parent space in the same update.
func (s *RepoStore) Restore(
	ctx context.Context,
	repo *types.Repository,
	newIdentifier *string,
	newParentID *int64,
) (*types.Repository, error) {
	restored, err := s.updateDeletedOptLock(ctx, repo, func(r *types.Repository) error {
		r.Deleted = nil
		if newIdentifier != nil {
			r.Identifier = *newIdentifier
		}
		if newParentID != nil {
			r.ParentID = *newParentID
		}
		return nil
	})
	if err != nil {
		return nil, err
	}

	return restored, nil
}
// Count of active repos in a space. if parentID (space) is zero then it will count all repositories in the system.
// Count deleted repos requires opts.DeletedBeforeOrAt filter.
func (s *RepoStore) Count(
	ctx context.Context,
	parentID int64,
	filter *types.RepoFilter,
) (int64, error) {
	// recursive counting walks the whole space subtree; otherwise only direct children.
	if !filter.Recursive {
		return s.count(ctx, parentID, filter)
	}
	return s.countAll(ctx, parentID, filter)
}
// count counts repos that are direct children of parentID.
// A parentID of zero (or negative) skips the parent filter and counts system-wide.
func (s *RepoStore) count(
	ctx context.Context,
	parentID int64,
	filter *types.RepoFilter,
) (int64, error) {
	stmt := database.Builder.
		Select("count(*)").
		From("repositories")

	if parentID > 0 {
		stmt = stmt.Where("repo_parent_id = ?", parentID)
	}

	stmt = applyQueryFilter(stmt, filter, s.db.DriverName())

	sql, args, err := stmt.ToSql()
	if err != nil {
		return 0, errors.Wrap(err, "Failed to convert query to sql")
	}

	db := dbtx.GetAccessor(ctx, s.db)

	var count int64
	err = db.QueryRowContext(ctx, sql, args...).Scan(&count)
	if err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "Failed executing count query")
	}

	return count, nil
}
// countAll counts repos in parentID and all of its descendant spaces.
func (s *RepoStore) countAll(
	ctx context.Context,
	parentID int64,
	filter *types.RepoFilter,
) (int64, error) {
	db := dbtx.GetAccessor(ctx, s.db)

	// resolve the full subtree of space IDs first, then count across all of them.
	spaceIDs, err := getSpaceDescendantsIDs(ctx, db, parentID)
	if err != nil {
		return 0, fmt.Errorf(
			"failed to get space descendants ids for %d: %w",
			parentID, err,
		)
	}

	stmt := database.Builder.
		Select("COUNT(repo_id)").
		From("repositories").
		Where(squirrel.Eq{"repo_parent_id": spaceIDs})

	stmt = applyQueryFilter(stmt, filter, s.db.DriverName())

	sql, args, err := stmt.ToSql()
	if err != nil {
		return 0, errors.Wrap(err, "Failed to convert query to sql")
	}

	var numRepos int64
	if err := db.GetContext(ctx, &numRepos, sql, args...); err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "failed to count repositories")
	}

	return numRepos, nil
}
// CountByRootSpaces counts total number of repositories grouped by root spaces.
// A recursive CTE maps every space to its root (space with no parent), then
// repos are joined on their parent space and aggregated per root.
func (s *RepoStore) CountByRootSpaces(
	ctx context.Context,
) ([]types.RepositoryCount, error) {
	query := `
	WITH RECURSIVE
	SpaceHierarchy(root_id, space_id, space_parent_id, space_uid) AS (
		SELECT space_id, space_id, space_parent_id, space_uid
		FROM spaces
		WHERE space_parent_id is null

		UNION

		SELECT h.root_id, s.space_id, s.space_parent_id, h.space_uid
		FROM spaces s
		JOIN SpaceHierarchy h ON s.space_parent_id = h.space_id
	)
	SELECT
		COUNT(r.repo_id) AS total,
		s.root_id AS root_space_id,
		s.space_uid
	FROM repositories r
	JOIN SpaceHierarchy s ON s.space_id = r.repo_parent_id
	GROUP BY root_space_id, s.space_uid
	`

	db := dbtx.GetAccessor(ctx, s.db)

	rows, err := db.QueryxContext(ctx, query)
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to count repositories")
	}
	defer rows.Close()

	var result []types.RepositoryCount
	for rows.Next() {
		var count types.RepositoryCount
		if err = rows.Scan(&count.Total, &count.SpaceID, &count.SpaceUID); err != nil {
			return nil, database.ProcessSQLErrorf(ctx, err, "failed to scan row for count repositories query")
		}
		result = append(result, count)
	}

	// surface any iteration error that occurred after the last Next().
	if err = rows.Err(); err != nil {
		return nil, err
	}

	return result, nil
}
// List returns a list of active repos in a space.
// With "DeletedBeforeOrAt" filter, lists deleted repos by opts.DeletedBeforeOrAt.
func (s *RepoStore) List(
	ctx context.Context,
	parentID int64,
	filter *types.RepoFilter,
) ([]*types.Repository, error) {
	// recursive listing walks the whole space subtree; otherwise only direct children.
	if !filter.Recursive {
		return s.list(ctx, parentID, filter)
	}
	return s.listRecursive(ctx, parentID, filter)
}
// list returns repos that are direct children of parentID,
// applying the query and sort filters.
func (s *RepoStore) list(
	ctx context.Context,
	parentID int64,
	filter *types.RepoFilter,
) ([]*types.Repository, error) {
	stmt := database.Builder.
		Select(repoColumnsForJoin).
		From("repositories").
		// pass the int64 directly - the previous fmt.Sprint(parentID) bound the
		// ID as a string parameter, inconsistent with every other query here.
		Where("repo_parent_id = ?", parentID)

	stmt = applyQueryFilter(stmt, filter, s.db.DriverName())
	stmt = applySortFilter(stmt, filter)

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert query to sql")
	}

	db := dbtx.GetAccessor(ctx, s.db)

	dst := []*repository{}
	if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing custom list query")
	}

	return s.mapToRepos(ctx, dst)
}
// listRecursive returns repos in parentID and all of its descendant spaces,
// applying the query and sort filters.
func (s *RepoStore) listRecursive(
	ctx context.Context,
	parentID int64,
	filter *types.RepoFilter,
) ([]*types.Repository, error) {
	db := dbtx.GetAccessor(ctx, s.db)

	// resolve the full subtree of space IDs first, then list across all of them.
	spaceIDs, err := getSpaceDescendantsIDs(ctx, db, parentID)
	if err != nil {
		return nil, fmt.Errorf(
			"failed to get space descendants ids for %d: %w",
			parentID, err,
		)
	}

	stmt := database.Builder.
		Select(repoColumnsForJoin).
		From("repositories").
		Where(squirrel.Eq{"repo_parent_id": spaceIDs})

	stmt = applyQueryFilter(stmt, filter, s.db.DriverName())
	stmt = applySortFilter(stmt, filter)

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert query to sql")
	}

	repos := []*repository{}
	if err := db.SelectContext(ctx, &repos, sql, args...); err != nil {
		// fixed copy-paste error message (was "failed to count repositories").
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to list repositories")
	}

	return s.mapToRepos(ctx, repos)
}
// repoSize is the projection used by ListSizeInfos: just the columns
// needed to decide whether a repo's size needs recalculation.
type repoSize struct {
	ID          int64  `db:"repo_id"`
	GitUID      string `db:"repo_git_uid"`
	Size        int64  `db:"repo_size"`
	LFSSize     int64  `db:"repo_lfs_size"`
	SizeUpdated int64  `db:"repo_size_updated"`
}
// ListSizeInfos returns size info for active repos whose last git push is at or
// after the last size calculation (i.e. repos whose size may be stale).
// Repos currently importing or migrating are excluded.
func (s *RepoStore) ListSizeInfos(ctx context.Context) ([]*types.RepositorySizeInfo, error) {
	stmt := database.Builder.
		Select("repo_id", "repo_git_uid", "repo_size", "repo_lfs_size", "repo_size_updated").
		From("repositories").
		Where("repo_last_git_push >= repo_size_updated").
		Where("repo_deleted IS NULL").
		Where("repo_state NOT IN (?, ?)",
			enum.RepoStateGitImport,
			enum.RepoStateMigrateGitPush,
		)

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert query to sql")
	}

	db := dbtx.GetAccessor(ctx, s.db)

	dst := []*repoSize{}
	if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing custom list query")
	}

	return s.mapToRepoSizes(dst), nil
}
// ListAll returns a list of all repos across spaces with the provided filters.
// No parent restriction is applied - only the query and sort filters.
func (s *RepoStore) ListAll(
	ctx context.Context,
	filter *types.RepoFilter,
) ([]*types.Repository, error) {
	stmt := database.Builder.
		Select(repoColumnsForJoin).
		From("repositories")

	stmt = applyQueryFilter(stmt, filter, s.db.DriverName())
	stmt = applySortFilter(stmt, filter)

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "failed to convert query to sql")
	}

	db := dbtx.GetAccessor(ctx, s.db)

	dst := []*repository{}
	if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed executing custom list query")
	}

	return s.mapToRepos(ctx, dst)
}
// UpdateNumForks adjusts the fork counter of the repo by delta (may be negative).
func (s *RepoStore) UpdateNumForks(ctx context.Context, repoID int64, delta int64) error {
	const query = "UPDATE repositories SET repo_num_forks = repo_num_forks + $1 WHERE repo_id = $2"

	db := dbtx.GetAccessor(ctx, s.db)
	_, err := db.ExecContext(ctx, query, delta, repoID)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "failed updating number of forks")
	}

	return nil
}
// ClearForkID removes the fork relationship from all repositories that were
// forked from the repository with the provided ID (sets repo_fork_id to NULL).
func (s *RepoStore) ClearForkID(ctx context.Context, repoUpstreamID int64) error {
	sql, args, err := database.Builder.Update("repositories").
		Set("repo_fork_id", nil).
		Where("repo_fork_id = ?", repoUpstreamID).
		ToSql()
	if err != nil {
		return errors.Wrap(err, "failed to convert query to sql")
	}

	if _, err = dbtx.GetAccessor(ctx, s.db).ExecContext(ctx, sql, args...); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "failed to clear fork ID")
	}

	return nil
}
// UpdateParent moves all repositories under currentParentID to newParentID and
// bumps their repo_updated timestamp. It returns the number of moved rows.
func (s *RepoStore) UpdateParent(ctx context.Context, currentParentID, newParentID int64) (int64, error) {
	stmt := database.Builder.Update("repositories").
		Set("repo_parent_id", newParentID).
		Set("repo_updated", time.Now().UnixMilli()).
		Where("repo_parent_id = ?", currentParentID)

	sql, args, err := stmt.ToSql()
	if err != nil {
		return 0, errors.Wrap(err, "failed to convert query to sql")
	}

	db := dbtx.GetAccessor(ctx, s.db)

	result, err := db.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "failed to update parent ID for repos")
	}

	rows, err := result.RowsAffected()
	if err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "failed to get number of updated rows")
	}

	return rows, nil
}
// mapToRepo converts an internal repository record to the API type and
// resolves the repository's full path from its parent space.
func (s *RepoStore) mapToRepo(
	ctx context.Context,
	in *repository,
) (*types.Repository, error) {
	var err error

	// default the repo type if the nullable DB column is unset.
	t := enum.RepoTypeNormal
	if in.Type.Valid {
		t = enum.RepoType(in.Type.String)
	}
	res := &types.Repository{
		ID:             in.ID,
		Version:        in.Version,
		ParentID:       in.ParentID,
		Identifier:     in.Identifier,
		Description:    in.Description,
		Created:        in.Created,
		CreatedBy:      in.CreatedBy,
		Updated:        in.Updated,
		Deleted:        in.Deleted.Ptr(),
		LastGITPush:    in.LastGITPush,
		Size:           in.Size,
		LFSSize:        in.SizeLFS,
		SizeUpdated:    in.SizeUpdated,
		GitUID:         in.GitUID,
		DefaultBranch:  in.DefaultBranch,
		ForkID:         in.ForkID,
		PullReqSeq:     in.PullReqSeq,
		NumForks:       in.NumForks,
		NumPulls:       in.NumPulls,
		NumClosedPulls: in.NumClosedPulls,
		NumOpenPulls:   in.NumOpenPulls,
		NumMergedPulls: in.NumMergedPulls,
		State:          in.State,
		IsEmpty:        in.IsEmpty,
		Tags:           in.Tags,
		Type:           t,
		// Path: is set below
	}

	res.Path, err = s.getRepoPath(ctx, in.ParentID, in.Identifier)
	if err != nil {
		return nil, err
	}

	return res, nil
}
// getRepoPath builds the repository's full path by concatenating the parent
// space's primary path and the repo identifier. If the parent space was soft
// deleted (no primary path left), the path is reconstructed from the deleted
// space's records instead.
func (s *RepoStore) getRepoPath(ctx context.Context, parentID int64, repoIdentifier string) (string, error) {
	spacePath, err := s.spacePathStore.FindPrimaryBySpaceID(ctx, parentID)
	// try to re-create the space path if it was soft deleted.
	if errors.Is(err, gitness_store.ErrResourceNotFound) {
		sPath, err := getPathForDeletedSpace(ctx, s.db, parentID)
		if err != nil {
			return "", fmt.Errorf("failed to get primary path of soft deleted space %d: %w", parentID, err)
		}
		return paths.Concatenate(sPath, repoIdentifier), nil
	}
	if err != nil {
		return "", fmt.Errorf("failed to get primary path for space %d: %w", parentID, err)
	}
	return paths.Concatenate(spacePath.Value, repoIdentifier), nil
}
// mapToRepos converts a slice of internal repository records to API types,
// resolving the full path for each repository.
func (s *RepoStore) mapToRepos(
	ctx context.Context,
	repos []*repository,
) ([]*types.Repository, error) {
	res := make([]*types.Repository, len(repos))
	for i, r := range repos {
		mapped, err := s.mapToRepo(ctx, r)
		if err != nil {
			return nil, err
		}
		res[i] = mapped
	}
	return res, nil
}
// mapToRepoSize converts an internal repoSize record into the API size-info type.
func (s *RepoStore) mapToRepoSize(
	in *repoSize,
) *types.RepositorySizeInfo {
	out := new(types.RepositorySizeInfo)
	out.ID = in.ID
	out.GitUID = in.GitUID
	out.Size = in.Size
	out.SizeUpdated = in.SizeUpdated
	return out
}
// mapToRepoSizes converts internal repoSize records into API size-info types.
func (s *RepoStore) mapToRepoSizes(
	repoSizes []*repoSize,
) []*types.RepositorySizeInfo {
	res := make([]*types.RepositorySizeInfo, 0, len(repoSizes))
	for _, rs := range repoSizes {
		res = append(res, s.mapToRepoSize(rs))
	}
	return res
}
// mapToInternalRepo converts the API repository type to its DB representation.
// It is the inverse of mapToRepo (modulo the computed Path field).
func mapToInternalRepo(in *types.Repository) *repository {
	return &repository{
		ID:          in.ID,
		Version:     in.Version,
		ParentID:    in.ParentID,
		Identifier:  in.Identifier,
		Description: in.Description,
		Created:     in.Created,
		CreatedBy:   in.CreatedBy,
		Updated:     in.Updated,
		Deleted:     null.IntFromPtr(in.Deleted),
		LastGITPush: in.LastGITPush,
		Size:        in.Size,
		// FIX: map the LFS size as well - it was previously dropped here even
		// though mapToRepo reads it (repository.SizeLFS <-> Repository.LFSSize),
		// so any write going through this mapping lost repo_lfs_size.
		SizeLFS:        in.LFSSize,
		SizeUpdated:    in.SizeUpdated,
		GitUID:         in.GitUID,
		DefaultBranch:  in.DefaultBranch,
		ForkID:         in.ForkID,
		PullReqSeq:     in.PullReqSeq,
		NumForks:       in.NumForks,
		NumPulls:       in.NumPulls,
		NumClosedPulls: in.NumClosedPulls,
		NumOpenPulls:   in.NumOpenPulls,
		NumMergedPulls: in.NumMergedPulls,
		State:          in.State,
		IsEmpty:        in.IsEmpty,
		Tags:           in.Tags,
		// store NULL for the type when unset so the read side can default it.
		Type: null.NewString(string(in.Type), in.Type != ""),
	}
}
// applyQueryFilter applies identifier, query, deleted-state, favorites and tag
// filters to the select statement. driverName is needed because tag filtering
// generates driver-specific JSON SQL (see applyTagsFilter).
func applyQueryFilter(
	stmt squirrel.SelectBuilder,
	filter *types.RepoFilter,
	driverName string,
) squirrel.SelectBuilder {
	if len(filter.Identifiers) > 0 {
		// identifiers are matched case-insensitively by lowercasing both sides.
		identifiers := make([]string, len(filter.Identifiers))
		for i, id := range filter.Identifiers {
			identifiers[i] = strings.ToLower(id)
		}
		stmt = stmt.Where(squirrel.Eq{"LOWER(repo_uid)": identifiers})
	}
	if filter.Query != "" {
		stmt = stmt.Where(PartialMatch("repo_uid", filter.Query))
	}

	// exactly one deleted-state predicate is applied: an exact deletion
	// timestamp wins over a "deleted before" bound; otherwise only live
	// (non-deleted) repositories are returned.
	//nolint:gocritic
	if filter.DeletedAt != nil {
		stmt = stmt.Where("repo_deleted = ?", filter.DeletedAt)
	} else if filter.DeletedBeforeOrAt != nil {
		stmt = stmt.Where("repo_deleted <= ?", filter.DeletedBeforeOrAt)
	} else {
		stmt = stmt.Where("repo_deleted IS NULL")
	}

	if filter.OnlyFavoritesFor != nil {
		// inner join restricts the result to repos favorited by the principal.
		stmt = stmt.
			InnerJoin("favorite_repos ON favorite_repos.favorite_repo_id = repositories.repo_id").
			Where("favorite_repos.favorite_principal_id = ?", *filter.OnlyFavoritesFor)
	}

	return applyTagsFilter(stmt, filter, driverName)
}
// applyTagsFilter adds tag predicates to the statement. Tags are stored as a
// JSON object in repo_tags; a key with no values filters on key existence,
// a key with values matches any of the key/value pairs (predicates are OR-ed).
// Postgres uses jsonb operators; other drivers (sqlite) use the json1 functions.
func applyTagsFilter(
	stmt squirrel.SelectBuilder,
	filter *types.RepoFilter,
	driverName string,
) squirrel.SelectBuilder {
	if len(filter.Tags) == 0 {
		return stmt
	}

	ors := squirrel.Or{}

	if driverName == PostgresDriverName {
		for k, vs := range filter.Tags {
			// key-only filter
			if len(vs) == 0 {
				// "??" is squirrel's escape for a literal "?" (jsonb key-exists operator).
				ors = append(ors, squirrel.Expr("repo_tags ?? ?", k))
				continue
			}
			// key-value filter
			for _, v := range vs {
				// marshaling a map[string]string cannot fail; error deliberately ignored.
				data, _ := json.Marshal(map[string]string{k: v})
				ors = append(
					ors,
					squirrel.Expr("repo_tags @> ?::jsonb", string(data)),
				)
			}
		}
		if len(ors) > 0 {
			stmt = stmt.Where(ors)
		}
		return stmt
	}

	// non-postgres (sqlite json1) variant.
	for k, vs := range filter.Tags {
		// key-only filter
		if len(vs) == 0 {
			ors = append(ors,
				squirrel.Expr("EXISTS (SELECT 1 FROM json_each(repo_tags) WHERE json_each.key = ?)", k),
			)
			continue
		}
		// key-value filters
		for _, v := range vs {
			if k == "" {
				// special case: empty key - json_extract's '$.' path cannot address it.
				ors = append(ors,
					squirrel.Expr(
						"EXISTS (SELECT 1 FROM json_each(repo_tags) WHERE json_each.key = '' AND json_each.value = ?)",
						v,
					),
				)
			} else {
				// NOTE(review): keys containing quote characters would need escaping
				// in the '$.' || ? path expression; presumably tag keys are restricted
				// upstream - confirm.
				ors = append(ors,
					squirrel.Expr("json_extract(repo_tags, '$.' || ?) = ?", k, v),
				)
			}
		}
	}

	if len(ors) > 0 {
		stmt = stmt.Where(ors)
	}

	return stmt
}
// applySortFilter applies paging (limit/offset) and ordering to the statement.
func applySortFilter(stmt squirrel.SelectBuilder, filter *types.RepoFilter) squirrel.SelectBuilder {
	stmt = stmt.Limit(database.Limit(filter.Size))
	stmt = stmt.Offset(database.Offset(filter.Page, filter.Size))

	switch filter.Sort {
	// TODO [CODE-1363]: remove after identifier migration.
	case enum.RepoAttrUID, enum.RepoAttrIdentifier, enum.RepoAttrNone:
		// NOTE: string concatenation is safe because the
		// order attribute is an enum and is not user-defined,
		// and is therefore not subject to injection attacks.
		stmt = stmt.OrderBy("LOWER(repo_uid) " + filter.Order.String())
	case enum.RepoAttrCreated:
		stmt = stmt.OrderBy("repo_created " + filter.Order.String())
	case enum.RepoAttrUpdated:
		stmt = stmt.OrderBy("repo_updated " + filter.Order.String())
	case enum.RepoAttrDeleted:
		stmt = stmt.OrderBy("repo_deleted " + filter.Order.String())
	case enum.RepoAttrLastGITPush:
		stmt = stmt.OrderBy("repo_last_git_push " + filter.Order.String())
	}
	return stmt
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/secret.go | app/store/database/secret.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/app/store"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
)
// compile-time check that secretStore implements store.SecretStore.
var _ store.SecretStore = (*secretStore)(nil)

const (
	// secretQueryBase selects all secret columns from the secrets table.
	secretQueryBase = `
	SELECT` + secretColumns + `
	FROM secrets`

	//nolint:gosec // wrong flagging
	secretColumns = `
	secret_id,
	secret_description,
	secret_space_id,
	secret_created_by,
	secret_uid,
	secret_data,
	secret_created,
	secret_updated,
	secret_version
	`
)
// NewSecretStore returns a new SecretStore backed by the provided database.
func NewSecretStore(db *sqlx.DB) store.SecretStore {
	return &secretStore{
		db: db,
	}
}

// secretStore implements store.SecretStore using a relational database.
type secretStore struct {
	db *sqlx.DB
}
// Find returns a secret given a secret ID.
func (s *secretStore) Find(ctx context.Context, id int64) (*types.Secret, error) {
	const findQueryStmt = secretQueryBase + `
	WHERE secret_id = $1`

	// GetAccessor returns the transaction from the context if one is active.
	db := dbtx.GetAccessor(ctx, s.db)

	dst := new(types.Secret)
	if err := db.GetContext(ctx, dst, findQueryStmt, id); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find secret")
	}
	return dst, nil
}
// FindByIdentifier returns a secret in a given space with a given identifier.
// The identifier is matched exactly (case-sensitive).
func (s *secretStore) FindByIdentifier(ctx context.Context, spaceID int64, identifier string) (*types.Secret, error) {
	const findQueryStmt = secretQueryBase + `
	WHERE secret_space_id = $1 AND secret_uid = $2`

	db := dbtx.GetAccessor(ctx, s.db)

	dst := new(types.Secret)
	if err := db.GetContext(ctx, dst, findQueryStmt, spaceID, identifier); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find secret")
	}
	return dst, nil
}
// Create creates a secret. On success the generated ID is written back into
// secret.ID (via RETURNING).
func (s *secretStore) Create(ctx context.Context, secret *types.Secret) error {
	//nolint:gosec // wrong flagging
	const secretInsertStmt = `
	INSERT INTO secrets (
		secret_description,
		secret_space_id,
		secret_created_by,
		secret_uid,
		secret_data,
		secret_created,
		secret_updated,
		secret_version
	) VALUES (
		:secret_description,
		:secret_space_id,
		:secret_created_by,
		:secret_uid,
		:secret_data,
		:secret_created,
		:secret_updated,
		:secret_version
	) RETURNING secret_id`

	db := dbtx.GetAccessor(ctx, s.db)

	// BindNamed maps the :named placeholders onto the struct's db tags.
	query, arg, err := db.BindNamed(secretInsertStmt, secret)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind secret object")
	}

	if err = db.QueryRowContext(ctx, query, arg...).Scan(&secret.ID); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "secret query failed")
	}

	return nil
}
// Update updates the secret using optimistic locking: the row is only updated
// when its stored version matches the caller's version. On success the
// caller's object receives the incremented version and new updated timestamp;
// on a version mismatch gitness_store.ErrVersionConflict is returned.
func (s *secretStore) Update(ctx context.Context, p *types.Secret) error {
	const secretUpdateStmt = `
	UPDATE secrets
	SET
		secret_description = :secret_description,
		secret_uid = :secret_uid,
		secret_data = :secret_data,
		secret_updated = :secret_updated,
		secret_version = :secret_version
	WHERE secret_id = :secret_id AND secret_version = :secret_version - 1`

	updatedAt := time.Now()

	// work on a copy so the caller's object is only mutated on success.
	secret := *p

	secret.Version++
	secret.Updated = updatedAt.UnixMilli()

	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(secretUpdateStmt, secret)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind secret object")
	}

	result, err := db.ExecContext(ctx, query, arg...)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to update secret")
	}

	count, err := result.RowsAffected()
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to get number of updated rows")
	}

	// zero affected rows means the stored version didn't match.
	if count == 0 {
		return gitness_store.ErrVersionConflict
	}

	p.Version = secret.Version
	p.Updated = secret.Updated
	return nil
}
// UpdateOptLock updates the secret using the optimistic locking mechanism:
// it applies mutateFn to a copy of the secret and retries with a freshly
// loaded secret whenever Update reports a version conflict.
func (s *secretStore) UpdateOptLock(ctx context.Context,
	secret *types.Secret,
	mutateFn func(secret *types.Secret) error,
) (*types.Secret, error) {
	for {
		// mutate a copy so the caller's input stays untouched on retry/failure.
		dup := *secret

		err := mutateFn(&dup)
		if err != nil {
			return nil, err
		}

		err = s.Update(ctx, &dup)
		if err == nil {
			return &dup, nil
		}
		if !errors.Is(err, gitness_store.ErrVersionConflict) {
			return nil, err
		}

		// conflict: reload the latest version and try again.
		secret, err = s.Find(ctx, secret.ID)
		if err != nil {
			return nil, err
		}
	}
}
// List lists the secrets present in a space, applying the optional query
// filter and paging.
func (s *secretStore) List(ctx context.Context, parentID int64, filter types.ListQueryFilter) ([]*types.Secret, error) {
	stmt := database.Builder.
		Select(secretColumns).
		From("secrets").
		// NOTE(review): fmt.Sprint converts the int64 to a string before binding,
		// while Count binds the int directly - presumably the driver coerces the
		// value; consider aligning the two.
		Where("secret_space_id = ?", fmt.Sprint(parentID))

	if filter.Query != "" {
		stmt = stmt.Where(PartialMatch("secret_uid", filter.Query))
	}

	stmt = stmt.Limit(database.Limit(filter.Size))
	stmt = stmt.Offset(database.Offset(filter.Page, filter.Size))

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert query to sql")
	}

	db := dbtx.GetAccessor(ctx, s.db)

	dst := []*types.Secret{}
	if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing custom list query")
	}

	return dst, nil
}
// ListAll lists all the secrets present in a space (no filtering or paging).
func (s *secretStore) ListAll(ctx context.Context, parentID int64) ([]*types.Secret, error) {
	stmt := database.Builder.
		Select(secretColumns).
		From("secrets").
		// NOTE(review): fmt.Sprint converts the int64 to a string before binding;
		// see the equivalent note on List.
		Where("secret_space_id = ?", fmt.Sprint(parentID))

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert query to sql")
	}

	db := dbtx.GetAccessor(ctx, s.db)

	dst := []*types.Secret{}
	if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing custom list query")
	}

	return dst, nil
}
// Delete deletes a secret given a secret ID.
func (s *secretStore) Delete(ctx context.Context, id int64) error {
	//nolint:gosec // wrong flagging
	const secretDeleteStmt = `
	DELETE FROM secrets
	WHERE secret_id = $1`

	db := dbtx.GetAccessor(ctx, s.db)

	_, err := db.ExecContext(ctx, secretDeleteStmt, id)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Could not delete secret")
	}

	return nil
}
// DeleteByIdentifier deletes a secret with a given identifier in a space.
func (s *secretStore) DeleteByIdentifier(ctx context.Context, spaceID int64, identifier string) error {
	//nolint:gosec // wrong flagging
	const secretDeleteStmt = `
	DELETE FROM secrets
	WHERE secret_space_id = $1 AND secret_uid = $2`

	db := dbtx.GetAccessor(ctx, s.db)

	_, err := db.ExecContext(ctx, secretDeleteStmt, spaceID, identifier)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Could not delete secret")
	}

	return nil
}
// Count returns the number of secrets in a space matching the optional
// query filter.
func (s *secretStore) Count(ctx context.Context, parentID int64, filter types.ListQueryFilter) (int64, error) {
	stmt := database.Builder.
		Select("count(*)").
		From("secrets").
		Where("secret_space_id = ?", parentID)

	if filter.Query != "" {
		stmt = stmt.Where(PartialMatch("secret_uid", filter.Query))
	}

	sql, args, err := stmt.ToSql()
	if err != nil {
		return 0, errors.Wrap(err, "Failed to convert query to sql")
	}

	db := dbtx.GetAccessor(ctx, s.db)

	var count int64
	err = db.QueryRowContext(ctx, sql, args...).Scan(&count)
	if err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "Failed executing count query")
	}

	return count, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/principal_service.go | app/store/database/principal_service.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"fmt"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/rs/zerolog/log"
)
// service is a DB representation of a service principal.
// It is required to allow storing transformed UIDs used for uniqueness
// constraints and searching.
type service struct {
	types.Service
	// UIDUnique is the transformed (e.g. lowercased) version of the UID used
	// for the DB uniqueness constraint and lookups.
	UIDUnique string `db:"principal_uid_unique"`
}

// service doesn't have any extra columns.
const serviceColumns = principalCommonColumns

// serviceSelectBase selects all service columns from the principals table.
const serviceSelectBase = `
	SELECT` + serviceColumns + `
	FROM principals`
// FindService finds the service by id.
func (s *PrincipalStore) FindService(ctx context.Context, id int64) (*types.Service, error) {
	const sqlQuery = serviceSelectBase + `
	WHERE principal_type = 'service' AND principal_id = $1`

	db := dbtx.GetAccessor(ctx, s.db)

	dst := new(service)
	if err := db.GetContext(ctx, dst, sqlQuery, id); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Select by id query failed")
	}

	return s.mapDBService(dst), nil
}
// FindServiceByUID finds the service by uid. The uid is first transformed to
// its unique form, since the DB only indexes the transformed value.
func (s *PrincipalStore) FindServiceByUID(ctx context.Context, uid string) (*types.Service, error) {
	const sqlQuery = serviceSelectBase + `
	WHERE principal_type = 'service' AND principal_uid_unique = $1`

	// map the UID to unique UID before searching!
	uidUnique, err := s.uidTransformation(uid)
	if err != nil {
		// in case we fail to transform, return a not found (as it can't exist in the first place)
		log.Ctx(ctx).Debug().Msgf("failed to transform uid '%s': %s", uid, err.Error())
		return nil, gitness_store.ErrResourceNotFound
	}

	db := dbtx.GetAccessor(ctx, s.db)

	dst := new(service)
	if err = db.GetContext(ctx, dst, sqlQuery, uidUnique); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Select by uid query failed")
	}

	return s.mapDBService(dst), nil
}
// CreateService saves the service. On success the generated principal ID is
// written back into svc.ID (via RETURNING).
func (s *PrincipalStore) CreateService(ctx context.Context, svc *types.Service) error {
	const sqlQuery = `
	INSERT INTO principals (
		principal_type
		,principal_uid
		,principal_uid_unique
		,principal_email
		,principal_display_name
		,principal_admin
		,principal_blocked
		,principal_salt
		,principal_created
		,principal_updated
	) values (
		'service'
		,:principal_uid
		,:principal_uid_unique
		,:principal_email
		,:principal_display_name
		,:principal_admin
		,:principal_blocked
		,:principal_salt
		,:principal_created
		,:principal_updated
	) RETURNING principal_id`

	// compute the unique UID for the uniqueness constraint before inserting.
	dbSVC, err := s.mapToDBservice(svc)
	if err != nil {
		return fmt.Errorf("failed to map db service: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(sqlQuery, dbSVC)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind service object")
	}

	if err = db.QueryRowContext(ctx, query, arg...).Scan(&svc.ID); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Insert query failed")
	}

	return nil
}
// UpdateService updates the service's mutable fields. The UID is transformed
// to its unique form before being stored so lookups stay consistent.
func (s *PrincipalStore) UpdateService(ctx context.Context, svc *types.Service) error {
	const sqlQuery = `
	UPDATE principals
	SET
		principal_uid = :principal_uid
		,principal_uid_unique = :principal_uid_unique
		,principal_email = :principal_email
		,principal_display_name = :principal_display_name
		,principal_admin = :principal_admin
		,principal_blocked = :principal_blocked
		,principal_updated = :principal_updated
	WHERE principal_type = 'service' AND principal_id = :principal_id`

	dbSVC, err := s.mapToDBservice(svc)
	if err != nil {
		return fmt.Errorf("failed to map db service: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(sqlQuery, dbSVC)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind service object")
	}

	if _, err = db.ExecContext(ctx, query, arg...); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Update query failed")
	}

	// FIX: return nil explicitly - the previous "return err" returned a
	// variable that is provably nil at this point, which read as if an error
	// could still be propagated here.
	return nil
}
// DeleteService deletes the service.
func (s *PrincipalStore) DeleteService(ctx context.Context, id int64) error {
	const sqlQuery = `
	DELETE FROM principals
	WHERE principal_type = 'service' AND principal_id = $1`

	// delete the service
	_, err := dbtx.GetAccessor(ctx, s.db).ExecContext(ctx, sqlQuery, id)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "The delete query failed")
	}

	return nil
}
// ListServices returns all service principals, ordered by UID ascending.
func (s *PrincipalStore) ListServices(ctx context.Context) ([]*types.Service, error) {
	const sqlQuery = serviceSelectBase + `
	WHERE principal_type = 'service'
	ORDER BY principal_uid ASC`

	db := dbtx.GetAccessor(ctx, s.db)

	dst := []*service{}
	err := db.SelectContext(ctx, &dst, sqlQuery)
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing default list query")
	}

	return s.mapDBServices(dst), nil
}
// CountServices returns the total number of service principals.
func (s *PrincipalStore) CountServices(ctx context.Context) (int64, error) {
	const sqlQuery = `
	SELECT count(*)
	FROM principals
	WHERE principal_type = 'service'`

	var count int64
	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.QueryRowContext(ctx, sqlQuery).Scan(&count); err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "Failed executing count query")
	}

	return count, nil
}
// mapDBService unwraps the embedded API service type from its DB representation.
func (s *PrincipalStore) mapDBService(dbSvc *service) *types.Service {
	svc := &dbSvc.Service
	return svc
}
// mapDBServices unwraps a slice of DB service records into API service types.
func (s *PrincipalStore) mapDBServices(dbSVCs []*service) []*types.Service {
	res := make([]*types.Service, 0, len(dbSVCs))
	for _, dbSVC := range dbSVCs {
		res = append(res, s.mapDBService(dbSVC))
	}
	return res
}
// mapToDBservice wraps the API service type into its DB representation,
// computing the transformed unique UID used for the uniqueness constraint.
func (s *PrincipalStore) mapToDBservice(svc *types.Service) (*service, error) {
	// service comes from outside.
	if svc == nil {
		return nil, fmt.Errorf("service is nil")
	}

	uidUnique, err := s.uidTransformation(svc.UID)
	if err != nil {
		return nil, fmt.Errorf("failed to transform service UID: %w", err)
	}
	dbService := &service{
		Service:   *svc,
		UIDUnique: uidUnique,
	}

	return dbService, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/ai_task.go | app/store/database/ai_task.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"strings"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/Masterminds/squirrel"
"github.com/guregu/null"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
)
const (
	aiTaskTable = `ai_tasks`

	// aiTaskInsertColumns lists every column written on insert; the values in
	// Create must stay in this exact order.
	aiTaskInsertColumns = `
		aitask_uid,
		aitask_gitspace_config_id,
		aitask_gitspace_instance_id,
		aitask_initial_prompt,
		aitask_display_name,
		aitask_user_uid,
		aitask_space_id,
		aitask_created,
		aitask_updated,
		aitask_api_url,
		aitask_ai_agent,
		aitask_state,
		aitask_output,
		aitask_output_metadata,
		aitask_error_message`

	aiTaskSelectColumns = "aitask_id," + aiTaskInsertColumns
)

// aiTask is the DB representation of an AI task row.
type aiTask struct {
	ID                 int64            `db:"aitask_id"`
	Identifier         string           `db:"aitask_uid"`
	GitspaceConfigID   int64            `db:"aitask_gitspace_config_id"`
	GitspaceInstanceID int64            `db:"aitask_gitspace_instance_id"`
	InitialPrompt      string           `db:"aitask_initial_prompt"`
	DisplayName        string           `db:"aitask_display_name"`
	UserUID            string           `db:"aitask_user_uid"`
	SpaceID            int64            `db:"aitask_space_id"`
	Created            int64            `db:"aitask_created"`
	Updated            int64            `db:"aitask_updated"`
	APIURL             null.String      `db:"aitask_api_url"`
	AgentType          enum.AIAgent     `db:"aitask_ai_agent"`
	State              enum.AITaskState `db:"aitask_state"`
	Output             null.String      `db:"aitask_output"`
	OutputMetadata     []byte           `db:"aitask_output_metadata"`
	ErrorMessage       null.String      `db:"aitask_error_message"`
}
// compile-time check that aiTaskStore implements store.AITaskStore.
var _ store.AITaskStore = (*aiTaskStore)(nil)

// NewAITaskStore returns a new AITaskStore.
func NewAITaskStore(db *sqlx.DB) store.AITaskStore {
	return &aiTaskStore{
		db: db,
	}
}

// aiTaskStore implements store.AITaskStore using a relational database.
type aiTaskStore struct {
	db *sqlx.DB
}
// Create inserts a new AI task row. The value order must match
// aiTaskInsertColumns; on success the generated ID is written back into
// aiTask.ID (via RETURNING).
func (s aiTaskStore) Create(ctx context.Context, aiTask *types.AITask) error {
	stmt := database.Builder.
		Insert(aiTaskTable).
		Columns(aiTaskInsertColumns).
		Values(
			aiTask.Identifier,
			aiTask.GitspaceConfig.ID,
			aiTask.GitspaceConfig.GitspaceInstance.ID,
			aiTask.InitialPrompt,
			aiTask.DisplayName,
			aiTask.UserUID,
			aiTask.SpaceID,
			aiTask.Created,
			aiTask.Updated,
			aiTask.APIURL,
			aiTask.AIAgent,
			aiTask.State,
			aiTask.Output,
			aiTask.OutputMetadata,
			aiTask.ErrorMessage,
		).
		Suffix("RETURNING aitask_id")

	sql, args, err := stmt.ToSql()
	if err != nil {
		return errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}

	db := dbtx.GetAccessor(ctx, s.db)
	if err = db.QueryRowContext(ctx, sql, args...).Scan(&aiTask.ID); err != nil {
		return database.ProcessSQLErrorf(
			ctx, err, "ai task create query failed for %s", aiTask.Identifier)
	}

	return nil
}
// Update persists the mutable fields of the AI task identified by aiTask.ID.
// NOTE: there is no optimistic locking here - the row is overwritten
// unconditionally (last writer wins).
func (s aiTaskStore) Update(ctx context.Context, aiTask *types.AITask) error {
	stmt := database.Builder.
		Update(aiTaskTable).
		Set("aitask_display_name", aiTask.DisplayName).
		Set("aitask_updated", aiTask.Updated).
		Set("aitask_api_url", aiTask.APIURL).
		Set("aitask_state", aiTask.State).
		Set("aitask_output", aiTask.Output).
		Set("aitask_error_message", aiTask.ErrorMessage).
		Set("aitask_output_metadata", aiTask.OutputMetadata).
		Where("aitask_id = ?", aiTask.ID)

	sql, args, err := stmt.ToSql()
	if err != nil {
		return errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}

	db := dbtx.GetAccessor(ctx, s.db)
	if _, err := db.ExecContext(ctx, sql, args...); err != nil {
		return database.ProcessSQLErrorf(
			ctx, err, "Failed to update ai task for %s", aiTask.Identifier)
	}

	return nil
}
// Find returns the AI task with the given internal ID.
func (s aiTaskStore) Find(ctx context.Context, id int64) (*types.AITask, error) {
	stmt := database.Builder.
		Select(aiTaskSelectColumns).
		From(aiTaskTable).
		Where("aitask_id = ?", id)

	dst := new(aiTask)
	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}

	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.GetContext(ctx, dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find ai task for %d", id)
	}

	return s.mapDBToAITask(dst), nil
}
// FindByIdentifier returns the AI task with the given identifier (matched
// case-insensitively) in the given space.
func (s aiTaskStore) FindByIdentifier(ctx context.Context, spaceID int64, identifier string) (*types.AITask, error) {
	stmt := database.Builder.
		Select(aiTaskSelectColumns).
		From(aiTaskTable).
		// FIX: use squirrel's "?" placeholders like every other query built via
		// database.Builder in this store - the previous raw "$1"/"$2" markers
		// bypassed the builder's placeholder rebinding and break on
		// non-postgres drivers.
		Where("LOWER(aitask_uid) = ?", strings.ToLower(identifier)).
		Where("aitask_space_id = ?", spaceID)

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}

	dst := new(aiTask)
	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.GetContext(ctx, dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find ai task for %s", identifier)
	}

	return s.mapDBToAITask(dst), nil
}
// List returns the AI tasks matching the provided filter, newest first,
// with paging applied.
func (s aiTaskStore) List(ctx context.Context, filter *types.AITaskFilter) ([]*types.AITask, error) {
	stmt := database.Builder.
		Select(aiTaskSelectColumns).
		From(aiTaskTable)

	stmt = s.addAITaskFilter(stmt, filter)
	stmt = stmt.Limit(database.Limit(filter.QueryFilter.Size))
	stmt = stmt.Offset(database.Offset(filter.QueryFilter.Page, filter.QueryFilter.Size))
	stmt = stmt.OrderBy("aitask_created DESC")

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}

	db := dbtx.GetAccessor(ctx, s.db)
	var dst []*aiTask
	if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing list ai task query")
	}

	return s.mapToAITasks(dst), nil
}
// Count returns the number of AI tasks matching the provided filter.
func (s aiTaskStore) Count(ctx context.Context, filter *types.AITaskFilter) (int64, error) {
	stmt := s.addAITaskFilter(
		database.Builder.
			Select("COUNT(*)").
			From(aiTaskTable),
		filter,
	)

	sql, args, err := stmt.ToSql()
	if err != nil {
		return 0, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}

	var count int64
	db := dbtx.GetAccessor(ctx, s.db)
	if err = db.QueryRowContext(ctx, sql, args...).Scan(&count); err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "Failed executing count ai task query")
	}

	return count, nil
}
// addAITaskFilter applies the common AI task filters to the statement.
// Space and user filters are always applied; agent/state/query filters are
// optional. The free-text query matches uid, display name and initial prompt.
func (s aiTaskStore) addAITaskFilter(stmt squirrel.SelectBuilder, filter *types.AITaskFilter) squirrel.SelectBuilder {
	stmt = stmt.Where("aitask_space_id = ?", filter.SpaceID)
	stmt = stmt.Where(squirrel.Eq{"aitask_user_uid": filter.UserIdentifier})

	if len(filter.AIAgents) > 0 {
		stmt = stmt.Where(squirrel.Eq{"aitask_ai_agent": filter.AIAgents})
	}

	if len(filter.States) > 0 {
		stmt = stmt.Where(squirrel.Eq{"aitask_state": filter.States})
	}

	if filter.QueryFilter.Query != "" {
		stmt = stmt.Where(squirrel.Or{
			squirrel.Expr(PartialMatch("aitask_uid", filter.QueryFilter.Query)),
			squirrel.Expr(PartialMatch("aitask_display_name", filter.QueryFilter.Query)),
			squirrel.Expr(PartialMatch("aitask_initial_prompt", filter.QueryFilter.Query)),
		})
	}

	return stmt
}
// mapDBToAITask converts a DB record into the API AI task type.
// Nullable columns (APIURL, Output, ErrorMessage) become nil pointers when unset.
func (s aiTaskStore) mapDBToAITask(in *aiTask) *types.AITask {
	return &types.AITask{
		ID:                 in.ID,
		Identifier:         in.Identifier,
		GitspaceConfigID:   in.GitspaceConfigID,
		GitspaceInstanceID: in.GitspaceInstanceID,
		InitialPrompt:      in.InitialPrompt,
		DisplayName:        in.DisplayName,
		UserUID:            in.UserUID,
		SpaceID:            in.SpaceID,
		Created:            in.Created,
		Updated:            in.Updated,
		APIURL:             in.APIURL.Ptr(),
		AIAgent:            in.AgentType,
		State:              in.State,
		Output:             in.Output.Ptr(),
		OutputMetadata:     in.OutputMetadata,
		ErrorMessage:       in.ErrorMessage.Ptr(),
	}
}
// mapToAITasks converts a slice of DB records into API AI task types.
func (s aiTaskStore) mapToAITasks(aiTasks []*aiTask) []*types.AITask {
	res := make([]*types.AITask, 0, len(aiTasks))
	for _, t := range aiTasks {
		res = append(res, s.mapDBToAITask(t))
	}
	return res
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/space_path.go | app/store/database/space_path.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"fmt"
"github.com/harness/gitness/app/paths"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/guregu/null"
"github.com/jmoiron/sqlx"
)
// compile-time check that SpacePathStore implements store.SpacePathStore.
var _ store.SpacePathStore = (*SpacePathStore)(nil)

// NewSpacePathStore returns a new SpacePathStore.
func NewSpacePathStore(
	db *sqlx.DB,
	pathTransformation store.SpacePathTransformation,
) *SpacePathStore {
	return &SpacePathStore{
		db:                      db,
		spacePathTransformation: pathTransformation,
	}
}

// SpacePathStore implements a store.SpacePathStore backed by a relational database.
type SpacePathStore struct {
	db *sqlx.DB
	// spacePathTransformation maps a raw path identifier to its unique form.
	spacePathTransformation store.SpacePathTransformation
}
// spacePathSegment is an internal representation of a segment of a space path.
type spacePathSegment struct {
	ID int64 `db:"space_path_id"`
	// Identifier is the original identifier that was provided
	Identifier string `db:"space_path_uid"`
	// IdentifierUnique is a transformed version of Identifier which is used to ensure uniqueness guarantees
	IdentifierUnique string `db:"space_path_uid_unique"`
	// IsPrimary indicates whether the path is the primary path of the space
	// IMPORTANT: to allow DB enforcement of at most one primary path per repo/space
	// we have a unique index on spaceID + IsPrimary and set IsPrimary to true
	// for primary paths and to nil for non-primary paths.
	IsPrimary null.Bool `db:"space_path_is_primary"`
	// ParentID is null for segments of root (top-level) spaces.
	ParentID null.Int `db:"space_path_parent_id"`
	SpaceID  int64    `db:"space_path_space_id"`
	CreatedBy int64   `db:"space_path_created_by"`
	Created   int64   `db:"space_path_created"`
	Updated   int64   `db:"space_path_updated"`
}
const (
	// spacePathColumns is the column list shared by all space path queries;
	// its order must match the fields scanned into spacePathSegment.
	spacePathColumns = `
		space_path_uid
		,space_path_uid_unique
		,space_path_is_primary
		,space_path_parent_id
		,space_path_space_id
		,space_path_created_by
		,space_path_created
		,space_path_updated`

	// spacePathSelectBase is the shared SELECT prefix for space path queries.
	spacePathSelectBase = `
	SELECT` + spacePathColumns + `
	FROM space_paths`
)
// InsertSegment inserts a space path segment into the space_paths table and
// populates segment.ID with the generated primary key.
// (NOTE: despite the previous comment, it does not return the full path.)
func (s *SpacePathStore) InsertSegment(ctx context.Context, segment *types.SpacePathSegment) error {
	const sqlQuery = `
		INSERT INTO space_paths (
			space_path_uid
			,space_path_uid_unique
			,space_path_is_primary
			,space_path_parent_id
			,space_path_space_id
			,space_path_created_by
			,space_path_created
			,space_path_updated
		) values (
			:space_path_uid
			,:space_path_uid_unique
			,:space_path_is_primary
			,:space_path_parent_id
			,:space_path_space_id
			,:space_path_created_by
			,:space_path_created
			,:space_path_updated
		) RETURNING space_path_id`

	db := dbtx.GetAccessor(ctx, s.db)

	// Bind the named parameters from the internal representation (which applies
	// the identifier transformation and null-handling for IsPrimary/ParentID).
	query, arg, err := db.BindNamed(sqlQuery, s.mapToInternalSpacePathSegment(segment))
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind path segment object")
	}

	if err = db.QueryRowContext(ctx, query, arg...).Scan(&segment.ID); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Insert query failed")
	}

	return nil
}
// FindPrimaryBySpaceID returns the primary path of the space with the given ID.
// It walks the parent chain one query per ancestor and concatenates the
// segment identifiers into the full path value.
func (s *SpacePathStore) FindPrimaryBySpaceID(ctx context.Context, spaceID int64) (*types.SpacePath, error) {
	sqlQuery := spacePathSelectBase + `
		where space_path_space_id = $1 AND space_path_is_primary = TRUE`

	db := dbtx.GetAccessor(ctx, s.db)

	dst := new(spacePathSegment)
	path := ""
	nextSpaceID := null.IntFrom(spaceID)
	// Walk up the tree: each primary segment points at its parent via ParentID;
	// the root segment has a null parent, which terminates the loop.
	for nextSpaceID.Valid {
		err := db.GetContext(ctx, dst, sqlQuery, nextSpaceID.Int64)
		if err != nil {
			return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find primary segment for %d", nextSpaceID.Int64)
		}

		path = paths.Concatenate(dst.Identifier, path)
		nextSpaceID = dst.ParentID
	}

	return &types.SpacePath{
		SpaceID:   spaceID,
		Value:     path,
		IsPrimary: true,
	}, nil
}
// FindByPath resolves a space path (e.g. "root/child/grandchild") to a
// types.SpacePath by looking up each segment in order, top-down.
// The returned path uses the original (untransformed) segment identifiers and
// is marked primary only if every resolved segment is primary.
func (s *SpacePathStore) FindByPath(ctx context.Context, path string) (*types.SpacePath, error) {
	const sqlQueryNoParent = spacePathSelectBase + ` WHERE space_path_uid_unique = $1 AND space_path_parent_id IS NULL`
	const sqlQueryParent = spacePathSelectBase + ` WHERE space_path_uid_unique = $1 AND space_path_parent_id = $2`

	db := dbtx.GetAccessor(ctx, s.db)
	segment := new(spacePathSegment)

	segmentIdentifiers := paths.Segments(path)
	if len(segmentIdentifiers) == 0 {
		return nil, fmt.Errorf("path with no segments was passed '%s'", path)
	}

	var err error
	var parentID int64
	originalPath := ""
	isPrimary := true
	for i, segmentIdentifier := range segmentIdentifiers {
		// The transformation may differ for the first (top-level) segment.
		uniqueSegmentIdentifier := s.spacePathTransformation(segmentIdentifier, i == 0)

		// parentID == 0 means "no parent resolved yet", i.e. the top-level lookup.
		if parentID == 0 {
			err = db.GetContext(ctx, segment, sqlQueryNoParent, uniqueSegmentIdentifier)
		} else {
			err = db.GetContext(ctx, segment, sqlQueryParent, uniqueSegmentIdentifier, parentID)
		}
		if err != nil {
			return nil, database.ProcessSQLErrorf(
				ctx,
				err,
				"Failed to find segment for '%s' in '%s'",
				uniqueSegmentIdentifier,
				path,
			)
		}

		originalPath = paths.Concatenate(originalPath, segment.Identifier)
		// The resolved segment's space becomes the parent for the next lookup.
		parentID = segment.SpaceID
		// A single non-primary segment makes the whole path non-primary.
		isPrimary = isPrimary && segment.IsPrimary.ValueOrZero()
	}

	return &types.SpacePath{
		Value:     originalPath,
		IsPrimary: isPrimary,
		SpaceID:   segment.SpaceID,
	}, nil
}
// DeletePrimarySegment deletes the primary segment of the space.
func (s *SpacePathStore) DeletePrimarySegment(ctx context.Context, spaceID int64) error {
	const sqlQuery = `
		DELETE FROM space_paths
		WHERE space_path_space_id = $1 AND space_path_is_primary = TRUE`

	accessor := dbtx.GetAccessor(ctx, s.db)

	_, err := accessor.ExecContext(ctx, sqlQuery, spaceID)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "the delete query failed")
	}

	return nil
}
// DeletePathsAndDescendandPaths deletes all space paths reachable from spaceID including itself.
// It uses a recursive CTE: starting from the paths of spaceID itself, it repeatedly
// collects paths whose parent is any already-collected space, then deletes the union.
func (s *SpacePathStore) DeletePathsAndDescendandPaths(ctx context.Context, spaceID int64) error {
	const sqlQuery = `WITH RECURSIVE DescendantPaths AS (
		SELECT space_path_id, space_path_space_id, space_path_parent_id
		FROM space_paths
		WHERE space_path_space_id = $1

		UNION

		SELECT sp.space_path_id, sp.space_path_space_id, sp.space_path_parent_id
		FROM space_paths sp
		JOIN DescendantPaths dp ON sp.space_path_parent_id = dp.space_path_space_id
	)
	DELETE FROM space_paths
	WHERE space_path_id IN (SELECT space_path_id FROM DescendantPaths);`

	db := dbtx.GetAccessor(ctx, s.db)

	if _, err := db.ExecContext(ctx, sqlQuery, spaceID); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "the delete query failed")
	}

	return nil
}
// mapToInternalSpacePathSegment converts the external segment type into its
// internal DB representation, applying the identifier transformation and the
// null-handling required by the unique indexes.
func (s *SpacePathStore) mapToInternalSpacePathSegment(p *types.SpacePathSegment) *spacePathSegment {
	isTopLevel := p.ParentID == 0

	out := &spacePathSegment{
		ID:               p.ID,
		Identifier:       p.Identifier,
		IdentifierUnique: s.spacePathTransformation(p.Identifier, isTopLevel),
		SpaceID:          p.SpaceID,
		CreatedBy:        p.CreatedBy,
		Created:          p.Created,
		Updated:          p.Updated,
	}

	// only set IsPrimary to a value if it's true (Unique Index doesn't allow multiple false, hence keep it nil)
	if p.IsPrimary {
		out.IsPrimary = null.BoolFrom(true)
	}
	if !isTopLevel {
		out.ParentID = null.IntFrom(p.ParentID)
	}

	return out
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/branch.go | app/store/database/branch.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/git/sha"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/jmoiron/sqlx"
)
const (
	// branchColumns is the column list shared by all branch queries;
	// its order must match the fields of the branch struct.
	branchColumns = `
	branch_repo_id
	,branch_name
	,branch_sha
	,branch_created_by
	,branch_created
	,branch_updated_by
	,branch_updated
	,branch_last_created_pullreq_id`

	// branchSelectBase is the shared SELECT prefix for branch queries.
	branchSelectBase = `
	SELECT` + branchColumns + `
	FROM branches`
)
// branch represents the internal database model for a branch.
type branch struct {
	RepoID    int64  `db:"branch_repo_id"`
	Name      string `db:"branch_name"`
	SHA       string `db:"branch_sha"`
	CreatedBy int64  `db:"branch_created_by"`
	Created   int64  `db:"branch_created"`
	UpdatedBy int64  `db:"branch_updated_by"`
	Updated   int64  `db:"branch_updated"`
	// LastCreatedPullReqID is nullable, hence the pointer.
	LastCreatedPullReqID *int64 `db:"branch_last_created_pullreq_id"`
}
// branchStore implements store.BranchStore interface to manage branch data.
type branchStore struct {
	db *sqlx.DB
}

// NewBranchStore returns a new branchStore that implements store.BranchStore.
func NewBranchStore(db *sqlx.DB) store.BranchStore {
	return &branchStore{
		db: db,
	}
}
// ToType converts the internal branch type to the external Branch type.
func (b *branch) ToType() types.BranchTable {
	// DB rows are expected to hold valid SHAs, so the parse error is deliberately dropped.
	parsedSHA, _ := sha.New(b.SHA)

	out := types.BranchTable{
		Name:                 b.Name,
		SHA:                  parsedSHA,
		LastCreatedPullReqID: b.LastCreatedPullReqID,
	}
	out.CreatedBy = b.CreatedBy
	out.Created = b.Created
	out.UpdatedBy = b.UpdatedBy
	out.Updated = b.Updated

	return out
}
// mapInternalBranch converts the external branch type to the internal branch type.
func mapInternalBranch(src *types.BranchTable, repoID int64) branch {
	out := branch{
		RepoID:               repoID,
		Name:                 src.Name,
		SHA:                  src.SHA.String(),
		LastCreatedPullReqID: src.LastCreatedPullReqID,
	}
	out.CreatedBy = src.CreatedBy
	out.Created = src.Created
	out.UpdatedBy = src.UpdatedBy
	out.Updated = src.Updated

	return out
}
// FindBranchesWithoutOpenPRs finds branches without open pull requests for a repository,
// or with closed pull requests whose SHA doesn't match the provided SHA.
//
// Branches are matched against pull requests either by source repo + source branch
// name (when the branch has a last created pull request recorded) or directly via
// the recorded last created pull request ID.
func (s *branchStore) FindBranchesWithoutOpenPRs(
	ctx context.Context,
	repoID int64,
	principalID int64,
	cutOffTime int64,
	limit uint64,
	sha string,
) ([]types.BranchTable, error) {
	db := dbtx.GetAccessor(ctx, s.db)

	// todo: handle complicated scenario whenever pull request is merged and someone pushes to same branch
	// it gets complicated as with squash and merge the sha will change in main and pullreq branch causing
	// isAncestor sha match
	//
	// FIX: the join previously used "branch_last_created_pullreq_id != NULL", which in
	// SQL always evaluates to NULL (never true) and silently disabled the name-based
	// match arm. "IS NOT NULL" is the correct null test.
	sqlQuery := branchSelectBase + `
	LEFT JOIN pullreqs ON
	((branch_repo_id = pullreq_source_repo_id
	AND branch_name = pullreq_source_branch
	AND branch_last_created_pullreq_id IS NOT NULL) OR (branch_last_created_pullreq_id = pullreq_id))
	WHERE branch_repo_id = $1
	AND branch_updated_by = $2
	AND branch_updated > $3
	AND (pullreq_id IS NULL OR (pullreq_state != 'open' AND pullreq_source_sha != branch_sha))
	AND branch_sha != $4
	ORDER BY branch_updated DESC
	LIMIT $5
	`

	dst := make([]*branch, 0, limit)
	err := db.SelectContext(
		ctx,
		&dst,
		sqlQuery,
		repoID,
		principalID,
		cutOffTime,
		sha,
		limit,
	)
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find branches without PRs")
	}

	result := make([]types.BranchTable, len(dst))
	for i, b := range dst {
		result[i] = b.ToType()
	}
	return result, nil
}
// Find returns the branch identified by repo ID and branch name.
func (s *branchStore) Find(ctx context.Context, repoID int64, name string) (*types.BranchTable, error) {
	const sqlQuery = branchSelectBase + `
	WHERE branch_repo_id = $1 AND branch_name = $2
	`

	accessor := dbtx.GetAccessor(ctx, s.db)

	var row branch
	err := accessor.GetContext(ctx, &row, sqlQuery, repoID, name)
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find branch by name")
	}

	out := row.ToType()
	return &out, nil
}
// Delete deletes a branch by repo ID and branch name.
func (s *branchStore) Delete(ctx context.Context, repoID int64, name string) error {
	const sqlQuery = `
	DELETE FROM branches
	WHERE branch_repo_id = $1 AND branch_name = $2
	`

	accessor := dbtx.GetAccessor(ctx, s.db)

	_, err := accessor.ExecContext(ctx, sqlQuery, repoID, name)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to delete branch")
	}

	return nil
}
// Upsert creates a new branch or updates an existing one if it already exists.
// On conflict (same repo + name) the SHA and update metadata are overwritten;
// the last created pull request ID is only overwritten when the incoming value
// is non-null (COALESCE keeps the stored value otherwise).
func (s *branchStore) Upsert(ctx context.Context, repoID int64, branch *types.BranchTable) error {
	db := dbtx.GetAccessor(ctx, s.db)
	const sqlQuery = `
	INSERT INTO branches (
		branch_repo_id
		,branch_name
		,branch_sha
		,branch_created_by
		,branch_created
		,branch_updated_by
		,branch_updated
		,branch_last_created_pullreq_id
	) VALUES (
		:branch_repo_id
		,:branch_name
		,:branch_sha
		,:branch_created_by
		,:branch_created
		,:branch_updated_by
		,:branch_updated
		,:branch_last_created_pullreq_id
	) ON CONFLICT (branch_repo_id, branch_name) DO UPDATE SET
		branch_sha = EXCLUDED.branch_sha
		,branch_updated_by = EXCLUDED.branch_updated_by
		,branch_updated = EXCLUDED.branch_updated
		,branch_last_created_pullreq_id = COALESCE(EXCLUDED.branch_last_created_pullreq_id,
		branches.branch_last_created_pullreq_id)
	`

	query, args, err := db.BindNamed(sqlQuery, mapInternalBranch(branch, repoID))
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind branch parameters")
	}

	_, err = db.ExecContext(ctx, query, args...)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to upsert branch")
	}

	return nil
}
// UpdateLastPR updates the last created pull request ID for a branch.
// Passing nil clears the stored value.
func (s *branchStore) UpdateLastPR(
	ctx context.Context,
	repoID int64,
	branchName string,
	pullReqID *int64,
) error {
	const sqlQuery = `
	UPDATE branches
	SET branch_last_created_pullreq_id = $1
	WHERE branch_repo_id = $2 AND branch_name = $3
	`

	accessor := dbtx.GetAccessor(ctx, s.db)

	if _, err := accessor.ExecContext(ctx, sqlQuery, pullReqID, repoID, branchName); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to update branch's last created pull request ID")
	}

	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/repo_test.go | app/store/database/repo_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database_test
import (
"context"
"encoding/json"
"strconv"
"testing"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/app/store/database"
"github.com/harness/gitness/types"
)
const (
	// numTestRepos is the number of repositories created per space in these tests.
	numTestRepos = 10
	// repoSize is the fixed size assigned to the repo used by TestDatabase_GetSize.
	repoSize = int64(100)
)
// TestDatabase_GetSize verifies that RepoStore.GetSize returns the size the
// repo was created with (and only that size).
func TestDatabase_GetSize(t *testing.T) {
	db, teardown := setupDB(t)
	defer teardown()

	principalStore, spaceStore, spacePathStore, repoStore := setupStores(t, db)

	ctx := context.Background()

	// Fixture: one user, one space, one repo of size repoSize.
	createUser(ctx, t, principalStore)
	createSpace(ctx, t, spaceStore, spacePathStore, userID, 1, 0)
	repoID := int64(1)
	createRepo(ctx, t, repoStore, repoID, 1, repoSize)

	tests := []struct {
		name       string
		Size       int64
		areSizesEq bool
	}{
		{
			name:       "size equal to repo size",
			Size:       repoSize,
			areSizesEq: true,
		},
		{
			name:       "size less than repo size",
			Size:       repoSize / 2,
			areSizesEq: false,
		},
		{
			name:       "size greater than repo size",
			Size:       repoSize * 2,
			areSizesEq: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			size, err := repoStore.GetSize(ctx, repoID)
			if err != nil {
				t.Errorf("GetSize() error = %v, want error = %v", err, nil)
			}
			areSizesEq := size == tt.Size
			if areSizesEq != tt.areSizesEq {
				t.Errorf("size == tt.Size = %v, want %v", areSizesEq, tt.areSizesEq)
			}
		})
	}
}
// TestDatabase_Count verifies that a non-recursive repo count over a single
// space returns exactly the number of repos created in it.
func TestDatabase_Count(t *testing.T) {
	db, teardown := setupDB(t)
	defer teardown()

	principalStore, spaceStore, spacePathStore, repoStore := setupStores(t, db)

	ctx := context.Background()

	createUser(ctx, t, principalStore)
	createSpace(ctx, t, spaceStore, spacePathStore, userID, 1, 0)
	numRepos := createRepos(ctx, t, repoStore, 0, numTestRepos, 1)

	count, err := repoStore.Count(ctx, 1, &types.RepoFilter{})
	if err != nil {
		t.Fatalf("failed to count repos %v", err)
	}

	if count != numRepos {
		t.Errorf("count = %v, want %v", count, numRepos)
	}
}
// TestDatabase_CountAll verifies that a recursive count from the root space
// includes repos created across a whole tree of nested spaces.
func TestDatabase_CountAll(t *testing.T) {
	db, teardown := setupDB(t)
	defer teardown()

	principalStore, spaceStore, spacePathStore, repoStore := setupStores(t, db)

	ctx := context.Background()

	createUser(ctx, t, principalStore)
	numSpaces := createNestedSpaces(ctx, t, spaceStore, spacePathStore)

	// Spread repos across every space in the tree; numRepos doubles as the
	// running repo-ID offset so IDs stay unique across spaces.
	var numRepos int64
	for i := 1; i <= numSpaces; i++ {
		numRepos += createRepos(ctx, t, repoStore, numRepos, numTestRepos/2, int64(i))
	}

	count, err := repoStore.Count(ctx, 1, &types.RepoFilter{Recursive: true})
	if err != nil {
		t.Fatalf("failed to count repos %v", err)
	}

	if count != numRepos {
		t.Errorf("count = %v, want %v", count, numRepos)
	}
}
// TestDatabase_List verifies that listing repos of a single space returns every
// repo that was created in it.
func TestDatabase_List(t *testing.T) {
	db, teardown := setupDB(t)
	defer teardown()

	principalStore, spaceStore, spacePathStore, repoStore := setupStores(t, db)

	ctx := context.Background()

	createUser(ctx, t, principalStore)
	createSpace(ctx, t, spaceStore, spacePathStore, userID, 1, 0)
	numRepos := createRepos(ctx, t, repoStore, 0, numTestRepos, 1)

	repos, err := repoStore.List(ctx, 1, &types.RepoFilter{})
	if err != nil {
		// fixed copy-pasted failure message: this test lists repos, it doesn't count them
		t.Fatalf("failed to list repos %v", err)
	}

	lenRepos := int64(len(repos))
	if lenRepos != numRepos {
		t.Errorf("count = %v, want %v", lenRepos, numRepos)
	}
}
// TestDatabase_ListAll verifies that a recursive list from the root space
// returns repos from the entire nested-space tree.
func TestDatabase_ListAll(t *testing.T) {
	db, teardown := setupDB(t)
	defer teardown()

	principalStore, spaceStore, spacePathStore, repoStore := setupStores(t, db)

	ctx := context.Background()

	createUser(ctx, t, principalStore)
	numSpaces := createNestedSpaces(ctx, t, spaceStore, spacePathStore)

	// numRepos doubles as the running repo-ID offset so IDs stay unique across spaces.
	var numRepos int64
	for i := 1; i <= numSpaces; i++ {
		numRepos += createRepos(ctx, t, repoStore, numRepos, numTestRepos/2, int64(i))
	}

	// Size is set large enough to return all repos in a single page.
	repos, err := repoStore.List(ctx, 1,
		&types.RepoFilter{Size: numSpaces * numTestRepos, Recursive: true})
	if err != nil {
		// fixed copy-pasted failure message: this test lists repos, it doesn't count them
		t.Fatalf("failed to list repos %v", err)
	}

	lenRepos := int64(len(repos))
	if lenRepos != numRepos {
		t.Errorf("count = %v, want %v", lenRepos, numRepos)
	}
}
// createRepo creates a single test repository with the given id, parent space
// and size; it fails the test on error.
func createRepo(
	ctx context.Context,
	t *testing.T,
	repoStore *database.RepoStore,
	id int64,
	spaceID int64,
	size int64,
) {
	t.Helper()

	ident := "repo_" + strconv.FormatInt(id, 10)
	newRepo := types.Repository{
		ID:         id,
		Identifier: ident,
		GitUID:     ident,
		ParentID:   spaceID,
		Size:       size,
		Tags:       json.RawMessage{},
	}

	err := repoStore.Create(ctx, &newRepo)
	if err != nil {
		t.Fatalf("failed to create repo %v", err)
	}
}
// createRepos creates numReposToCreate repos in the given space and returns
// how many were created. numCreatedRepos is the id offset from earlier calls,
// keeping repo ids unique across spaces.
func createRepos(
	ctx context.Context,
	t *testing.T,
	repoStore *database.RepoStore,
	numCreatedRepos int64,
	numReposToCreate int64,
	spaceID int64,
) int64 {
	t.Helper()

	var created int64
	for created < numReposToCreate {
		// numCreatedRepos+created ensures the uniqueness of the repo id
		createRepo(ctx, t, repoStore, numCreatedRepos+created, spaceID, 0)
		created++
	}
	return created
}
// createNestedSpaces builds the space tree described by createSpaceTree and
// returns the total number of spaces created (including the root space 1).
func createNestedSpaces(
	ctx context.Context,
	t *testing.T,
	spaceStore *database.SpaceStore,
	spacePathStore store.SpacePathStore,
) int {
	t.Helper()

	spaceTree, numSpaces := createSpaceTree()
	// Root space (id 1) has no parent.
	createSpace(ctx, t, spaceStore, spacePathStore, userID, 1, 0)

	// Create children level by level, parent before child.
	for i := 1; i < numSpaces; i++ {
		parentID := int64(i)
		for _, spaceID := range spaceTree[parentID] {
			createSpace(ctx, t, spaceStore, spacePathStore, userID, spaceID, parentID)
		}
	}
	return numSpaces
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/usergroup.go | app/store/database/usergroup.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
gitnessAppStore "github.com/harness/gitness/app/store"
"github.com/harness/gitness/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/Masterminds/squirrel"
"github.com/jmoiron/sqlx"
)
// Compile-time check that UserGroupStore satisfies the store interface.
var _ gitnessAppStore.UserGroupStore = (*UserGroupStore)(nil)

// NewUserGroupStore returns a new UserGroupStore backed by the given database.
func NewUserGroupStore(db *sqlx.DB) *UserGroupStore {
	return &UserGroupStore{
		db: db,
	}
}

// UserGroupStore implements gitnessAppStore.UserGroupStore backed by a relational database.
type UserGroupStore struct {
	db *sqlx.DB
}
// UserGroup is the internal database model for a user group row.
type UserGroup struct {
	SpaceID     int64  `db:"usergroup_space_id"`
	ID          int64  `db:"usergroup_id"`
	Identifier  string `db:"usergroup_identifier"`
	Name        string `db:"usergroup_name"`
	Description string `db:"usergroup_description"`
	Created     int64  `db:"usergroup_created"`
	Updated     int64  `db:"usergroup_updated"`
	Scope       int64  `db:"usergroup_scope"`
}
const (
	// userGroupColumns is the column list shared by all usergroup queries.
	userGroupColumns = `
	usergroup_id
	,usergroup_identifier
	,usergroup_name
	,usergroup_description
	,usergroup_space_id
	,usergroup_created
	,usergroup_updated
	,usergroup_scope`

	// userGroupSelectBase is the shared SELECT prefix for usergroup queries.
	userGroupSelectBase = `SELECT ` + userGroupColumns + ` FROM usergroups`
)
// FindByIdentifier returns a usergroup by its identifier.
func (s *UserGroupStore) FindByIdentifier(
	ctx context.Context,
	spaceID int64,
	identifier string,
) (*types.UserGroup, error) {
	const sqlQuery = userGroupSelectBase + ` WHERE usergroup_identifier = $1 AND usergroup_space_id = $2`

	accessor := dbtx.GetAccessor(ctx, s.db)

	record := new(UserGroup)
	err := accessor.GetContext(ctx, record, sqlQuery, identifier, spaceID)
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find usergroup by identifier %s", identifier)
	}

	return mapUserGroup(record), nil
}
// Find returns a usergroup by its id.
func (s *UserGroupStore) Find(ctx context.Context, id int64) (*types.UserGroup, error) {
	const sqlQuery = userGroupSelectBase + ` WHERE usergroup_id = $1`

	accessor := dbtx.GetAccessor(ctx, s.db)

	record := new(UserGroup)
	err := accessor.GetContext(ctx, record, sqlQuery, id)
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find usergroup by id %d", id)
	}

	return mapUserGroup(record), nil
}
// Map returns the usergroups for the given ids keyed by id.
// It returns store.ErrResourceNotFound when none of the ids resolve.
func (s *UserGroupStore) Map(ctx context.Context, ids []int64) (map[int64]*types.UserGroup, error) {
	found, err := s.FindManyByIDs(ctx, ids)
	if err != nil {
		return nil, err
	}
	if len(found) == 0 {
		return nil, store.ErrResourceNotFound
	}

	byID := make(map[int64]*types.UserGroup, len(found))
	for _, group := range found {
		byID[group.ID] = group
	}
	return byID, nil
}
// FindManyByIDs returns the usergroups matching the given ids, keyed by id.
// Ids with no matching row are simply absent from the result.
func (s *UserGroupStore) FindManyByIDs(
	ctx context.Context,
	ids []int64,
) (map[int64]*types.UserGroup, error) {
	sqlQuery, params, err := database.Builder.
		Select(userGroupColumns).
		From("usergroups").
		Where(squirrel.Eq{"usergroup_id": ids}).
		ToSql()
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to generate find many usergroups by ids query")
	}

	accessor := dbtx.GetAccessor(ctx, s.db)

	var rows []*UserGroup
	if err := accessor.SelectContext(ctx, &rows, sqlQuery, params...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "find many by ids for usergroups query failed")
	}

	byID := make(map[int64]*types.UserGroup, len(rows))
	for _, row := range rows {
		byID[row.ID] = mapUserGroup(row)
	}
	return byID, nil
}
// FindManyByIdentifiersAndSpaceID returns the usergroups in the given space
// whose identifiers are in the provided list.
func (s *UserGroupStore) FindManyByIdentifiersAndSpaceID(
	ctx context.Context,
	identifiers []string,
	spaceID int64,
) ([]*types.UserGroup, error) {
	sqlQuery, params, err := database.Builder.
		Select(userGroupColumns).
		From("usergroups").
		Where(squirrel.Eq{"usergroup_identifier": identifiers}).
		Where("usergroup_space_id = ?", spaceID).
		ToSql()
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to generate find many usergroups query")
	}

	accessor := dbtx.GetAccessor(ctx, s.db)

	rows := []*UserGroup{}
	if err := accessor.SelectContext(ctx, &rows, sqlQuery, params...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "find many by identifiers for usergroups query failed")
	}

	out := make([]*types.UserGroup, len(rows))
	for i, row := range rows {
		out[i] = mapUserGroup(row)
	}
	return out, nil
}
// Create creates a usergroup in the database and populates userGroup.ID with
// the generated primary key.
func (s *UserGroupStore) Create(
	ctx context.Context,
	spaceID int64,
	userGroup *types.UserGroup,
) error {
	const sqlQuery = `
	INSERT INTO usergroups (
		usergroup_identifier
		,usergroup_name
		,usergroup_description
		,usergroup_space_id
		,usergroup_created
		,usergroup_updated
		,usergroup_scope
	) values (
		:usergroup_identifier
		,:usergroup_name
		,:usergroup_description
		,:usergroup_space_id
		,:usergroup_created
		,:usergroup_updated
		,:usergroup_scope
	) RETURNING usergroup_id`

	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(sqlQuery, mapInternalUserGroup(userGroup, spaceID))
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind usergroup object")
	}

	if err = db.QueryRowContext(ctx, query, arg...).Scan(&userGroup.ID); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to insert usergroup")
	}
	return nil
}
// CreateOrUpdate inserts a usergroup or, on a conflict with an existing group
// in the same space (case-insensitive identifier match), updates its name,
// description and updated timestamp. Unlike Create, it does not populate
// userGroup.ID.
func (s *UserGroupStore) CreateOrUpdate(
	ctx context.Context,
	spaceID int64,
	userGroup *types.UserGroup,
) error {
	const sqlQuery = `
	INSERT INTO usergroups (
		usergroup_identifier
		,usergroup_name
		,usergroup_description
		,usergroup_space_id
		,usergroup_created
		,usergroup_updated
		,usergroup_scope
	) values (
		:usergroup_identifier
		,:usergroup_name
		,:usergroup_description
		,:usergroup_space_id
		,:usergroup_created
		,:usergroup_updated
		,:usergroup_scope
	) ON CONFLICT (usergroup_space_id, LOWER(usergroup_identifier)) DO UPDATE SET
		usergroup_name = EXCLUDED.usergroup_name,
		usergroup_description = EXCLUDED.usergroup_description,
		usergroup_updated = EXCLUDED.usergroup_updated
	`

	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(sqlQuery, mapInternalUserGroup(userGroup, spaceID))
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind usergroup object")
	}

	if _, err = db.ExecContext(ctx, query, arg...); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to insert usergroup")
	}
	return nil
}
// mapUserGroup converts the internal DB model into the external usergroup type.
func mapUserGroup(row *UserGroup) *types.UserGroup {
	out := &types.UserGroup{
		ID:         row.ID,
		Identifier: row.Identifier,
		SpaceID:    row.SpaceID,
		Scope:      row.Scope,
	}
	out.Name = row.Name
	out.Description = row.Description
	out.Created = row.Created
	out.Updated = row.Updated
	return out
}
// mapInternalUserGroup converts the external usergroup type into the internal
// DB model, binding it to the given space.
func mapInternalUserGroup(src *types.UserGroup, spaceID int64) *UserGroup {
	out := &UserGroup{
		SpaceID:    spaceID,
		ID:         src.ID,
		Identifier: src.Identifier,
		Scope:      src.Scope,
	}
	out.Name = src.Name
	out.Description = src.Description
	out.Created = src.Created
	out.Updated = src.Updated
	return out
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/principal_info.go | app/store/database/principal_info.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/Masterminds/squirrel"
"github.com/jmoiron/sqlx"
)
// Compile-time check that PrincipalInfoView satisfies store.PrincipalInfoView.
var _ store.PrincipalInfoView = (*PrincipalInfoView)(nil)

// NewPrincipalInfoView returns a new PrincipalInfoView.
// It's used by the principal info cache.
func NewPrincipalInfoView(db *sqlx.DB) *PrincipalInfoView {
	return &PrincipalInfoView{
		db: db,
	}
}

// PrincipalInfoView provides read-only access to principal info rows.
type PrincipalInfoView struct {
	db *sqlx.DB
}
const (
	// principalInfoCommonColumns is the column list for principal info queries.
	// IMPORTANT: the order must match the Scan calls in Find/FindMany
	// (id, uid, email, display_name, type, created, updated).
	principalInfoCommonColumns = `
	principal_id
	,principal_uid
	,principal_email
	,principal_display_name
	,principal_type
	,principal_created
	,principal_updated`
)
// principalInfo is the internal database model for a principal info row.
type principalInfo struct {
	ID          int64              `db:"principal_id"`
	UID         string             `db:"principal_uid"`
	DisplayName string             `db:"principal_display_name"`
	Email       string             `db:"principal_email"`
	Type        enum.PrincipalType `db:"principal_type"`
	Created     int64              `db:"principal_created"`
	Updated     int64              `db:"principal_updated"`
}
// Find returns a single principal info object by id from the `principals` database table.
func (s *PrincipalInfoView) Find(ctx context.Context, id int64) (*types.PrincipalInfo, error) {
	const sqlQuery = `
	SELECT ` + principalInfoCommonColumns + `
	FROM principals
	WHERE principal_id = $1`

	db := dbtx.GetAccessor(ctx, s.db)

	v := db.QueryRowContext(ctx, sqlQuery, id)
	if err := v.Err(); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to find principal info")
	}

	info := &types.PrincipalInfo{}
	// Scan order must match principalInfoCommonColumns: note email precedes display_name.
	if err := v.Scan(&info.ID, &info.UID, &info.Email, &info.DisplayName,
		&info.Type, &info.Created, &info.Updated); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to scan principal info")
	}

	return info, nil
}
// FindMany returns several principal info objects by id from the `principals` database table.
// Ids with no matching row are silently omitted from the result.
func (s *PrincipalInfoView) FindMany(ctx context.Context, ids []int64) ([]*types.PrincipalInfo, error) {
	db := dbtx.GetAccessor(ctx, s.db)

	stmt := database.Builder.
		Select(principalInfoCommonColumns).
		From("principals").
		Where(squirrel.Eq{"principal_id": ids})

	sqlQuery, params, err := stmt.ToSql()
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to generate find many principal info SQL query")
	}

	rows, err := db.QueryContext(ctx, sqlQuery, params...)
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to query find many principal info")
	}
	defer func() {
		_ = rows.Close()
	}()

	result := make([]*types.PrincipalInfo, 0, len(ids))

	for rows.Next() {
		info := &types.PrincipalInfo{}
		// Scan order must match principalInfoCommonColumns: note email precedes display_name.
		err = rows.Scan(&info.ID, &info.UID, &info.Email, &info.DisplayName,
			&info.Type, &info.Created, &info.Updated)
		if err != nil {
			return nil, database.ProcessSQLErrorf(ctx, err, "failed to scan principal info")
		}

		result = append(result, info)
	}

	err = rows.Err()
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to read principal info data")
	}

	return result, nil
}
// mapToPrincipalInfo converts the internal DB model into the external principal info type.
func mapToPrincipalInfo(row *principalInfo) types.PrincipalInfo {
	info := types.PrincipalInfo{
		ID:   row.ID,
		UID:  row.UID,
		Type: row.Type,
	}
	info.DisplayName = row.DisplayName
	info.Email = row.Email
	info.Created = row.Created
	info.Updated = row.Updated
	return info
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/usage_metrics.go | app/store/database/usage_metrics.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"errors"
"time"
"github.com/harness/gitness/app/store"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/jmoiron/sqlx"
)
var _ store.UsageMetricStore = (*UsageMetricsStore)(nil)
// usageMetric is the internal database model for a per-space, per-date usage metric row.
type usageMetric struct {
	RootSpaceID int64 `db:"usage_metric_space_id"`
	Date        int64 `db:"usage_metric_date"`
	Created     int64 `db:"usage_metric_created"`
	Updated     int64 `db:"usage_metric_updated"`
	BandwidthOut int64 `db:"usage_metric_bandwidth_out"`
	BandwidthIn  int64 `db:"usage_metric_bandwidth_in"`
	StorageTotal int64 `db:"usage_metric_storage_total"`
	LFSStorageTotal int64 `db:"usage_metric_lfs_storage_total"`
	Pushes int64 `db:"usage_metric_pushes"`
	// Version is used for optimistic locking in Upsert (see getVersion).
	Version int64 `db:"usage_metric_version"`
}
// NewUsageMetricsStore returns a new UsageMetricsStore.
func NewUsageMetricsStore(db *sqlx.DB) *UsageMetricsStore {
	return &UsageMetricsStore{
		db: db,
	}
}

// UsageMetricsStore implements store.UsageMetrics backed by a relational database.
type UsageMetricsStore struct {
	db *sqlx.DB
}
// getVersion returns the current optimistic-lock version of the usage metric
// row for (rootSpaceID, date). It returns 0 when the row does not exist or
// the read fails, so the caller (Upsert) starts a fresh row at version 1; a
// transient read error therefore surfaces later as a version conflict and is
// retried by UpsertOptimistic.
func (s *UsageMetricsStore) getVersion(
	ctx context.Context,
	rootSpaceID int64,
	date int64,
) int64 {
	const sqlQuery = `
	SELECT
		 usage_metric_version
	FROM usage_metrics
	WHERE usage_metric_space_id = $1 AND usage_metric_date = $2
`

	// Use the transaction-aware accessor (as Upsert and List do) so the read
	// sees writes made inside any transaction carried by the context.
	db := dbtx.GetAccessor(ctx, s.db)

	var version int64
	if err := db.QueryRowContext(ctx, sqlQuery, rootSpaceID, date).Scan(&version); err != nil {
		return 0
	}
	return version
}
// Upsert inserts the usage metric row for (space, day) or, when the row
// already exists, adds the bandwidth/push deltas onto it. Storage totals are
// gauges rather than counters: they are overwritten instead of summed, and
// only when the incoming value is positive. The UPDATE is guarded by an
// optimistic version check; if a concurrent writer bumped the version first,
// zero rows are affected and gitness_store.ErrVersionConflict is returned so
// the caller can retry (see UpsertOptimistic).
func (s *UsageMetricsStore) Upsert(ctx context.Context, in *types.UsageMetric) error {
	sqlQuery := `
	INSERT INTO usage_metrics (
		usage_metric_space_id
		,usage_metric_date
		,usage_metric_created
		,usage_metric_updated
		,usage_metric_bandwidth_out
		,usage_metric_bandwidth_in
		,usage_metric_storage_total
		,usage_metric_lfs_storage_total
		,usage_metric_pushes
		,usage_metric_version
	) VALUES (
		:usage_metric_space_id
		,:usage_metric_date
		,:usage_metric_created
		,:usage_metric_updated
		,:usage_metric_bandwidth_out
		,:usage_metric_bandwidth_in
		,:usage_metric_storage_total
		,:usage_metric_lfs_storage_total
		,:usage_metric_pushes
		,:usage_metric_version
	)
	ON CONFLICT (usage_metric_space_id, usage_metric_date)
	DO UPDATE
	SET
		usage_metric_version = EXCLUDED.usage_metric_version
		,usage_metric_updated = EXCLUDED.usage_metric_updated
		,usage_metric_bandwidth_out = usage_metrics.usage_metric_bandwidth_out + EXCLUDED.usage_metric_bandwidth_out
		,usage_metric_bandwidth_in = usage_metrics.usage_metric_bandwidth_in + EXCLUDED.usage_metric_bandwidth_in
		,usage_metric_pushes = usage_metrics.usage_metric_pushes + EXCLUDED.usage_metric_pushes
	`
	// Storage gauges are only written when a fresh (positive) reading was
	// supplied; otherwise the previous value is preserved.
	if in.StorageTotal > 0 {
		sqlQuery += `
		,usage_metric_storage_total = EXCLUDED.usage_metric_storage_total`
	}
	if in.LFSStorageTotal > 0 {
		sqlQuery += `
		,usage_metric_lfs_storage_total = EXCLUDED.usage_metric_lfs_storage_total`
	}
	// Optimistic-lock guard: only apply the update if nobody else bumped the
	// version since getVersion() was read below.
	sqlQuery += `
	WHERE usage_metrics.usage_metric_version = EXCLUDED.usage_metric_version - 1`

	db := dbtx.GetAccessor(ctx, s.db)

	now := time.Now()
	// Default to today's bucket; an explicit in.Date overrides it.
	today := s.Date(now)
	if !in.Date.IsZero() {
		today = s.Date(in.Date)
	}

	query, args, err := db.BindNamed(sqlQuery, usageMetric{
		RootSpaceID:     in.RootSpaceID,
		Date:            today,
		Created:         now.UnixMilli(),
		Updated:         now.UnixMilli(),
		BandwidthOut:    in.BandwidthOut,
		BandwidthIn:     in.BandwidthIn,
		StorageTotal:    in.StorageTotal,
		LFSStorageTotal: in.LFSStorageTotal,
		Pushes:          in.Pushes,
		// Next expected version; 1 when no row exists yet.
		Version: s.getVersion(ctx, in.RootSpaceID, today) + 1,
	})
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "failed to bind query")
	}

	result, err := db.ExecContext(ctx, query, args...)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "failed to upsert usage_metric")
	}

	n, err := result.RowsAffected()
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "failed to fetch number of rows affected")
	}

	// Zero rows means the version guard rejected the update: lost the race.
	if n == 0 {
		return gitness_store.ErrVersionConflict
	}

	return nil
}
// UpsertOptimistic repeatedly applies Upsert until it either succeeds or
// fails with something other than a version conflict; conflicts are simply
// retried with a freshly read version.
func (s *UsageMetricsStore) UpsertOptimistic(
	ctx context.Context,
	in *types.UsageMetric,
) error {
	for {
		switch err := s.Upsert(ctx, in); {
		case err == nil:
			return nil
		case !errors.Is(err, gitness_store.ErrVersionConflict):
			return err
		}
		// version conflict: loop and try again
	}
}
// GetMetrics returns the aggregated usage metrics of one root space for the
// given time range (Unix milliseconds, truncated to day granularity,
// inclusive): bandwidth and push counters are summed across days, storage
// gauges are averaged. Days without rows contribute nothing; an empty range
// yields zeros via COALESCE.
func (s *UsageMetricsStore) GetMetrics(
	ctx context.Context,
	rootSpaceID int64,
	start int64,
	end int64,
) (*types.UsageMetric, error) {
	const sqlQuery = `
	SELECT
		COALESCE(SUM(usage_metric_bandwidth_out), 0) AS usage_metric_bandwidth_out,
		COALESCE(SUM(usage_metric_bandwidth_in), 0) AS usage_metric_bandwidth_in,
		COALESCE(AVG(usage_metric_storage_total), 0) AS usage_metric_storage_total,
		COALESCE(AVG(usage_metric_lfs_storage_total), 0) AS usage_metric_lfs_storage_total,
		COALESCE(SUM(usage_metric_pushes), 0) AS usage_metric_pushes
	FROM usage_metrics
	WHERE
		usage_metric_space_id = $1 AND
		usage_metric_date BETWEEN $2 AND $3`

	result := &types.UsageMetric{
		RootSpaceID: rootSpaceID,
	}

	startTime := time.UnixMilli(start)
	endTime := time.UnixMilli(end)

	// Use the transaction-aware accessor (consistent with List/Upsert) so the
	// query participates in any transaction carried by the context; the
	// original read went straight to s.db.
	db := dbtx.GetAccessor(ctx, s.db)

	err := db.QueryRowContext(
		ctx,
		sqlQuery,
		rootSpaceID,
		s.Date(startTime),
		s.Date(endTime),
	).Scan(
		&result.BandwidthOut,
		&result.BandwidthIn,
		&result.StorageTotal,
		&result.LFSStorageTotal,
		&result.Pushes,
	)
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to get metric")
	}

	return result, nil
}
// List returns per-space aggregated usage metrics for every root space with
// data in the given time range (Unix milliseconds, truncated to days),
// ordered by descending usage.
func (s *UsageMetricsStore) List(
	ctx context.Context,
	start int64,
	end int64,
) ([]types.UsageMetric, error) {
	const sqlQuery = `
	SELECT
		usage_metric_space_id,
		COALESCE(SUM(usage_metric_bandwidth_out), 0) AS usage_metric_bandwidth_out,
		COALESCE(SUM(usage_metric_bandwidth_in), 0) AS usage_metric_bandwidth_in,
		COALESCE(AVG(usage_metric_storage_total), 0) AS usage_metric_storage_total,
		COALESCE(AVG(usage_metric_lfs_storage_total), 0) AS usage_metric_lfs_storage_total,
		COALESCE(SUM(usage_metric_pushes), 0) AS usage_metric_pushes
	FROM usage_metrics
	WHERE
		usage_metric_date BETWEEN $1 AND $2
	GROUP BY usage_metric_space_id
	ORDER BY
		usage_metric_bandwidth_out DESC,
		usage_metric_bandwidth_in DESC,
		usage_metric_storage_total DESC,
		usage_metric_lfs_storage_total DESC,
		usage_metric_pushes DESC`

	db := dbtx.GetAccessor(ctx, s.db)

	rows, err := db.QueryContext(
		ctx,
		sqlQuery,
		s.Date(time.UnixMilli(start)),
		s.Date(time.UnixMilli(end)),
	)
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to list usage metrics")
	}
	defer rows.Close()

	metrics := make([]types.UsageMetric, 0, 16)
	for rows.Next() {
		var m types.UsageMetric
		if err := rows.Scan(
			&m.RootSpaceID,
			&m.BandwidthOut,
			&m.BandwidthIn,
			&m.StorageTotal,
			&m.LFSStorageTotal,
			&m.Pushes,
		); err != nil {
			return nil, database.ProcessSQLErrorf(ctx, err, "failed to scan usage_metrics")
		}
		metrics = append(metrics, m)
	}
	if err := rows.Err(); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "failed to list usage_metrics")
	}

	return metrics, nil
}
// Date truncates t to midnight UTC of its calendar day (as observed in t's
// own location) and returns the result in Unix milliseconds. It defines the
// per-day bucketing used by all usage-metric queries.
func (s *UsageMetricsStore) Date(t time.Time) int64 {
	y, m, d := t.Date()
	midnight := time.Date(y, m, d, 0, 0, 0, 0, time.UTC)
	return midnight.UnixMilli()
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/connector.go | app/store/database/connector.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"database/sql"
"fmt"
"time"
"github.com/harness/gitness/app/store"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
)
// Compile-time assertion that connectorStore satisfies store.ConnectorStore.
var _ store.ConnectorStore = (*connectorStore)(nil)

const (
	// connectorQueryBase is the shared SELECT prefix for single-row lookups.
	//nolint:goconst
	connectorQueryBase = `
	SELECT` + connectorColumns + `
	FROM connectors`

	// connectorColumns lists every column of the connectors table in the
	// order the `connector` struct scans them. Secret-valued columns
	// (password, token, AWS keys, private key, refresh token) hold foreign
	// keys into the secrets table, not the secret material itself.
	connectorColumns = `
	connector_id,
	connector_identifier,
	connector_description,
	connector_type,
	connector_auth_type,
	connector_created_by,
	connector_space_id,
	connector_last_test_attempt,
	connector_last_test_error_msg,
	connector_last_test_status,
	connector_created,
	connector_updated,
	connector_version,
	connector_address,
	connector_insecure,
	connector_username,
	connector_github_app_installation_id,
	connector_github_app_application_id,
	connector_region,
	connector_password,
	connector_token,
	connector_aws_key,
	connector_aws_secret,
	connector_github_app_private_key,
	connector_token_refresh
	`
)
// connector is the database representation of a connector row. Type-specific
// configuration (address, region, credentials, ...) is flattened into
// nullable columns; which columns are populated depends on connector_type
// and connector_auth_type.
type connector struct {
	ID               int64  `db:"connector_id"`
	Identifier       string `db:"connector_identifier"`
	Description      string `db:"connector_description"`
	Type             string `db:"connector_type"`
	AuthType         string `db:"connector_auth_type"`
	CreatedBy        int64  `db:"connector_created_by"`
	SpaceID          int64  `db:"connector_space_id"`
	LastTestAttempt  int64  `db:"connector_last_test_attempt"`
	LastTestErrorMsg string `db:"connector_last_test_error_msg"`
	LastTestStatus   string `db:"connector_last_test_status"`
	Created          int64  `db:"connector_created"`
	Updated          int64  `db:"connector_updated"`
	// Version implements optimistic locking (see Update).
	Version int64 `db:"connector_version"`
	// Type-specific, nullable configuration columns.
	Address                 sql.NullString `db:"connector_address"`
	Insecure                sql.NullBool   `db:"connector_insecure"`
	Username                sql.NullString `db:"connector_username"`
	GithubAppInstallationID sql.NullString `db:"connector_github_app_installation_id"`
	GithubAppApplicationID  sql.NullString `db:"connector_github_app_application_id"`
	Region                  sql.NullString `db:"connector_region"`
	// Password fields are stored as reference to secrets table
	Password            sql.NullInt64 `db:"connector_password"`
	Token               sql.NullInt64 `db:"connector_token"`
	AWSKey              sql.NullInt64 `db:"connector_aws_key"`
	AWSSecret           sql.NullInt64 `db:"connector_aws_secret"`
	GithubAppPrivateKey sql.NullInt64 `db:"connector_github_app_private_key"`
	TokenRefresh        sql.NullInt64 `db:"connector_token_refresh"`
}
// NewConnectorStore returns a new ConnectorStore.
// The secret store is used to resolve the secret references.
func NewConnectorStore(db *sqlx.DB, secretStore store.SecretStore) store.ConnectorStore {
	cs := &connectorStore{
		db:          db,
		secretStore: secretStore,
	}
	return cs
}
// mapFromDBConnectors converts a slice of DB rows into API connector types,
// resolving secret references for each element.
func (s *connectorStore) mapFromDBConnectors(ctx context.Context, src []*connector) ([]*types.Connector, error) {
	result := make([]*types.Connector, len(src))
	for idx, row := range src {
		mapped, err := s.mapFromDBConnector(ctx, row)
		if err != nil {
			return nil, fmt.Errorf("could not map from db connector: %w", err)
		}
		result[idx] = mapped
	}
	return result, nil
}
// mapToDBConnector converts an API connector into its DB row representation,
// including the type-specific config columns (via convertConfigToDB, which
// also resolves secret identifiers to secret IDs).
func (s *connectorStore) mapToDBConnector(ctx context.Context, v *types.Connector) (*connector, error) {
	to := connector{
		ID:               v.ID,
		Identifier:       v.Identifier,
		Description:      v.Description,
		Type:             v.Type.String(),
		SpaceID:          v.SpaceID,
		CreatedBy:        v.CreatedBy,
		Created:          v.Created,
		Updated:          v.Updated,
		Version:          v.Version,
		LastTestAttempt:  v.LastTestAttempt,
		LastTestErrorMsg: v.LastTestErrorMsg,
		LastTestStatus:   v.LastTestStatus.String(),
	}
	// Parse connector specific configs
	err := s.convertConfigToDB(ctx, v, &to)
	if err != nil {
		return nil, fmt.Errorf("could not convert config to db: %w", err)
	}
	return &to, nil
}
// convertConfigToDB flattens the connector-type-specific configuration from
// the API type onto the DB row, resolving secret identifiers to secret IDs.
// Currently only github connectors with bearer-token auth are supported.
func (s *connectorStore) convertConfigToDB(
	ctx context.Context,
	source *types.Connector,
	to *connector,
) error {
	switch {
	case source.Github != nil:
		to.Address = sql.NullString{String: source.Github.APIURL, Valid: true}
		to.Insecure = sql.NullBool{Bool: source.Github.Insecure, Valid: true}
		if source.Github.Auth == nil {
			return fmt.Errorf("auth is required for github connectors")
		}
		if source.Github.Auth.AuthType != enum.ConnectorAuthTypeBearer {
			return fmt.Errorf("only bearer token auth is supported for github connectors")
		}
		to.AuthType = source.Github.Auth.AuthType.String()
		creds := source.Github.Auth.Bearer
		// Guard against an auth payload that declares bearer auth but carries
		// no bearer credentials — creds.Token below would otherwise panic on
		// a nil pointer dereference.
		if creds == nil {
			return fmt.Errorf("bearer credentials are required for github connectors")
		}
		// use the same space ID as the connector
		tokenID, err := s.secretIdentiferToID(ctx, creds.Token.Identifier, source.SpaceID)
		if err != nil {
			return fmt.Errorf("could not find secret: %w", err)
		}
		to.Token = sql.NullInt64{Int64: tokenID, Valid: true}
	default:
		return fmt.Errorf("no connector config found for type: %s", source.Type)
	}
	return nil
}
// secretIdentiferToID resolves a secret identifier within a space to the
// secret's numeric ID. (The name keeps its original spelling — "Identifer" —
// so existing call sites remain untouched.)
func (s *connectorStore) secretIdentiferToID(
	ctx context.Context,
	identifier string,
	spaceID int64,
) (int64, error) {
	found, err := s.secretStore.FindByIdentifier(ctx, spaceID, identifier)
	if err != nil {
		return 0, err
	}
	return found.ID, nil
}
// mapFromDBConnector converts a DB row into the API connector type and
// populates the type-specific configuration (which may hit the secret store
// to turn secret IDs back into identifiers).
func (s *connectorStore) mapFromDBConnector(
	ctx context.Context,
	dbConnector *connector,
) (*types.Connector, error) {
	connector := &types.Connector{
		ID:               dbConnector.ID,
		Identifier:       dbConnector.Identifier,
		Description:      dbConnector.Description,
		Type:             enum.ConnectorType(dbConnector.Type),
		SpaceID:          dbConnector.SpaceID,
		CreatedBy:        dbConnector.CreatedBy,
		LastTestAttempt:  dbConnector.LastTestAttempt,
		LastTestErrorMsg: dbConnector.LastTestErrorMsg,
		LastTestStatus:   enum.ConnectorStatus(dbConnector.LastTestStatus),
		Created:          dbConnector.Created,
		Updated:          dbConnector.Updated,
		Version:          dbConnector.Version,
	}
	err := s.populateConnectorData(ctx, dbConnector, connector)
	if err != nil {
		return nil, fmt.Errorf("could not populate connector data: %w", err)
	}
	return connector, nil
}
// populateConnectorData fills the connector-type-specific sub-structure of
// the API type from the flat DB row. It errors on connector types it does
// not know how to hydrate.
func (s *connectorStore) populateConnectorData(
	ctx context.Context,
	source *connector,
	to *types.Connector,
) error {
	switch enum.ConnectorType(source.Type) {
	case enum.ConnectorTypeGithub:
		githubData, err := s.parseGithubConnectorData(ctx, source)
		if err != nil {
			return fmt.Errorf("could not parse github connector data: %w", err)
		}
		to.Github = githubData
	// Cases for other connectors can be added here
	default:
		return fmt.Errorf("unsupported connector type: %s", source.Type)
	}
	return nil
}
// parseGithubConnectorData builds the github-specific API config from the
// flat DB row, including its authentication data.
func (s *connectorStore) parseGithubConnectorData(
	ctx context.Context,
	connector *connector,
) (*types.GithubConnectorData, error) {
	authData, err := s.parseAuthenticationData(ctx, connector)
	if err != nil {
		return nil, fmt.Errorf("could not parse authentication data: %w", err)
	}
	data := &types.GithubConnectorData{
		APIURL:   connector.Address.String,
		Insecure: connector.Insecure.Bool,
		Auth:     authData,
	}
	return data, nil
}
// parseAuthenticationData reconstructs the API auth structure from the DB
// row, converting stored secret IDs back into secret identifiers via the
// secret store. Supported auth types: basic (username + password ref) and
// bearer (token ref).
func (s *connectorStore) parseAuthenticationData(
	ctx context.Context,
	connector *connector,
) (*types.ConnectorAuth, error) {
	authType, err := enum.ParseConnectorAuthType(connector.AuthType)
	if err != nil {
		return nil, err
	}
	switch authType {
	case enum.ConnectorAuthTypeBasic:
		// Both columns must be non-NULL for a valid basic-auth row.
		if !connector.Username.Valid || !connector.Password.Valid {
			return nil, fmt.Errorf("basic auth requires both username and password")
		}
		passwordRef, err := s.convertToRef(ctx, connector.Password.Int64)
		if err != nil {
			return nil, fmt.Errorf("could not convert basicauth password to ref: %w", err)
		}
		return &types.ConnectorAuth{
			AuthType: enum.ConnectorAuthTypeBasic,
			Basic: &types.BasicAuthCreds{
				Username: connector.Username.String,
				Password: passwordRef,
			},
		}, nil
	case enum.ConnectorAuthTypeBearer:
		if !connector.Token.Valid {
			return nil, fmt.Errorf("bearer auth requires a token")
		}
		tokenRef, err := s.convertToRef(ctx, connector.Token.Int64)
		if err != nil {
			return nil, fmt.Errorf("could not convert bearer token to ref: %w", err)
		}
		return &types.ConnectorAuth{
			AuthType: enum.ConnectorAuthTypeBearer,
			Bearer: &types.BearerTokenCreds{
				Token: tokenRef,
			},
		}, nil
	default:
		return nil, fmt.Errorf("unsupported auth type: %s", connector.AuthType)
	}
}
// convertToRef turns a stored secret ID into a SecretRef carrying the
// secret's identifier.
func (s *connectorStore) convertToRef(ctx context.Context, id int64) (types.SecretRef, error) {
	found, err := s.secretStore.Find(ctx, id)
	if err != nil {
		return types.SecretRef{}, err
	}
	ref := types.SecretRef{Identifier: found.Identifier}
	return ref, nil
}
// connectorStore implements store.ConnectorStore backed by a relational
// database; secret references in connector configs are resolved through the
// secret store.
type connectorStore struct {
	db          *sqlx.DB
	secretStore store.SecretStore
}
// Find returns a connector given a connector ID.
func (s *connectorStore) Find(ctx context.Context, id int64) (*types.Connector, error) {
	const findQueryStmt = connectorQueryBase + `
	WHERE connector_id = $1`

	row := new(connector)
	db := dbtx.GetAccessor(ctx, s.db)
	err := db.GetContext(ctx, row, findQueryStmt, id)
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find connector")
	}
	return s.mapFromDBConnector(ctx, row)
}
// FindByIdentifier returns a connector in a given space with a given identifier.
func (s *connectorStore) FindByIdentifier(
	ctx context.Context,
	spaceID int64,
	identifier string,
) (*types.Connector, error) {
	const findQueryStmt = connectorQueryBase + `
	WHERE connector_space_id = $1 AND connector_identifier = $2`

	row := new(connector)
	db := dbtx.GetAccessor(ctx, s.db)
	err := db.GetContext(ctx, row, findQueryStmt, spaceID, identifier)
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find connector")
	}
	return s.mapFromDBConnector(ctx, row)
}
// Create creates a connector. The incoming connector's config is flattened
// into DB columns (secret identifiers resolved to IDs) and the generated
// connector ID is written back into connector.ID.
func (s *connectorStore) Create(ctx context.Context, connector *types.Connector) error {
	dbConnector, err := s.mapToDBConnector(ctx, connector)
	if err != nil {
		return err
	}
	const connectorInsertStmt = `
	INSERT INTO connectors (
		connector_description
		,connector_type
		,connector_created_by
		,connector_space_id
		,connector_identifier
		,connector_last_test_attempt
		,connector_last_test_error_msg
		,connector_last_test_status
		,connector_created
		,connector_updated
		,connector_version
		,connector_auth_type
		,connector_address
		,connector_insecure
		,connector_username
		,connector_github_app_installation_id
		,connector_github_app_application_id
		,connector_region
		,connector_password
		,connector_token
		,connector_aws_key
		,connector_aws_secret
		,connector_github_app_private_key
		,connector_token_refresh
	) VALUES (
		:connector_description
		,:connector_type
		,:connector_created_by
		,:connector_space_id
		,:connector_identifier
		,:connector_last_test_attempt
		,:connector_last_test_error_msg
		,:connector_last_test_status
		,:connector_created
		,:connector_updated
		,:connector_version
		,:connector_auth_type
		,:connector_address
		,:connector_insecure
		,:connector_username
		,:connector_github_app_installation_id
		,:connector_github_app_application_id
		,:connector_region
		,:connector_password
		,:connector_token
		,:connector_aws_key
		,:connector_aws_secret
		,:connector_github_app_private_key
		,:connector_token_refresh
	) RETURNING connector_id`
	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(connectorInsertStmt, dbConnector)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind connector object")
	}

	// RETURNING gives us the generated primary key in one round trip.
	if err = db.QueryRowContext(ctx, query, arg...).Scan(&connector.ID); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "connector query failed")
	}

	return nil
}
// Update persists the connector using optimistic locking: the row is only
// updated when its stored version equals the incoming version, and the
// version is incremented on success. Returns
// gitness_store.ErrVersionConflict when a concurrent writer won the race.
// On success the caller's Version and Updated fields are refreshed.
func (s *connectorStore) Update(ctx context.Context, p *types.Connector) error {
	conn, err := s.mapToDBConnector(ctx, p)
	if err != nil {
		return err
	}
	const connectorUpdateStmt = `
	UPDATE connectors
	SET
		connector_description = :connector_description
		,connector_identifier = :connector_identifier
		,connector_last_test_attempt = :connector_last_test_attempt
		,connector_last_test_error_msg = :connector_last_test_error_msg
		,connector_last_test_status = :connector_last_test_status
		,connector_updated = :connector_updated
		,connector_version = :connector_version
		,connector_auth_type = :connector_auth_type
		,connector_address = :connector_address
		,connector_insecure = :connector_insecure
		,connector_username = :connector_username
		,connector_github_app_installation_id = :connector_github_app_installation_id
		,connector_github_app_application_id = :connector_github_app_application_id
		,connector_region = :connector_region
		,connector_password = :connector_password
		,connector_token = :connector_token
		,connector_aws_key = :connector_aws_key
		,connector_aws_secret = :connector_aws_secret
		,connector_github_app_private_key = :connector_github_app_private_key
		,connector_token_refresh = :connector_token_refresh
	WHERE connector_id = :connector_id AND connector_version = :connector_version - 1`

	// Work on a copy so the caller's object is untouched if the update fails.
	o := *conn
	o.Version++
	o.Updated = time.Now().UnixMilli()

	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(connectorUpdateStmt, o)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind connector object")
	}

	result, err := db.ExecContext(ctx, query, arg...)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to update connector")
	}

	count, err := result.RowsAffected()
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to get number of updated rows")
	}

	// Zero affected rows means the version guard rejected the write.
	if count == 0 {
		return gitness_store.ErrVersionConflict
	}

	p.Version = o.Version
	p.Updated = o.Updated
	return nil
}
// UpdateOptLock updates the connector using the optimistic locking mechanism:
// it applies mutateFn to a copy, attempts the update, and on a version
// conflict re-reads the connector and retries.
func (s *connectorStore) UpdateOptLock(ctx context.Context,
	connector *types.Connector,
	mutateFn func(connector *types.Connector) error,
) (*types.Connector, error) {
	for {
		working := *connector
		if err := mutateFn(&working); err != nil {
			return nil, err
		}

		err := s.Update(ctx, &working)
		if err == nil {
			return &working, nil
		}
		if !errors.Is(err, gitness_store.ErrVersionConflict) {
			return nil, err
		}

		// Lost the race — reload the latest version and try again.
		refreshed, err := s.Find(ctx, connector.ID)
		if err != nil {
			return nil, err
		}
		connector = refreshed
	}
}
// List lists the connectors in a space, optionally filtered by a partial
// identifier match, paginated by filter.Page/filter.Size.
func (s *connectorStore) List(
	ctx context.Context,
	parentID int64,
	filter types.ListQueryFilter,
) ([]*types.Connector, error) {
	stmt := database.Builder.
		Select(connectorColumns).
		From("connectors").
		// Bind the space ID as an integer (as Count does) instead of
		// stringifying it with fmt.Sprint — keeps the bound parameter typed
		// consistently across queries.
		Where("connector_space_id = ?", parentID)

	if filter.Query != "" {
		stmt = stmt.Where(PartialMatch("connector_identifier", filter.Query))
	}

	stmt = stmt.Limit(database.Limit(filter.Size))
	stmt = stmt.Offset(database.Offset(filter.Page, filter.Size))

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert query to sql")
	}

	db := dbtx.GetAccessor(ctx, s.db)

	dst := []*connector{}
	if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing custom list query")
	}

	return s.mapFromDBConnectors(ctx, dst)
}
// Delete deletes a connector given a connector ID. Deleting a non-existent
// connector is not an error (no row-count check), matching the original
// behavior.
func (s *connectorStore) Delete(ctx context.Context, id int64) error {
	const connectorDeleteStmt = `
	DELETE FROM connectors
	WHERE connector_id = $1`

	db := dbtx.GetAccessor(ctx, s.db)
	_, err := db.ExecContext(ctx, connectorDeleteStmt, id)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Could not delete connector")
	}
	return nil
}
// DeleteByIdentifier deletes a connector with a given identifier in a space.
// As with Delete, a missing row is silently a no-op.
func (s *connectorStore) DeleteByIdentifier(ctx context.Context, spaceID int64, identifier string) error {
	const connectorDeleteStmt = `
	DELETE FROM connectors
	WHERE connector_space_id = $1 AND connector_identifier = $2`

	db := dbtx.GetAccessor(ctx, s.db)
	_, err := db.ExecContext(ctx, connectorDeleteStmt, spaceID, identifier)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Could not delete connector")
	}
	return nil
}
// Count returns the number of connectors in a space matching the optional
// partial-identifier filter.
func (s *connectorStore) Count(ctx context.Context, parentID int64, filter types.ListQueryFilter) (int64, error) {
	stmt := database.Builder.
		Select("count(*)").
		From("connectors").
		Where("connector_space_id = ?", parentID)
	if q := filter.Query; q != "" {
		stmt = stmt.Where(PartialMatch("connector_identifier", q))
	}

	sql, args, err := stmt.ToSql()
	if err != nil {
		return 0, errors.Wrap(err, "Failed to convert query to sql")
	}

	var count int64
	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.QueryRowContext(ctx, sql, args...).Scan(&count); err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "Failed executing count query")
	}
	return count, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/step.go | app/store/database/step.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"fmt"
"github.com/harness/gitness/app/store"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/jmoiron/sqlx"
sqlxtypes "github.com/jmoiron/sqlx/types"
)
// Compile-time assertion that stepStore satisfies store.StepStore.
var _ store.StepStore = (*stepStore)(nil)

const (
	// stepColumns lists the columns read by step queries. Note that
	// step_parent_group_id is NOT included here (it is written on insert in
	// Create but not selected), so reads leave step.ParentGroupID at zero.
	stepColumns = `
	step_id
	,step_stage_id
	,step_number
	,step_name
	,step_status
	,step_error
	,step_errignore
	,step_exit_code
	,step_started
	,step_stopped
	,step_version
	,step_depends_on
	,step_image
	,step_detached
	,step_schema
	`
)
// step is the database representation of a pipeline step row.
type step struct {
	ID            int64 `db:"step_id"`
	StageID       int64 `db:"step_stage_id"`
	Number        int64 `db:"step_number"`
	ParentGroupID int64 `db:"step_parent_group_id"` // written on insert; not part of stepColumns reads
	Name          string `db:"step_name"`
	Status        enum.CIStatus `db:"step_status"`
	Error         string `db:"step_error"`
	ErrIgnore     bool `db:"step_errignore"`
	ExitCode      int `db:"step_exit_code"`
	Started       int64 `db:"step_started"`
	Stopped       int64 `db:"step_stopped"`
	// Version implements optimistic locking (see Update).
	Version int64 `db:"step_version"`
	// DependsOn is stored as a JSON-encoded list.
	DependsOn sqlxtypes.JSONText `db:"step_depends_on"`
	Image     string `db:"step_image"`
	Detached  bool `db:"step_detached"`
	Schema    string `db:"step_schema"`
}
// NewStepStore returns a new StepStore.
func NewStepStore(db *sqlx.DB) store.StepStore {
	ss := &stepStore{db: db}
	return ss
}
// stepStore implements store.StepStore backed by a relational database.
type stepStore struct {
	db *sqlx.DB
}
// FindByNumber returns a step given a stage ID and a step number.
func (s *stepStore) FindByNumber(ctx context.Context, stageID int64, stepNum int) (*types.Step, error) {
	const findQueryStmt = `
	SELECT` + stepColumns + `
	FROM steps
	WHERE step_stage_id = $1 AND step_number = $2`

	row := new(step)
	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.GetContext(ctx, row, findQueryStmt, stageID, stepNum); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find step")
	}
	return mapInternalToStep(row)
}
// Create creates a step. The generated primary key is written back into
// step.ID via the RETURNING clause.
func (s *stepStore) Create(ctx context.Context, step *types.Step) error {
	const stepInsertStmt = `
	INSERT INTO steps (
		step_stage_id
		,step_number
		,step_name
		,step_status
		,step_error
		,step_parent_group_id
		,step_errignore
		,step_exit_code
		,step_started
		,step_stopped
		,step_version
		,step_depends_on
		,step_image
		,step_detached
		,step_schema
	) VALUES (
		:step_stage_id
		,:step_number
		,:step_name
		,:step_status
		,:step_error
		,:step_parent_group_id
		,:step_errignore
		,:step_exit_code
		,:step_started
		,:step_stopped
		,:step_version
		,:step_depends_on
		,:step_image
		,:step_detached
		,:step_schema
	) RETURNING step_id`
	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(stepInsertStmt, mapStepToInternal(step))
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind step object")
	}

	if err = db.QueryRowContext(ctx, query, arg...).Scan(&step.ID); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Step query failed")
	}

	return nil
}
// Update tries to update a step in the datastore and returns a locking error
// if it was unable to do so. The update is guarded by an optimistic version
// check (stored version must equal incoming version); on success the
// caller's object is refreshed with the stored state and the new version.
func (s *stepStore) Update(ctx context.Context, e *types.Step) error {
	const stepUpdateStmt = `
	UPDATE steps
	SET
		step_name = :step_name
		,step_status = :step_status
		,step_error = :step_error
		,step_errignore = :step_errignore
		,step_exit_code = :step_exit_code
		,step_started = :step_started
		,step_stopped = :step_stopped
		,step_depends_on = :step_depends_on
		,step_image = :step_image
		,step_detached = :step_detached
		,step_schema = :step_schema
		,step_version = :step_version
	WHERE step_id = :step_id AND step_version = :step_version - 1`
	step := mapStepToInternal(e)
	// Bump the version; the WHERE clause compares against version-1 so only
	// the expected predecessor row is updated.
	step.Version++

	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(stepUpdateStmt, step)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind step object")
	}

	result, err := db.ExecContext(ctx, query, arg...)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to update step")
	}

	count, err := result.RowsAffected()
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to get number of updated rows")
	}

	// Zero affected rows means a concurrent writer won the race.
	if count == 0 {
		return gitness_store.ErrVersionConflict
	}

	// Reflect the persisted state (including the bumped version) back into
	// the caller's object.
	m, err := mapInternalToStep(step)
	if err != nil {
		return fmt.Errorf("could not map step object: %w", err)
	}
	*e = *m
	e.Version = step.Version
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/infra_provider_resource.go | app/store/database/infra_provider_resource.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/Masterminds/squirrel"
"github.com/guregu/null"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
)
const (
	// infraProviderResourceIDColumn is the primary-key column name.
	infraProviderResourceIDColumn = `ipreso_id`
	// infraProviderResourceInsertColumns lists every writable column
	// (everything except the auto-generated ID).
	infraProviderResourceInsertColumns = `
		ipreso_uid,
		ipreso_display_name,
		ipreso_infra_provider_config_id,
		ipreso_type,
		ipreso_space_id,
		ipreso_created,
		ipreso_updated,
		ipreso_cpu,
		ipreso_memory,
		ipreso_disk,
		ipreso_network,
		ipreso_region,
		ipreso_metadata,
		ipreso_is_deleted,
		ipreso_deleted
	`
	// infraProviderResourceSelectColumns is the insert column list prefixed
	// with the ID, matching the scan order of infraProviderResource.
	infraProviderResourceSelectColumns = "ipreso_id," + infraProviderResourceInsertColumns
	infraProviderResourceTable         = `infra_provider_resources`
)
// infraProviderResource is the database representation of an infra provider
// resource row. Rows are soft-deleted (IsDeleted/Deleted) and versioned by
// creation time: queries pick the newest non-deleted row per identifier.
type infraProviderResource struct {
	ID                    int64 `db:"ipreso_id"`
	Identifier            string `db:"ipreso_uid"`
	Name                  string `db:"ipreso_display_name"`
	InfraProviderConfigID int64 `db:"ipreso_infra_provider_config_id"`
	InfraProviderType     enum.InfraProviderType `db:"ipreso_type"`
	SpaceID               int64 `db:"ipreso_space_id"`
	CPU                   null.String `db:"ipreso_cpu"`
	Memory                null.String `db:"ipreso_memory"`
	Disk                  null.String `db:"ipreso_disk"`
	Network               null.String `db:"ipreso_network"`
	Region                string `db:"ipreso_region"` // need list maybe
	// Metadata holds a JSON-encoded map (see Create's json.Marshal).
	Metadata  []byte `db:"ipreso_metadata"`
	Created   int64 `db:"ipreso_created"`
	Updated   int64 `db:"ipreso_updated"`
	IsDeleted bool `db:"ipreso_is_deleted"`
	Deleted   null.Int `db:"ipreso_deleted"`
}
// Compile-time assertion that infraProviderResourceStore satisfies store.InfraProviderResourceStore.
var _ store.InfraProviderResourceStore = (*infraProviderResourceStore)(nil)
// NewInfraProviderResourceStore returns a new InfraProviderResourceStore.
func NewInfraProviderResourceStore(
	db *sqlx.DB,
	spaceIDCache store.SpaceIDCache,
) store.InfraProviderResourceStore {
	s := &infraProviderResourceStore{
		db:           db,
		spaceIDCache: spaceIDCache,
	}
	return s
}
// infraProviderResourceStore implements store.InfraProviderResourceStore
// backed by a relational database.
type infraProviderResourceStore struct {
	db           *sqlx.DB
	spaceIDCache store.SpaceIDCache
}
// List returns, for each resource identifier under the given config, the
// newest non-deleted row (rows are versioned by creation time). The filter
// parameter is currently ignored.
//
// NOTE(review): this builder mixes placeholder styles — literal `$1`/`$2` in
// the Where fragments plus squirrel's `?` in the IN expression — and uses
// bare squirrel.Select instead of database.Builder used elsewhere in this
// package. Verify the generated SQL and argument ordering against the target
// database before changing anything here.
func (s infraProviderResourceStore) List(
	ctx context.Context,
	infraProviderConfigID int64,
	_ types.ListQueryFilter,
) ([]*types.InfraProviderResource, error) {
	// Newest creation timestamp per identifier (non-deleted rows only).
	subQuery := squirrel.Select("MAX(ipreso_created)").
		From(infraProviderResourceTable).
		Where("ipreso_infra_provider_config_id = $1", infraProviderConfigID).
		Where("ipreso_is_deleted = false").
		GroupBy("ipreso_uid")

	// Outer query keeps only rows whose creation time is one of those maxima.
	stmt := squirrel.Select(infraProviderResourceSelectColumns).
		From(infraProviderResourceTable).
		Where("ipreso_infra_provider_config_id = $2", infraProviderConfigID).
		Where("ipreso_is_deleted = false").
		Where(squirrel.Expr("ipreso_created IN (?)", subQuery)).
		OrderBy("ipreso_uid", "ipreso_created DESC")

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	db := dbtx.GetAccessor(ctx, s.db)
	dst := new([]infraProviderResource)
	if err := db.SelectContext(ctx, dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to list infraprovider resources for config %d",
			infraProviderConfigID)
	}
	return s.mapToInfraProviderResources(ctx, *dst)
}
// Find fetches a single non-deleted infra provider resource by its primary key.
func (s infraProviderResourceStore) Find(ctx context.Context, id int64) (*types.InfraProviderResource, error) {
	query := database.Builder.
		Select(infraProviderResourceSelectColumns).
		From(infraProviderResourceTable).
		Where(infraProviderResourceIDColumn+" = $1", id).
		Where("ipreso_is_deleted = false")
	sql, args, err := query.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	record := new(infraProviderResource)
	if err := dbtx.GetAccessor(ctx, s.db).GetContext(ctx, record, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find infraprovider resource %d", id)
	}
	return s.mapToInfraProviderResource(ctx, record)
}
// FindByConfigAndIdentifier returns the newest non-deleted resource with the
// given identifier under the given space and infra provider config.
func (s infraProviderResourceStore) FindByConfigAndIdentifier(
	ctx context.Context,
	spaceID int64,
	infraProviderConfigID int64,
	identifier string,
) (*types.InfraProviderResource, error) {
	query := database.Builder.
		Select(infraProviderResourceSelectColumns).
		From(infraProviderResourceTable).
		Where("ipreso_uid = ?", identifier).
		Where("ipreso_space_id = ?", spaceID).
		Where("ipreso_infra_provider_config_id = ?", infraProviderConfigID).
		Where("ipreso_is_deleted = false").
		OrderBy("ipreso_created DESC").
		Limit(1)
	sql, args, err := query.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	record := new(infraProviderResource)
	if err := dbtx.GetAccessor(ctx, s.db).GetContext(ctx, record, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find infraprovider resource %s", identifier)
	}
	return s.mapToInfraProviderResource(ctx, record)
}
// Create inserts a new infra provider resource row. The Metadata map is
// serialized to JSON before insertion, and the generated primary key is
// scanned back into infraProviderResource.ID.
func (s infraProviderResourceStore) Create(
	ctx context.Context,
	infraProviderResource *types.InfraProviderResource,
) error {
	metadata, marshalErr := json.Marshal(infraProviderResource.Metadata)
	if marshalErr != nil {
		return marshalErr
	}
	// Values must stay in the same order as infraProviderResourceInsertColumns.
	stmt := database.Builder.
		Insert(infraProviderResourceTable).
		Columns(infraProviderResourceInsertColumns).
		Values(
			infraProviderResource.UID,
			infraProviderResource.Name,
			infraProviderResource.InfraProviderConfigID,
			infraProviderResource.InfraProviderType,
			infraProviderResource.SpaceID,
			infraProviderResource.Created,
			infraProviderResource.Updated,
			infraProviderResource.CPU,
			infraProviderResource.Memory,
			infraProviderResource.Disk,
			infraProviderResource.Network,
			infraProviderResource.Region,
			metadata,
			infraProviderResource.IsDeleted,
			infraProviderResource.Deleted,
		).
		Suffix(ReturningClause + infraProviderResourceIDColumn)
	sql, args, err := stmt.ToSql()
	if err != nil {
		return errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	db := dbtx.GetAccessor(ctx, s.db)
	if err = db.QueryRowContext(ctx, sql, args...).Scan(&infraProviderResource.ID); err != nil {
		return database.ProcessSQLErrorf(
			ctx, err, "infra provider resource create failed %s", infraProviderResource.UID)
	}
	return nil
}
// mapToInfraProviderResource converts a DB row to the API type and resolves the
// resource's space path via the space ID cache.
func (s infraProviderResourceStore) mapToInfraProviderResource(
	ctx context.Context,
	in *infraProviderResource,
) (*types.InfraProviderResource, error) {
	resource, err := toInfraProviderResource(in)
	if err != nil {
		return nil, err
	}
	space, err := s.spaceIDCache.Get(ctx, resource.SpaceID)
	if err != nil {
		return nil, fmt.Errorf("couldn't set space path to the infra resource in DB: %d", resource.SpaceID)
	}
	resource.SpacePath = space.Path
	return resource, nil
}
// toInfraProviderResource maps a DB row to the API type without any DB access.
// The metadata column (JSON) is decoded into a string map; nullable columns are
// exposed as pointers (nil when the column was NULL).
func toInfraProviderResource(
	in *infraProviderResource,
) (*types.InfraProviderResource, error) {
	metadataParamsMap := make(map[string]string)
	if len(in.Metadata) > 0 {
		marshalErr := json.Unmarshal(in.Metadata, &metadataParamsMap)
		if marshalErr != nil {
			return nil, marshalErr
		}
	}
	res := &types.InfraProviderResource{
		UID:                   in.Identifier,
		InfraProviderConfigID: in.InfraProviderConfigID,
		ID:                    in.ID,
		InfraProviderType:     in.InfraProviderType,
		Name:                  in.Name,
		SpaceID:               in.SpaceID,
		CPU:                   in.CPU.Ptr(),
		Memory:                in.Memory.Ptr(),
		Disk:                  in.Disk.Ptr(),
		Network:               in.Network.Ptr(),
		Region:                in.Region,
		Metadata:              metadataParamsMap,
		Created:               in.Created,
		Updated:               in.Updated,
		IsDeleted:             in.IsDeleted,
		Deleted:               in.Deleted.Ptr(),
	}
	// SpacePath is intentionally left empty here; callers enrich it separately.
	return res, nil
}
// mapToInfraProviderResources maps a slice of DB rows to API types, failing
// fast on the first mapping error.
func (s infraProviderResourceStore) mapToInfraProviderResources(
	ctx context.Context,
	resources []infraProviderResource,
) ([]*types.InfraProviderResource, error) {
	result := make([]*types.InfraProviderResource, len(resources))
	for idx := range resources {
		mapped, err := s.mapToInfraProviderResource(ctx, &resources[idx])
		if err != nil {
			return nil, err
		}
		result[idx] = mapped
	}
	return result, nil
}
// Compile-time check that InfraProviderResourceView implements store.InfraProviderResourceView.
var _ store.InfraProviderResourceView = (*InfraProviderResourceView)(nil)
// NewInfraProviderResourceView returns a new InfraProviderResourceView.
// It's used by the infraprovider resource cache.
func NewInfraProviderResourceView(db *sqlx.DB, spaceStore store.SpaceStore) *InfraProviderResourceView {
	view := InfraProviderResourceView{
		db:         db,
		spaceStore: spaceStore,
	}
	return &view
}
// InfraProviderResourceView provides read access to infra provider resources;
// it is backed directly by the database (see NewInfraProviderResourceView).
type InfraProviderResourceView struct {
	db         *sqlx.DB
	spaceStore store.SpaceStore // resolves space paths for returned resources
}
// NOTE(review): this interface assertion is an exact duplicate of the one a few
// lines above NewInfraProviderResourceView; one of the two could be removed.
var _ store.InfraProviderResourceView = (*InfraProviderResourceView)(nil)
// Find loads an infra provider resource by ID (including soft-deleted rows —
// unlike the store's Find, there is no ipreso_is_deleted filter here) and
// best-effort enriches it with config identifier/name and the space path.
func (i InfraProviderResourceView) Find(ctx context.Context, id int64) (*types.InfraProviderResource, error) {
	stmt := database.Builder.
		Select(infraProviderResourceSelectColumns).
		From(infraProviderResourceTable).
		Where(infraProviderResourceIDColumn+" = $1", id)
	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	dst := new(infraProviderResource)
	db := dbtx.GetAccessor(ctx, i.db)
	if err := db.GetContext(ctx, dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find infraprovider providerResource %d", id)
	}
	providerResource, err := toInfraProviderResource(dst)
	if err != nil {
		return nil, err
	}
	// Config lookup failures are swallowed: the resource is still returned,
	// just without config identifier/name.
	providerConfig, err := i.findInfraProviderConfig(ctx, providerResource.InfraProviderConfigID)
	if err == nil && providerConfig != nil {
		providerResource.InfraProviderConfigIdentifier = providerConfig.Identifier
		providerResource.InfraProviderConfigName = providerConfig.Name
	}
	// NOTE(review): the space lookup error is returned together with a non-nil
	// resource — callers see err != nil even though the resource itself was
	// found. Confirm this is intended rather than returning nil on error.
	resourceSpace, err := i.spaceStore.Find(ctx, providerResource.SpaceID)
	if err == nil {
		providerResource.SpacePath = resourceSpace.Path
	}
	return providerResource, err
}
// findInfraProviderConfig loads a single infra provider config row by its primary key.
func (i InfraProviderResourceView) findInfraProviderConfig(
	ctx context.Context,
	id int64,
) (*infraProviderConfig, error) {
	query := database.Builder.
		Select(infraProviderConfigSelectColumns).
		From(infraProviderConfigTable).
		Where(infraProviderConfigIDColumn+" = $1", id) //nolint:goconst
	sql, args, err := query.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	record := new(infraProviderConfig)
	if err := dbtx.GetAccessor(ctx, i.db).GetContext(ctx, record, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find infraprovider config %d", id)
	}
	return record, nil
}
// FindMany returns the infra provider resources matching the given IDs.
// IDs that do not exist are silently absent from the result.
func (i InfraProviderResourceView) FindMany(ctx context.Context, ids []int64) ([]*types.InfraProviderResource, error) {
	stmt := database.Builder.
		Select(infraProviderResourceSelectColumns).
		From(infraProviderResourceTable).
		Where(squirrel.Eq{infraProviderResourceIDColumn: ids})
	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	dst := new([]infraProviderResource)
	db := dbtx.GetAccessor(ctx, i.db)
	// Multi-row query: SelectContext must be used here — sqlx's GetContext scans
	// exactly one row and errors out on a slice destination.
	if err := db.SelectContext(ctx, dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find infraprovider resources")
	}
	return i.mapToInfraProviderResources(ctx, *dst)
}
// mapToInfraProviderResources maps DB rows to API types and best-effort
// resolves each resource's space path via the space store.
func (i InfraProviderResourceView) mapToInfraProviderResources(
	ctx context.Context,
	resources []infraProviderResource,
) ([]*types.InfraProviderResource, error) {
	var err error
	res := make([]*types.InfraProviderResource, len(resources))
	for idx := range resources {
		res[idx], err = toInfraProviderResource(&resources[idx])
		if err != nil {
			return nil, err
		}
		// The ":=" below deliberately shadows err — a failed space lookup is
		// swallowed and simply leaves SpacePath empty for that resource.
		resourceSpace, err := i.spaceStore.Find(ctx, res[idx].SpaceID)
		if err == nil {
			res[idx].SpacePath = resourceSpace.Path
		}
	}
	return res, nil
}
// Delete soft-deletes an infra provider resource: it marks the row deleted and
// stamps both the updated and deleted timestamps; no row is physically removed.
func (s infraProviderResourceStore) Delete(ctx context.Context, id int64) error {
	now := time.Now().UnixMilli()
	stmt := database.Builder.
		Update(infraProviderResourceTable).
		Set("ipreso_updated", now).
		Set("ipreso_deleted", now).
		Set("ipreso_is_deleted", true).
		// NOTE(review): the literal "$4" is positionally coupled to the three
		// Set() bindings above — adding/removing a Set breaks it. Confirm the
		// builder's placeholder format before touching this; a plain "?" may be
		// the safer spelling.
		Where("ipreso_id = $4", id)
	sql, args, err := stmt.ToSql()
	if err != nil {
		return errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	db := dbtx.GetAccessor(ctx, s.db)
	if _, err := db.ExecContext(ctx, sql, args...); err != nil {
		return database.ProcessSQLErrorf(
			ctx, err, "Failed to update infraprovider resource %d", id)
	}
	return nil
}
// Update updates an existing infra provider resource in the database.
// It overwrites the mutable columns from the given value, re-serializes the
// metadata map to JSON, and stamps Updated on both the row and the passed-in
// struct. The row is addressed by primary key; UID/space/config are immutable.
func (s infraProviderResourceStore) Update(
	ctx context.Context,
	infraProviderResource *types.InfraProviderResource,
) error {
	metadata, err := json.Marshal(infraProviderResource.Metadata)
	if err != nil {
		return errors.Wrap(err, "Failed to marshal metadata")
	}
	now := time.Now().UnixMilli()
	infraProviderResource.Updated = now
	stmt := database.Builder.
		Update(infraProviderResourceTable).
		Set("ipreso_display_name", infraProviderResource.Name).
		Set("ipreso_updated", infraProviderResource.Updated).
		Set("ipreso_cpu", infraProviderResource.CPU).
		Set("ipreso_memory", infraProviderResource.Memory).
		Set("ipreso_disk", infraProviderResource.Disk).
		Set("ipreso_network", infraProviderResource.Network).
		Set("ipreso_region", infraProviderResource.Region).
		Set("ipreso_metadata", metadata).
		Set("ipreso_is_deleted", infraProviderResource.IsDeleted).
		Set("ipreso_deleted", infraProviderResource.Deleted).
		Where("ipreso_id = ?", infraProviderResource.ID)
	sql, args, err := stmt.ToSql()
	if err != nil {
		return errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	db := dbtx.GetAccessor(ctx, s.db)
	if _, err := db.ExecContext(ctx, sql, args...); err != nil {
		return database.ProcessSQLErrorf(
			ctx, err, "infra provider resource update failed %s", infraProviderResource.UID)
	}
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/gitspace_event.go | app/store/database/gitspace_event.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"fmt"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/Masterminds/squirrel"
"github.com/jmoiron/sqlx"
)
// Compile-time check that gitspaceEventStore implements store.GitspaceEventStore.
var _ store.GitspaceEventStore = (*gitspaceEventStore)(nil)
// Column/table names for the gitspace_events table. The insert column list
// (gitspaceEventsColumns) excludes the auto-generated ID; the WithID variant
// prepends it for selects.
const (
	gitspaceEventIDColumn = `geven_id`
	gitspaceEventsColumns = `
	geven_event,
	geven_created,
	geven_entity_type,
	geven_query_key,
	geven_entity_id,
	geven_timestamp
	`
	gitspaceEventsColumnsWithID = gitspaceEventIDColumn + `,
	` + gitspaceEventsColumns
	gitspaceEventsTable = `gitspace_events`
)
// gitspaceEventStore implements store.GitspaceEventStore backed by a relational database.
type gitspaceEventStore struct {
	db *sqlx.DB
}
// gitspaceEvent is the DB representation of a gitspace event row.
type gitspaceEvent struct {
	ID         int64                   `db:"geven_id"`
	Event      enum.GitspaceEventType  `db:"geven_event"`
	Created    int64                   `db:"geven_created"`
	EntityType enum.GitspaceEntityType `db:"geven_entity_type"`
	QueryKey   string                  `db:"geven_query_key"`
	EntityID   int64                   `db:"geven_entity_id"`
	Timestamp  int64                   `db:"geven_timestamp"`
}
// NewGitspaceEventStore returns a new GitspaceEventStore backed by the given database.
func NewGitspaceEventStore(db *sqlx.DB) store.GitspaceEventStore {
	eventStore := gitspaceEventStore{db: db}
	return &eventStore
}
// FindLatestByTypeAndGitspaceConfigID returns the most recent event (by
// geven_timestamp) of the given type for the given gitspace config.
// There is no explicit LIMIT 1 — sqlx's GetContext scans only the first row of
// the DESC-ordered result.
func (g gitspaceEventStore) FindLatestByTypeAndGitspaceConfigID(
	ctx context.Context,
	eventType enum.GitspaceEventType,
	gitspaceConfigID int64,
) (*types.GitspaceEvent, error) {
	stmt := database.Builder.
		Select(gitspaceEventsColumnsWithID).
		From(gitspaceEventsTable).
		Where("geven_event = $1", eventType).
		Where("geven_entity_id = $2", gitspaceConfigID).
		OrderBy("geven_timestamp DESC")
	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, fmt.Errorf("failed to convert squirrel builder to sql: %w", err)
	}
	db := dbtx.GetAccessor(ctx, g.db)
	gitspaceEventEntity := new(gitspaceEvent)
	if err = db.GetContext(ctx, gitspaceEventEntity, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find gitspace event for %d", gitspaceConfigID)
	}
	return g.mapGitspaceEvent(gitspaceEventEntity), nil
}
// Create inserts a new gitspace event row and scans the generated primary key
// back into gitspaceEvent.ID.
func (g gitspaceEventStore) Create(ctx context.Context, gitspaceEvent *types.GitspaceEvent) error {
	// Values must stay in the same order as gitspaceEventsColumns.
	stmt := database.Builder.
		Insert(gitspaceEventsTable).
		Columns(gitspaceEventsColumns).
		Values(
			gitspaceEvent.Event,
			gitspaceEvent.Created,
			gitspaceEvent.EntityType,
			gitspaceEvent.QueryKey,
			gitspaceEvent.EntityID,
			gitspaceEvent.Timestamp,
		).
		Suffix("RETURNING " + gitspaceEventIDColumn)
	db := dbtx.GetAccessor(ctx, g.db)
	sql, args, err := stmt.ToSql()
	if err != nil {
		return fmt.Errorf("failed to convert squirrel builder to sql: %w", err)
	}
	if err = db.QueryRowContext(ctx, sql, args...).Scan(&gitspaceEvent.ID); err != nil {
		return database.ProcessSQLErrorf(
			ctx, err, "failed to create gitspace event for %s", gitspaceEvent.QueryKey)
	}
	return nil
}
// List returns one page of gitspace events matching the filter, plus the total
// count of matching events (computed by a second, unpaginated COUNT query with
// the same filter predicates).
func (g gitspaceEventStore) List(
	ctx context.Context,
	filter *types.GitspaceEventFilter,
) ([]*types.GitspaceEvent, int, error) {
	queryStmt := database.Builder.
		Select(gitspaceEventsColumnsWithID).
		From(gitspaceEventsTable)
	queryStmt = g.setQueryFilter(queryStmt, filter)
	queryStmt = g.setSortFilter(queryStmt, filter)
	queryStmt = g.setPaginationFilter(queryStmt, filter)
	sql, args, err := queryStmt.ToSql()
	if err != nil {
		return nil, 0, fmt.Errorf("failed to convert squirrel builder to sql: %w", err)
	}
	db := dbtx.GetAccessor(ctx, g.db)
	var gitspaceEventEntities []*gitspaceEvent
	if err = db.SelectContext(ctx, &gitspaceEventEntities, sql, args...); err != nil {
		return nil, 0, database.ProcessSQLErrorf(ctx, err, "Failed to list gitspace event")
	}
	// Count query: same predicates, no sort/pagination.
	countStmt := database.Builder.
		Select("count(*)").
		From(gitspaceEventsTable)
	countStmt = g.setQueryFilter(countStmt, filter)
	sql, args, err = countStmt.ToSql()
	if err != nil {
		return nil, 0, fmt.Errorf("failed to convert squirrel builder to sql: %w", err)
	}
	var count int
	err = db.QueryRowContext(ctx, sql, args...).Scan(&count)
	if err != nil {
		return nil, 0, database.ProcessSQLErrorf(ctx, err, "Failed executing custom count query")
	}
	gitspaceEvents := g.mapGitspaceEvents(gitspaceEventEntities)
	return gitspaceEvents, count, nil
}
// setQueryFilter applies the filter's predicates to the statement; zero-valued
// filter fields are skipped, and SkipEvents excludes the listed event types.
func (g gitspaceEventStore) setQueryFilter(
	stmt squirrel.SelectBuilder,
	filter *types.GitspaceEventFilter,
) squirrel.SelectBuilder {
	if filter.QueryKey != "" {
		stmt = stmt.Where(squirrel.Eq{"geven_query_key": filter.QueryKey})
	}
	if filter.EntityType != "" {
		stmt = stmt.Where(squirrel.Eq{"geven_entity_type": filter.EntityType})
	}
	if filter.EntityID != 0 {
		stmt = stmt.Where(squirrel.Eq{"geven_entity_id": filter.EntityID})
	}
	if len(filter.SkipEvents) != 0 {
		stmt = stmt.Where(squirrel.NotEq{"geven_event": filter.SkipEvents})
	}
	return stmt
}
// setSortFilter orders events newest-first by their timestamp; the filter is
// currently unused for sorting.
func (g gitspaceEventStore) setSortFilter(
	stmt squirrel.SelectBuilder,
	_ *types.GitspaceEventFilter,
) squirrel.SelectBuilder {
	sorted := stmt.OrderBy("geven_timestamp DESC")
	return sorted
}
// setPaginationFilter applies offset/limit pagination to the statement.
// Pages are 1-based; a page value below 1 is treated as the first page so that
// a negative offset is never cast to a huge uint64 (which would silently
// return an empty result set).
func (g gitspaceEventStore) setPaginationFilter(
	stmt squirrel.SelectBuilder,
	filter *types.GitspaceEventFilter,
) squirrel.SelectBuilder {
	page := filter.Page
	if page < 1 {
		page = 1
	}
	offset := (page - 1) * filter.Size
	//nolint:gosec
	stmt = stmt.Offset(uint64(offset)).Limit(uint64(filter.Size))
	return stmt
}
// mapGitspaceEvents converts a slice of DB rows into API event types.
func (g gitspaceEventStore) mapGitspaceEvents(gitspaceEventEntities []*gitspaceEvent) []*types.GitspaceEvent {
	result := make([]*types.GitspaceEvent, len(gitspaceEventEntities))
	for idx, entity := range gitspaceEventEntities {
		result[idx] = g.mapGitspaceEvent(entity)
	}
	return result
}
// mapGitspaceEvent converts a single DB row into the API event type; the DB
// row ID is not exposed on the API type.
func (g gitspaceEventStore) mapGitspaceEvent(event *gitspaceEvent) *types.GitspaceEvent {
	mapped := types.GitspaceEvent{}
	mapped.Event = event.Event
	mapped.Created = event.Created
	mapped.EntityType = event.EntityType
	mapped.QueryKey = event.QueryKey
	mapped.EntityID = event.EntityID
	mapped.Timestamp = event.Timestamp
	return &mapped
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/pullreq_reviews.go | app/store/database/pullreq_reviews.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/jmoiron/sqlx"
)
// Compile-time check that PullReqReviewStore implements store.PullReqReviewStore.
var _ store.PullReqReviewStore = (*PullReqReviewStore)(nil)
// NewPullReqReviewStore returns a new PullReqReviewStore.
func NewPullReqReviewStore(db *sqlx.DB) *PullReqReviewStore {
	reviewStore := PullReqReviewStore{db: db}
	return &reviewStore
}
// PullReqReviewStore implements store.PullReqReviewStore backed by a relational database.
type PullReqReviewStore struct {
	db *sqlx.DB
}
// pullReqReview is used to fetch pull request review data from the database.
// It is field-for-field identical to types.PullReqReview (only tags differ),
// which allows direct pointer conversion in the map helpers below.
type pullReqReview struct {
	ID        int64                      `db:"pullreq_review_id"`
	CreatedBy int64                      `db:"pullreq_review_created_by"`
	Created   int64                      `db:"pullreq_review_created"`
	Updated   int64                      `db:"pullreq_review_updated"`
	PullReqID int64                      `db:"pullreq_review_pullreq_id"`
	Decision  enum.PullReqReviewDecision `db:"pullreq_review_decision"`
	SHA       string                     `db:"pullreq_review_sha"`
}
// Shared column list and SELECT prefix for pullreq_reviews queries.
const (
	pullreqReviewColumns = `
	pullreq_review_id
	,pullreq_review_created_by
	,pullreq_review_created
	,pullreq_review_updated
	,pullreq_review_pullreq_id
	,pullreq_review_decision
	,pullreq_review_sha`
	pullreqReviewSelectBase = `
	SELECT` + pullreqReviewColumns + `
	FROM pullreq_reviews`
)
// Find finds the pull request review by id.
func (s *PullReqReviewStore) Find(ctx context.Context, id int64) (*types.PullReqReview, error) {
	const sqlQuery = pullreqReviewSelectBase + `
	WHERE pullreq_review_id = $1`
	db := dbtx.GetAccessor(ctx, s.db)
	dst := &pullReqReview{}
	// Error message previously said "activity" — copy-paste from the activity
	// store; this is the review store.
	if err := db.GetContext(ctx, dst, sqlQuery, id); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find pull request review")
	}
	return mapPullReqReview(dst), nil
}
// Create creates a new pull request review and scans the generated primary key
// back into v.ID.
func (s *PullReqReviewStore) Create(ctx context.Context, v *types.PullReqReview) error {
	const sqlQuery = `
	INSERT INTO pullreq_reviews (
	pullreq_review_created_by
	,pullreq_review_created
	,pullreq_review_updated
	,pullreq_review_pullreq_id
	,pullreq_review_decision
	,pullreq_review_sha
	) values (
	:pullreq_review_created_by
	,:pullreq_review_created
	,:pullreq_review_updated
	,:pullreq_review_pullreq_id
	,:pullreq_review_decision
	,:pullreq_review_sha
	) RETURNING pullreq_review_id`
	db := dbtx.GetAccessor(ctx, s.db)
	query, arg, err := db.BindNamed(sqlQuery, mapInternalPullReqReview(v))
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind pull request review object")
	}
	if err = db.QueryRowContext(ctx, query, arg...).Scan(&v.ID); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to insert pull request review")
	}
	return nil
}
// mapPullReqReview converts the DB type to the API type via direct pointer
// conversion (safe because the structs are field-for-field identical).
func mapPullReqReview(v *pullReqReview) *types.PullReqReview {
	return (*types.PullReqReview)(v) // the two types are identical, except for the tags
}
// mapInternalPullReqReview converts the API type to the DB type via direct
// pointer conversion (safe because the structs are field-for-field identical).
func mapInternalPullReqReview(v *types.PullReqReview) *pullReqReview {
	return (*pullReqReview)(v) // the two types are identical, except for the tags
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/step_map.go | app/store/database/step_map.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"encoding/json"
"fmt"
"github.com/harness/gitness/types"
)
// mapInternalToStep converts a DB step row into the API step type, decoding
// the JSON-encoded DependsOn column.
func mapInternalToStep(in *step) (*types.Step, error) {
	var dependsOn []string
	if err := json.Unmarshal(in.DependsOn, &dependsOn); err != nil {
		return nil, fmt.Errorf("could not unmarshal step.DependsOn: %w", err)
	}
	out := types.Step{
		ID:        in.ID,
		StageID:   in.StageID,
		Number:    in.Number,
		Name:      in.Name,
		Status:    in.Status,
		Error:     in.Error,
		ErrIgnore: in.ErrIgnore,
		ExitCode:  in.ExitCode,
		Started:   in.Started,
		Stopped:   in.Stopped,
		Version:   in.Version,
		DependsOn: dependsOn,
		Image:     in.Image,
		Detached:  in.Detached,
		Schema:    in.Schema,
	}
	return &out, nil
}
// mapStepToInternal converts an API step into its DB representation, encoding
// DependsOn as JSON for storage.
func mapStepToInternal(in *types.Step) *step {
	out := step{}
	out.ID = in.ID
	out.StageID = in.StageID
	out.Number = in.Number
	out.Name = in.Name
	out.Status = in.Status
	out.Error = in.Error
	out.ErrIgnore = in.ErrIgnore
	out.ExitCode = in.ExitCode
	out.Started = in.Started
	out.Stopped = in.Stopped
	out.Version = in.Version
	out.DependsOn = EncodeToSQLXJSON(in.DependsOn)
	out.Image = in.Image
	out.Detached = in.Detached
	out.Schema = in.Schema
	return &out
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/pipeline_join.go | app/store/database/pipeline_join.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"database/sql"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
// pipelineExecutionJoin represents a joined row between pipelines and
// executions. The execution columns are nullable because a pipeline may have
// no execution yet (LEFT JOIN semantics).
type pipelineExecutionJoin struct {
	*types.Pipeline
	ID           sql.NullInt64  `db:"execution_id"`
	PipelineID   sql.NullInt64  `db:"execution_pipeline_id"`
	Action       sql.NullString `db:"execution_action"`
	Message      sql.NullString `db:"execution_message"`
	After        sql.NullString `db:"execution_after"`
	RepoID       sql.NullInt64  `db:"execution_repo_id"`
	Trigger      sql.NullString `db:"execution_trigger"`
	Number       sql.NullInt64  `db:"execution_number"`
	Status       sql.NullString `db:"execution_status"`
	Error        sql.NullString `db:"execution_error"`
	Link         sql.NullString `db:"execution_link"`
	Timestamp    sql.NullInt64  `db:"execution_timestamp"`
	Title        sql.NullString `db:"execution_title"`
	Fork         sql.NullString `db:"execution_source_repo"`
	Source       sql.NullString `db:"execution_source"`
	Target       sql.NullString `db:"execution_target"`
	Author       sql.NullString `db:"execution_author"`
	AuthorName   sql.NullString `db:"execution_author_name"`
	AuthorEmail  sql.NullString `db:"execution_author_email"`
	AuthorAvatar sql.NullString `db:"execution_author_avatar"`
	Started      sql.NullInt64  `db:"execution_started"`
	Finished     sql.NullInt64  `db:"execution_finished"`
	Created      sql.NullInt64  `db:"execution_created"`
	Updated      sql.NullInt64  `db:"execution_updated"`
}
// convert maps joined pipeline/execution rows into pipelines. It always
// returns a non-nil slice (empty when there are no rows).
func convert(rows []*pipelineExecutionJoin) []*types.Pipeline {
	pipelines := make([]*types.Pipeline, 0, len(rows))
	for _, row := range rows {
		pipelines = append(pipelines, convertPipelineJoin(row))
	}
	return pipelines
}
// convertPipelineJoin attaches the joined execution (if any) to the embedded
// pipeline. When the execution ID is NULL (no execution matched the join),
// the pipeline is returned with Execution left nil.
func convertPipelineJoin(join *pipelineExecutionJoin) *types.Pipeline {
	ret := join.Pipeline
	if !join.ID.Valid {
		return ret
	}
	ret.Execution = &types.Execution{
		ID:           join.ID.Int64,
		PipelineID:   join.PipelineID.Int64,
		RepoID:       join.RepoID.Int64,
		Action:       enum.TriggerAction(join.Action.String),
		Trigger:      join.Trigger.String,
		Number:       join.Number.Int64,
		After:        join.After.String,
		Message:      join.Message.String,
		Status:       enum.ParseCIStatus(join.Status.String),
		Error:        join.Error.String,
		Link:         join.Link.String,
		Timestamp:    join.Timestamp.Int64,
		Title:        join.Title.String,
		Fork:         join.Fork.String,
		Source:       join.Source.String,
		Target:       join.Target.String,
		Author:       join.Author.String,
		AuthorName:   join.AuthorName.String,
		AuthorEmail:  join.AuthorEmail.String,
		AuthorAvatar: join.AuthorAvatar.String,
		Started:      join.Started.Int64,
		Finished:     join.Finished.Int64,
		Created:      join.Created.Int64,
		Updated:      join.Updated.Int64,
	}
	return ret
}
// pipelineRepoJoin represents a joined row between pipelines and repositories;
// the repo columns are nullable in case the join did not match a repo.
type pipelineRepoJoin struct {
	*types.Pipeline
	RepoID  sql.NullInt64  `db:"repo_id"`
	RepoUID sql.NullString `db:"repo_uid"`
}
// convertPipelineRepoJoins maps joined pipeline/repo rows into pipelines.
// It always returns a non-nil slice (empty when there are no rows).
func convertPipelineRepoJoins(rows []*pipelineRepoJoin) []*types.Pipeline {
	pipelines := make([]*types.Pipeline, 0, len(rows))
	for _, row := range rows {
		pipelines = append(pipelines, convertPipelineRepoJoin(row))
	}
	return pipelines
}
// convertPipelineRepoJoin copies the repo UID onto the embedded pipeline when
// the join matched a repo row; otherwise the pipeline is returned unchanged.
func convertPipelineRepoJoin(join *pipelineRepoJoin) *types.Pipeline {
	pipeline := join.Pipeline
	if join.RepoID.Valid {
		pipeline.RepoUID = join.RepoUID.String
	}
	return pipeline
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/label_value.go | app/store/database/label_value.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/Masterminds/squirrel"
"github.com/guregu/null"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
)
// Shared column list and SELECT prefix for label_values queries.
// MaxLabelValueSize bounds the accepted size of a label value payload.
const (
	MaxLabelValueSize = 10e6
	labelValueColumns = `
	label_value_label_id
	,label_value_value
	,label_value_color
	,label_value_created
	,label_value_updated
	,label_value_created_by
	,label_value_updated_by`
	labelValueSelectBase = `SELECT label_value_id, ` + labelValueColumns + ` FROM label_values`
)
// labelValue is the full DB representation of a label value row.
type labelValue struct {
	ID        int64           `db:"label_value_id"`
	LabelID   int64           `db:"label_value_label_id"`
	Value     string          `db:"label_value_value"`
	Color     enum.LabelColor `db:"label_value_color"`
	Created   int64           `db:"label_value_created"`
	Updated   int64           `db:"label_value_updated"`
	CreatedBy int64           `db:"label_value_created_by"`
	UpdatedBy int64           `db:"label_value_updated_by"`
}
// labelValueInfo is a reduced, nullable projection of a label value row used
// for info listings (e.g. ListInfosByLabelIDs).
type labelValueInfo struct {
	ValueID    null.Int    `db:"label_value_id"`
	LabelID    null.Int    `db:"label_value_label_id"`
	Value      null.String `db:"label_value_value"`
	ValueColor null.String `db:"label_value_color"`
}
// labelValueStore implements store.LabelValueStore backed by a relational database.
type labelValueStore struct {
	db *sqlx.DB
}
// NewLabelValueStore returns a new LabelValueStore.
func NewLabelValueStore(
	db *sqlx.DB,
) store.LabelValueStore {
	return &labelValueStore{
		db: db,
	}
}
// Compile-time check that labelValueStore implements store.LabelValueStore.
var _ store.LabelValueStore = (*labelValueStore)(nil)
// Define inserts a new label value row and scans the generated primary key
// back into lblVal.ID.
func (s *labelValueStore) Define(ctx context.Context, lblVal *types.LabelValue) error {
	const sqlQuery = `
	INSERT INTO label_values (` + labelValueColumns + `)` + `
	values (
	:label_value_label_id
	,:label_value_value
	,:label_value_color
	,:label_value_created
	,:label_value_updated
	,:label_value_created_by
	,:label_value_updated_by
	)
	RETURNING label_value_id`
	db := dbtx.GetAccessor(ctx, s.db)
	query, args, err := db.BindNamed(sqlQuery, mapInternalLabelValue(lblVal))
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind query")
	}
	if err = db.QueryRowContext(ctx, query, args...).Scan(&lblVal.ID); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to create label value")
	}
	return nil
}
// Update overwrites the mutable columns (value, color, updated, updated_by) of
// an existing label value row, addressed by primary key. The caller is
// expected to have set Updated/UpdatedBy on lblVal beforehand.
func (s *labelValueStore) Update(ctx context.Context, lblVal *types.LabelValue) error {
	const sqlQuery = `
	UPDATE label_values SET
	label_value_value = :label_value_value
	,label_value_color = :label_value_color
	,label_value_updated = :label_value_updated
	,label_value_updated_by = :label_value_updated_by
	WHERE label_value_id = :label_value_id`
	db := dbtx.GetAccessor(ctx, s.db)
	query, args, err := db.BindNamed(sqlQuery, mapInternalLabelValue(lblVal))
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind query")
	}
	if _, err := db.ExecContext(ctx, query, args...); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to update label value")
	}
	return nil
}
// Delete removes a single label value of the given label; the value match is
// case-insensitive.
func (s *labelValueStore) Delete(
	ctx context.Context,
	labelID int64,
	value string,
) error {
	const sqlQuery = `
	DELETE FROM label_values
	WHERE label_value_label_id = $1 AND LOWER(label_value_value) = LOWER($2)`
	db := dbtx.GetAccessor(ctx, s.db)
	_, err := db.ExecContext(ctx, sqlQuery, labelID, value)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to delete label")
	}
	return nil
}
// DeleteMany removes multiple label values of the given label in one statement.
//
// NOTE(review): unlike Delete above, matching here is case-sensitive
// (label_value_value compared directly via squirrel.Eq, no LOWER()) — confirm
// whether case-insensitive semantics were intended for bulk deletion too.
func (s *labelValueStore) DeleteMany(
	ctx context.Context,
	labelID int64,
	values []string,
) error {
	stmt := database.Builder.
		Delete("label_values").
		Where("label_value_label_id = ?", labelID).
		Where(squirrel.Eq{"label_value_value": values})
	sql, args, err := stmt.ToSql()
	if err != nil {
		return errors.Wrap(err, "Failed to convert query to sql")
	}
	db := dbtx.GetAccessor(ctx, s.db)
	if _, err := db.ExecContext(ctx, sql, args...); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to delete label")
	}
	return nil
}
// Count returns a count of label values for a specified label.
func (s *labelValueStore) Count(
	ctx context.Context,
	labelID int64,
	opts types.ListQueryFilter,
) (int64, error) {
	stmt := database.Builder.
		Select("count(*)").
		From("label_values").
		Where("label_value_label_id = ?", labelID)
	if opts.Query != "" {
		stmt = stmt.Where(PartialMatch("label_value_value", opts.Query))
	}
	sql, args, err := stmt.ToSql()
	if err != nil {
		return 0, errors.Wrap(err, "Failed to convert query to sql")
	}
	var count int64
	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.QueryRowContext(ctx, sql, args...).Scan(&count); err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "Failed executing count query")
	}
	return count, nil
}
// List returns a page of label values for a specified label, optionally
// filtered by a partial value match.
func (s *labelValueStore) List(
	ctx context.Context,
	labelID int64,
	opts types.ListQueryFilter,
) ([]*types.LabelValue, error) {
	stmt := database.Builder.
		Select(`label_value_id, ` + labelValueColumns).
		From("label_values").
		Where("label_value_label_id = ?", labelID).
		Limit(database.Limit(opts.Size)).
		Offset(database.Offset(opts.Page, opts.Size))
	if opts.Query != "" {
		stmt = stmt.Where(PartialMatch("label_value_value", opts.Query))
	}
	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert query to sql")
	}
	var valueRows []*labelValue
	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.SelectContext(ctx, &valueRows, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Fail to list labels")
	}
	return mapSliceLabelValue(valueRows), nil
}
// ListInfosByLabelIDs returns the values of all requested labels, ordered by
// value, grouped into a map keyed by label ID.
func (s *labelValueStore) ListInfosByLabelIDs(
	ctx context.Context,
	labelIDs []int64,
) (map[int64][]*types.LabelValueInfo, error) {
	stmt := database.Builder.
		Select(`
			 label_value_id
			,label_value_label_id
			,label_value_value
			,label_value_color
		`).
		From("label_values").
		Where(squirrel.Eq{"label_value_label_id": labelIDs}).
		OrderBy("label_value_value")

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert query to sql")
	}

	db := dbtx.GetAccessor(ctx, s.db)

	var dst []*labelValueInfo
	if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
		// fixed message: this lists label values, not labels.
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to list label values")
	}

	valueInfos := mapLabelValuInfos(dst)

	// Group the flat result set by label ID.
	labelValueMap := make(map[int64][]*types.LabelValueInfo)
	for _, info := range valueInfos {
		labelValueMap[*info.LabelID] = append(labelValueMap[*info.LabelID], info)
	}

	return labelValueMap, nil
}
// FindByLabelID returns the value of the given label matching the provided
// text (case-insensitive).
func (s *labelValueStore) FindByLabelID(
	ctx context.Context,
	labelID int64,
	value string,
) (*types.LabelValue, error) {
	const sqlQuery = labelValueSelectBase + `
		WHERE label_value_label_id = $1 AND LOWER(label_value_value) = LOWER($2)`

	var row labelValue
	db := dbtx.GetAccessor(ctx, s.db)
	err := db.GetContext(ctx, &row, sqlQuery, labelID, value)
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find label")
	}

	return mapLabelValue(&row), nil
}
// FindByID returns the label value with the given ID.
func (s *labelValueStore) FindByID(ctx context.Context, id int64) (*types.LabelValue, error) {
	const sqlQuery = labelValueSelectBase + `
		WHERE label_value_id = $1`

	var row labelValue
	db := dbtx.GetAccessor(ctx, s.db)
	err := db.GetContext(ctx, &row, sqlQuery, id)
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find label")
	}

	return mapLabelValue(&row), nil
}
// mapLabelValue converts the internal DB row into the API type.
func mapLabelValue(lbl *labelValue) *types.LabelValue {
	result := &types.LabelValue{
		ID:      lbl.ID,
		LabelID: lbl.LabelID,
		Value:   lbl.Value,
		Color:   lbl.Color,
	}
	result.Created = lbl.Created
	result.Updated = lbl.Updated
	result.CreatedBy = lbl.CreatedBy
	result.UpdatedBy = lbl.UpdatedBy
	return result
}
// mapSliceLabelValue converts a slice of DB rows into API types.
func mapSliceLabelValue(dbLabelValues []*labelValue) []*types.LabelValue {
	out := make([]*types.LabelValue, len(dbLabelValues))
	for i := range dbLabelValues {
		out[i] = mapLabelValue(dbLabelValues[i])
	}
	return out
}
// mapInternalLabelValue converts the API type into its DB representation.
func mapInternalLabelValue(lblVal *types.LabelValue) *labelValue {
	row := &labelValue{
		ID:      lblVal.ID,
		LabelID: lblVal.LabelID,
		Value:   lblVal.Value,
		Color:   lblVal.Color,
	}
	row.Created = lblVal.Created
	row.Updated = lblVal.Updated
	row.CreatedBy = lblVal.CreatedBy
	row.UpdatedBy = lblVal.UpdatedBy
	return row
}
// mapLabeValuelInfo converts a nullable DB row into a LabelValueInfo.
// It returns nil when the row's value ID is SQL NULL (e.g. a join row with
// no matching label value).
// NOTE(review): the name contains a typo ("LabeValuel"); renaming requires a
// coordinated change of its caller — candidate for a follow-up cleanup.
func mapLabeValuelInfo(internal *labelValueInfo) *types.LabelValueInfo {
	if !internal.ValueID.Valid {
		return nil
	}
	return &types.LabelValueInfo{
		ID:      internal.ValueID.Ptr(),
		LabelID: internal.LabelID.Ptr(),
		Value:   internal.Value.Ptr(),
		Color:   internal.ValueColor.Ptr(),
	}
}
// mapLabelValuInfos converts DB rows into LabelValueInfo values.
// Individual entries may be nil when the source row carried a NULL value ID
// (see mapLabeValuelInfo).
func mapLabelValuInfos(
	dbLabels []*labelValueInfo,
) []*types.LabelValueInfo {
	result := make([]*types.LabelValueInfo, len(dbLabels))
	for i, lbl := range dbLabels {
		result[i] = mapLabeValuelInfo(lbl)
	}
	return result
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/public_access.go | app/store/database/public_access.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"fmt"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types/enum"
"github.com/jmoiron/sqlx"
)
// Compile-time check that PublicAccessStore satisfies store.PublicAccessStore.
var _ store.PublicAccessStore = (*PublicAccessStore)(nil)

// NewPublicAccessStore returns a new PublicAccessStore.
func NewPublicAccessStore(db *sqlx.DB) *PublicAccessStore {
	return &PublicAccessStore{
		db: db,
	}
}

// PublicAccessStore implements store.PublicAccessStore backed by a relational database.
// Public visibility is modeled as presence of the resource's ID in a
// per-resource-type marker table.
type PublicAccessStore struct {
	// db is the database handle; per-request transactions are picked up
	// from the context via dbtx.GetAccessor.
	db *sqlx.DB
}
// Find reports whether the resource of the given type and ID is marked public.
func (p *PublicAccessStore) Find(
	ctx context.Context,
	typ enum.PublicResourceType,
	id int64,
) (bool, error) {
	existsQueries := map[enum.PublicResourceType]string{
		enum.PublicResourceTypeRepo:     `SELECT EXISTS(SELECT * FROM public_access_repo WHERE public_access_repo_id = $1)`,
		enum.PublicResourceTypeSpace:    `SELECT EXISTS(SELECT * FROM public_access_space WHERE public_access_space_id = $1)`,
		enum.PublicResourceTypeRegistry: `SELECT EXISTS(SELECT * FROM public_access_registry WHERE public_access_registry_id = $1)`,
	}

	sqlQuery, supported := existsQueries[typ]
	if !supported {
		return false, fmt.Errorf("public resource type %q is not supported", typ)
	}

	db := dbtx.GetAccessor(ctx, p.db)

	var exists bool
	err := db.QueryRowContext(ctx, sqlQuery, id).Scan(&exists)
	if err != nil {
		return false, database.ProcessSQLErrorf(ctx, err, "Select query failed")
	}

	return exists, nil
}
// Create marks the resource of the given type and ID as publicly accessible.
func (p *PublicAccessStore) Create(
	ctx context.Context,
	typ enum.PublicResourceType,
	id int64,
) error {
	var insertStmt string
	switch typ {
	case enum.PublicResourceTypeRepo:
		insertStmt = `INSERT INTO public_access_repo(public_access_repo_id) VALUES($1)`
	case enum.PublicResourceTypeSpace:
		insertStmt = `INSERT INTO public_access_space(public_access_space_id) VALUES($1)`
	case enum.PublicResourceTypeRegistry:
		insertStmt = `INSERT INTO public_access_registry(public_access_registry_id) VALUES($1)`
	default:
		return fmt.Errorf("public resource type %q is not supported", typ)
	}

	accessor := dbtx.GetAccessor(ctx, p.db)

	_, err := accessor.ExecContext(ctx, insertStmt, id)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Insert query failed")
	}

	return nil
}
// Delete removes the public marker of the resource with the given type and ID.
// It is a no-op if the resource isn't marked public.
func (p *PublicAccessStore) Delete(
	ctx context.Context,
	typ enum.PublicResourceType,
	id int64,
) error {
	var sqlQuery string
	switch typ {
	case enum.PublicResourceTypeRepo:
		sqlQuery = `DELETE FROM public_access_repo WHERE public_access_repo_id = $1`
	case enum.PublicResourceTypeSpace:
		sqlQuery = `DELETE FROM public_access_space WHERE public_access_space_id = $1`
	case enum.PublicResourceTypeRegistry:
		sqlQuery = `DELETE FROM public_access_registry WHERE public_access_registry_id = $1`
	default:
		return fmt.Errorf("public resource type %q is not supported", typ)
	}

	db := dbtx.GetAccessor(ctx, p.db)

	if _, err := db.ExecContext(ctx, sqlQuery, id); err != nil {
		// message aligned with Find/Create ("Select/Insert query failed").
		return database.ProcessSQLErrorf(ctx, err, "Delete query failed")
	}

	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/rule.go | app/store/database/rule.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"encoding/json"
"fmt"
"strings"
"time"
"github.com/harness/gitness/app/store"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/Masterminds/squirrel"
"github.com/guregu/null"
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
"github.com/rs/zerolog/log"
)
// Compile-time check that RuleStore satisfies store.RuleStore.
var _ store.RuleStore = (*RuleStore)(nil)

// NewRuleStore returns a new RuleStore.
func NewRuleStore(
	db *sqlx.DB,
	pCache store.PrincipalInfoCache,
) *RuleStore {
	return &RuleStore{
		pCache: pCache,
		db:     db,
	}
}

// RuleStore implements a store.RuleStore backed by a relational database.
type RuleStore struct {
	db *sqlx.DB
	// pCache resolves principal IDs to principal info when mapping rule
	// rows to API types (see mapToRule).
	pCache store.PrincipalInfoCache
}
// rule is the DB representation of a protection rule row.
// SpaceID/RepoID are nullable — a rule is attached either to a space or to a
// repository (see FindByIdentifier's parent-type switch).
type rule struct {
	ID int64 `db:"rule_id"`
	// Version backs the optimistic-locking scheme used by Update.
	Version int64 `db:"rule_version"`

	CreatedBy int64 `db:"rule_created_by"`
	Created   int64 `db:"rule_created"`
	Updated   int64 `db:"rule_updated"`

	SpaceID null.Int `db:"rule_space_id"`
	RepoID  null.Int `db:"rule_repo_id"`

	Identifier  string `db:"rule_uid"`
	Description string `db:"rule_description"`

	Type  enum.RuleType  `db:"rule_type"`
	State enum.RuleState `db:"rule_state"`

	// Pattern, RepoTarget and Definition hold raw JSON persisted as text.
	Pattern    string `db:"rule_pattern"`
	RepoTarget string `db:"rule_repo_target"`
	Definition string `db:"rule_definition"`

	Scope int64 `db:"rule_scope"`
}
const (
	// ruleColumns lists all columns of a rule row.
	// Keep in sync with the `rule` struct and the INSERT in Create.
	ruleColumns = `
		 rule_id
		,rule_version
		,rule_created_by
		,rule_created
		,rule_updated
		,rule_space_id
		,rule_repo_id
		,rule_uid
		,rule_description
		,rule_type
		,rule_state
		,rule_pattern
		,rule_repo_target
		,rule_definition
		,rule_scope`

	// ruleSelectBase is the shared SELECT prefix for rule queries.
	ruleSelectBase = `
	SELECT` + ruleColumns + `
	FROM rules`
)
// Find finds the rule by id.
func (s *RuleStore) Find(ctx context.Context, id int64) (*types.Rule, error) {
	const sqlQuery = ruleSelectBase + `
		WHERE rule_id = $1`

	var row rule
	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.GetContext(ctx, &row, sqlQuery, id); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find rule")
	}

	mapped := s.mapToRule(ctx, &row)
	return &mapped, nil
}
// FindByIdentifier returns the rule with the given identifier
// (case-insensitive) defined on the provided parent repository or space.
func (s *RuleStore) FindByIdentifier(
	ctx context.Context,
	parentType enum.RuleParent,
	parentID int64,
	identifier string,
) (*types.Rule, error) {
	q := database.Builder.
		Select(ruleColumns).
		From("rules").
		Where("LOWER(rule_uid) = ?", strings.ToLower(identifier))

	switch parentType {
	case enum.RuleParentRepo:
		q = q.Where("rule_repo_id = ?", parentID)
	case enum.RuleParentSpace:
		q = q.Where("rule_space_id = ?", parentID)
	default:
		return nil, fmt.Errorf("rule parent type '%s' is not supported", parentType)
	}

	sqlQuery, params, err := q.ToSql()
	if err != nil {
		return nil, fmt.Errorf("failed to convert find rule by Identifier to sql: %w", err)
	}

	var row rule
	db := dbtx.GetAccessor(ctx, s.db)
	if err = db.GetContext(ctx, &row, sqlQuery, params...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing find rule by identifier query")
	}

	mapped := s.mapToRule(ctx, &row)
	return &mapped, nil
}
// Create creates a new protection rule.
// On success the passed rule is overwritten with the stored row, including
// the generated ID and the resolved creator principal info.
func (s *RuleStore) Create(ctx context.Context, rule *types.Rule) error {
	const sqlQuery = `
	INSERT INTO rules (
		 rule_version
		,rule_created_by
		,rule_created
		,rule_updated
		,rule_space_id
		,rule_repo_id
		,rule_uid
		,rule_description
		,rule_type
		,rule_state
		,rule_pattern
		,rule_repo_target
		,rule_definition
		,rule_scope
	) values (
		 :rule_version
		,:rule_created_by
		,:rule_created
		,:rule_updated
		,:rule_space_id
		,:rule_repo_id
		,:rule_uid
		,:rule_description
		,:rule_type
		,:rule_state
		,:rule_pattern
		,:rule_repo_target
		,:rule_definition
		,:rule_scope
	) RETURNING rule_id`

	db := dbtx.GetAccessor(ctx, s.db)

	// Convert to the DB representation for named-parameter binding.
	dbRule := mapToInternalRule(rule)

	query, arg, err := db.BindNamed(sqlQuery, &dbRule)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind rule object")
	}

	// RETURNING yields the generated primary key.
	if err = db.QueryRowContext(ctx, query, arg...).Scan(&dbRule.ID); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Insert rule query failed")
	}

	// Reflect the stored state back to the caller.
	r := s.mapToRule(ctx, &dbRule)

	*rule = r

	return nil
}
// Update updates the protection rule details.
// It uses optimistic locking: the UPDATE only matches when the stored version
// equals the version the caller read (":rule_version - 1" after the local
// bump below); otherwise gitness_store.ErrVersionConflict is returned.
// On success the caller's Version and Updated fields are refreshed.
func (s *RuleStore) Update(ctx context.Context, rule *types.Rule) error {
	const sqlQuery = `
	UPDATE rules
	SET
		 rule_version = :rule_version
		,rule_updated = :rule_updated
		,rule_uid = :rule_uid
		,rule_description = :rule_description
		,rule_state = :rule_state
		,rule_pattern = :rule_pattern
		,rule_repo_target = :rule_repo_target
		,rule_definition = :rule_definition
	WHERE rule_id = :rule_id AND rule_version = :rule_version - 1`

	dbRule := mapToInternalRule(rule)
	// Bump version/timestamp locally; the WHERE clause compares against the
	// pre-bump version so concurrent writers conflict instead of clobbering.
	dbRule.Version++
	dbRule.Updated = time.Now().UnixMilli()

	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(sqlQuery, dbRule)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind rule object")
	}

	result, err := db.ExecContext(ctx, query, arg...)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to update rule")
	}

	count, err := result.RowsAffected()
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to get number of updated rule rows")
	}

	// Zero affected rows means another writer won the race (or the rule is gone).
	if count == 0 {
		return gitness_store.ErrVersionConflict
	}

	rule.Version = dbRule.Version
	rule.Updated = dbRule.Updated

	return nil
}
// Delete deletes the protection rule with the given ID.
// It is a no-op if no such rule exists.
func (s *RuleStore) Delete(ctx context.Context, id int64) error {
	const sqlQuery = `
	DELETE FROM rules
	WHERE rule_id = $1`

	db := dbtx.GetAccessor(ctx, s.db)

	if _, err := db.ExecContext(ctx, sqlQuery, id); err != nil {
		// message aligned with the other queries in this store
		// ("Insert rule query failed", "Failed to update rule", ...).
		return database.ProcessSQLErrorf(ctx, err, "Delete rule query failed")
	}

	return nil
}
// Count returns count of protection rules matching the provided criteria.
func (s *RuleStore) Count(
	ctx context.Context,
	parents []types.RuleParentInfo,
	filter *types.RuleFilter,
) (int64, error) {
	q := database.Builder.
		Select("count(*)").
		From("rules")

	if err := selectRuleParents(parents, &q); err != nil {
		return 0, fmt.Errorf("failed to select rule parents: %w", err)
	}
	q = s.applyFilter(q, filter)

	sqlQuery, params, err := q.ToSql()
	if err != nil {
		return 0, fmt.Errorf("failed to convert count rules query to sql: %w", err)
	}

	var total int64
	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.QueryRowContext(ctx, sqlQuery, params...).Scan(&total); err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "Failed executing count rules query")
	}

	return total, nil
}
// List returns a page of protection rules of a repository or a space.
func (s *RuleStore) List(
	ctx context.Context,
	parents []types.RuleParentInfo,
	filter *types.RuleFilter,
) ([]types.Rule, error) {
	stmt := database.Builder.
		Select(ruleColumns).
		From("rules")

	err := selectRuleParents(parents, &stmt)
	if err != nil {
		return nil, fmt.Errorf("failed to select rule parents: %w", err)
	}

	stmt = s.applyFilter(stmt, filter)

	stmt = stmt.Limit(database.Limit(filter.Size))
	stmt = stmt.Offset(database.Offset(filter.Page, filter.Size))

	order := filter.Order
	if order == enum.OrderDefault {
		order = enum.OrderAsc
	}

	// NOTE(review): for sort values not handled below no ORDER BY is added,
	// leaving the pagination order database-dependent — confirm intended.
	switch filter.Sort {
	case enum.RuleSortCreated:
		stmt = stmt.OrderBy("rule_created " + order.String())
	case enum.RuleSortUpdated:
		stmt = stmt.OrderBy("rule_updated " + order.String())
	// TODO [CODE-1363]: remove after identifier migration.
	case enum.RuleSortUID, enum.RuleSortIdentifier:
		stmt = stmt.OrderBy("LOWER(rule_uid) " + order.String())
	}

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, fmt.Errorf("failed to convert query to sql: %w", err)
	}

	db := dbtx.GetAccessor(ctx, s.db)

	dst := make([]rule, 0)
	if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing custom list query")
	}

	return s.mapToRules(ctx, dst), nil
}
// ruleInfo is the lightweight rule projection returned by ListAllRepoRules.
// Exactly one of SpacePath/RepoPath is non-empty: the query selects '' for
// the other column depending on where the rule is defined.
type ruleInfo struct {
	SpacePath  string         `db:"space_path"`
	RepoPath   string         `db:"repo_path"`
	ID         int64          `db:"rule_id"`
	Identifier string         `db:"rule_uid"`
	Type       enum.RuleType  `db:"rule_type"`
	State      enum.RuleState `db:"rule_state"`
	// Raw JSON persisted as text.
	Pattern    string `db:"rule_pattern"`
	RepoTarget string `db:"rule_repo_target"`
	Definition string `db:"rule_definition"`
}
// listRepoRulesQuery collects every rule applicable to one repository:
//  1. repo_info resolves the repository and its parent space,
//  2. space_parents recursively walks up the space hierarchy,
//  3. spaces_with_path rebuilds each ancestor space's full path from the root.
// The first branch of the UNION yields rules defined on any ancestor space,
// the second yields rules defined on the repository itself. Only rules in
// state 'active' or 'monitor' are returned. The two %s verbs receive an
// optional rule-type filter fragment, one per UNION branch.
const listRepoRulesQuery = `
WITH RECURSIVE
	repo_info(repo_id, repo_uid, repo_space_id) AS (
		SELECT repo_id, repo_uid, repo_parent_id
		FROM repositories
		WHERE repo_id = $1
	),
	space_parents(space_id, space_uid, space_parent_id) AS (
		SELECT space_id, space_uid, space_parent_id
		FROM spaces
		INNER JOIN repo_info ON repo_info.repo_space_id = spaces.space_id
		UNION ALL
		SELECT spaces.space_id, spaces.space_uid, spaces.space_parent_id
		FROM spaces
		INNER JOIN space_parents ON space_parents.space_parent_id = spaces.space_id
	),
	spaces_with_path(space_id, space_parent_id, space_uid, space_full_path) AS (
		SELECT space_id, space_parent_id, space_uid, space_uid
		FROM space_parents
		WHERE space_parent_id IS NULL
		UNION ALL
		SELECT
			space_parents.space_id,
			space_parents.space_parent_id,
			space_parents.space_uid,
			spaces_with_path.space_full_path || '/' || space_parents.space_uid
		FROM space_parents
		INNER JOIN spaces_with_path ON spaces_with_path.space_id = space_parents.space_parent_id
	)
SELECT
	 space_full_path AS "space_path"
	,'' as "repo_path"
	,rule_id
	,rule_uid
	,rule_type
	,rule_state
	,rule_pattern
	,rule_repo_target
	,rule_definition
FROM spaces_with_path
INNER JOIN rules ON rules.rule_space_id = spaces_with_path.space_id
WHERE rule_state IN ('active', 'monitor') %s
UNION ALL
SELECT
	 '' as "space_path"
	,space_full_path || '/' || repo_info.repo_uid AS "repo_path"
	,rule_id
	,rule_uid
	,rule_type
	,rule_state
	,rule_pattern
	,rule_repo_target
	,rule_definition
FROM rules
INNER JOIN repo_info ON repo_info.repo_id = rules.rule_repo_id
INNER JOIN spaces_with_path ON spaces_with_path.space_id = repo_info.repo_space_id
WHERE rule_state IN ('active', 'monitor') %s`

// listRepoRulesQueryAll is the query without any rule-type filtering.
var listRepoRulesQueryAll = fmt.Sprintf(listRepoRulesQuery, "", "")

// listRepoRulesQueryTypesPg filters by rule type via a bound array
// parameter — usable on postgres only.
var listRepoRulesQueryTypesPg = fmt.Sprintf(listRepoRulesQuery, "AND rule_type = ANY($2)", "AND rule_type = ANY($2)")
// ListAllRepoRules returns a list of all protection rules that can be applied on a repository.
// This includes the rules defined directly on the repository and all those defined on the parent spaces.
// Only rules in state 'active' or 'monitor' are returned (see listRepoRulesQuery).
func (s *RuleStore) ListAllRepoRules(
	ctx context.Context,
	repoID int64,
	ruleTypes ...enum.RuleType,
) ([]types.RuleInfoInternal, error) {
	useRuleTypes := len(ruleTypes) > 0
	usingPostgres := s.db.DriverName() == PostgresDriverName

	// Postgres binds the type list as an array parameter; other drivers get
	// the types inlined into the query text below (see ruleTypeQuery).
	query := listRepoRulesQueryAll
	if useRuleTypes && usingPostgres {
		query = listRepoRulesQueryTypesPg
	}

	db := dbtx.GetAccessor(ctx, s.db)

	var err error
	result := make([]ruleInfo, 0)
	if useRuleTypes {
		if usingPostgres {
			err = db.SelectContext(ctx, &result, query, repoID, pq.Array(ruleTypes))
		} else {
			// The same filter fragment is applied to both UNION branches.
			ruleTypeQuery := ruleTypeQuery(ruleTypes...)
			query = fmt.Sprintf(listRepoRulesQuery, ruleTypeQuery, ruleTypeQuery)
			err = db.SelectContext(ctx, &result, query, repoID)
		}
	} else {
		err = db.SelectContext(ctx, &result, query, repoID)
	}
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing custom list query")
	}

	return s.mapToRuleInfos(result), nil
}
// UpdateParentSpace moves all rules defined directly on one space to another
// and returns the number of moved rules.
func (s *RuleStore) UpdateParentSpace(
	ctx context.Context,
	srcParentSpaceID int64,
	targetParentSpaceID int64,
) (int64, error) {
	query, args, err := database.Builder.Update("rules").
		Set("rule_space_id", targetParentSpaceID).
		Where("rule_space_id = ?", srcParentSpaceID).
		ToSql()
	if err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "failed to bind query")
	}

	res, err := dbtx.GetAccessor(ctx, s.db).ExecContext(ctx, query, args...)
	if err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "failed to update rule")
	}

	affected, err := res.RowsAffected()
	if err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "failed to get number of updated rows")
	}

	return affected, nil
}
// ruleTypeQuery renders an "AND rule_type IN (...)" SQL fragment with the
// given rule types inlined as quoted string literals. It is used on drivers
// that can't bind an array parameter (non-postgres, see ListAllRepoRules).
func ruleTypeQuery(ruleTypes ...enum.RuleType) string {
	var b strings.Builder
	b.WriteString(`AND rule_type IN (`)
	for i, rt := range ruleTypes {
		if i > 0 {
			b.WriteString(`,`)
		}
		b.WriteByte('\'')
		// Rule types are internal enum values, but escape single quotes
		// defensively so an inlined literal can never break the statement.
		b.WriteString(strings.ReplaceAll(string(rt), "'", "''"))
		b.WriteByte('\'')
	}
	b.WriteString(`)`)
	return b.String()
}
// applyFilter adds the state, type and free-text query conditions of the
// filter to the statement.
func (*RuleStore) applyFilter(
	stmt squirrel.SelectBuilder,
	filter *types.RuleFilter,
) squirrel.SelectBuilder {
	switch {
	case len(filter.States) == 1:
		stmt = stmt.Where("rule_state = ?", filter.States[0])
	case len(filter.States) > 1:
		stmt = stmt.Where(squirrel.Eq{"rule_state": filter.States})
	}

	switch {
	case len(filter.Types) == 1:
		stmt = stmt.Where("rule_type = ?", filter.Types[0])
	case len(filter.Types) > 1:
		stmt = stmt.Where(squirrel.Eq{"rule_type": filter.Types})
	}

	if filter.Query != "" {
		stmt = stmt.Where(PartialMatch("rule_uid", filter.Query))
	}

	return stmt
}
// mapToRule converts a DB rule row into the API type and resolves the
// creator's principal info from the cache.
func (s *RuleStore) mapToRule(
	ctx context.Context,
	in *rule,
) types.Rule {
	r := types.Rule{
		ID:          in.ID,
		Version:     in.Version,
		CreatedBy:   in.CreatedBy,
		Created:     in.Created,
		Updated:     in.Updated,
		SpaceID:     in.SpaceID.Ptr(),
		RepoID:      in.RepoID.Ptr(),
		Identifier:  in.Identifier,
		Description: in.Description,
		Type:        in.Type,
		State:       in.State,
		Pattern:     json.RawMessage(in.Pattern),
		RepoTarget:  json.RawMessage(in.RepoTarget),
		Definition:  json.RawMessage(in.Definition),
		Scope:       in.Scope,
	}

	// Best effort: a cache failure is only logged and leaves CreatedByInfo
	// at its zero value instead of failing the whole mapping.
	createdBy, err := s.pCache.Get(ctx, in.CreatedBy)
	if err != nil {
		log.Ctx(ctx).Err(err).Msg("failed to load rule creator")
	}

	if createdBy != nil {
		r.CreatedByInfo = *createdBy
	}

	return r
}
// mapToRules converts a slice of DB rule rows into API types.
func (s *RuleStore) mapToRules(
	ctx context.Context,
	rules []rule,
) []types.Rule {
	result := make([]types.Rule, 0, len(rules))
	for i := range rules {
		result = append(result, s.mapToRule(ctx, &rules[i]))
	}
	return result
}
// mapToInternalRule converts the API rule type into its DB representation.
func mapToInternalRule(in *types.Rule) rule {
	out := rule{
		ID:        in.ID,
		Version:   in.Version,
		CreatedBy: in.CreatedBy,
		Created:   in.Created,
		Updated:   in.Updated,
	}
	out.SpaceID = null.IntFromPtr(in.SpaceID)
	out.RepoID = null.IntFromPtr(in.RepoID)
	out.Identifier = in.Identifier
	out.Description = in.Description
	out.Type = in.Type
	out.State = in.State
	out.Pattern = string(in.Pattern)
	out.RepoTarget = string(in.RepoTarget)
	out.Definition = string(in.Definition)
	out.Scope = in.Scope
	return out
}
// mapToRuleInfo converts a DB rule info row into the API type.
func (*RuleStore) mapToRuleInfo(in *ruleInfo) types.RuleInfoInternal {
	info := types.RuleInfo{
		SpacePath:  in.SpacePath,
		RepoPath:   in.RepoPath,
		ID:         in.ID,
		Identifier: in.Identifier,
		Type:       in.Type,
		State:      in.State,
	}
	return types.RuleInfoInternal{
		RuleInfo:   info,
		Pattern:    json.RawMessage(in.Pattern),
		RepoTarget: json.RawMessage(in.RepoTarget),
		Definition: json.RawMessage(in.Definition),
	}
}
// mapToRuleInfos converts a slice of DB rule info rows into API types.
func (s *RuleStore) mapToRuleInfos(
	ruleInfos []ruleInfo,
) []types.RuleInfoInternal {
	result := make([]types.RuleInfoInternal, 0, len(ruleInfos))
	for i := range ruleInfos {
		result = append(result, s.mapToRuleInfo(&ruleInfos[i]))
	}
	return result
}
// selectRuleParents adds a WHERE condition to the statement that matches
// rules defined directly on any of the given parents (repos and/or spaces).
// NOTE(review): with an empty parents slice the squirrel.Or is empty — how
// that renders depends on the squirrel version; confirm callers never pass
// an empty list.
func selectRuleParents(
	parents []types.RuleParentInfo,
	stmt *squirrel.SelectBuilder,
) error {
	var parentSelector squirrel.Or
	for _, parent := range parents {
		switch parent.Type {
		case enum.RuleParentRepo:
			parentSelector = append(parentSelector, squirrel.Eq{
				"rule_repo_id": parent.ID,
			})
		case enum.RuleParentSpace:
			parentSelector = append(parentSelector, squirrel.Eq{
				"rule_space_id": parent.ID,
			})
		default:
			return fmt.Errorf("rule parent type '%s' is not supported", parent.Type)
		}
	}

	*stmt = stmt.Where(parentSelector)

	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/template.go | app/store/database/template.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/app/store"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
)
// Compile-time check that templateStore satisfies store.TemplateStore.
var _ store.TemplateStore = (*templateStore)(nil)

const (
	// templateQueryBase is the shared SELECT prefix for template queries.
	templateQueryBase = `
		SELECT` + templateColumns + `
		FROM templates`

	// templateColumns lists the columns of a template row; keep in sync
	// with types.Template db tags and the INSERT in Create.
	templateColumns = `
	template_id,
	template_description,
	template_type,
	template_space_id,
	template_uid,
	template_data,
	template_created,
	template_updated,
	template_version
	`
)

// NewTemplateStore returns a new TemplateStore.
func NewTemplateStore(db *sqlx.DB) store.TemplateStore {
	return &templateStore{
		db: db,
	}
}

// templateStore implements store.TemplateStore backed by a relational database.
type templateStore struct {
	db *sqlx.DB
}
// Find returns a template given a template ID.
func (s *templateStore) Find(ctx context.Context, id int64) (*types.Template, error) {
	const findQueryStmt = templateQueryBase + `
		WHERE template_id = $1`

	result := new(types.Template)
	db := dbtx.GetAccessor(ctx, s.db)
	err := db.GetContext(ctx, result, findQueryStmt, id)
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find template")
	}

	return result, nil
}
// FindByIdentifierAndType returns a template in a space with a given identifier and a given type.
func (s *templateStore) FindByIdentifierAndType(
	ctx context.Context,
	spaceID int64,
	identifier string,
	resolverType enum.ResolverType) (*types.Template, error) {
	const findQueryStmt = templateQueryBase + `
		WHERE template_space_id = $1 AND template_uid = $2 AND template_type = $3`

	result := new(types.Template)
	db := dbtx.GetAccessor(ctx, s.db)
	err := db.GetContext(ctx, result, findQueryStmt, spaceID, identifier, resolverType.String())
	if err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find template")
	}

	return result, nil
}
// Create creates a template.
// On success the generated ID is written back into template.ID.
func (s *templateStore) Create(ctx context.Context, template *types.Template) error {
	const templateInsertStmt = `
	INSERT INTO templates (
		template_description,
		template_space_id,
		template_uid,
		template_data,
		template_type,
		template_created,
		template_updated,
		template_version
	) VALUES (
		:template_description,
		:template_space_id,
		:template_uid,
		:template_data,
		:template_type,
		:template_created,
		:template_updated,
		:template_version
	) RETURNING template_id`
	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(templateInsertStmt, template)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind template object")
	}

	// RETURNING yields the generated primary key.
	if err = db.QueryRowContext(ctx, query, arg...).Scan(&template.ID); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "template query failed")
	}

	return nil
}
// Update updates a template using optimistic locking: the UPDATE only
// matches when the stored version equals the caller's version; otherwise
// gitness_store.ErrVersionConflict is returned. On success the caller's
// Version and Updated fields are refreshed.
func (s *templateStore) Update(ctx context.Context, p *types.Template) error {
	const templateUpdateStmt = `
	UPDATE templates
	SET
		template_description = :template_description,
		template_uid = :template_uid,
		template_data = :template_data,
		template_type = :template_type,
		template_updated = :template_updated,
		template_version = :template_version
	WHERE template_id = :template_id AND template_version = :template_version - 1`
	updatedAt := time.Now()

	// Work on a copy so the caller's struct is only touched on success.
	template := *p

	// Bump version/timestamp; the WHERE clause compares against version-1,
	// i.e. the version the caller read.
	template.Version++
	template.Updated = updatedAt.UnixMilli()

	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(templateUpdateStmt, template)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind template object")
	}

	result, err := db.ExecContext(ctx, query, arg...)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to update template")
	}

	count, err := result.RowsAffected()
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to get number of updated rows")
	}

	// Zero affected rows means another writer bumped the version first.
	if count == 0 {
		return gitness_store.ErrVersionConflict
	}

	p.Version = template.Version
	p.Updated = template.Updated
	return nil
}
// UpdateOptLock updates the template using the optimistic locking mechanism:
// it applies mutateFn to a copy and retries with a freshly loaded row
// whenever Update reports a version conflict.
func (s *templateStore) UpdateOptLock(ctx context.Context,
	template *types.Template,
	mutateFn func(template *types.Template) error,
) (*types.Template, error) {
	for {
		// Mutate a copy so a failed attempt leaves the input untouched.
		dup := *template

		err := mutateFn(&dup)
		if err != nil {
			return nil, err
		}

		err = s.Update(ctx, &dup)
		if err == nil {
			return &dup, nil
		}
		if !errors.Is(err, gitness_store.ErrVersionConflict) {
			return nil, err
		}

		// Version conflict: re-read the latest row and retry the mutation.
		template, err = s.Find(ctx, template.ID)
		if err != nil {
			return nil, err
		}
	}
}
// List lists a page of the templates present in a space, optionally filtered
// by a partial match on the template identifier.
func (s *templateStore) List(
	ctx context.Context,
	parentID int64,
	filter types.ListQueryFilter,
) ([]*types.Template, error) {
	stmt := database.Builder.
		Select(templateColumns).
		From("templates").
		// NOTE(review): parentID is bound as a string here while Count binds
		// the int directly — the fmt.Sprint looks unnecessary; consider
		// aligning the two in a follow-up.
		Where("template_space_id = ?", fmt.Sprint(parentID))

	if filter.Query != "" {
		stmt = stmt.Where(PartialMatch("template_uid", filter.Query))
	}

	stmt = stmt.Limit(database.Limit(filter.Size))
	stmt = stmt.Offset(database.Offset(filter.Page, filter.Size))

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert query to sql")
	}

	db := dbtx.GetAccessor(ctx, s.db)

	dst := []*types.Template{}
	if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing custom list query")
	}

	return dst, nil
}
// Delete deletes a template given a template ID.
func (s *templateStore) Delete(ctx context.Context, id int64) error {
	const templateDeleteStmt = `
		DELETE FROM templates
		WHERE template_id = $1`

	db := dbtx.GetAccessor(ctx, s.db)

	_, err := db.ExecContext(ctx, templateDeleteStmt, id)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Could not delete template")
	}

	return nil
}
// DeleteByIdentifierAndType deletes a template with a given identifier in a space.
func (s *templateStore) DeleteByIdentifierAndType(
	ctx context.Context,
	spaceID int64,
	identifier string,
	resolverType enum.ResolverType,
) error {
	const templateDeleteStmt = `
		DELETE FROM templates
		WHERE template_space_id = $1 AND template_uid = $2 AND template_type = $3`

	db := dbtx.GetAccessor(ctx, s.db)

	_, err := db.ExecContext(ctx, templateDeleteStmt, spaceID, identifier, resolverType.String())
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Could not delete template")
	}

	return nil
}
// Count of templates in a space.
func (s *templateStore) Count(ctx context.Context, parentID int64, filter types.ListQueryFilter) (int64, error) {
	q := database.Builder.
		Select("count(*)").
		From("templates").
		Where("template_space_id = ?", parentID)
	if filter.Query != "" {
		q = q.Where(PartialMatch("template_uid", filter.Query))
	}

	sqlQuery, params, err := q.ToSql()
	if err != nil {
		return 0, errors.Wrap(err, "Failed to convert query to sql")
	}

	var total int64
	db := dbtx.GetAccessor(ctx, s.db)
	if err := db.QueryRowContext(ctx, sqlQuery, params...).Scan(&total); err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "Failed executing count query")
	}

	return total, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/plugin.go | app/store/database/plugin.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
)
// Compile-time check that pluginStore satisfies store.PluginStore.
var _ store.PluginStore = (*pluginStore)(nil)

const (
	// pluginColumns lists the columns of a plugin row; plugins are keyed by
	// uid+version (there is no numeric id column). Keep in sync with
	// types.Plugin db tags and the INSERT in Create.
	pluginColumns = `
	plugin_uid
	,plugin_description
	,plugin_type
	,plugin_version
	,plugin_logo
	,plugin_spec
	`
)

// NewPluginStore returns a new PluginStore.
func NewPluginStore(db *sqlx.DB) store.PluginStore {
	return &pluginStore{
		db: db,
	}
}

// pluginStore implements store.PluginStore backed by a relational database.
type pluginStore struct {
	db *sqlx.DB
}
// Create creates a new entry in the plugin datastore.
func (s *pluginStore) Create(ctx context.Context, plugin *types.Plugin) error {
	const pluginInsertStmt = `
	INSERT INTO plugins (
		plugin_uid
		,plugin_description
		,plugin_type
		,plugin_version
		,plugin_logo
		,plugin_spec
	) VALUES (
		:plugin_uid
		,:plugin_description
		,:plugin_type
		,:plugin_version
		,:plugin_logo
		,:plugin_spec
	) RETURNING plugin_uid`
	db := dbtx.GetAccessor(ctx, s.db)

	query, arg, err := db.BindNamed(pluginInsertStmt, plugin)
	if err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to bind plugin object")
	}

	// There is no generated numeric key; RETURNING echoes the uid back into
	// the identifier field.
	if err = db.QueryRowContext(ctx, query, arg...).Scan(&plugin.Identifier); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "plugin query failed")
	}

	return nil
}
// Find finds a version of a plugin, keyed by uid and version.
func (s *pluginStore) Find(ctx context.Context, name, version string) (*types.Plugin, error) {
	const pluginFindStmt = `
SELECT` + pluginColumns +
		`FROM plugins
WHERE plugin_uid = $1 AND plugin_version = $2
`
	db := dbtx.GetAccessor(ctx, s.db)
	dst := new(types.Plugin)
	if err := db.GetContext(ctx, dst, pluginFindStmt, name, version); err != nil {
		// Fixed: the message previously said "Failed to find pipeline" —
		// a copy/paste leftover from the pipeline store.
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find plugin")
	}
	return dst, nil
}
// List returns the page of plugins selected by the filter, along with their
// associated schemas.
func (s *pluginStore) List(
	ctx context.Context,
	filter types.ListQueryFilter,
) ([]*types.Plugin, error) {
	q := database.Builder.
		Select(pluginColumns).
		From("plugins")
	if filter.Query != "" {
		q = q.Where(PartialMatch("plugin_uid", filter.Query))
	}
	q = q.
		Limit(database.Limit(filter.Size)).
		Offset(database.Offset(filter.Page, filter.Size))

	sqlQuery, params, err := q.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert query to sql")
	}

	plugins := []*types.Plugin{}
	if err := dbtx.GetAccessor(ctx, s.db).SelectContext(ctx, &plugins, sqlQuery, params...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing custom list query")
	}
	return plugins, nil
}
// ListAll returns every plugin stored in the database, unfiltered and unpaged.
func (s *pluginStore) ListAll(
	ctx context.Context,
) ([]*types.Plugin, error) {
	q := database.Builder.
		Select(pluginColumns).
		From("plugins")

	sqlQuery, params, err := q.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert query to sql")
	}

	plugins := []*types.Plugin{}
	if err := dbtx.GetAccessor(ctx, s.db).SelectContext(ctx, &plugins, sqlQuery, params...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing custom list query")
	}
	return plugins, nil
}
// Count returns the number of plugins matching the filter criteria.
func (s *pluginStore) Count(ctx context.Context, filter types.ListQueryFilter) (int64, error) {
	q := database.Builder.
		Select("count(*)").
		From("plugins")
	if filter.Query != "" {
		q = q.Where(PartialMatch("plugin_uid", filter.Query))
	}

	sqlQuery, params, err := q.ToSql()
	if err != nil {
		return 0, errors.Wrap(err, "Failed to convert query to sql")
	}

	var total int64
	if err := dbtx.GetAccessor(ctx, s.db).QueryRowContext(ctx, sqlQuery, params...).Scan(&total); err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "Failed executing count query")
	}
	return total, nil
}
// Update updates a plugin row, keyed by plugin_uid; all mutable columns are
// overwritten from the supplied struct.
// NOTE(review): unlike pipelineStore.Update there is no version check and the
// affected row count is not inspected, so updating a missing uid silently
// succeeds - confirm this is intended.
func (s *pluginStore) Update(ctx context.Context, p *types.Plugin) error {
const pluginUpdateStmt = `
UPDATE plugins
SET
plugin_description = :plugin_description
,plugin_type = :plugin_type
,plugin_version = :plugin_version
,plugin_logo = :plugin_logo
,plugin_spec = :plugin_spec
WHERE plugin_uid = :plugin_uid`
db := dbtx.GetAccessor(ctx, s.db)
// Expand named parameters into positional arguments for the driver.
query, arg, err := db.BindNamed(pluginUpdateStmt, p)
if err != nil {
return database.ProcessSQLErrorf(ctx, err, "Failed to bind plugin object")
}
_, err = db.ExecContext(ctx, query, arg...)
if err != nil {
return database.ProcessSQLErrorf(ctx, err, "Failed to update plugin")
}
return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/pipeline.go | app/store/database/pipeline.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/app/store"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
)
// Compile-time assertion that pipelineStore satisfies store.PipelineStore.
var _ store.PipelineStore = (*pipelineStore)(nil)
const (
// pipelineQueryBase is the shared SELECT prefix used by single-row
// pipeline lookups.
pipelineQueryBase = `
SELECT` +
pipelineColumns + `
FROM pipelines`
// pipelineColumns is the column list shared by pipeline queries; the
// leading newline and commas make it safe to concatenate after SELECT.
pipelineColumns = `
pipeline_id
,pipeline_description
,pipeline_created_by
,pipeline_disabled
,pipeline_uid
,pipeline_seq
,pipeline_repo_id
,pipeline_default_branch
,pipeline_config_path
,pipeline_created
,pipeline_updated
,pipeline_version
`
)
// NewPipelineStore returns a new PipelineStore backed by the given database handle.
func NewPipelineStore(db *sqlx.DB) store.PipelineStore {
	return &pipelineStore{db: db}
}
// pipelineStore persists pipelines using a sqlx database handle.
type pipelineStore struct {
db *sqlx.DB
}
// Find returns a pipeline given a pipeline ID.
func (s *pipelineStore) Find(ctx context.Context, id int64) (*types.Pipeline, error) {
const findQueryStmt = pipelineQueryBase + `
WHERE pipeline_id = $1`
db := dbtx.GetAccessor(ctx, s.db)
dst := new(types.Pipeline)
if err := db.GetContext(ctx, dst, findQueryStmt, id); err != nil {
// ProcessSQLErrorf translates driver errors (presumably incl. no-rows)
// into store-level errors - see its definition for the mapping.
return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find pipeline")
}
return dst, nil
}
// FindByIdentifier returns a pipeline for a given repo with a given Identifier.
func (s *pipelineStore) FindByIdentifier(
ctx context.Context,
repoID int64,
identifier string,
) (*types.Pipeline, error) {
// The (repo id, uid) pair uniquely addresses a pipeline in this query.
const findQueryStmt = pipelineQueryBase + `
WHERE pipeline_repo_id = $1 AND pipeline_uid = $2`
db := dbtx.GetAccessor(ctx, s.db)
dst := new(types.Pipeline)
if err := db.GetContext(ctx, dst, findQueryStmt, repoID, identifier); err != nil {
return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find pipeline")
}
return dst, nil
}
// Create creates a pipeline.
// On success, pipeline.ID is refreshed from the insert's RETURNING clause.
func (s *pipelineStore) Create(ctx context.Context, pipeline *types.Pipeline) error {
// Named-parameter insert; values are bound from the pipeline struct's db tags.
const pipelineInsertStmt = `
INSERT INTO pipelines (
pipeline_description
,pipeline_uid
,pipeline_seq
,pipeline_repo_id
,pipeline_disabled
,pipeline_created_by
,pipeline_default_branch
,pipeline_config_path
,pipeline_created
,pipeline_updated
,pipeline_version
) VALUES (
:pipeline_description,
:pipeline_uid,
:pipeline_seq,
:pipeline_repo_id,
:pipeline_disabled,
:pipeline_created_by,
:pipeline_default_branch,
:pipeline_config_path,
:pipeline_created,
:pipeline_updated,
:pipeline_version
) RETURNING pipeline_id`
db := dbtx.GetAccessor(ctx, s.db)
// Expand named parameters into positional arguments for the driver.
query, arg, err := db.BindNamed(pipelineInsertStmt, pipeline)
if err != nil {
return database.ProcessSQLErrorf(ctx, err, "Failed to bind pipeline object")
}
// Scan the generated primary key back into the caller's struct.
if err = db.QueryRowContext(ctx, query, arg...).Scan(&pipeline.ID); err != nil {
return database.ProcessSQLErrorf(ctx, err, "Pipeline query failed")
}
return nil
}
// Update updates a pipeline using optimistic locking: the row is matched on
// both id and the caller's current version, and the version column is
// incremented on every successful write. Returns
// gitness_store.ErrVersionConflict when the row was modified concurrently.
func (s *pipelineStore) Update(ctx context.Context, p *types.Pipeline) error {
// The bound :pipeline_version is the already-incremented value, so the
// WHERE clause compares against ":pipeline_version - 1" (the caller's
// original version).
const pipelineUpdateStmt = `
UPDATE pipelines
SET
pipeline_description = :pipeline_description,
pipeline_uid = :pipeline_uid,
pipeline_seq = :pipeline_seq,
pipeline_disabled = :pipeline_disabled,
pipeline_default_branch = :pipeline_default_branch,
pipeline_config_path = :pipeline_config_path,
pipeline_updated = :pipeline_updated,
pipeline_version = :pipeline_version
WHERE pipeline_id = :pipeline_id AND pipeline_version = :pipeline_version - 1`
updatedAt := time.Now()
// Work on a copy so the caller's struct is only mutated after success.
pipeline := *p
pipeline.Version++
pipeline.Updated = updatedAt.UnixMilli()
db := dbtx.GetAccessor(ctx, s.db)
query, arg, err := db.BindNamed(pipelineUpdateStmt, pipeline)
if err != nil {
return database.ProcessSQLErrorf(ctx, err, "Failed to bind pipeline object")
}
result, err := db.ExecContext(ctx, query, arg...)
if err != nil {
return database.ProcessSQLErrorf(ctx, err, "Failed to update pipeline")
}
count, err := result.RowsAffected()
if err != nil {
return database.ProcessSQLErrorf(ctx, err, "Failed to get number of updated rows")
}
// Zero affected rows means the version predicate did not match:
// someone else updated the row in the meantime.
if count == 0 {
return gitness_store.ErrVersionConflict
}
// Propagate the new version and timestamp back to the caller's struct.
p.Updated = pipeline.Updated
p.Version = pipeline.Version
return nil
}
// List lists all the pipelines for a repository, optionally filtered by a
// partial match on the pipeline uid, with pagination applied from the filter.
func (s *pipelineStore) List(
	ctx context.Context,
	repoID int64,
	filter *types.ListPipelinesFilter,
) ([]*types.Pipeline, error) {
	stmt := database.Builder.
		Select(pipelineColumns).
		From("pipelines").
		// Bind repoID directly: the column is numeric and every other query
		// in this store (e.g. Count) binds the int64 itself; the previous
		// fmt.Sprint(repoID) stringified the argument needlessly.
		Where("pipeline_repo_id = ?", repoID)

	if filter.Query != "" {
		stmt = stmt.Where(PartialMatch("pipeline_uid", filter.Query))
	}
	stmt = stmt.Limit(database.Limit(filter.Size))
	stmt = stmt.Offset(database.Offset(filter.Page, filter.Size))

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert query to sql")
	}

	db := dbtx.GetAccessor(ctx, s.db)
	dst := []*types.Pipeline{}
	if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing custom list query")
	}
	return dst, nil
}
// ListInSpace returns the page of pipelines that live in repositories directly
// under the given space, together with basic repository information.
func (s *pipelineStore) ListInSpace(
	ctx context.Context,
	spaceID int64,
	filter types.ListPipelinesFilter,
) ([]*types.Pipeline, error) {
	// Extend the shared pipeline columns with the joined repo columns.
	const pipelineWithRepoColumns = pipelineColumns + `
,repo_id
,repo_uid
`
	q := database.Builder.
		Select(pipelineWithRepoColumns).
		From("pipelines").
		InnerJoin("repositories ON pipeline_repo_id = repo_id").
		Where("repo_parent_id = ?", spaceID)
	if filter.Query != "" {
		q = q.Where(PartialMatch("pipeline_uid", filter.Query))
	}
	q = q.
		Limit(database.Limit(filter.Size)).
		Offset(database.Offset(filter.Page, filter.Size))

	sqlQuery, params, err := q.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert query to sql")
	}

	rows := []*pipelineRepoJoin{}
	if err = dbtx.GetAccessor(ctx, s.db).SelectContext(ctx, &rows, sqlQuery, params...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing custom list query")
	}
	return convertPipelineRepoJoins(rows), nil
}
// ListLatest lists all the pipelines under a repository with information
// about the latest build if available.
// Each pipeline is left-joined against its single newest execution (the max
// execution_id per pipeline), so pipelines without any execution are still
// returned with empty execution fields.
func (s *pipelineStore) ListLatest(
ctx context.Context,
repoID int64,
filter *types.ListPipelinesFilter,
) ([]*types.Pipeline, error) {
const pipelineExecutionColumns = pipelineColumns + `
,executions.execution_id
,executions.execution_pipeline_id
,execution_repo_id
,execution_trigger
,execution_number
,execution_status
,execution_error
,execution_link
,execution_message
,execution_after
,execution_timestamp
,execution_title
,execution_author
,execution_author_name
,execution_author_email
,execution_author_avatar
,execution_source
,execution_target
,execution_source_repo
,execution_started
,execution_finished
,execution_created
,execution_updated
`
// Create a subquery to get max execution IDs for each unique execution pipeline ID.
subquery := database.Builder.
Select("execution_pipeline_id, MAX(execution_id) AS execution_id").
From("executions").
Where("execution_repo_id = ?").
GroupBy("execution_pipeline_id")
// Convert the subquery to SQL.
// NOTE(review): the subquery's placeholder carries no bound argument here
// (its args are discarded below); the single repoID argument bound on the
// outer WHERE is relied upon to satisfy both placeholders once the builder
// applies its placeholder format - confirm against database.Builder's
// placeholder configuration before touching this.
subquerySQL, _, err := subquery.ToSql()
if err != nil {
return nil, err
}
// Left join the previous table with executions and pipelines table.
stmt := database.Builder.
Select(pipelineExecutionColumns).
From("pipelines").
LeftJoin("("+subquerySQL+") AS max_executions ON pipelines.pipeline_id = max_executions.execution_pipeline_id").
LeftJoin("executions ON executions.execution_id = max_executions.execution_id").
Where("pipeline_repo_id = ?", fmt.Sprint(repoID))
if filter.Query != "" {
stmt = stmt.Where(PartialMatch("pipeline_uid", filter.Query))
}
stmt = stmt.Limit(database.Limit(filter.Size))
stmt = stmt.Offset(database.Offset(filter.Page, filter.Size))
sql, args, err := stmt.ToSql()
if err != nil {
return nil, errors.Wrap(err, "Failed to convert query to sql")
}
db := dbtx.GetAccessor(ctx, s.db)
dst := []*pipelineExecutionJoin{}
if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
return nil, database.ProcessSQLErrorf(ctx, err, "Failed executing custom list query")
}
// Fold the joined rows into pipelines with embedded latest-execution data.
return convert(dst), nil
}
// UpdateOptLock applies mutateFn to a copy of the pipeline and persists it,
// retrying with a freshly loaded row whenever an optimistic-lock
// (version-conflict) error is reported by Update.
func (s *pipelineStore) UpdateOptLock(ctx context.Context,
	pipeline *types.Pipeline,
	mutateFn func(pipeline *types.Pipeline) error) (*types.Pipeline, error) {
	for {
		updated := *pipeline
		if err := mutateFn(&updated); err != nil {
			return nil, err
		}

		err := s.Update(ctx, &updated)
		if err == nil {
			return &updated, nil
		}
		if !errors.Is(err, gitness_store.ErrVersionConflict) {
			return nil, err
		}

		// Someone else updated the row concurrently - reload and retry.
		pipeline, err = s.Find(ctx, pipeline.ID)
		if err != nil {
			return nil, err
		}
	}
}
// Count of pipelines under a repo; a repoID of zero counts all pipelines in
// the system.
func (s *pipelineStore) Count(
	ctx context.Context,
	repoID int64,
	filter *types.ListPipelinesFilter,
) (int64, error) {
	q := database.Builder.
		Select("count(*)").
		From("pipelines")
	if repoID > 0 {
		q = q.Where("pipeline_repo_id = ?", repoID)
	}
	if filter.Query != "" {
		q = q.Where(PartialMatch("pipeline_uid", filter.Query))
	}

	sqlQuery, params, err := q.ToSql()
	if err != nil {
		return 0, errors.Wrap(err, "Failed to convert query to sql")
	}

	var total int64
	if err := dbtx.GetAccessor(ctx, s.db).QueryRowContext(ctx, sqlQuery, params...).Scan(&total); err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "Failed executing count query")
	}
	return total, nil
}
// CountInSpace counts the number of pipelines in repositories directly under
// the given space.
func (s *pipelineStore) CountInSpace(
	ctx context.Context,
	spaceID int64,
	filter types.ListPipelinesFilter,
) (int64, error) {
	q := database.Builder.
		Select("count(*)").
		From("pipelines").
		InnerJoin("repositories ON pipeline_repo_id = repo_id").
		Where("repo_parent_id = ?", spaceID)
	if filter.Query != "" {
		q = q.Where(PartialMatch("pipeline_uid", filter.Query))
	}

	sqlQuery, params, err := q.ToSql()
	if err != nil {
		return 0, errors.Wrap(err, "Failed to convert query to sql")
	}

	var total int64
	if err := dbtx.GetAccessor(ctx, s.db).QueryRowContext(ctx, sqlQuery, params...).Scan(&total); err != nil {
		return 0, database.ProcessSQLErrorf(ctx, err, "Failed executing count query")
	}
	return total, nil
}
// Delete deletes a pipeline given a pipeline ID.
// Deleting a non-existent ID is not an error (affected rows are not checked).
func (s *pipelineStore) Delete(ctx context.Context, id int64) error {
const pipelineDeleteStmt = `
DELETE FROM pipelines
WHERE pipeline_id = $1`
db := dbtx.GetAccessor(ctx, s.db)
if _, err := db.ExecContext(ctx, pipelineDeleteStmt, id); err != nil {
return database.ProcessSQLErrorf(ctx, err, "Could not delete pipeline")
}
return nil
}
// DeleteByIdentifier deletes a pipeline with a given Identifier under a given repo.
// Deleting a non-existent pipeline is not an error (affected rows are not checked).
func (s *pipelineStore) DeleteByIdentifier(ctx context.Context, repoID int64, identifier string) error {
const pipelineDeleteStmt = `
DELETE FROM pipelines
WHERE pipeline_repo_id = $1 AND pipeline_uid = $2`
db := dbtx.GetAccessor(ctx, s.db)
if _, err := db.ExecContext(ctx, pipelineDeleteStmt, repoID, identifier); err != nil {
return database.ProcessSQLErrorf(ctx, err, "Could not delete pipeline")
}
return nil
}
// IncrementSeqNum increments the pipeline sequence number, retrying with a
// freshly loaded row whenever the optimistic-lock version check fails.
func (s *pipelineStore) IncrementSeqNum(ctx context.Context, pipeline *types.Pipeline) (*types.Pipeline, error) {
	for {
		pipeline.Seq++
		err := s.Update(ctx, pipeline)
		if err == nil {
			return pipeline, nil
		}
		if !errors.Is(err, gitness_store.ErrVersionConflict) {
			return pipeline, errors.Wrap(err, "could not increment pipeline sequence number")
		}

		// Version conflict: reload the row and try again with fresh state.
		pipeline, err = s.Find(ctx, pipeline.ID)
		if err != nil {
			return nil, errors.Wrap(err, "could not increment pipeline sequence number")
		}
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/store/database/infra_provisioned.go | app/store/database/infra_provisioned.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"context"
"fmt"
appstore "github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
)
const (
// infraProvisionedIDColumn is the primary-key column of the table.
infraProvisionedIDColumn = `iprov_id`
// infraProvisionedColumns lists every non-ID column; Create's Values
// call must stay in sync with this exact order.
infraProvisionedColumns = `
iprov_gitspace_id,
iprov_type,
iprov_infra_provider_resource_id,
iprov_space_id,
iprov_created,
iprov_updated,
iprov_response_metadata,
iprov_opentofu_params,
iprov_infra_status,
iprov_server_host_ip,
iprov_server_host_port,
iprov_proxy_host,
iprov_proxy_port,
iprov_gateway_host
`
// infraProvisionedSelectColumns is the full column list including the ID.
infraProvisionedSelectColumns = infraProvisionedIDColumn + `,
` + infraProvisionedColumns
// infraProvisionedTable is the backing table name.
infraProvisionedTable = `infra_provisioned`
)
// Compile-time assertion that infraProvisionedStore satisfies
// appstore.InfraProvisionedStore.
var _ appstore.InfraProvisionedStore = (*infraProvisionedStore)(nil)
// infraProvisionedStore persists provisioned-infra records via sqlx.
type infraProvisionedStore struct {
db *sqlx.DB
}
// infraProvisioned is the database representation of a provisioned-infra
// record; fields map 1:1 onto the iprov_* columns via their db tags.
type infraProvisioned struct {
ID int64 `db:"iprov_id"`
GitspaceInstanceID int64 `db:"iprov_gitspace_id"`
InfraProviderType enum.InfraProviderType `db:"iprov_type"`
InfraProviderResourceID int64 `db:"iprov_infra_provider_resource_id"`
SpaceID int64 `db:"iprov_space_id"`
Created int64 `db:"iprov_created"`
Updated int64 `db:"iprov_updated"`
ResponseMetadata *string `db:"iprov_response_metadata"`
InputParams string `db:"iprov_opentofu_params"`
InfraStatus enum.InfraStatus `db:"iprov_infra_status"`
ServerHostIP string `db:"iprov_server_host_ip"`
ServerHostPort string `db:"iprov_server_host_port"`
ProxyHost string `db:"iprov_proxy_host"`
ProxyPort int32 `db:"iprov_proxy_port"`
GatewayHost string `db:"iprov_gateway_host"`
}
// infraProvisionedGatewayView is the narrow read model returned by
// FindAllLatestByGateway: a subset of iprov_* columns joined with the
// gitspace instance uid (aliased as iprov_gitspace_uid in that query).
type infraProvisionedGatewayView struct {
GitspaceInstanceIdentifier string `db:"iprov_gitspace_uid"`
SpaceID int64 `db:"iprov_space_id"`
ServerHostIP string `db:"iprov_server_host_ip"`
ServerHostPort string `db:"iprov_server_host_port"`
Infrastructure *string `db:"iprov_response_metadata"`
}
// NewInfraProvisionedStore returns a new InfraProvisionedStore backed by the
// given database handle.
func NewInfraProvisionedStore(db *sqlx.DB) appstore.InfraProvisionedStore {
	return &infraProvisionedStore{db: db}
}
// Find fetches the provisioned-infra record with the given primary-key ID.
func (i infraProvisionedStore) Find(ctx context.Context, id int64) (*types.InfraProvisioned, error) {
	q := database.Builder.
		Select(infraProvisionedSelectColumns).
		From(infraProvisionedTable).
		Where(infraProvisionedIDColumn+" = ?", id) // nolint:goconst

	sqlQuery, params, err := q.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}

	record := new(infraProvisioned)
	if err := dbtx.GetAccessor(ctx, i.db).GetContext(ctx, record, sqlQuery, params...); err != nil {
		return nil, database.ProcessSQLErrorf(ctx, err, "Failed to find infraprovisioned for %d", id)
	}
	return record.toDTO(), nil
}
// FindAllLatestByGateway returns a view of every provisioned-infra record
// behind the given gateway host (newest first), joined with the gitspace
// instance uid.
func (i infraProvisionedStore) FindAllLatestByGateway(
	ctx context.Context,
	gatewayHost string,
) ([]*types.InfraProvisionedGatewayView, error) {
	q := database.Builder.
		Select(`gits_uid as iprov_gitspace_uid,
iprov_space_id,
iprov_server_host_ip,
iprov_server_host_port,
iprov_response_metadata`).
		From(infraProvisionedTable).
		Join(fmt.Sprintf("%s ON iprov_gitspace_id = gits_id", gitspaceInstanceTable)).
		Where("iprov_gateway_host = ?", gatewayHost).
		Where("iprov_infra_status = ?", enum.InfraStatusProvisioned).
		OrderBy("iprov_created DESC")

	sqlQuery, params, err := q.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}

	var rows []*infraProvisionedGatewayView
	if err := dbtx.GetAccessor(ctx, i.db).SelectContext(ctx, &rows, sqlQuery, params...); err != nil {
		return nil, database.ProcessSQLErrorf(
			ctx, err, "Failed to find infraprovisioned for host %s", gatewayHost)
	}

	views := make([]*types.InfraProvisionedGatewayView, 0, len(rows))
	for _, row := range rows {
		views = append(views, &types.InfraProvisionedGatewayView{
			GitspaceInstanceIdentifier: row.GitspaceInstanceIdentifier,
			SpaceID:                    row.SpaceID,
			ServerHostIP:               row.ServerHostIP,
			ServerHostPort:             row.ServerHostPort,
			Infrastructure:             row.Infrastructure,
		})
	}
	return views, nil
}
// FindLatestByGitspaceInstanceID returns the most recently created
// provisioned-infra record for the given gitspace instance.
func (i infraProvisionedStore) FindLatestByGitspaceInstanceID(
	ctx context.Context,
	gitspaceInstanceID int64,
) (*types.InfraProvisioned, error) {
	q := database.Builder.
		Select(infraProvisionedSelectColumns).
		From(infraProvisionedTable).
		Where("iprov_gitspace_id = ?", gitspaceInstanceID).
		OrderBy("iprov_created DESC")

	sqlQuery, params, err := q.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}

	record := new(infraProvisioned)
	if err := dbtx.GetAccessor(ctx, i.db).GetContext(ctx, record, sqlQuery, params...); err != nil {
		return nil, database.ProcessSQLErrorf(
			ctx, err, "Failed to find latestinfraprovisioned for instance %d", gitspaceInstanceID)
	}
	return record.toDTO(), nil
}
// FindLatestByGitspaceInstanceIdentifier returns the most recently created
// provisioned-infra record for the gitspace instance addressed by space ID
// and instance identifier.
func (i infraProvisionedStore) FindLatestByGitspaceInstanceIdentifier(
	ctx context.Context,
	spaceID int64,
	gitspaceInstanceIdentifier string,
) (*types.InfraProvisioned, error) {
	q := database.Builder.
		Select(infraProvisionedSelectColumns).
		From(infraProvisionedTable).
		Join(fmt.Sprintf("%s ON iprov_gitspace_id = gits_id", gitspaceInstanceTable)).
		Where("gits_uid = ?", gitspaceInstanceIdentifier).
		Where("iprov_space_id = ?", spaceID).
		OrderBy("iprov_created DESC")

	sqlQuery, params, err := q.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}

	record := new(infraProvisioned)
	if err := dbtx.GetAccessor(ctx, i.db).GetContext(ctx, record, sqlQuery, params...); err != nil {
		return nil, database.ProcessSQLErrorf(
			ctx, err, "Failed to find infraprovisioned for instance %s", gitspaceInstanceIdentifier)
	}
	return record.toDTO(), nil
}
// FindStoppedInfraForGitspaceConfigIdentifierByState returns the stopped
// provisioned-infra record attached to the newest gitspace instance of the
// given gitspace config that is in the given state.
func (i infraProvisionedStore) FindStoppedInfraForGitspaceConfigIdentifierByState(
	ctx context.Context,
	gitspaceConfigIdentifier string,
	state enum.GitspaceInstanceStateType,
) (*types.InfraProvisioned, error) {
	// Subquery selecting the newest matching gitspace instance. The config
	// identifier and state are bound as query arguments instead of being
	// interpolated into the SQL text: the previous fmt.Sprintf interpolation
	// was an SQL-injection vector and broke on identifiers containing quotes.
	gitsSubQuery := fmt.Sprintf(`
SELECT gits.gits_id
FROM %s gits
JOIN %s conf ON gits.gits_gitspace_config_id = conf.gconf_id
WHERE conf.gconf_uid = ? AND gits.gits_state = ?
ORDER BY gits.gits_created DESC
LIMIT 1`,
		gitspaceInstanceTable, gitspaceConfigsTable)

	// Build the main query; the join's bind args precede the WHERE arg in the
	// final statement, matching the placeholder order.
	stmt := database.Builder.
		Select(infraProvisionedSelectColumns).
		From(infraProvisionedTable).
		Join(
			fmt.Sprintf("(%s) AS gits ON iprov_gitspace_id = gits.gits_id", gitsSubQuery),
			gitspaceConfigIdentifier,
			state,
		).
		Where("iprov_infra_status = ?", enum.InfraStatusStopped)

	sql, args, err := stmt.ToSql()
	if err != nil {
		return nil, errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}
	entity := new(infraProvisioned)
	db := dbtx.GetAccessor(ctx, i.db)
	if err := db.GetContext(ctx, entity, sql, args...); err != nil {
		return nil, database.ProcessSQLErrorf(
			ctx, err, "Failed to find infraprovisioned for config %s with state %s",
			gitspaceConfigIdentifier, state)
	}
	return entity.toDTO(), nil
}
// Create inserts a new provisioned-infra record and populates its ID from the
// insert's RETURNING clause.
func (i infraProvisionedStore) Create(ctx context.Context, infraProvisioned *types.InfraProvisioned) error {
	// Values are listed in the exact order of infraProvisionedColumns.
	q := database.Builder.
		Insert(infraProvisionedTable).
		Columns(infraProvisionedColumns).
		Values(
			infraProvisioned.GitspaceInstanceID,
			infraProvisioned.InfraProviderType,
			infraProvisioned.InfraProviderResourceID,
			infraProvisioned.SpaceID,
			infraProvisioned.Created,
			infraProvisioned.Updated,
			infraProvisioned.ResponseMetadata,
			infraProvisioned.InputParams,
			infraProvisioned.InfraStatus,
			infraProvisioned.ServerHostIP,
			infraProvisioned.ServerHostPort,
			infraProvisioned.ProxyHost,
			infraProvisioned.ProxyPort,
			infraProvisioned.GatewayHost,
		).
		Suffix(ReturningClause + infraProvisionedIDColumn)

	sqlQuery, params, err := q.ToSql()
	if err != nil {
		return errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}

	row := dbtx.GetAccessor(ctx, i.db).QueryRowContext(ctx, sqlQuery, params...)
	if err := row.Scan(&infraProvisioned.ID); err != nil {
		return database.ProcessSQLErrorf(
			ctx, err, "infraprovisioned create query failed for instance : %d",
			infraProvisioned.GitspaceInstanceID)
	}
	return nil
}
// Delete removes the provisioned-infra record with the given primary-key ID.
func (i infraProvisionedStore) Delete(ctx context.Context, id int64) error {
	q := database.Builder.
		Delete(infraProvisionedTable).
		Where(infraProvisionedIDColumn+" = ?", id)

	sqlQuery, params, err := q.ToSql()
	if err != nil {
		return errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}

	if _, err := dbtx.GetAccessor(ctx, i.db).ExecContext(ctx, sqlQuery, params...); err != nil {
		return database.ProcessSQLErrorf(ctx, err, "Failed to delete infraprovisioned for %d", id)
	}
	return nil
}
// Update overwrites the mutable columns of an existing provisioned-infra
// record, keyed by its primary ID.
func (i infraProvisionedStore) Update(ctx context.Context, infraProvisioned *types.InfraProvisioned) error {
	q := database.Builder.
		Update(infraProvisionedTable).
		Set("iprov_response_metadata", infraProvisioned.ResponseMetadata).
		Set("iprov_infra_status", infraProvisioned.InfraStatus).
		Set("iprov_server_host_ip", infraProvisioned.ServerHostIP).
		Set("iprov_server_host_port", infraProvisioned.ServerHostPort).
		Set("iprov_opentofu_params", infraProvisioned.InputParams).
		Set("iprov_updated", infraProvisioned.Updated).
		Set("iprov_proxy_host", infraProvisioned.ProxyHost).
		Set("iprov_proxy_port", infraProvisioned.ProxyPort).
		Set("iprov_gateway_host", infraProvisioned.GatewayHost).
		Where(infraProvisionedIDColumn+" = ?", infraProvisioned.ID)

	sqlQuery, params, err := q.ToSql()
	if err != nil {
		return errors.Wrap(err, "Failed to convert squirrel builder to sql")
	}

	if _, err := dbtx.GetAccessor(ctx, i.db).ExecContext(ctx, sqlQuery, params...); err != nil {
		return database.ProcessSQLErrorf(
			ctx, err, "Failed to update infra provisioned for instance %d", infraProvisioned.GitspaceInstanceID)
	}
	return nil
}
// toDTO converts the database row into the service-facing
// types.InfraProvisioned value; it is a pure field-for-field copy.
func (entity infraProvisioned) toDTO() *types.InfraProvisioned {
return &types.InfraProvisioned{
ID: entity.ID,
GitspaceInstanceID: entity.GitspaceInstanceID,
InfraProviderType: entity.InfraProviderType,
InfraProviderResourceID: entity.InfraProviderResourceID,
SpaceID: entity.SpaceID,
Created: entity.Created,
Updated: entity.Updated,
ResponseMetadata: entity.ResponseMetadata,
InputParams: entity.InputParams,
InfraStatus: entity.InfraStatus,
ServerHostIP: entity.ServerHostIP,
ServerHostPort: entity.ServerHostPort,
ProxyHost: entity.ProxyHost,
ProxyPort: entity.ProxyPort,
GatewayHost: entity.GatewayHost,
}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.