repo stringlengths 6 47 | file_url stringlengths 77 269 | file_path stringlengths 5 186 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-07 08:35:43 2026-01-07 08:55:24 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/webhook/events.go | app/services/webhook/events.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhook
import (
"context"
"errors"
"fmt"
"github.com/harness/gitness/events"
"github.com/harness/gitness/store"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/rs/zerolog/log"
"go.uber.org/multierr"
)
// generateTriggerIDFromEventID derives a deterministic trigger ID from an event ID.
func generateTriggerIDFromEventID(eventID string) string {
	return "event-" + eventID
}
// triggerForEventWithRepo triggers all webhooks for the given repo and triggerType
// using the eventID to generate a deterministic triggerID and using the output of bodyFn as payload.
// The method tries to find the repository and principal and provides both to the bodyFn to generate the body.
// NOTE: technically we could avoid this call if we send the data via the event (though then events will get big).
func (s *Service) triggerForEventWithRepo(
	ctx context.Context,
	triggerType enum.WebhookTrigger,
	eventID string,
	principalID int64,
	repoID int64,
	createBodyFn func(*types.Principal, *types.Repository) (any, error),
) error {
	// resolve the principal and repository referenced by the event
	prn, err := s.WebhookExecutor.FindPrincipalForEvent(ctx, principalID)
	if err != nil {
		return err
	}
	rep, err := s.findRepositoryForEvent(ctx, repoID)
	if err != nil {
		return err
	}

	// build the webhook payload
	payload, err := createBodyFn(prn, rep)
	if err != nil {
		return fmt.Errorf("body creation function failed: %w", err)
	}

	parentInfos, err := s.getParentInfoRepo(ctx, rep.ID, true)
	if err != nil {
		return fmt.Errorf("failed to get webhook parent info for parents: %w", err)
	}

	return s.WebhookExecutor.TriggerForEvent(ctx, eventID, parentInfos, triggerType, payload)
}
// triggerForEventWithPullReq triggers all webhooks for the given repo and triggerType
// using the eventID to generate a deterministic triggerID and using the output of bodyFn as payload.
// The method tries to find the pullreq, principal, target repo, and source repo
// and provides all to the bodyFn to generate the body.
// NOTE: technically we could avoid this call if we send the data via the event (though then events will get big).
func (s *Service) triggerForEventWithPullReq(
	ctx context.Context,
	triggerType enum.WebhookTrigger, eventID string, principalID int64, prID int64,
	createBodyFn func(
		principal *types.Principal, pr *types.PullReq,
		targetRepo *types.Repository, sourceRepo *types.Repository,
	) (any, error),
) error {
	principal, err := s.WebhookExecutor.FindPrincipalForEvent(ctx, principalID)
	if err != nil {
		return err
	}
	pr, err := s.findPullReqForEvent(ctx, prID)
	if err != nil {
		return err
	}
	targetRepo, err := s.findRepositoryForEvent(ctx, pr.TargetRepoID)
	if err != nil {
		return fmt.Errorf("failed to get pr target repo: %w", err)
	}

	// a nil SourceRepoID means the source repo is gone - discard instead of retrying.
	if pr.SourceRepoID == nil {
		return events.NewDiscardEventErrorf("source repo for PR id '%d' doesn't exist anymore", pr.ID)
	}

	// source repo is the target repo unless the PR originates from a different (fork) repo.
	sourceRepo := targetRepo
	if *pr.SourceRepoID != pr.TargetRepoID {
		sourceRepo, err = s.findRepositoryForEvent(ctx, *pr.SourceRepoID)
		// a deleted source repo is unrecoverable - discard the event instead of retrying.
		if errors.Is(err, store.ErrResourceNotFound) {
			return events.NewDiscardEventErrorf("source repo for PR id '%d' doesn't exist anymore", pr.ID)
		}
		if err != nil {
			return fmt.Errorf("failed to get pr source repo: %w", err)
		}
	}

	// create body
	body, err := createBodyFn(principal, pr, targetRepo, sourceRepo)
	if err != nil {
		return fmt.Errorf("body creation function failed: %w", err)
	}

	// resolve webhook parent info for the target repo (the `true` flag presumably
	// includes ancestor spaces - confirm against getParentInfoRepo).
	parents, err := s.getParentInfoRepo(ctx, targetRepo.ID, true)
	if err != nil {
		return fmt.Errorf("failed to get webhook parent info: %w", err)
	}

	return s.WebhookExecutor.TriggerForEvent(ctx, eventID, parents, triggerType, body)
}
// findRepositoryForEvent looks up the repository for the provided repoID.
// A not-found result is translated into a discard-event error (the repo was most
// likely deleted concurrently); any other error forces the event to be reprocessed.
func (s *Service) findRepositoryForEvent(ctx context.Context, repoID int64) (*types.Repository, error) {
	repo, err := s.repoStore.Find(ctx, repoID)
	switch {
	case errors.Is(err, store.ErrResourceNotFound):
		// unrecoverable - most likely a racing condition of repo being deleted by now
		return nil, events.NewDiscardEventErrorf("repo with id '%d' doesn't exist anymore", repoID)
	case err != nil:
		// all other errors we return and force the event to be reprocessed
		return nil, fmt.Errorf("failed to get repo for id '%d': %w", repoID, err)
	default:
		return repo, nil
	}
}
// findPullReqForEvent looks up the pull request for the provided prID.
// A not-found result is translated into a discard-event error (the PR was most
// likely deleted concurrently); any other error forces the event to be reprocessed.
func (s *Service) findPullReqForEvent(ctx context.Context, prID int64) (*types.PullReq, error) {
	pr, err := s.pullreqStore.Find(ctx, prID)
	switch {
	case errors.Is(err, store.ErrResourceNotFound):
		// unrecoverable - most likely a racing condition of repo being deleted by now
		return nil, events.NewDiscardEventErrorf("PR with id '%d' doesn't exist anymore", prID)
	case err != nil:
		// all other errors we return and force the event to be reprocessed
		return nil, fmt.Errorf("failed to get PR for id '%d': %w", prID, err)
	default:
		return pr, nil
	}
}
// FindPrincipalForEvent looks up the principal for the provided principalID.
// A not-found result is translated into a discard-event error (principals are not
// expected to be deleted); any other error forces the event to be reprocessed.
func (w *WebhookExecutor) FindPrincipalForEvent(ctx context.Context, principalID int64) (*types.Principal, error) {
	principal, err := w.principalStore.Find(ctx, principalID)
	switch {
	case errors.Is(err, store.ErrResourceNotFound):
		// this should never happen (as we won't delete principals) - discard event
		return nil, events.NewDiscardEventErrorf("principal with id '%d' doesn't exist anymore", principalID)
	case err != nil:
		// all other errors we return and force the event to be reprocessed
		return nil, fmt.Errorf("failed to get principal for id '%d': %w", principalID, err)
	default:
		return principal, nil
	}
}
// TriggerForEvent triggers all webhooks for the given parentType/ID and triggerType
// using the eventID to generate a deterministic triggerID and sending the provided body as payload.
// It returns an error (causing the event framework to reprocess the event) if triggering
// failed as a whole, or if at least one webhook execution ended in a retriable error.
func (w *WebhookExecutor) TriggerForEvent(
	ctx context.Context,
	eventID string,
	parents []types.WebhookParentInfo,
	triggerType enum.WebhookTrigger,
	body any,
) error {
	triggerID := generateTriggerIDFromEventID(eventID)
	results, err := w.triggerWebhooksFor(ctx, parents, triggerID, triggerType, body)

	// return all errors and force the event to be reprocessed (it's not webhook execution specific!)
	if err != nil {
		return fmt.Errorf(
			"failed to trigger %s (id: '%s') for webhooks %#v: %w",
			triggerType, triggerID, parents, err,
		)
	}

	// go through all events and figure out if we need to retry the event.
	// Combine all errors into a single error to log (to reduce number of logs)
	retryRequired := false
	var errs error
	for _, result := range results {
		// skipped executions are neither errors nor retry candidates
		if result.Skipped() {
			continue
		}

		// combine errors of non-successful executions
		if result.Execution.Result != enum.WebhookExecutionResultSuccess {
			errs = multierr.Append(errs,
				fmt.Errorf("execution %d of webhook %d resulted in %s: %w",
					result.Execution.ID, result.Webhook.ID, result.Execution.Result, result.Err))
		}

		// a single retriable failure is enough to require redelivery of the whole event
		if result.Execution.Result == enum.WebhookExecutionResultRetriableError {
			retryRequired = true
		}
	}

	// in case there was at least one error, log error details in single log to reduce log flooding
	if errs != nil {
		log.Ctx(ctx).Warn().Err(errs).Msgf("webhook execution for %#v had errors", parents)
	}

	// in case at least one webhook has to be retried, return an error to the event framework to have it reprocessed
	if retryRequired {
		return fmt.Errorf("at least one webhook execution resulted in a retry for %#v", parents)
	}

	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/webhook/url_provider.go | app/services/webhook/url_provider.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhook
import (
"context"
"github.com/harness/gitness/types"
)
// compile-time check: GitnessURLProvider must implement URLProvider.
var _ URLProvider = (*GitnessURLProvider)(nil)

// GitnessURLProvider is a URLProvider that returns webhook URLs exactly as stored.
type GitnessURLProvider struct{}

// NewURLProvider returns a new GitnessURLProvider.
func NewURLProvider(_ context.Context) *GitnessURLProvider {
	return &GitnessURLProvider{}
}
// GetWebhookURL returns the webhook's stored URL unmodified.
func (u *GitnessURLProvider) GetWebhookURL(_ context.Context, webhook *types.WebhookCore) (string, error) {
	// set URL as is (already has been validated, any other error will be caught in request creation)
	return webhook.URL, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/webhook/update.go | app/services/webhook/update.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhook
import (
"context"
"errors"
"fmt"
"github.com/harness/gitness/audit"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/check"
"github.com/harness/gitness/types/enum"
"github.com/rs/zerolog/log"
)
// sanitizeUpdateInput validates every provided (non-nil) field of the webhook
// update input, returning the first validation error encountered.
func (s *Service) sanitizeUpdateInput(in *types.WebhookUpdateInput) error {
	// TODO [CODE-1363]: remove after identifier migration.
	if in.Identifier == nil {
		in.Identifier = in.UID
	}

	// each validator is a no-op when its field was not provided
	validators := []func() error{
		func() error {
			if in.Identifier == nil {
				return nil
			}
			return check.Identifier(*in.Identifier)
		},
		func() error {
			if in.DisplayName == nil {
				return nil
			}
			return check.DisplayName(*in.DisplayName)
		},
		func() error {
			if in.Description == nil {
				return nil
			}
			return check.Description(*in.Description)
		},
		func() error {
			if in.URL == nil {
				return nil
			}
			// internal is set to false as internal webhooks cannot be updated
			return CheckURL(*in.URL, s.config.AllowLoopback, s.config.AllowPrivateNetwork, false)
		},
		func() error {
			if in.Secret == nil {
				return nil
			}
			return CheckSecret(*in.Secret)
		},
		func() error {
			if in.Triggers == nil {
				return nil
			}
			return CheckTriggers(in.Triggers)
		},
		func() error {
			if in.ExtraHeaders == nil {
				return nil
			}
			return CheckExtraHeaders(in.ExtraHeaders)
		},
	}

	for _, validate := range validators {
		if err := validate(); err != nil {
			return err
		}
	}

	return nil
}
// Update updates an existing webhook of the given type on the given parent resource.
// Only the fields provided (non-nil) in the input are modified; the webhook type
// itself cannot be changed. External webhook updates are recorded in the audit log
// (best effort), and an SSE event is published on success.
func (s *Service) Update(
	ctx context.Context,
	principal *types.Principal,
	webhookIdentifier string,
	typ enum.WebhookType,
	parentResource ParentResource,
	in *types.WebhookUpdateInput,
) (*types.Webhook, error) {
	hook, err := s.GetWebhookVerifyOwnership(ctx, parentResource.ID, parentResource.Type, webhookIdentifier)
	if err != nil {
		return nil, fmt.Errorf("failed to verify webhook ownership: %w", err)
	}

	// keep a copy of the unmodified webhook for the audit log old/new diff
	oldHook := hook.Clone()
	if err := s.sanitizeUpdateInput(in); err != nil {
		return nil, err
	}

	if typ != hook.Type {
		return nil, errors.New("changing type is not allowed")
	}

	// update webhook struct (only for values that are provided)
	if in.Identifier != nil {
		hook.Identifier = *in.Identifier
	}
	if in.DisplayName != nil {
		hook.DisplayName = *in.DisplayName
	}
	if in.Description != nil {
		hook.Description = *in.Description
	}
	if in.URL != nil {
		hook.URL = *in.URL
	}
	if in.Secret != nil {
		// secrets are stored encrypted at rest
		encryptedSecret, err := s.encrypter.Encrypt(*in.Secret)
		if err != nil {
			return nil, fmt.Errorf("failed to encrypt webhook secret: %w", err)
		}
		hook.Secret = string(encryptedSecret)
	}
	if in.Enabled != nil {
		hook.Enabled = *in.Enabled
	}
	if in.Insecure != nil {
		hook.Insecure = *in.Insecure
	}
	if in.Triggers != nil {
		hook.Triggers = DeduplicateTriggers(in.Triggers)
	}
	if in.ExtraHeaders != nil {
		hook.ExtraHeaders = in.ExtraHeaders
	}

	if err := s.webhookStore.Update(ctx, hook); err != nil {
		return nil, err
	}

	// only external webhooks are audited; an audit failure is logged but does not fail the update
	if shouldAuditWebhook(typ) {
		resourceType, nameKey := getWebhookAuditInfo(parentResource.Type)
		err = s.auditService.Log(ctx,
			*principal,
			audit.NewResource(resourceType, hook.Identifier, nameKey, parentResource.Identifier),
			audit.ActionUpdated,
			parentResource.Path,
			audit.WithOldObject(oldHook),
			audit.WithNewObject(hook),
		)
		if err != nil {
			log.Ctx(ctx).Warn().Msgf("failed to insert audit log for update webhook operation: %s", err)
		}
	}

	s.sendSSE(ctx, parentResource, enum.SSETypeWebhookUpdated, hook)

	return hook, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/webhook/common.go | app/services/webhook/common.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhook
import (
"context"
"net"
"net/url"
"strings"
"github.com/harness/gitness/audit"
"github.com/harness/gitness/errors"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/check"
"github.com/harness/gitness/types/enum"
"github.com/rs/zerolog/log"
)
const (
	// webhookMaxURLLength defines the max allowed length of a webhook URL.
	webhookMaxURLLength = 2048
	// webhookMaxSecretLength defines the max allowed length of a webhook secret.
	webhookMaxSecretLength = 4096
	// webhookMaxExtraHeaders defines the max number of custom headers allowed per webhook.
	webhookMaxExtraHeaders = 20
)

// ErrInternalWebhookOperationNotAllowed is returned when a caller attempts to
// modify an internal (system-managed) webhook.
var ErrInternalWebhookOperationNotAllowed = errors.Forbidden("changes to internal webhooks are not allowed")
// CheckURL validates the url of a webhook.
// For internal webhooks the URL is not used, so validation is skipped entirely.
func CheckURL(rawURL string, allowLoopback bool, allowPrivateNetwork bool, internal bool) error {
	// for internal webhooks skip URL validation as it is not used
	if internal {
		return nil
	}

	if len(rawURL) > webhookMaxURLLength {
		return check.NewValidationErrorf("The URL of a webhook can be at most %d characters long.",
			webhookMaxURLLength)
	}

	u, err := url.Parse(rawURL)
	if err != nil {
		return check.NewValidationErrorf("The provided webhook url is invalid: %s", err)
	}

	hostname := u.Hostname()
	if hostname == "" {
		return check.NewValidationError("The URL of a webhook has to have a non-empty host.")
	}

	// basic validation for loopback / private network addresses (only sanitary to give user an early error)
	// IMPORTANT: during webhook execution loopback / private network addresses are blocked (handles DNS resolution)
	if hostname == "localhost" {
		return check.NewValidationError("localhost is not allowed.")
	}
	if ip := net.ParseIP(hostname); ip != nil {
		switch {
		case !allowLoopback && ip.IsLoopback():
			return check.NewValidationError("Loopback IP addresses are not allowed.")
		case !allowPrivateNetwork && ip.IsPrivate():
			return check.NewValidationError("Private IP addresses are not allowed.")
		}
	}

	if u.Scheme != "http" && u.Scheme != "https" {
		return check.NewValidationError("The scheme of a webhook must be either http or https.")
	}

	return nil
}
// CheckSecret validates the secret of a webhook (only its maximum length).
func CheckSecret(secret string) error {
	if len(secret) <= webhookMaxSecretLength {
		return nil
	}
	return check.NewValidationErrorf("The secret of a webhook can be at most %d characters long.",
		webhookMaxSecretLength)
}
// CheckTriggers validates the triggers of a webhook.
// Duplicates are tolerated here; they are deduplicated later.
func CheckTriggers(triggers []enum.WebhookTrigger) error {
	for _, t := range triggers {
		_, ok := t.Sanitize()
		if ok {
			continue
		}
		return check.NewValidationErrorf("The provided webhook trigger '%s' is invalid.", t)
	}
	return nil
}
// CheckExtraHeaders validates the custom headers of a webhook:
// it enforces the max header count, rejects empty keys, and rejects duplicate
// keys (compared case-insensitively, as HTTP header names are case-insensitive).
func CheckExtraHeaders(headers []types.ExtraHeader) error {
	if len(headers) > webhookMaxExtraHeaders {
		return check.NewValidationErrorf("A webhook can have at most %d custom headers.", webhookMaxExtraHeaders)
	}
	headerKeys := make(map[string]struct{}, len(headers))
	for _, header := range headers {
		if header.Key == "" {
			return check.NewValidationError("Header key cannot be empty.")
		}
		// check for duplicate header keys (case insensitive)
		for existingKey := range headerKeys {
			if strings.EqualFold(existingKey, header.Key) {
				return check.NewValidationErrorf("Duplicate header key '%s' detected.", header.Key)
			}
		}
		// BUGFIX: keys were never recorded in the set, so the duplicate check
		// above could never trigger - record each validated key.
		headerKeys[header.Key] = struct{}{}
	}
	return nil
}
// DeduplicateTriggers de-duplicates the triggers provided by the user,
// preserving the order of first occurrence. Always returns a non-nil slice.
func DeduplicateTriggers(in []enum.WebhookTrigger) []enum.WebhookTrigger {
	out := make([]enum.WebhookTrigger, 0, len(in))
	seen := make(map[enum.WebhookTrigger]struct{}, len(in))
	for _, t := range in {
		if _, dup := seen[t]; dup {
			continue
		}
		seen[t] = struct{}{}
		out = append(out, t)
	}
	return out
}
// ConvertTriggers converts raw string values to webhook trigger values.
func ConvertTriggers(vals []string) []enum.WebhookTrigger {
	out := make([]enum.WebhookTrigger, 0, len(vals))
	for _, v := range vals {
		out = append(out, enum.WebhookTrigger(v))
	}
	return out
}
// shouldAuditWebhook reports whether operations on the given webhook type are
// recorded in the audit log (only external webhooks are audited).
func shouldAuditWebhook(webhookType enum.WebhookType) bool {
	return webhookType == enum.WebhookTypeExternal
}
// getWebhookAuditInfo maps a webhook parent type to the audit resource type and
// the resource-name key used when building audit log entries. Unknown parent
// types yield empty values.
func getWebhookAuditInfo(parentType enum.WebhookParent) (audit.ResourceType, string) {
	switch parentType {
	case enum.WebhookParentRepo:
		return audit.ResourceTypeCodeWebhook, audit.RepoName
	case enum.WebhookParentSpace:
		return audit.ResourceTypeCodeWebhook, audit.SpaceName
	case enum.WebhookParentRegistry:
		return audit.ResourceTypeRegistryWebhook, audit.RegistryName
	default:
		return "", ""
	}
}
// sendSSE publishes a webhook SSE event to the space that owns the parent resource.
// For repo-parented webhooks the owning space is resolved via the repo store; if the
// repo cannot be found, the failure is logged and the event is dropped.
func (s *Service) sendSSE(
	ctx context.Context,
	parentResource ParentResource,
	sseType enum.SSEType,
	webhook *types.Webhook,
) {
	targetSpaceID := parentResource.ID
	if parentResource.Type == enum.WebhookParentRepo {
		repo, err := s.repoStore.Find(ctx, parentResource.ID)
		if err != nil {
			log.Ctx(ctx).Warn().Err(err).Msg("failed to find repo")
			return
		}
		targetSpaceID = repo.ParentID
	}
	s.sseStreamer.Publish(ctx, targetSpaceID, sseType, webhook)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/metric/wire.go | app/services/metric/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric
import (
"context"
"fmt"
pullreqevents "github.com/harness/gitness/app/events/pullreq"
repoevents "github.com/harness/gitness/app/events/repo"
ruleevents "github.com/harness/gitness/app/events/rule"
userevents "github.com/harness/gitness/app/events/user"
"github.com/harness/gitness/app/services/publicaccess"
"github.com/harness/gitness/app/services/refcache"
"github.com/harness/gitness/app/services/settings"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/events"
"github.com/harness/gitness/job"
registrystore "github.com/harness/gitness/registry/app/store"
"github.com/harness/gitness/types"
"github.com/google/wire"
)
// WireSet provides the metric service's dependency providers for wire injection.
var WireSet = wire.NewSet(
	ProvideValues,
	ProvideSubmitter,
	ProvideCollectorJob,
)
// ProvideValues provides the metric collection settings (enabled flag, hostname, install ID).
func ProvideValues(ctx context.Context, config *types.Config, settingsSrv *settings.Service) (*Values, error) {
	return NewValues(ctx, config, settingsSrv)
}
// ProvideSubmitter creates the PostHog metrics submitter and registers the
// event listeners that feed metric events into it.
// NOTE(review): NewPostHog may return a nil *PostHog (disabled); the returned
// Submitter interface then wraps a typed nil - Submit is nil-receiver safe,
// so this appears intentional, but confirm before relying on `== nil` checks.
func ProvideSubmitter(
	appCtx context.Context,
	config *types.Config,
	values *Values,
	principalStore store.PrincipalStore,
	principalInfoCache store.PrincipalInfoCache,
	pullReqStore store.PullReqStore,
	ruleStore store.RuleStore,
	userEvReaderFactory *events.ReaderFactory[*userevents.Reader],
	repoEvReaderFactory *events.ReaderFactory[*repoevents.Reader],
	pullreqEvReaderFactory *events.ReaderFactory[*pullreqevents.Reader],
	ruleEvReaderFactory *events.ReaderFactory[*ruleevents.Reader],
	publicAccess publicaccess.Service,
	spaceFinder refcache.SpaceFinder,
	repoFinder refcache.RepoFinder,
) (Submitter, error) {
	submitter, err := NewPostHog(appCtx, config, values, principalStore, principalInfoCache)
	if err != nil {
		return nil, fmt.Errorf("failed to create posthog metrics submitter: %w", err)
	}

	// wire up the event stream listeners that produce metric events
	err = registerEventListeners(
		appCtx,
		config,
		principalInfoCache,
		pullReqStore,
		ruleStore,
		userEvReaderFactory,
		repoEvReaderFactory,
		pullreqEvReaderFactory,
		ruleEvReaderFactory,
		spaceFinder,
		repoFinder,
		publicAccess,
		submitter,
	)
	if err != nil {
		return nil, fmt.Errorf("failed to register metric event listeners: %w", err)
	}

	return submitter, nil
}
// ProvideCollectorJob creates the periodic metrics collector job and registers
// it with the job executor under the package's jobType.
func ProvideCollectorJob(
	config *types.Config,
	values *Values,
	userStore store.PrincipalStore,
	repoStore store.RepoStore,
	pipelineStore store.PipelineStore,
	executionStore store.ExecutionStore,
	scheduler *job.Scheduler,
	executor *job.Executor,
	gitspaceConfigStore store.GitspaceConfigStore,
	registryStore registrystore.RegistryRepository,
	artifactStore registrystore.ArtifactRepository,
	submitter Submitter,
) (*CollectorJob, error) {
	collector := NewCollectorJob(
		values,
		config.Metric.Endpoint,
		config.Metric.Token,
		userStore,
		repoStore,
		pipelineStore,
		executionStore,
		scheduler,
		gitspaceConfigStore,
		registryStore,
		artifactStore,
		submitter,
	)

	// register the job handler so scheduled runs can be executed
	err := executor.Register(jobType, collector)
	if err != nil {
		return nil, err
	}

	return collector, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/metric/posthog.go | app/services/metric/posthog.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/errors"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/posthog/posthog-go"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
)
// postHogGroupInstall is the PostHog group type used to group all events of one install.
const postHogGroupInstall = "install"

// postHogServerUserID is the distinct ID used for server-originated (non-user) messages.
const postHogServerUserID = "harness-server"

// postHogAPIKey is a package-level override for the project API key
// (presumably injected at build time, e.g. via ldflags - confirm in build scripts).
var postHogAPIKey string

// PostHog submits product metric events to PostHog.
type PostHog struct {
	client             posthog.Client
	installID          string
	hostname           string
	principalStore     store.PrincipalStore
	principalInfoCache store.PrincipalInfoCache
}

// group describes a PostHog group identity to be submitted via group-identify.
type group struct {
	Type       string
	ID         string
	Properties map[string]any
}
// NewPostHog creates a PostHog metrics submitter.
// It returns (nil, nil) when metric submission is disabled: metrics are off,
// there is no install ID, or no project API key is available.
func NewPostHog(
	ctx context.Context,
	config *types.Config,
	values *Values,
	principalStore store.PrincipalStore,
	principalInfoCache store.PrincipalInfoCache,
) (*PostHog, error) {
	if !values.Enabled || values.InstallID == "" {
		return nil, nil //nolint:nilnil // PostHog is disabled
	}

	// prefer the build-time injected key, fall back to the configured one
	apiKey := postHogAPIKey
	if apiKey == "" {
		apiKey = config.Metric.PostHogProjectAPIKey
	}
	if apiKey == "" {
		return nil, nil //nolint:nilnil // PostHog is disabled
	}

	logr := log.Ctx(ctx).With().Str("service.name", "posthog").Logger()

	// https://posthog.com/docs/libraries/go#overriding-geoip-properties
	phConfig := posthog.Config{
		Endpoint:               config.Metric.PostHogEndpoint,
		PersonalApiKey:         config.Metric.PostHogPersonalAPIKey,
		Logger:                 &logger{Logger: logr},
		DefaultEventProperties: posthog.NewProperties().Set("install_id", values.InstallID),
		Callback:               nil,
	}

	// BUGFIX: use the resolved apiKey - previously the config value was passed
	// directly, silently ignoring the build-time injected key fallback above.
	client, err := posthog.NewWithConfig(apiKey, phConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to create PostHog client: %w", err)
	}

	ph := &PostHog{
		client:             client,
		installID:          values.InstallID,
		hostname:           values.Hostname,
		principalStore:     principalStore,
		principalInfoCache: principalInfoCache,
	}

	// submit the install group identity once, in the background
	go ph.submitDefaultGroupOnce(ctx)

	return ph, nil
}
// SubmitGroups implements the Submitter interface; group submission for PostHog
// is handled separately in the background, so this is a no-op.
func (ph *PostHog) SubmitGroups(context.Context) error {
	// No implementation
	return nil
}
// uniqueUserID namespaces a user identifier with the install ID so that users
// from different installs map to distinct PostHog persons.
func (ph *PostHog) uniqueUserID(id string) string {
	return ph.installID + ":" + id
}
// Submit enqueues a single metric event (named "object:verb") to PostHog.
// When a user is provided, the event is attributed to that user (namespaced by
// install ID) and person properties are attached; otherwise the distinct ID is
// left empty. Safe to call on a nil receiver (no-op when PostHog is disabled).
func (ph *PostHog) Submit(
	_ context.Context,
	user *types.PrincipalInfo,
	object Object,
	verb Verb,
	properties map[string]any,
) error {
	// ph is nil when PostHog is disabled - see NewPostHog
	if ph == nil {
		return nil
	}

	var distinctID string
	if user != nil {
		distinctID = ph.uniqueUserID(user.UID)

		// $set_once holds write-once person properties, $set always-updated ones
		p := posthog.NewProperties().Merge(properties)
		p.Set("$set_once", map[string]any{
			"type":    user.Type,
			"created": user.Created,
		})
		p.Set("$set", map[string]any{
			"id":       user.ID,
			"username": user.UID,
			"email":    user.Email,
		})
		properties = p
	}

	err := ph.client.Enqueue(posthog.Capture{
		DistinctId: distinctID,
		Event:      string(object) + ":" + string(verb),
		Groups:     posthog.NewGroups().Set(postHogGroupInstall, ph.installID),
		Properties: properties,
	})
	if err != nil {
		return fmt.Errorf("failed to enqueue event; object=%s verb=%s: %w", object, verb, err)
	}

	return nil
}
// submitGroup enqueues a PostHog group-identify message for the given group,
// attributed to the server (not to a specific user).
func (ph *PostHog) submitGroup(group group) error {
	err := ph.client.Enqueue(posthog.GroupIdentify{
		DistinctId: postHogServerUserID,
		Type:       group.Type,
		Key:        group.ID,
		Properties: group.Properties,
	})
	if err != nil {
		return fmt.Errorf("failed to enqueue group identify: %w", err)
	}
	return nil
}
// submitDefaultGroup submits the "install" group identity, using the
// first-created user's email and creation time as install metadata.
// It fails (to be retried by the caller) while no user exists yet.
func (ph *PostHog) submitDefaultGroup(ctx context.Context) error {
	// fetch the oldest user (sorted by creation, ascending, page size 1)
	users, err := ph.principalStore.ListUsers(ctx, &types.UserFilter{
		Page:  1,
		Size:  1,
		Sort:  enum.UserAttrCreated,
		Order: enum.OrderAsc,
	})
	if err != nil {
		return fmt.Errorf("failed to list users: %w", err)
	}
	if len(users) == 0 {
		return errors.New("no users found")
	}
	userFirst := users[0]

	// Note: The PostHog UI identifies a group using the name property.
	// If the name property is not found, it falls back to the group key.
	// https://posthog.com/docs/product-analytics/group-analytics#how-to-set-group-properties
	g := group{
		Type: postHogGroupInstall,
		ID:   ph.installID,
		Properties: posthog.NewProperties().
			Set("name", "install").
			Set("hostname", ph.hostname).
			Set("email", userFirst.Email).
			Set("created", userFirst.Created),
	}

	err = ph.submitGroup(g)
	if err != nil {
		return fmt.Errorf("failed to submit default group: %w", err)
	}

	return nil
}
// submitDefaultGroupOnce waits one hour, then attempts to submit the default
// install group, retrying hourly on failure until success or ctx cancellation.
// The delay presumably gives a fresh install time to create its first user
// (submitDefaultGroup fails while no user exists) - confirm intent.
func (ph *PostHog) submitDefaultGroupOnce(ctx context.Context) {
	timer := time.NewTimer(time.Hour)
	defer timer.Stop()

	logr := log.Ctx(ctx).With().Str("service.name", "posthog").Logger()

	for {
		select {
		case <-ctx.Done():
			// shutdown - give up without submitting
			return
		case <-timer.C:
			if err := ph.submitDefaultGroup(ctx); err != nil {
				// retry in another hour
				logr.Err(err).Msg("failed to submit default group")
				timer.Reset(time.Hour)
				continue
			}
			// submitted successfully - done
			return
		}
	}
}
// logger adapts a zerolog.Logger to the logging interface expected by the
// PostHog client.
type logger struct {
	zerolog.Logger
}

// Logf forwards informational messages from the PostHog client to zerolog.
func (l *logger) Logf(format string, args ...any) {
	l.Info().Msgf(format, args...)
}

// Errorf forwards error messages from the PostHog client to zerolog.
func (l *logger) Errorf(format string, args ...any) {
	l.Error().Msgf(format, args...)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/metric/values.go | app/services/metric/values.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric
import (
"context"
"fmt"
"os"
"strconv"
"github.com/harness/gitness/app/services/settings"
"github.com/harness/gitness/types"
"github.com/google/uuid"
)
// Values holds the metric collection settings.
type Values struct {
	Enabled   bool   // whether metric collection is enabled
	Hostname  string // instance identifier, taken from config.InstanceID
	InstallID string // stable install identifier, persisted in system settings
}
// NewValues computes the metric collection settings. Collection is disabled when
// the DO_NOT_TRACK environment variable is truthy or metrics are disabled in the
// config. On the first run a random install ID is generated and persisted in the
// system settings; subsequent runs reuse it.
func NewValues(
	ctx context.Context,
	config *types.Config,
	settingsSrv *settings.Service,
) (*Values, error) {
	// honor the DO_NOT_TRACK convention, see https://consoledonottrack.com/
	doNotTrack, _ := strconv.ParseBool(os.Getenv("DO_NOT_TRACK"))
	if doNotTrack || !config.Metric.Enabled {
		return &Values{
			Enabled:   false,
			InstallID: "",
		}, nil
	}

	v := &Values{
		Enabled:   true,
		Hostname:  config.InstanceID,
		InstallID: "",
	}

	found, err := settingsSrv.SystemGet(ctx, settings.KeyInstallID, &v.InstallID)
	if err != nil {
		return nil, fmt.Errorf("failed to find install id: %w", err)
	}

	// first run (or empty value): generate and persist a fresh install ID
	if !found || v.InstallID == "" {
		v.InstallID = uuid.New().String()
		if err := settingsSrv.SystemSet(ctx, settings.KeyInstallID, v.InstallID); err != nil {
			return nil, fmt.Errorf("failed to update system settings: %w", err)
		}
	}

	return v, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/metric/event_handlers.go | app/services/metric/event_handlers.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric
import (
"context"
"fmt"
"time"
pullreqevents "github.com/harness/gitness/app/events/pullreq"
repoevents "github.com/harness/gitness/app/events/repo"
ruleevents "github.com/harness/gitness/app/events/rule"
userevents "github.com/harness/gitness/app/events/user"
"github.com/harness/gitness/app/services/publicaccess"
"github.com/harness/gitness/app/services/refcache"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/errors"
"github.com/harness/gitness/events"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/stream"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
func registerEventListeners(
ctx context.Context,
config *types.Config,
principalInfoCache store.PrincipalInfoCache,
pullReqStore store.PullReqStore,
ruleStore store.RuleStore,
userEvReaderFactory *events.ReaderFactory[*userevents.Reader],
repoEvReaderFactory *events.ReaderFactory[*repoevents.Reader],
pullreqEvReaderFactory *events.ReaderFactory[*pullreqevents.Reader],
ruleEvReaderFactory *events.ReaderFactory[*ruleevents.Reader],
spaceFinder refcache.SpaceFinder,
repoFinder refcache.RepoFinder,
publicAccess publicaccess.Service,
submitter Submitter,
) error {
if submitter == nil {
return nil
}
var err error
const groupMetricsUser = "gitness:metrics:user"
_, err = userEvReaderFactory.Launch(ctx, groupMetricsUser, config.InstanceID,
func(r *userevents.Reader) error {
const idleTimeout = 10 * time.Second
r.Configure(
stream.WithConcurrency(1),
stream.WithHandlerOptions(
stream.WithIdleTimeout(idleTimeout),
stream.WithMaxRetries(2),
))
h := handlersUser{
principalInfoCache: principalInfoCache,
submitter: submitter,
}
_ = r.RegisterCreated(h.Create)
_ = r.RegisterRegistered(h.Register)
_ = r.RegisterLoggedIn(h.Login)
return nil
})
if err != nil {
return err
}
const groupMetricsRepo = "gitness:metrics:repo"
_, err = repoEvReaderFactory.Launch(ctx, groupMetricsRepo, config.InstanceID,
func(r *repoevents.Reader) error {
const idleTimeout = 10 * time.Second
r.Configure(
stream.WithConcurrency(1),
stream.WithHandlerOptions(
stream.WithIdleTimeout(idleTimeout),
stream.WithMaxRetries(2),
))
h := handlersRepo{
principalInfoCache: principalInfoCache,
repoFinder: repoFinder,
publicAccess: publicAccess,
submitter: submitter,
}
_ = r.RegisterCreated(h.Create)
_ = r.RegisterPushed(h.Push)
_ = r.RegisterSoftDeleted(h.SoftDelete)
return nil
})
if err != nil {
return err
}
const groupMetricsPullReq = "gitness:metrics:pullreq"
_, err = pullreqEvReaderFactory.Launch(ctx, groupMetricsPullReq, config.InstanceID,
func(r *pullreqevents.Reader) error {
const idleTimeout = 10 * time.Second
r.Configure(
stream.WithConcurrency(1),
stream.WithHandlerOptions(
stream.WithIdleTimeout(idleTimeout),
stream.WithMaxRetries(2),
))
h := handlersPullReq{
principalInfoCache: principalInfoCache,
repoFinder: repoFinder,
pullReqStore: pullReqStore,
publicAccess: publicAccess,
submitter: submitter,
}
_ = r.RegisterCreated(h.Create)
_ = r.RegisterReopened(h.Reopen)
_ = r.RegisterClosed(h.Close)
_ = r.RegisterMerged(h.Merge)
_ = r.RegisterCommentCreated(h.CommentCreate)
return nil
})
if err != nil {
return err
}
const groupMetricsRule = "gitness:metrics:rule"
_, err = ruleEvReaderFactory.Launch(ctx, groupMetricsRule, config.InstanceID,
func(r *ruleevents.Reader) error {
const idleTimeout = 10 * time.Second
r.Configure(
stream.WithConcurrency(1),
stream.WithHandlerOptions(
stream.WithIdleTimeout(idleTimeout),
stream.WithMaxRetries(2),
))
h := handlersRule{
principalInfoCache: principalInfoCache,
spaceFinder: spaceFinder,
repoFinder: repoFinder,
ruleStore: ruleStore,
publicAccess: publicAccess,
submitter: submitter,
}
_ = r.RegisterCreated(h.Create)
return nil
})
if err != nil {
return err
}
return nil
}
func prepareProps(m map[string]any) map[string]any {
if m != nil {
return m
}
return make(map[string]any, 8)
}
// User fields.
const (
userID = "user_id"
userName = "user_name"
userEmail = "user_email"
userCreatedByID = "user_created_by_id"
userCreatedByName = "user_created_by_name"
userCreatedByEmail = "user_created_by_email"
)
type handlersUser struct {
principalInfoCache store.PrincipalInfoCache
submitter Submitter
}
func (h handlersUser) Register(ctx context.Context, e *events.Event[*userevents.RegisteredPayload]) error {
return h.submit(ctx, e.Payload.PrincipalID, VerbUserCreate, nil)
}
func (h handlersUser) Create(ctx context.Context, e *events.Event[*userevents.CreatedPayload]) error {
principal, err := h.principalInfoCache.Get(ctx, e.Payload.PrincipalID)
if err != nil {
return fmt.Errorf("failed to find principal who created a user: %w", err)
}
props := prepareProps(nil)
props[userCreatedByID] = principal.ID
props[userCreatedByName] = principal.UID
props[userCreatedByEmail] = principal.Email
return h.submit(ctx, e.Payload.CreatedPrincipalID, VerbUserCreate, props)
}
func (h handlersUser) Login(ctx context.Context, e *events.Event[*userevents.LoggedInPayload]) error {
return h.submit(ctx, e.Payload.PrincipalID, VerbUserLogin, nil)
}
func (h handlersUser) submit(
ctx context.Context,
principalID int64,
verb Verb,
props map[string]any,
) error {
principal, err := h.principalInfoCache.Get(ctx, principalID)
if err != nil {
return fmt.Errorf("failed to find principal info")
}
props = prepareProps(props)
props[userID] = principal.ID
props[userName] = principal.UID
props[userEmail] = principal.Email
err = h.submitter.Submit(ctx, principal, ObjectUser, verb, props)
if err != nil {
return fmt.Errorf("failed to submit metric data for user: %w", err)
}
return nil
}
// Space fields.
const (
spaceID = "space_id"
spaceName = "space_name"
spacePath = "space_path"
spaceParentID = "space_parent_id"
spacePrivate = "space_private"
)
// Repository fields.
const (
repoID = "repo_id"
repoName = "repo_name"
repoPath = "repo_path"
repoParentID = "repo_parent_id"
repoPrivate = "repo_private"
repoMigrated = "repo_migrated"
repoImported = "repo_imported"
repoImportedFrom = "repo_imported_from"
)
type handlersRepo struct {
principalInfoCache store.PrincipalInfoCache
repoFinder refcache.RepoFinder
publicAccess publicaccess.Service
submitter Submitter
}
func (h handlersRepo) Create(ctx context.Context, e *events.Event[*repoevents.CreatedPayload]) error {
props := prepareProps(nil)
props[repoPrivate] = !e.Payload.IsPublic
if e.Payload.IsMigrated {
props[repoMigrated] = true
}
if e.Payload.ImportedFrom != "" {
props[repoImported] = true
props[repoImportedFrom] = e.Payload.ImportedFrom
}
return h.submitForActive(ctx, e.Payload.RepoID, e.Payload.PrincipalID, VerbRepoCreate, props)
}
func (h handlersRepo) Push(
ctx context.Context,
e *events.Event[*repoevents.PushedPayload],
) error {
return h.submitForActive(ctx, e.Payload.RepoID, e.Payload.PrincipalID, VerbRepoPush, nil)
}
func (h handlersRepo) SoftDelete(
ctx context.Context,
e *events.Event[*repoevents.SoftDeletedPayload],
) error {
return h.submitForDeleted(ctx, e.Payload.RepoPath, e.Payload.Deleted, e.Payload.PrincipalID, VerbRepoDelete, nil)
}
func (h handlersRepo) submitForActive(
ctx context.Context,
id int64,
principalID int64,
verb Verb,
props map[string]any,
) error {
repo, err := h.repoFinder.FindByID(ctx, id)
if err != nil {
return fmt.Errorf("failed to find repository")
}
props, err = fillRepoData(ctx, props, repo, h.publicAccess)
if err != nil {
return fmt.Errorf("failed to fill repo data: %w", err)
}
err = h.submit(ctx, principalID, verb, props)
if err != nil {
return fmt.Errorf("failed to submit event: %w", err)
}
return nil
}
func (h handlersRepo) submitForDeleted(
ctx context.Context,
repoRef string,
deletedAt int64,
principalID int64,
verb Verb,
props map[string]any,
) error {
repo, err := h.repoFinder.FindDeletedByRef(ctx, repoRef, deletedAt)
if err != nil {
return fmt.Errorf("failed to find deleted repo: %w", err)
}
props, err = fillRepoData(ctx, props, repo.Core(), nil)
if err != nil {
return fmt.Errorf("failed to fill deleted repo data: %w", err)
}
err = h.submit(ctx, principalID, verb, props)
if err != nil {
return fmt.Errorf("failed to submit metric data for deleted repository: %w", err)
}
return nil
}
func (h handlersRepo) submit(
ctx context.Context,
principalID int64,
verb Verb,
props map[string]any,
) error {
principal, err := h.principalInfoCache.Get(ctx, principalID)
if err != nil {
return fmt.Errorf("failed to get principal info: %w", err)
}
err = h.submitter.Submit(ctx, principal, ObjectRepository, verb, props)
if err != nil {
return fmt.Errorf("failed to submit metric data for repositoy: %w", err)
}
return nil
}
// Pull request fields.
const (
prNumber = "pr_number"
prTargetBranch = "pr_target_branch"
prSourceBranch = "pr_source_branch"
prSourceRepoID = "pr_source_repo_id"
prSourceRepoName = "pr_source_repo_name"
prSourceRepoPath = "pr_source_repo_path"
prMergeMethod = "pr_merge_method"
prCommentReply = "pr_comment_reply"
)
type handlersPullReq struct {
principalInfoCache store.PrincipalInfoCache
repoFinder refcache.RepoFinder
pullReqStore store.PullReqStore
publicAccess publicaccess.Service
submitter Submitter
}
func (h handlersPullReq) Create(ctx context.Context, e *events.Event[*pullreqevents.CreatedPayload]) error {
return h.submit(ctx, e.Payload.PullReqID, e.Payload.PrincipalID, VerbPullReqCreate, nil)
}
func (h handlersPullReq) Close(ctx context.Context, e *events.Event[*pullreqevents.ClosedPayload]) error {
return h.submit(ctx, e.Payload.PullReqID, e.Payload.PrincipalID, VerbPullReqClose, nil)
}
func (h handlersPullReq) Reopen(ctx context.Context, e *events.Event[*pullreqevents.ReopenedPayload]) error {
return h.submit(ctx, e.Payload.PullReqID, e.Payload.PrincipalID, VerbPullReqReopen, nil)
}
func (h handlersPullReq) Merge(ctx context.Context, e *events.Event[*pullreqevents.MergedPayload]) error {
return h.submit(ctx, e.Payload.PullReqID, e.Payload.PrincipalID, VerbPullReqMerge, nil)
}
func (h handlersPullReq) CommentCreate(
ctx context.Context,
e *events.Event[*pullreqevents.CommentCreatedPayload],
) error {
props := prepareProps(nil)
props[prCommentReply] = e.Payload.IsReply
return h.submit(ctx, e.Payload.PullReqID, e.Payload.PrincipalID, VerbPullReqComment, props)
}
func (h handlersPullReq) submit(
ctx context.Context,
pullReqID, principalID int64,
verb Verb,
props map[string]any,
) error {
pr, err := h.pullReqStore.Find(ctx, pullReqID)
if err != nil {
return fmt.Errorf("failed to find pull request: %w", err)
}
props, err = fillPullReqProps(ctx, props, pr, h.repoFinder, h.publicAccess)
if err != nil {
return fmt.Errorf("failed to fill pull request props: %w", err)
}
principal, err := h.principalInfoCache.Get(ctx, principalID)
if err != nil {
return fmt.Errorf("failed to get principal info: %w", err)
}
err = h.submitter.Submit(ctx, principal, ObjectPullRequest, verb, props)
if err != nil {
return fmt.Errorf("failed to submit metric data for pull request: %w", err)
}
return nil
}
// Rule fields.
const (
ruleID = "rule_id"
ruleName = "rule_name"
ruleType = "rule_type"
)
func (h handlersRule) Create(ctx context.Context, e *events.Event[*ruleevents.CreatedPayload]) error {
return h.submit(ctx, e.Payload.RuleID, e.Payload.PrincipalID, VerbRuleCreate, nil)
}
func (h handlersRule) submit(
ctx context.Context,
ruleID, principalID int64,
verb Verb,
props map[string]any,
) error {
rule, err := h.ruleStore.Find(ctx, ruleID)
if err != nil {
return fmt.Errorf("failed to find pull request: %w", err)
}
props, err = fillRuleProps(ctx, props, rule, h.spaceFinder, h.repoFinder, h.publicAccess)
if err != nil {
return fmt.Errorf("failed to fill pull request props: %w", err)
}
principal, err := h.principalInfoCache.Get(ctx, principalID)
if err != nil {
return fmt.Errorf("failed to get principal info: %w", err)
}
err = h.submitter.Submit(ctx, principal, ObjectRule, verb, props)
if err != nil {
return fmt.Errorf("failed to submit metric data for rule: %w", err)
}
return nil
}
type handlersRule struct {
principalInfoCache store.PrincipalInfoCache
spaceFinder refcache.SpaceFinder
repoFinder refcache.RepoFinder
ruleStore store.RuleStore
publicAccess publicaccess.Service
submitter Submitter
}
func fillSpaceData(
ctx context.Context,
props map[string]any,
space *types.SpaceCore,
publicAccess publicaccess.Service,
) (map[string]any, error) {
props = prepareProps(props)
props[spaceID] = space.ID
props[spaceName] = space.Identifier
props[spacePath] = space.Path
props[spaceParentID] = space.ParentID
if _, ok := props[spacePrivate]; !ok && publicAccess != nil {
isRepoPublic, err := publicAccess.Get(ctx, enum.PublicResourceTypeSpace, space.Path)
if err != nil {
return nil, fmt.Errorf("failed to check public access for space: %w", err)
}
props[spacePrivate] = !isRepoPublic
}
return props, nil
}
func fillRepoData(
ctx context.Context,
props map[string]any,
repo *types.RepositoryCore,
publicAccess publicaccess.Service,
) (map[string]any, error) {
props = prepareProps(props)
props[repoID] = repo.ID
props[repoName] = repo.Identifier
props[repoPath] = repo.Path
props[repoParentID] = repo.ParentID
if _, ok := props[repoPrivate]; !ok && publicAccess != nil {
isRepoPublic, err := publicAccess.Get(ctx, enum.PublicResourceTypeRepo, repo.Path)
if err != nil {
return nil, fmt.Errorf("failed to check public access for repo: %w", err)
}
props[repoPrivate] = !isRepoPublic
}
return props, nil
}
func fillPullReqProps(
ctx context.Context,
props map[string]any,
pr *types.PullReq,
repoFinder refcache.RepoFinder,
publicAccess publicaccess.Service,
) (map[string]any, error) {
props = prepareProps(props)
props[prNumber] = pr.Number
props[prSourceBranch] = pr.SourceBranch
props[prTargetBranch] = pr.TargetBranch
if pr.MergeMethod != nil {
props[prMergeMethod] = string(*pr.MergeMethod)
}
targetRepo, err := repoFinder.FindByID(ctx, pr.TargetRepoID)
if err != nil {
return nil, fmt.Errorf("failed to find target repo: %w", err)
}
props, err = fillRepoData(ctx, props, targetRepo, publicAccess)
if err != nil {
return nil, fmt.Errorf("failed to fill repo data for target repo: %w", err)
}
var sourceRepo *types.RepositoryCore
if pr.SourceRepoID != nil && *pr.SourceRepoID != pr.TargetRepoID {
sourceRepo, err = repoFinder.FindByID(ctx, *pr.SourceRepoID)
if err != nil && !errors.Is(err, gitness_store.ErrResourceNotFound) {
return nil, fmt.Errorf("failed to get source repo by id: %w", err)
}
}
if sourceRepo != nil {
props[prSourceRepoID] = sourceRepo.ID
props[prSourceRepoName] = sourceRepo.Identifier
props[prSourceRepoPath] = sourceRepo.Path
}
return props, nil
}
func fillRuleProps(
ctx context.Context,
props map[string]any,
rule *types.Rule,
spaceFinder refcache.SpaceFinder,
repoFinder refcache.RepoFinder,
publicAccess publicaccess.Service,
) (map[string]any, error) {
props = prepareProps(props)
props[ruleID] = rule.RepoID
props[ruleName] = rule.Identifier
props[ruleType] = string(rule.Type)
//nolint:nestif
if rule.SpaceID != nil {
space, err := spaceFinder.FindByID(ctx, *rule.SpaceID)
if err != nil {
return nil, fmt.Errorf("failed to find space: %w", err)
}
props, err = fillSpaceData(ctx, props, space, publicAccess)
if err != nil {
return nil, fmt.Errorf("failed to fill space data for rule: %w", err)
}
} else if rule.RepoID != nil {
repo, err := repoFinder.FindByID(ctx, *rule.RepoID)
if err != nil {
return nil, fmt.Errorf("failed to find repo: %w", err)
}
props, err = fillRepoData(ctx, props, repo, publicAccess)
if err != nil {
return nil, fmt.Errorf("failed to fill repo data for rule: %w", err)
}
}
return props, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/metric/collector_job.go | app/services/metric/collector_job.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"time"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/job"
registrystore "github.com/harness/gitness/registry/app/store"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/harness/gitness/version"
)
const jobType = "metric-collector"
type metricData struct {
IP string `json:"ip"`
Hostname string `json:"hostname"`
InstallID string `json:"install_id"`
Installer string `json:"installed_by"`
Installed string `json:"installed_at"`
Version string `json:"version"`
Users int64 `json:"user_count"`
RepoCount int64 `json:"repo_count"`
PipelineCount int64 `json:"pipeline_count"`
ExecutionCount int64 `json:"execution_count"`
GitspaceCount int64 `json:"gitspace_count"`
RegistryCount int64 `json:"registry_count"`
ArtifactCount int64 `json:"artifact_count"`
}
type CollectorJob struct {
values *Values
endpoint string
token string
userStore store.PrincipalStore
repoStore store.RepoStore
pipelineStore store.PipelineStore
executionStore store.ExecutionStore
scheduler *job.Scheduler
gitspaceConfigStore store.GitspaceConfigStore
registryStore registrystore.RegistryRepository
artifactStore registrystore.ArtifactRepository
submitter Submitter
}
func NewCollectorJob(
values *Values,
endpoint string,
token string,
userStore store.PrincipalStore,
repoStore store.RepoStore,
pipelineStore store.PipelineStore,
executionStore store.ExecutionStore,
scheduler *job.Scheduler,
gitspaceConfigStore store.GitspaceConfigStore,
registryStore registrystore.RegistryRepository,
artifactStore registrystore.ArtifactRepository,
submitter Submitter,
) *CollectorJob {
return &CollectorJob{
values: values,
endpoint: endpoint,
token: token,
userStore: userStore,
repoStore: repoStore,
pipelineStore: pipelineStore,
executionStore: executionStore,
scheduler: scheduler,
gitspaceConfigStore: gitspaceConfigStore,
registryStore: registryStore,
artifactStore: artifactStore,
submitter: submitter,
}
}
func (c *CollectorJob) Register(ctx context.Context) error {
if !c.values.Enabled {
return nil
}
err := c.scheduler.AddRecurring(ctx, jobType, jobType, "0 0 * * *", time.Minute)
if err != nil {
return fmt.Errorf("failed to register recurring job for collector: %w", err)
}
return nil
}
func (c *CollectorJob) Handle(ctx context.Context, _ string, _ job.ProgressReporter) (string, error) {
if !c.values.Enabled {
return "", nil
}
err := c.submitter.SubmitGroups(ctx)
if err != nil {
return "", fmt.Errorf("failed to submit metric groups: %w", err)
}
// get first available user
users, err := c.userStore.ListUsers(ctx, &types.UserFilter{
Page: 1,
Size: 1,
Sort: enum.UserAttrCreated,
Order: enum.OrderAsc,
})
if err != nil {
return "", err
}
if len(users) == 0 {
return "", nil
}
// total users in the system
totalUsers, err := c.userStore.CountUsers(ctx, &types.UserFilter{})
if err != nil {
return "", fmt.Errorf("failed to get users count: %w", err)
}
// total repos in the system
totalRepos, err := c.repoStore.Count(ctx, 0, &types.RepoFilter{})
if err != nil {
return "", fmt.Errorf("failed to get repositories count: %w", err)
}
// total pipelines in the system
totalPipelines, err := c.pipelineStore.Count(ctx, 0, &types.ListPipelinesFilter{})
if err != nil {
return "", fmt.Errorf("failed to get pipelines count: %w", err)
}
// total executions in the system
totalExecutions, err := c.executionStore.Count(ctx, 0)
if err != nil {
return "", fmt.Errorf("failed to get executions count: %w", err)
}
// total gitspaces (configs) in the system
totalGitspaces, err := c.gitspaceConfigStore.Count(ctx, &types.GitspaceFilter{})
if err != nil {
return "", fmt.Errorf("failed to get gitspace count: %w", err)
}
totalRegistries, err := c.registryStore.Count(ctx)
if err != nil {
return "", fmt.Errorf("failed to get registries count: %w", err)
}
totalArtifacts, err := c.artifactStore.Count(ctx)
if err != nil {
return "", fmt.Errorf("failed to get artifacts count: %w", err)
}
data := metricData{
Hostname: c.values.Hostname,
InstallID: c.values.InstallID,
Installer: users[0].Email,
Installed: time.UnixMilli(users[0].Created).Format("2006-01-02 15:04:05"),
Version: version.Version.String(),
Users: totalUsers,
RepoCount: totalRepos,
PipelineCount: totalPipelines,
ExecutionCount: totalExecutions,
GitspaceCount: totalGitspaces,
RegistryCount: totalRegistries,
ArtifactCount: totalArtifacts,
}
buf := new(bytes.Buffer)
err = json.NewEncoder(buf).Encode(data)
if err != nil {
return "", fmt.Errorf("failed to encode metric data: %w", err)
}
endpoint := fmt.Sprintf("%s?api_key=%s", c.endpoint, c.token)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buf)
if err != nil {
return "", fmt.Errorf("failed to create a request for metric data to endpoint %s: %w", endpoint, err)
}
req.Header.Add("Accept", "application/json")
req.Header.Add("Content-Type", "application/json")
res, err := httpClient.Do(req)
if err != nil {
return "", fmt.Errorf("failed to send metric data to endpoint %s: %w", endpoint, err)
}
res.Body.Close()
return res.Status, nil
}
// httpClient should be used for HTTP requests. It
// is configured with a timeout for reliability.
var httpClient = &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
TLSHandshakeTimeout: 30 * time.Second,
DisableKeepAlives: true,
},
Timeout: 1 * time.Minute,
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/metric/common.go | app/services/metric/common.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric
import (
"context"
"github.com/harness/gitness/types"
)
type Object string
const (
ObjectUser Object = "user"
ObjectRepository Object = "repo"
ObjectPullRequest Object = "pr"
ObjectRule Object = "rule"
)
type Verb string
// User verbs.
const (
VerbUserCreate Verb = "create"
VerbUserLogin Verb = "login"
)
// Repository verbs.
const (
VerbRepoCreate Verb = "create"
VerbRepoPush Verb = "push"
VerbRepoDelete Verb = "delete"
)
// Pull request verbs.
const (
VerbPullReqCreate Verb = "create"
VerbPullReqMerge Verb = "merge"
VerbPullReqClose Verb = "close"
VerbPullReqReopen Verb = "reopen"
VerbPullReqComment Verb = "comment"
)
// Rule verbs.
const (
VerbRuleCreate Verb = "create"
)
type Submitter interface {
// SubmitGroups should be called once a day to update info about all the groups.
SubmitGroups(ctx context.Context) error
// Submit submits an event.
Submit(
ctx context.Context,
user *types.PrincipalInfo,
object Object,
verb Verb,
properties map[string]any,
) error
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/repo/wire.go | app/services/repo/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package repo
import (
"context"
repoevents "github.com/harness/gitness/app/events/repo"
"github.com/harness/gitness/app/services/locker"
"github.com/harness/gitness/app/services/usage"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/app/url"
"github.com/harness/gitness/events"
"github.com/harness/gitness/git"
"github.com/harness/gitness/job"
"github.com/harness/gitness/types"
"github.com/google/wire"
)
var WireSet = wire.NewSet(
ProvideCalculator,
ProvideService,
)
func ProvideCalculator(
config *types.Config,
git git.Interface,
repoStore store.RepoStore,
spaceStore store.SpaceStore,
scheduler *job.Scheduler,
executor *job.Executor,
lfsStore store.LFSObjectStore,
usageMetricSender usage.Sender,
) (*SizeCalculator, error) {
job := &SizeCalculator{
enabled: config.RepoSize.Enabled,
cron: config.RepoSize.CRON,
maxDur: config.RepoSize.MaxDuration,
numWorkers: config.RepoSize.NumWorkers,
git: git,
repoStore: repoStore,
spaceStore: spaceStore,
scheduler: scheduler,
lfsStore: lfsStore,
usageMetricSender: usageMetricSender,
}
err := executor.Register(jobType, job)
if err != nil {
return nil, err
}
return job, nil
}
func ProvideService(ctx context.Context,
config *types.Config,
repoEvReporter *repoevents.Reporter,
repoReaderFactory *events.ReaderFactory[*repoevents.Reader],
repoStore store.RepoStore,
urlProvider url.Provider,
git git.Interface,
locker *locker.Locker,
) (*Service, error) {
return NewService(ctx, config, repoEvReporter, repoReaderFactory,
repoStore, urlProvider, git, locker)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/repo/handlers_default_branch.go | app/services/repo/handlers_default_branch.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package repo
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/app/bootstrap"
repoevents "github.com/harness/gitness/app/events/repo"
"github.com/harness/gitness/app/githook"
"github.com/harness/gitness/events"
"github.com/harness/gitness/git"
"github.com/rs/zerolog/log"
)
// handleUpdateDefaultBranch handles git update default branch using branch name from db (not event payload).
func (s *Service) handleUpdateDefaultBranch(
ctx context.Context,
event *events.Event[*repoevents.DefaultBranchUpdatedPayload],
) error {
// the max time we give an update default branch to succeed
const timeout = 2 * time.Minute
unlock, err := s.locker.LockDefaultBranch(
ctx,
event.Payload.RepoID,
event.Payload.NewName, // only used for logging
timeout+30*time.Second, // add 30s to the lock to give enough time for updating default branch
)
if err != nil {
return fmt.Errorf("failed to lock repo for updating default branch to %s", event.Payload.NewName)
}
defer unlock()
repo, err := s.repoStore.Find(ctx, event.Payload.RepoID)
if err != nil {
return fmt.Errorf("update default branch handler failed to find the repo: %w", err)
}
// create new, time-restricted context to guarantee update completion, even if request is canceled.
// TODO: a proper error handling solution required.
ctx, cancel := context.WithTimeout(
ctx,
timeout,
)
defer cancel()
systemPrincipal := bootstrap.NewSystemServiceSession().Principal
envVars, err := githook.GenerateEnvironmentVariables(
ctx,
s.urlProvider.GetInternalAPIURL(ctx),
repo.ID,
systemPrincipal.ID,
true,
true,
)
if err != nil {
return fmt.Errorf("failed to generate git hook env variables: %w", err)
}
err = s.git.UpdateDefaultBranch(ctx, &git.UpdateDefaultBranchParams{
WriteParams: git.WriteParams{
Actor: git.Identity{
Name: systemPrincipal.DisplayName,
Email: systemPrincipal.Email,
},
RepoUID: repo.GitUID,
EnvVars: envVars,
},
BranchName: repo.DefaultBranch,
})
if err != nil {
return fmt.Errorf("failed to update the repo default branch to %s", repo.DefaultBranch)
}
log.Ctx(ctx).Info().Msgf("git repo default branch updated to %s by default branch event handler", repo.DefaultBranch)
return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/repo/service.go | app/services/repo/service.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package repo
import (
"context"
"fmt"
"time"
repoevents "github.com/harness/gitness/app/events/repo"
"github.com/harness/gitness/app/services/locker"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/app/url"
"github.com/harness/gitness/events"
"github.com/harness/gitness/git"
"github.com/harness/gitness/stream"
"github.com/harness/gitness/types"
)
const groupRepo = "gitness:repo"
type Service struct {
repoEvReporter *repoevents.Reporter
repoStore store.RepoStore
urlProvider url.Provider
git git.Interface
locker *locker.Locker
}
func NewService(
ctx context.Context,
config *types.Config,
repoEvReporter *repoevents.Reporter,
repoReaderFactory *events.ReaderFactory[*repoevents.Reader],
repoStore store.RepoStore,
urlProvider url.Provider,
git git.Interface,
locker *locker.Locker,
) (*Service, error) {
service := &Service{
repoEvReporter: repoEvReporter,
repoStore: repoStore,
urlProvider: urlProvider,
git: git,
locker: locker,
}
_, err := repoReaderFactory.Launch(ctx, groupRepo, config.InstanceID,
func(r *repoevents.Reader) error {
const idleTimeout = 15 * time.Second
r.Configure(
stream.WithConcurrency(1),
stream.WithHandlerOptions(
stream.WithIdleTimeout(idleTimeout),
stream.WithMaxRetries(3),
))
_ = r.RegisterDefaultBranchUpdated(service.handleUpdateDefaultBranch)
return nil
})
if err != nil {
return nil, fmt.Errorf("failed to launch reader factory for repo git group: %w", err)
}
return service, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/repo/reposize.go | app/services/repo/reposize.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package repo
import (
"context"
"fmt"
"strings"
"sync"
"time"
"github.com/harness/gitness/app/services/usage"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/git"
"github.com/harness/gitness/job"
"github.com/harness/gitness/types"
"github.com/dustin/go-humanize"
"github.com/rs/zerolog/log"
)
const jobType = "repo-size-calculator"
type SizeCalculator struct {
enabled bool
cron string
maxDur time.Duration
numWorkers int
git git.Interface
scheduler *job.Scheduler
lfsStore store.LFSObjectStore
repoStore store.RepoStore
spaceStore store.SpaceStore
usageMetricSender usage.Sender
}
func (s *SizeCalculator) Register(ctx context.Context) error {
if !s.enabled {
return nil
}
err := s.scheduler.AddRecurring(ctx, jobType, jobType, s.cron, s.maxDur)
if err != nil {
return fmt.Errorf("failed to register recurring job for calculator: %w", err)
}
return nil
}
func (s *SizeCalculator) Handle(ctx context.Context, _ string, _ job.ProgressReporter) (string, error) {
defer func() {
if sendErr := s.sendMetric(ctx); sendErr != nil {
log.Ctx(ctx).Error().Err(sendErr).Msgf("failed to send metric")
}
}()
if !s.enabled {
return "", nil
}
sizeInfos, err := s.repoStore.ListSizeInfos(ctx)
if err != nil {
return "", fmt.Errorf("failed to get repository sizes: %w", err)
}
expiredBefore := time.Now().Add(s.maxDur)
log.Ctx(ctx).Info().Msgf(
"start repo size calculation (operation timeout: %s)",
expiredBefore.Format(time.RFC3339Nano),
)
var wg sync.WaitGroup
taskCh := make(chan *types.RepositorySizeInfo)
for i := 0; i < s.numWorkers; i++ {
wg.Add(1)
go worker(ctx, s, &wg, taskCh)
}
for _, sizeInfo := range sizeInfos {
select {
case <-ctx.Done():
break
case taskCh <- sizeInfo:
}
}
close(taskCh)
wg.Wait()
return "", nil
}
func worker(ctx context.Context, s *SizeCalculator, wg *sync.WaitGroup, taskCh <-chan *types.RepositorySizeInfo) {
defer wg.Done()
for sizeInfo := range taskCh {
log := log.Ctx(ctx).With().Str("repo_git_uid", sizeInfo.GitUID).Int64("repo_id", sizeInfo.ID).Logger()
log.Debug().Msgf("previous repo size: %d KiB", sizeInfo.Size)
gitSizeOut, err := s.git.GetRepositorySize(
ctx,
&git.GetRepositorySizeParams{ReadParams: git.ReadParams{RepoUID: sizeInfo.GitUID}})
if err != nil {
log.Error().Msgf("failed to get repo size: %s", err.Error())
continue
}
lfsSize, err := s.lfsStore.GetSizeInKBByRepoID(ctx, sizeInfo.ID)
if err != nil {
log.Error().Msgf("failed to get repo lfs objects size: %s", err.Error())
continue
}
if gitSizeOut.Size == sizeInfo.Size && lfsSize == sizeInfo.LFSSize {
log.Debug().Msg("repo size not changed")
continue
}
if err := s.repoStore.UpdateSize(ctx, sizeInfo.ID, gitSizeOut.Size, lfsSize); err != nil {
log.Error().Msgf("failed to update repo size: %s", err.Error())
continue
}
totalSize := humanize.Bytes(uint64((gitSizeOut.Size + lfsSize) * 1024)) //nolint:gosec
repoSize := humanize.Bytes(uint64(gitSizeOut.Size * 1024)) //nolint:gosec
repoLFSSize := humanize.Bytes(uint64(lfsSize * 1024)) //nolint:gosec
log.Debug().Msgf("new repo size: %s (git: %s, lfs: %s)", totalSize, repoSize, repoLFSSize)
}
}
func (s *SizeCalculator) sendMetric(
ctx context.Context,
) error {
date := time.Now()
if strings.HasPrefix(s.cron, "0 0") {
// if cron job runs at midnight store calculated size for prev day
date = date.Add(-24 * time.Hour)
}
spaces, err := s.spaceStore.GetRootSpacesSize(ctx)
if err != nil {
return fmt.Errorf("failed to fetch root spaces size: %w", err)
}
for _, rootSpace := range spaces {
err = s.usageMetricSender.Send(ctx, usage.Metric{
Time: date,
SpaceRef: rootSpace.Identifier,
StorageTotal: rootSpace.Size,
LFSStorageTotal: rootSpace.LFSSize,
})
if err != nil {
log.Ctx(ctx).Error().Err(err).
Str("space", rootSpace.Identifier).
Msg("failed to send usage metric for root space %s")
}
}
return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/gitspaceinfraevent/wire.go | app/services/gitspaceinfraevent/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gitspaceinfraevent
import (
"context"
gitspaceevents "github.com/harness/gitness/app/events/gitspace"
gitspaceinfraevents "github.com/harness/gitness/app/events/gitspaceinfra"
"github.com/harness/gitness/app/gitspace/orchestrator"
"github.com/harness/gitness/app/services/gitspace"
"github.com/harness/gitness/app/services/gitspaceevent"
"github.com/harness/gitness/events"
"github.com/google/wire"
)
// WireSet provides a wire set for this package.
var WireSet = wire.NewSet(
ProvideService,
)
func ProvideService(
ctx context.Context,
config *gitspaceevent.Config,
gitspaceInfraEventReaderFactory *events.ReaderFactory[*gitspaceinfraevents.Reader],
orchestrator orchestrator.Orchestrator,
gitspaceSvc *gitspace.Service,
eventReporter *gitspaceevents.Reporter,
) (*Service, error) {
return NewService(
ctx,
config,
gitspaceInfraEventReaderFactory,
orchestrator,
gitspaceSvc,
eventReporter,
)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/gitspaceinfraevent/service.go | app/services/gitspaceinfraevent/service.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gitspaceinfraevent
import (
"context"
"fmt"
"time"
gitspaceevents "github.com/harness/gitness/app/events/gitspace"
gitspaceinfraevents "github.com/harness/gitness/app/events/gitspaceinfra"
"github.com/harness/gitness/app/gitspace/orchestrator"
"github.com/harness/gitness/app/services/gitspace"
"github.com/harness/gitness/app/services/gitspaceevent"
"github.com/harness/gitness/events"
"github.com/harness/gitness/stream"
)
const groupGitspaceInfraEvents = "gitness:gitspaceinfra"
type Service struct {
config *gitspaceevent.Config
orchestrator orchestrator.Orchestrator
gitspaceSvc *gitspace.Service
eventReporter *gitspaceevents.Reporter
}
func NewService(
ctx context.Context,
config *gitspaceevent.Config,
gitspaceInfraEventReaderFactory *events.ReaderFactory[*gitspaceinfraevents.Reader],
orchestrator orchestrator.Orchestrator,
gitspaceSvc *gitspace.Service,
eventReporter *gitspaceevents.Reporter,
) (*Service, error) {
if err := config.Sanitize(); err != nil {
return nil, fmt.Errorf("provided gitspace infra event service config is invalid: %w", err)
}
service := &Service{
config: config,
orchestrator: orchestrator,
gitspaceSvc: gitspaceSvc,
eventReporter: eventReporter,
}
_, err := gitspaceInfraEventReaderFactory.Launch(ctx, groupGitspaceInfraEvents, config.EventReaderName,
func(r *gitspaceinfraevents.Reader) error {
var idleTimeout = time.Duration(config.TimeoutInMins) * time.Minute
r.Configure(
stream.WithConcurrency(config.Concurrency),
stream.WithHandlerOptions(
stream.WithIdleTimeout(idleTimeout),
stream.WithMaxRetries(config.MaxRetries),
))
_ = r.RegisterGitspaceInfraEvent(service.handleGitspaceInfraResumeEvent)
return nil
})
if err != nil {
return nil, fmt.Errorf("failed to launch gitspace infra event reader: %w", err)
}
return service, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/gitspaceinfraevent/handler.go | app/services/gitspaceinfraevent/handler.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gitspaceinfraevent
import (
"context"
"fmt"
"time"
gitspaceEvents "github.com/harness/gitness/app/events/gitspace"
gitspaceInfraEvents "github.com/harness/gitness/app/events/gitspaceinfra"
"github.com/harness/gitness/events"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/rs/zerolog/log"
)
func (s *Service) handleGitspaceInfraResumeEvent(
ctx context.Context,
event *events.Event[*gitspaceInfraEvents.GitspaceInfraEventPayload],
) error {
log.Debug().Msgf("received infra resume event, type: %s, gitspace configID: %s",
event.Payload.Type,
event.Payload.Infra.GitspaceConfigIdentifier,
)
payload := event.Payload
ctxWithTimedOut, cancel := context.WithTimeout(ctx, time.Duration(s.config.TimeoutInMins)*time.Minute)
defer cancel()
config, fetchErr := s.getConfig(ctxWithTimedOut, payload.Infra.SpaceID, payload.Infra.GitspaceConfigIdentifier)
if fetchErr != nil {
return fetchErr
}
instance := config.GitspaceInstance
if payload.Infra.GitspaceInstanceIdentifier != "" {
gitspaceInstance, err := s.gitspaceSvc.FindInstanceByIdentifier(
ctxWithTimedOut,
payload.Infra.GitspaceInstanceIdentifier,
)
if err != nil {
return fmt.Errorf("failed to fetch gitspace instance: %w", err)
}
instance = gitspaceInstance
config.GitspaceInstance = instance
}
defer func() {
// TODO: Update would not be needed for provision, stop and deprovision. Needs to be removed later.
updateErr := s.gitspaceSvc.UpdateInstance(ctx, instance)
if updateErr != nil {
log.Err(updateErr).Msgf("failed to update gitspace instance")
}
}()
log.Debug().Msgf("gitspace config found, ID: %s, instance identifier: %s",
payload.Infra.GitspaceConfigIdentifier, instance.Identifier)
var err error
if payload.Infra.Status == enum.InfraStatusError {
log.Error().Msgf(
"infra status is error, updating gitspace instance %s state to error",
instance.Identifier,
)
instance.State = enum.GitspaceInstanceStateError
return nil
}
switch payload.Type {
case enum.InfraEventProvision:
if config.GitspaceInstance.Identifier != payload.Infra.GitspaceInstanceIdentifier {
return fmt.Errorf("gitspace instance is not latest, stopping provisioning")
}
updatedInstance, resumeStartErr := s.orchestrator.ResumeStartGitspace(ctxWithTimedOut, *config, payload.Infra)
if resumeStartErr != nil {
updatedInstance.State = enum.GitspaceInstanceStateError
s.emitGitspaceConfigEvent(ctxWithTimedOut, config, enum.GitspaceEventTypeGitspaceActionStartFailed)
updatedInstance.ErrorMessage = resumeStartErr.ErrorMessage
err = fmt.Errorf("failed to resume start gitspace: %w", resumeStartErr.Error)
}
instance = &updatedInstance
case enum.InfraEventStop:
instanceState, resumeStopErr := s.orchestrator.ResumeStopGitspace(ctxWithTimedOut, *config, payload.Infra)
if resumeStopErr != nil {
instance.State = enum.GitspaceInstanceStateError
s.emitGitspaceConfigEvent(ctxWithTimedOut, config, enum.GitspaceEventTypeGitspaceActionStopFailed)
instance.ErrorMessage = resumeStopErr.ErrorMessage
err = fmt.Errorf("failed to resume stop gitspace: %w", resumeStopErr.Error)
}
instance.State = instanceState
case enum.InfraEventDeprovision:
instanceState, resumeDeleteErr := s.orchestrator.ResumeDeleteGitspace(ctxWithTimedOut, *config, payload.Infra)
if resumeDeleteErr != nil {
instance.State = enum.GitspaceInstanceStateError
err = fmt.Errorf("failed to resume delete gitspace: %w", resumeDeleteErr)
} else if config.IsMarkedForDeletion {
config.IsDeleted = true
updateErr := s.gitspaceSvc.UpdateConfig(ctxWithTimedOut, config)
if updateErr != nil {
err = fmt.Errorf("failed to delete gitspace config with ID: %s %w", config.Identifier, updateErr)
}
}
instance.State = instanceState
case enum.InfraEventCleanup:
instanceState, resumeCleanupErr := s.orchestrator.ResumeCleanupInstanceResources(
ctxWithTimedOut, *config, payload.Infra)
if resumeCleanupErr != nil {
instance.State = enum.GitspaceInstanceStateError
s.emitGitspaceConfigEvent(ctxWithTimedOut, config, enum.GitspaceEventTypeInfraCleanupFailed)
err = fmt.Errorf("failed to resume cleanup gitspace: %w", resumeCleanupErr)
}
instance.State = instanceState
default:
instance.State = enum.GitspaceInstanceStateError
return fmt.Errorf("unknown event type: %s", event.Payload.Type)
}
if err != nil {
log.Err(err).Msgf("error while handling gitspace infra event")
}
return nil
}
func (s *Service) getConfig(
ctx context.Context,
spaceID int64,
identifier string,
) (*types.GitspaceConfig, error) {
config, err := s.gitspaceSvc.FindWithLatestInstance(ctx, spaceID, identifier)
if err != nil {
return nil, fmt.Errorf(
"failed to find gitspace config during infra event handling, identifier %s: %w", identifier, err)
}
return config, nil
}
func (s *Service) emitGitspaceConfigEvent(ctx context.Context,
config *types.GitspaceConfig,
eventType enum.GitspaceEventType,
) {
s.eventReporter.EmitGitspaceEvent(ctx, gitspaceEvents.GitspaceEvent, &gitspaceEvents.GitspaceEventPayload{
QueryKey: config.Identifier,
EntityID: config.ID,
EntityType: enum.GitspaceEntityTypeGitspaceConfig,
EventType: eventType,
Timestamp: time.Now().UnixNano(),
})
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/infraprovider/wire.go | app/services/infraprovider/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package infraprovider
import (
"github.com/harness/gitness/app/services/refcache"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/infraprovider"
"github.com/harness/gitness/store/database/dbtx"
"github.com/google/wire"
)
var WireSet = wire.NewSet(
ProvideInfraProvider,
)
func ProvideInfraProvider(
tx dbtx.Transactor,
gitspaceConfigStore store.GitspaceConfigStore,
resourceStore store.InfraProviderResourceStore,
configStore store.InfraProviderConfigStore,
templateStore store.InfraProviderTemplateStore,
infraProviderFactory infraprovider.Factory,
spaceFinder refcache.SpaceFinder,
gatewayStore store.CDEGatewayStore,
) *Service {
return NewService(tx, gitspaceConfigStore, resourceStore, configStore, templateStore, infraProviderFactory,
spaceFinder, gatewayStore)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/infraprovider/upsert.go | app/services/infraprovider/upsert.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package infraprovider
import (
"context"
"errors"
"fmt"
"github.com/harness/gitness/store"
"github.com/harness/gitness/types"
"github.com/rs/zerolog/log"
)
func (c *Service) UpsertConfigAndResources(
ctx context.Context,
infraProviderConfig *types.InfraProviderConfig,
infraProviderResources []types.InfraProviderResource,
) error {
space, err := c.spaceFinder.FindByRef(ctx, infraProviderConfig.SpacePath)
if err != nil {
return fmt.Errorf("failed to find space by ref: %w", err)
}
err = c.tx.WithTx(ctx, func(ctx context.Context) error {
return c.upsertConfigAndResources(ctx, space, infraProviderConfig, infraProviderResources)
})
if err != nil {
return fmt.Errorf("failed to complete txn for the infraprovider: %w", err)
}
return nil
}
func (c *Service) upsertConfigAndResources(
ctx context.Context,
space *types.SpaceCore,
infraProviderConfig *types.InfraProviderConfig,
infraProviderResources []types.InfraProviderResource,
) error {
providerConfigInDB, err := c.Find(ctx, space, infraProviderConfig.Identifier)
var infraProviderConfigID int64
if errors.Is(err, store.ErrResourceNotFound) { // nolint:gocritic
configID, createErr := c.createConfig(ctx, infraProviderConfig)
if createErr != nil {
return fmt.Errorf("could not create the config: %q %w", infraProviderConfig.Identifier, err)
}
infraProviderConfigID = configID
log.Info().Msgf("created new infraconfig %s", infraProviderConfig.Identifier)
} else if err != nil {
return err
} else {
infraProviderConfigID = providerConfigInDB.ID
}
infraProviderConfig.ID = infraProviderConfigID
if err = c.UpdateConfig(ctx, infraProviderConfig); err != nil {
return fmt.Errorf("could not update the config %s: %w", infraProviderConfig.Identifier, err)
}
log.Info().Msgf("updated infraconfig %s", infraProviderConfig.Identifier)
// upsert instead of create
if err = c.upsertResources(
ctx,
infraProviderResources,
infraProviderConfigID,
space.ID,
*infraProviderConfig,
true,
); err != nil {
return err
}
return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/infraprovider/create_resource.go | app/services/infraprovider/create_resource.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package infraprovider
import (
"context"
"errors"
"fmt"
"github.com/harness/gitness/store"
"github.com/harness/gitness/types"
"github.com/rs/zerolog/log"
)
func (c *Service) CreateResources(
ctx context.Context,
spaceID int64,
resources []types.InfraProviderResource,
configIdentifier string,
) error {
config, err := c.infraProviderConfigStore.FindByIdentifier(ctx, spaceID, configIdentifier)
if err != nil {
return fmt.Errorf("failed to find config: %w", err)
}
err = c.tx.WithTx(ctx, func(ctx context.Context) error {
return c.upsertResources(ctx, resources, config.ID, spaceID, *config, false)
})
if err != nil {
return fmt.Errorf("failed to complete create txn for the infraprovider resource %w", err)
}
return nil
}
func (c *Service) upsertResources(
ctx context.Context,
resources []types.InfraProviderResource,
configID int64,
spaceID int64,
config types.InfraProviderConfig,
allowUpdates bool,
) error {
emptyStr := ""
for idx := range resources {
resource := &resources[idx]
resource.InfraProviderConfigID = configID
resource.SpaceID = spaceID
if resource.Network == nil {
resource.Network = &emptyStr
}
// updating metadata based on infra provider type
updatedMetadata, err := c.updateResourceMetadata(resource, config)
if err != nil {
return fmt.Errorf("creating missing infra resources: %w", err)
}
resource.Metadata = updatedMetadata
cpuStr := getMetadataVal(updatedMetadata, "cpu")
memoryStr := getMetadataVal(updatedMetadata, "memory")
if resource.CPU == nil || (resource.CPU != nil && *resource.CPU == "") {
resource.CPU = &cpuStr
}
if resource.Memory == nil || (resource.Memory != nil && *resource.Memory == "") {
resource.Memory = &memoryStr
}
if err := c.validateResource(ctx, resource); err != nil {
return err
}
existingResource, err := c.infraProviderResourceStore.FindByConfigAndIdentifier(ctx, resource.SpaceID,
configID, resource.UID)
if err != nil { //nolint:nestif
if !errors.Is(err, store.ErrResourceNotFound) {
return fmt.Errorf("failed to check existing resource %s: %w", resource.UID, err)
}
// Resource doesn't exist, create it
if err = c.infraProviderResourceStore.Create(ctx, resource); err != nil {
return fmt.Errorf("failed to create infraprovider resource for %s: %w", resource.UID, err)
}
} else {
// Resource exists
if allowUpdates {
if err := c.updateExistingResource(ctx, resource, existingResource); err != nil {
return fmt.Errorf("failed to update existing resource %s: %w", resource.UID, err)
}
log.Info().Msgf(
"updated existing resource %s/%s",
resource.InfraProviderConfigIdentifier,
resource.UID,
)
} else {
return fmt.Errorf("resource %s already exists", resource.UID)
}
}
}
return nil
}
func (c *Service) updateResourceMetadata(
resource *types.InfraProviderResource,
config types.InfraProviderConfig,
) (map[string]string, error) {
infraProvider, err := c.infraProviderFactory.GetInfraProvider(resource.InfraProviderType)
if err != nil {
return nil, fmt.Errorf("failed to fetch infra impl for type : %q %w", resource.InfraProviderType, err)
}
params, err := infraProvider.UpdateParams(toResourceParams(resource.Metadata), config.Metadata)
if err != nil {
return nil, err
}
return toMetadata(params), nil
}
func (c *Service) validateResource(ctx context.Context, resource *types.InfraProviderResource) error {
infraProvider, err := c.infraProviderFactory.GetInfraProvider(resource.InfraProviderType)
if err != nil {
return fmt.Errorf("failed to fetch infra impl for type : %q %w", resource.InfraProviderType, err)
}
if len(infraProvider.TemplateParams()) > 0 {
err = c.validateTemplates(ctx, infraProvider, *resource)
if err != nil {
return err
}
}
if len(resource.Metadata) > 0 && resource.Metadata["resource_name"] == "" {
resource.Metadata["resource_name"] = resource.Name
}
resourceParams := toResourceParams(resource.Metadata)
err = infraProvider.ValidateParams(resourceParams)
if err != nil {
return err
}
return err
}
func toResourceParams(metadata map[string]string) []types.InfraProviderParameter {
var infraResourceParams []types.InfraProviderParameter
for key, value := range metadata {
infraResourceParams = append(infraResourceParams, types.InfraProviderParameter{
Name: key,
Value: value,
})
}
return infraResourceParams
}
func toMetadata(params []types.InfraProviderParameter) map[string]string {
metadata := make(map[string]string)
for _, param := range params {
metadata[param.Name] = param.Value
}
return metadata
}
func getMetadataVal(metadata map[string]string, key string) string {
if val, ok := metadata[key]; ok {
return val
}
return ""
}
// updateExistingResource updates an existing resource with new information while preserving
// immutable fields.
func (c *Service) updateExistingResource(
ctx context.Context,
resource *types.InfraProviderResource,
existingResource *types.InfraProviderResource,
) error {
// Keep the ID and created timestamp from the existing resource
resource.ID = existingResource.ID
resource.Created = existingResource.Created
// Preserve immutable fields
resource.UID = existingResource.UID
resource.InfraProviderConfigID = existingResource.InfraProviderConfigID
resource.SpaceID = existingResource.SpaceID
resource.InfraProviderType = existingResource.InfraProviderType
// Update the resource
return c.infraProviderResourceStore.Update(ctx, resource)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/infraprovider/list_gateways.go | app/services/infraprovider/list_gateways.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package infraprovider
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/types"
)
func (c *Service) ListGateways(ctx context.Context, filter *types.CDEGatewayFilter) ([]*types.CDEGateway, error) {
if filter == nil || len(filter.InfraProviderConfigIDs) == 0 {
return nil, fmt.Errorf("cde-gateway filter is required")
}
if filter.HealthReportValidityInMins == 0 {
filter.HealthReportValidityInMins = 5
}
gateways, err := c.gatewayStore.List(ctx, filter)
if err != nil {
return nil, fmt.Errorf("failed to list gateways: %w", err)
}
infraProviderConfigMap := make(map[int64]string)
for _, gateway := range gateways {
if _, ok := infraProviderConfigMap[gateway.InfraProviderConfigID]; !ok {
infraProviderConfig, err := c.infraProviderConfigStore.Find(ctx, gateway.InfraProviderConfigID, false)
if err != nil {
return nil, fmt.Errorf("failed to find infra provider config %d while listing gateways: %w",
gateway.InfraProviderConfigID, err)
}
infraProviderConfigMap[gateway.InfraProviderConfigID] = infraProviderConfig.Identifier
}
gateway.InfraProviderConfigIdentifier = infraProviderConfigMap[gateway.InfraProviderConfigID]
spaceCore, err := c.spaceFinder.FindByID(ctx, gateway.SpaceID)
if err != nil {
return nil, fmt.Errorf("failed to find space %d while listing gateways: %w", gateway.SpaceID, err)
}
gateway.SpacePath = spaceCore.Path
if gateway.Updated < time.Now().Add(-time.Duration(filter.HealthReportValidityInMins)*time.Minute).UnixMilli() {
gateway.Health = types.GatewayHealthUnhealthy
gateway.EnvoyHealth = types.GatewayHealthUnknown
}
if gateway.Health != types.GatewayHealthHealthy || gateway.EnvoyHealth != types.GatewayHealthHealthy {
gateway.OverallHealth = types.GatewayHealthUnhealthy
} else {
gateway.OverallHealth = types.GatewayHealthHealthy
}
}
return gateways, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/infraprovider/report_stats.go | app/services/infraprovider/report_stats.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package infraprovider
import (
"context"
"time"
"github.com/harness/gitness/types"
)
func (c *Service) ReportStats(
ctx context.Context,
spaceCore *types.SpaceCore,
infraProviderConfig *types.InfraProviderConfig,
in *types.CDEGatewayStats,
) error {
gateway := types.CDEGateway{
InfraProviderConfigID: infraProviderConfig.ID,
InfraProviderConfigIdentifier: infraProviderConfig.Identifier,
SpaceID: spaceCore.ID,
SpacePath: spaceCore.Path,
}
gateway.Name = in.Name
gateway.GroupName = in.GroupName
gateway.Region = in.Region
gateway.Zone = in.Zone
gateway.Version = in.Version
gateway.Health = in.Health
gateway.EnvoyHealth = in.EnvoyHealth
gateway.Created = time.Now().UnixMilli()
gateway.Updated = gateway.Created
return c.gatewayStore.Upsert(ctx, &gateway)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/infraprovider/update_template.go | app/services/infraprovider/update_template.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package infraprovider
import (
"context"
"fmt"
"github.com/harness/gitness/types"
)
func (c *Service) UpdateTemplate(ctx context.Context, template types.InfraProviderTemplate) error {
err := c.tx.WithTx(ctx, func(ctx context.Context) error {
space, err := c.spaceFinder.FindByRef(ctx, template.SpacePath)
if err != nil {
return err
}
templateInDB, err := c.infraProviderTemplateStore.FindByIdentifier(ctx, space.ID, template.Identifier)
if err != nil {
return err
}
template.ID = templateInDB.ID
template.SpaceID = space.ID
if err = c.infraProviderTemplateStore.Update(ctx, &template); err != nil {
return err
}
return nil
})
if err != nil {
return fmt.Errorf("failed to complete update txn for the infraprovider template %w", err)
}
return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/infraprovider/create_config.go | app/services/infraprovider/create_config.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package infraprovider
import (
"context"
"fmt"
"net/http"
"github.com/harness/gitness/app/api/usererror"
"github.com/harness/gitness/types"
)
func (c *Service) CreateConfig(
ctx context.Context,
infraProviderConfig *types.InfraProviderConfig,
) error {
err := c.tx.WithTx(ctx, func(ctx context.Context) error {
err := c.areNewConfigsAllowed(ctx, infraProviderConfig)
if err != nil {
return err
}
_, err = c.createConfig(ctx, infraProviderConfig)
if err != nil {
return fmt.Errorf("could not create the config: %q %w", infraProviderConfig.Identifier, err)
}
return nil
})
if err != nil {
return fmt.Errorf("failed to complete txn for the infraprovider %w", err)
}
return nil
}
func (c *Service) areNewConfigsAllowed(ctx context.Context, infraProviderConfig *types.InfraProviderConfig) error {
existingConfigs, err := c.fetchExistingConfigs(ctx, infraProviderConfig)
if err != nil {
return err
}
if len(existingConfigs) > 0 {
return usererror.NewWithPayload(http.StatusForbidden, fmt.Sprintf(
"%d infra configs for provider %s exist for this account. Only 1 is allowed",
len(existingConfigs), infraProviderConfig.Type))
}
return nil
}
func (c *Service) fetchExistingConfigs(
ctx context.Context,
infraProviderConfig *types.InfraProviderConfig,
) ([]*types.InfraProviderConfig, error) {
existingConfigs, err := c.infraProviderConfigStore.List(ctx, &types.InfraProviderConfigFilter{
SpaceIDs: []int64{infraProviderConfig.SpaceID},
Type: infraProviderConfig.Type,
})
if err != nil {
return nil, fmt.Errorf("failed to find existing infraprovider config for type %s & space %d: %w",
infraProviderConfig.Type, infraProviderConfig.SpaceID, err)
}
return existingConfigs, nil
}
func (c *Service) createConfig(ctx context.Context, infraProviderConfig *types.InfraProviderConfig) (int64, error) {
err := c.validateConfig(infraProviderConfig)
if err != nil {
return 0, err
}
infraProviderConfig, err = c.updateConfig(infraProviderConfig)
if err != nil {
return 0, err
}
err = c.infraProviderConfigStore.Create(ctx, infraProviderConfig)
if err != nil {
return 0, fmt.Errorf("failed to create infraprovider config for %s: %w", infraProviderConfig.Identifier, err)
}
newInfraProviderConfig, err := c.infraProviderConfigStore.FindByIdentifier(ctx, infraProviderConfig.SpaceID,
infraProviderConfig.Identifier)
if err != nil {
return 0, fmt.Errorf("failed to find newly created infraprovider config %s in space %d: %w",
infraProviderConfig.Identifier, infraProviderConfig.SpaceID, err)
}
return newInfraProviderConfig.ID, nil
}
func (c *Service) validateConfig(infraProviderConfig *types.InfraProviderConfig) error {
infraProvider, err := c.infraProviderFactory.GetInfraProvider(infraProviderConfig.Type)
if err != nil {
return fmt.Errorf("failed to fetch infra provider for type %s: %w", infraProviderConfig.Type, err)
}
err = infraProvider.ValidateConfig(infraProviderConfig)
if err != nil {
return err
}
return nil
}
func (c *Service) updateConfig(infraProviderConfig *types.InfraProviderConfig) (*types.InfraProviderConfig, error) {
infraProvider, err := c.infraProviderFactory.GetInfraProvider(infraProviderConfig.Type)
if err != nil {
return nil, fmt.Errorf("failed to fetch infra provider for type %s: %w", infraProviderConfig.Type, err)
}
updatedConfig, err := infraProvider.UpdateConfig(infraProviderConfig)
if err != nil {
return nil, err
}
return updatedConfig, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/infraprovider/create_config_and_resources.go | app/services/infraprovider/create_config_and_resources.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package infraprovider
import (
"context"
"fmt"
"github.com/harness/gitness/types"
)
func (c *Service) CreateConfigAndResources(
ctx context.Context,
infraProviderConfig *types.InfraProviderConfig,
) error {
err := c.tx.WithTx(ctx, func(ctx context.Context) error {
err := c.areNewConfigsAllowed(ctx, infraProviderConfig)
if err != nil {
return err
}
configID, err := c.createConfig(ctx, infraProviderConfig)
if err != nil {
return fmt.Errorf("could not create the config: %q %w", infraProviderConfig.Identifier, err)
}
err = c.upsertResources(ctx, infraProviderConfig.Resources, configID,
infraProviderConfig.SpaceID, *infraProviderConfig, false)
if err != nil {
return fmt.Errorf("could not create the resources: %v %w", infraProviderConfig.Resources, err)
}
return nil
})
if err != nil {
return fmt.Errorf("failed to complete txn for the infraprovider %w", err)
}
return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/infraprovider/find.go | app/services/infraprovider/find.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package infraprovider
import (
"context"
"fmt"
"slices"
"github.com/harness/gitness/types"
)
func (c *Service) Find(
ctx context.Context,
space *types.SpaceCore,
identifier string,
) (*types.InfraProviderConfig, error) {
infraProviderConfig, err := c.infraProviderConfigStore.FindByIdentifier(ctx, space.ID, identifier)
if err != nil {
return nil, fmt.Errorf("failed to find infraprovider config: %q %w", identifier, err)
}
err = c.populateDetails(ctx, infraProviderConfig)
if err != nil {
return nil, err
}
return infraProviderConfig, nil
}
func (c *Service) populateDetails(
ctx context.Context,
infraProviderConfig *types.InfraProviderConfig,
) error {
resources, err := c.getResources(ctx, infraProviderConfig)
if err != nil {
return err
}
infraProviderConfig.Resources = resources
setupYAML, err := c.GetSetupYAML(infraProviderConfig)
if err != nil {
return err
}
infraProviderConfig.SetupYAML = setupYAML
return nil
}
func (c *Service) getResources(
ctx context.Context,
infraProviderConfig *types.InfraProviderConfig,
) ([]types.InfraProviderResource, error) {
resources, err := c.ListResources(ctx, infraProviderConfig.ID, types.ListQueryFilter{})
if err != nil {
return nil, err
}
var providerResources []types.InfraProviderResource
if len(resources) > 0 {
providerResources = make([]types.InfraProviderResource, len(resources))
for i, resource := range resources {
if resource != nil {
providerResources[i] = *resource
}
}
slices.SortFunc(providerResources, types.CompareInfraProviderResource)
}
return providerResources, nil
}
func (c *Service) ListResources(
ctx context.Context,
configID int64,
filter types.ListQueryFilter,
) ([]*types.InfraProviderResource, error) {
resources, err := c.infraProviderResourceStore.List(ctx, configID, filter)
if err != nil {
return nil, fmt.Errorf("failed to find infraprovider resources for config %d: %w",
configID, err)
}
return resources, nil
}
func (c *Service) GetSetupYAML(infraProviderConfig *types.InfraProviderConfig) (string, error) {
provider, err := c.infraProviderFactory.GetInfraProvider(infraProviderConfig.Type)
if err != nil {
return "", fmt.Errorf("failed to get infra provider of type %s before getting setup yaml for infra "+
"config %s: %w", infraProviderConfig.Type, infraProviderConfig.Identifier, err)
}
setupYAML, err := provider.GenerateSetupYAML(infraProviderConfig)
if err != nil {
return "", fmt.Errorf("failed to generate setup yaml for infra provider config %s: %w",
infraProviderConfig.Identifier, err)
}
return setupYAML, nil
}
func (c *Service) FindTemplate(
ctx context.Context,
space *types.SpaceCore,
identifier string,
) (*types.InfraProviderTemplate, error) {
infraProviderTemplate, err := c.infraProviderTemplateStore.FindByIdentifier(ctx, space.ID, identifier)
if err != nil {
return nil, fmt.Errorf("failed to find infraprovider template: %q %w", identifier, err)
}
return infraProviderTemplate, nil
}
func (c *Service) FindResourceByConfigAndIdentifier(
ctx context.Context,
spaceID int64,
infraProviderConfigIdentifier string,
identifier string,
) (*types.InfraProviderResource, error) {
infraProviderConfig, err := c.infraProviderConfigStore.FindByIdentifier(ctx, spaceID, infraProviderConfigIdentifier)
if err != nil {
return nil, fmt.Errorf("failed to find infraprovider config %s: %w", infraProviderConfigIdentifier, err)
}
return c.infraProviderResourceStore.FindByConfigAndIdentifier(ctx, spaceID, infraProviderConfig.ID, identifier)
}
func (c *Service) FindResource(ctx context.Context, id int64) (*types.InfraProviderResource, error) {
return c.infraProviderResourceStore.Find(ctx, id)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/infraprovider/delete_resource.go | app/services/infraprovider/delete_resource.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package infraprovider
import (
"context"
"fmt"
"net/http"
"github.com/harness/gitness/app/api/usererror"
)
func (c *Service) DeleteResource(
ctx context.Context,
spaceID int64,
infraProviderConfigIdentifier string,
identifier string,
useTransaction bool,
) error {
var err error
deleteFunc := func(ctx context.Context) error {
infraProviderConfig, err := c.infraProviderConfigStore.FindByIdentifier(ctx, spaceID,
infraProviderConfigIdentifier)
if err != nil {
return fmt.Errorf("failed to find infra config %s for deleting resource: %w",
infraProviderConfigIdentifier, err)
}
infraProviderResource, err := c.infraProviderResourceStore.FindByConfigAndIdentifier(ctx, spaceID,
infraProviderConfig.ID, identifier)
if err != nil {
return fmt.Errorf("failed to find infra resource %s with config %s for deleting resource: %w",
identifier, infraProviderConfigIdentifier, err)
}
activeGitspaces, err := c.gitspaceConfigStore.ListActiveConfigsForInfraProviderResource(ctx,
infraProviderResource.ID)
if err != nil {
return fmt.Errorf("failed to list active configs for infra resource %s for deleting resource: %w",
identifier, err)
}
if len(activeGitspaces) > 0 {
return usererror.NewWithPayload(http.StatusForbidden, fmt.Sprintf("There are %d active gitspace "+
"configs for infra resource %s, expected 0", len(activeGitspaces), identifier))
}
if err = c.infraProviderResourceStore.Delete(ctx, infraProviderResource.ID); err != nil {
return fmt.Errorf("failed to delete infra provider resource %s: %w", infraProviderResource.UID, err)
}
return nil
}
if useTransaction {
err = c.tx.WithTx(ctx, deleteFunc)
} else {
err = deleteFunc(ctx)
}
if err != nil {
return fmt.Errorf("failed to complete txn for deleting the infra resource %s: %w", identifier, err)
}
return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/infraprovider/list.go | app/services/infraprovider/list.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package infraprovider
import (
"context"
"fmt"
"github.com/harness/gitness/types"
)
func (c *Service) List(
ctx context.Context,
filter *types.InfraProviderConfigFilter,
) ([]*types.InfraProviderConfig, error) {
infraProviderConfigs, err := c.infraProviderConfigStore.List(ctx, filter)
if err != nil {
return nil, fmt.Errorf("failed to list infraprovider configs: %w", err)
}
for _, infraProviderConfig := range infraProviderConfigs {
err = c.populateDetails(ctx, infraProviderConfig)
if err != nil {
return nil, err
}
}
return infraProviderConfigs, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/infraprovider/update_config.go | app/services/infraprovider/update_config.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package infraprovider
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/types"
)
func (c *Service) UpdateConfig(ctx context.Context, infraProviderConfig *types.InfraProviderConfig) error {
err := c.validateConfig(infraProviderConfig)
if err != nil {
return err
}
infraProviderConfig, err = c.updateConfig(infraProviderConfig)
if err != nil {
return err
}
existingConfig, err := c.infraProviderConfigStore.FindByIdentifier(ctx, infraProviderConfig.SpaceID,
infraProviderConfig.Identifier)
if err != nil {
return fmt.Errorf("could not find infraprovider config %s before updating: %w",
infraProviderConfig.Identifier, err)
}
infraProviderConfig.ID = existingConfig.ID
infraProviderConfig.Updated = time.Now().UnixMilli()
err = c.infraProviderConfigStore.Update(ctx, infraProviderConfig)
if err != nil {
return fmt.Errorf("failed to update infraprovider config for %s: %w", infraProviderConfig.Identifier, err)
}
return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/infraprovider/infraprovider.go | app/services/infraprovider/infraprovider.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package infraprovider
import (
"github.com/harness/gitness/app/services/refcache"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/infraprovider"
"github.com/harness/gitness/store/database/dbtx"
)
func NewService(
tx dbtx.Transactor,
gitspaceConfigStore store.GitspaceConfigStore,
resourceStore store.InfraProviderResourceStore,
configStore store.InfraProviderConfigStore,
templateStore store.InfraProviderTemplateStore,
factory infraprovider.Factory,
spaceFinder refcache.SpaceFinder,
gatewayStore store.CDEGatewayStore,
) *Service {
return &Service{
tx: tx,
infraProviderResourceStore: resourceStore,
infraProviderConfigStore: configStore,
infraProviderTemplateStore: templateStore,
infraProviderFactory: factory,
spaceFinder: spaceFinder,
gitspaceConfigStore: gitspaceConfigStore,
gatewayStore: gatewayStore,
}
}
type Service struct {
tx dbtx.Transactor
gitspaceConfigStore store.GitspaceConfigStore
infraProviderResourceStore store.InfraProviderResourceStore
infraProviderConfigStore store.InfraProviderConfigStore
infraProviderTemplateStore store.InfraProviderTemplateStore
infraProviderFactory infraprovider.Factory
spaceFinder refcache.SpaceFinder
gatewayStore store.CDEGatewayStore
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/infraprovider/delete_config.go | app/services/infraprovider/delete_config.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package infraprovider
import (
"context"
"fmt"
"net/http"
"github.com/harness/gitness/app/api/usererror"
"github.com/harness/gitness/types"
)
func (c *Service) DeleteConfig(
ctx context.Context,
space *types.SpaceCore,
identifier string,
useTransaction bool,
) error {
deleteFunc := func(ctx context.Context) error {
infraProviderConfig, err := c.Find(ctx, space, identifier)
if err != nil {
return fmt.Errorf("could not find infra provider config %s to delete: %w", identifier, err)
}
if len(infraProviderConfig.Resources) > 0 {
return usererror.Newf(http.StatusForbidden, "There are %d resources in this config. Deletion "+
"not allowed until all resources are deleted.", len(infraProviderConfig.Resources))
}
return c.infraProviderConfigStore.Delete(ctx, infraProviderConfig.ID)
}
if useTransaction {
return c.tx.WithTx(ctx, deleteFunc)
}
return deleteFunc(ctx)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/infraprovider/delete_all_for_spaces.go | app/services/infraprovider/delete_all_for_spaces.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package infraprovider
import (
"context"
"fmt"
"github.com/harness/gitness/types"
"github.com/rs/zerolog/log"
)
func (c *Service) DeleteAllForSpaces(ctx context.Context, spaces []*types.Space) error {
spaceIDsMap := make(map[int64]*types.SpaceCore)
spaceIDs := make([]int64, 0, len(spaces))
for _, space := range spaces {
spaceIDs = append(spaceIDs, space.ID)
spaceIDsMap[space.ID] = space.Core()
}
log.Debug().Msgf("Deleting all infra providers for spaces %+v", spaceIDs)
infraProviderConfigFilter := types.InfraProviderConfigFilter{SpaceIDs: spaceIDs}
infraProviderConfigs, err := c.List(ctx, &infraProviderConfigFilter)
if err != nil {
return fmt.Errorf("error while listing infra provider entities before deleting all for spaces: %w", err)
}
for _, infraProviderConfig := range infraProviderConfigs {
for _, infraProviderResource := range infraProviderConfig.Resources {
log.Debug().Msgf("Deleting infra resource %s for space %d", infraProviderResource.UID,
infraProviderResource.SpaceID)
err = c.DeleteResource(ctx, infraProviderConfig.SpaceID, infraProviderConfig.Identifier,
infraProviderResource.UID, false)
if err != nil {
return fmt.Errorf("error while deleting infra resource %s while deleting all for spaces: %w",
infraProviderResource.UID, err)
}
log.Debug().Msgf("Deleted infra resource %s for space %d", infraProviderResource.UID,
infraProviderResource.SpaceID)
}
log.Debug().Msgf("Deleting infra config %s for space %d", infraProviderConfig.Identifier,
infraProviderConfig.SpaceID)
err = c.DeleteConfig(ctx, spaceIDsMap[infraProviderConfig.SpaceID], infraProviderConfig.Identifier, false)
if err != nil {
return fmt.Errorf("error while deleting infra config %s while deleting all for spaces: %w",
infraProviderConfig.Identifier, err)
}
log.Debug().Msgf("Deleted infra config %s for space %d", infraProviderConfig.Identifier,
infraProviderConfig.SpaceID)
}
log.Debug().Msgf("Deleted all infra providers for spaces %+v", spaceIDs)
return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/infraprovider/create_template.go | app/services/infraprovider/create_template.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package infraprovider
import (
"context"
"github.com/harness/gitness/infraprovider"
"github.com/harness/gitness/types"
"github.com/rs/zerolog/log"
)
func (c *Service) CreateTemplate(
ctx context.Context,
template *types.InfraProviderTemplate,
) error {
return c.infraProviderTemplateStore.Create(ctx, template)
}
func (c *Service) validateTemplates(
ctx context.Context,
infraProvider infraprovider.InfraProvider,
res types.InfraProviderResource,
) error {
templateParams := infraProvider.TemplateParams()
for _, param := range templateParams {
key := param.Name
if res.Metadata[key] != "" {
templateIdentifier := res.Metadata[key]
_, err := c.infraProviderTemplateStore.FindByIdentifier(
ctx, res.SpaceID, templateIdentifier)
if err != nil {
log.Warn().Msgf("unable to get template params for ID : %s",
res.Metadata[key])
}
}
}
return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/exporter/wire.go | app/services/exporter/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package exporter
import (
"github.com/harness/gitness/app/sse"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/app/url"
"github.com/harness/gitness/encrypt"
"github.com/harness/gitness/git"
"github.com/harness/gitness/job"
"github.com/google/wire"
)
var WireSet = wire.NewSet(
ProvideSpaceExporter,
)
func ProvideSpaceExporter(
urlProvider url.Provider,
git git.Interface,
repoStore store.RepoStore,
scheduler *job.Scheduler,
executor *job.Executor,
encrypter encrypt.Encrypter,
sseStreamer sse.Streamer,
) (*Repository, error) {
exporter := &Repository{
urlProvider: urlProvider,
git: git,
repoStore: repoStore,
scheduler: scheduler,
encrypter: encrypter,
sseStreamer: sseStreamer,
}
err := executor.Register(jobType, exporter)
if err != nil {
return nil, err
}
return exporter, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/exporter/harness_code_client.go | app/services/exporter/harness_code_client.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package exporter
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
"github.com/harness/gitness/app/api/controller/repo"
"github.com/harness/gitness/types"
)
const (
pathCreateRepo = "/v1/accounts/%s/orgs/%s/projects/%s/repos"
pathDeleteRepo = "/v1/accounts/%s/orgs/%s/projects/%s/repos/%s"
//nolint:gosec // wrong flagging
headerAPIKey = "X-Api-Key"
routingID = "routingId"
)
var (
errHTTPNotFound = fmt.Errorf("not found")
errHTTPBadRequest = fmt.Errorf("bad request")
errHTTPInternal = fmt.Errorf("internal error")
errHTTPDuplicate = fmt.Errorf("resource already exists")
)
type harnessCodeClient struct {
client *client
}
type client struct {
baseURL string
httpClient http.Client
accountID string
orgID string
projectID string
token string
}
// newClient creates a new harness Client for interacting with the platforms APIs.
func newClient(baseURL string, accountID string, orgID string, projectID string, token string) (*client, error) {
if baseURL == "" {
return nil, fmt.Errorf("baseUrl required")
}
if accountID == "" {
return nil, fmt.Errorf("accountID required")
}
if orgID == "" {
return nil, fmt.Errorf("orgId required")
}
if projectID == "" {
return nil, fmt.Errorf("projectId required")
}
if token == "" {
return nil, fmt.Errorf("token required")
}
return &client{
baseURL: baseURL,
accountID: accountID,
orgID: orgID,
projectID: projectID,
token: token,
httpClient: http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: false,
MinVersion: tls.VersionTLS12,
},
},
},
}, nil
}
func newHarnessCodeClient(
baseURL string,
accountID string,
orgID string,
projectID string,
token string,
) (*harnessCodeClient, error) {
client, err := newClient(baseURL, accountID, orgID, projectID, token)
if err != nil {
return nil, err
}
return &harnessCodeClient{
client: client,
}, nil
}
func (c *harnessCodeClient) CreateRepo(ctx context.Context, input repo.CreateInput) (*types.Repository, error) {
path := fmt.Sprintf(pathCreateRepo, c.client.accountID, c.client.orgID, c.client.projectID)
bodyBytes, err := json.Marshal(input)
if err != nil {
return nil, fmt.Errorf("failed to serialize body: %w", err)
}
req, err := http.NewRequestWithContext(
ctx,
http.MethodPost,
appendPath(c.client.baseURL, path), bytes.NewBuffer(bodyBytes),
)
if err != nil {
return nil, fmt.Errorf("unable to create new http request : %w", err)
}
q := map[string]string{routingID: c.client.accountID}
addQueryParams(req, q)
req.Header.Add("Content-Type", "application/json")
req.ContentLength = int64(len(bodyBytes))
resp, err := c.client.Do(req)
if err != nil {
return nil, fmt.Errorf("request execution failed: %w", err)
}
if resp != nil && resp.Body != nil {
defer func() { _ = resp.Body.Close() }()
}
repository := new(types.Repository)
err = mapStatusCodeToError(resp.StatusCode)
if err != nil {
return nil, err
}
err = unmarshalResponse(resp, repository)
if err != nil {
return nil, err
}
return repository, err
}
func addQueryParams(req *http.Request, params map[string]string) {
if len(params) > 0 {
q := req.URL.Query()
for key, value := range params {
q.Add(key, value)
}
req.URL.RawQuery = q.Encode()
}
}
func (c *harnessCodeClient) DeleteRepo(ctx context.Context, repoIdentifier string) error {
path := fmt.Sprintf(pathDeleteRepo, c.client.accountID, c.client.orgID, c.client.projectID, repoIdentifier)
req, err := http.NewRequestWithContext(ctx, http.MethodDelete, appendPath(c.client.baseURL, path), nil)
if err != nil {
return fmt.Errorf("unable to create new http request : %w", err)
}
q := map[string]string{routingID: c.client.accountID}
addQueryParams(req, q)
resp, err := c.client.Do(req)
if err != nil {
return fmt.Errorf("request execution failed: %w", err)
}
if resp != nil && resp.Body != nil {
defer func() { _ = resp.Body.Close() }()
}
return mapStatusCodeToError(resp.StatusCode)
}
func appendPath(uri string, path string) string {
if path == "" {
return uri
}
return strings.TrimRight(uri, "/") + "/" + strings.TrimLeft(path, "/")
}
func (c *client) Do(r *http.Request) (*http.Response, error) {
addAuthHeader(r, c.token)
return c.httpClient.Do(r)
}
// addAuthHeader adds the Authorization header to the request.
func addAuthHeader(req *http.Request, token string) {
req.Header.Add(headerAPIKey, token)
}
func unmarshalResponse(resp *http.Response, data any) error {
if resp == nil {
return fmt.Errorf("http response is empty")
}
body, err := io.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("error reading response body : %w", err)
}
err = json.Unmarshal(body, data)
if err != nil {
return fmt.Errorf("error deserializing response body : %w", err)
}
return nil
}
func mapStatusCodeToError(statusCode int) error {
switch {
case statusCode == 500:
return errHTTPInternal
case statusCode >= 500:
return fmt.Errorf("received server side error status code %d", statusCode)
case statusCode == 404:
return errHTTPNotFound
case statusCode == 400:
return errHTTPBadRequest
case statusCode == 409:
return errHTTPDuplicate
case statusCode >= 400:
return fmt.Errorf("received client side error status code %d", statusCode)
case statusCode >= 300:
return fmt.Errorf("received further action required status code %d", statusCode)
default:
// TODO: definitely more things to consider here ...
return nil
}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/exporter/repository.go | app/services/exporter/repository.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package exporter
import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"net/url"
"strings"
"time"
"github.com/harness/gitness/app/api/controller/repo"
"github.com/harness/gitness/app/services/publicaccess"
"github.com/harness/gitness/app/sse"
"github.com/harness/gitness/app/store"
gitnessurl "github.com/harness/gitness/app/url"
"github.com/harness/gitness/encrypt"
"github.com/harness/gitness/git"
"github.com/harness/gitness/job"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/rs/zerolog/log"
)
const (
	// harnessCodeAPIURLRaw is the Harness Code API endpoint repositories are exported to.
	// TODO: take as optional input from api input to allow exporting to SMP.
	harnessCodeAPIURLRaw = "https://app.harness.io/gateway/code/api"
)

var (
	// ErrNotFound is returned if no export data was found.
	ErrNotFound = errors.New("export not found")
)

// Repository exports repositories to Harness Code as background jobs.
type Repository struct {
	urlProvider  gitnessurl.Provider
	git          git.Interface
	repoStore    store.RepoStore
	scheduler    *job.Scheduler       // schedules and tracks the export jobs
	encrypter    encrypt.Encrypter    // encrypts the job input (contains the access token)
	sseStreamer  sse.Streamer         // notifies clients when an export completes
	publicAccess publicaccess.Service // resolves a repo's public/private visibility
}

// Input is the payload of a single repository export job.
// It is stored encrypted and base64-encoded as the job data.
type Input struct {
	Identifier      string          `json:"identifier"`
	ID              int64           `json:"id"`
	Description     string          `json:"description"`
	IsPublic        bool            `json:"is_public"`
	HarnessCodeInfo HarnessCodeInfo `json:"harness_code_info"`
}

// HarnessCodeInfo identifies the Harness Code target (account/org/project)
// and carries the token used to authenticate against it.
type HarnessCodeInfo struct {
	AccountID         string `json:"account_id"`
	ProjectIdentifier string `json:"project_identifier"`
	OrgIdentifier     string `json:"org_identifier"`
	Token             string `json:"token"`
}

// Compile-time check that Repository implements the job handler interface.
var _ job.Handler = (*Repository)(nil)

const (
	exportJobMaxRetries  = 1                // retry a failed export once
	exportJobMaxDuration = 45 * time.Minute // hard timeout for a single export job
	exportRepoJobUID     = "export_repo_%d"
	exportSpaceJobUID    = "export_space_%d"
	jobType              = "repository_export"
)

// ErrJobRunning is returned when an export is requested while one is in progress.
var ErrJobRunning = errors.New("an export job is already running")
// Register registers the repository export handler for its job type with the executor.
func (r *Repository) Register(executor *job.Executor) error {
	return executor.Register(jobType, r)
}
// RunManyForSpace schedules one export job per repository of a space, grouped
// under a job group derived from the space ID.
// It fails with ErrJobRunning if a previous export run for the space is still
// in progress; otherwise leftover jobs of an earlier run are purged first.
func (r *Repository) RunManyForSpace(
	ctx context.Context,
	spaceID int64,
	repos []*types.Repository,
	harnessCodeInfo *HarnessCodeInfo,
) error {
	jobGroupID := getJobGroupID(spaceID)

	// Refuse to start while a previous export run is still in progress.
	jobs, err := r.scheduler.GetJobProgressForGroup(ctx, jobGroupID)
	if err != nil {
		return fmt.Errorf("cannot get job progress before starting. %w", err)
	}
	if len(jobs) > 0 {
		err = checkJobAlreadyRunning(jobs)
		if err != nil {
			return err
		}

		// Remove the completed jobs of the previous run so their UIDs can be reused.
		n, err := r.scheduler.PurgeJobsByGroupID(ctx, jobGroupID)
		if err != nil {
			return err
		}
		log.Ctx(ctx).Info().Msgf("deleted %d old jobs", n)
	}

	jobDefinitions := make([]job.Definition, len(repos))
	for i, repository := range repos {
		isPublic, err := r.publicAccess.Get(ctx, enum.PublicResourceTypeRepo, repository.Path)
		if err != nil {
			return fmt.Errorf("failed to check repo public access: %w", err)
		}
		repoJobData := Input{
			Identifier:      repository.Identifier,
			ID:              repository.ID,
			Description:     repository.Description,
			IsPublic:        isPublic,
			HarnessCodeInfo: *harnessCodeInfo,
		}

		data, err := json.Marshal(repoJobData)
		if err != nil {
			return fmt.Errorf("failed to marshal job input json: %w", err)
		}
		strData := strings.TrimSpace(string(data))

		// The input carries the access token, so it is stored encrypted.
		encryptedData, err := r.encrypter.Encrypt(strData)
		if err != nil {
			return fmt.Errorf("failed to encrypt job input: %w", err)
		}

		jobUID := fmt.Sprintf(exportRepoJobUID, repository.ID)

		jobDefinitions[i] = job.Definition{
			UID:        jobUID,
			Type:       jobType,
			MaxRetries: exportJobMaxRetries,
			Timeout:    exportJobMaxDuration,
			Data:       base64.StdEncoding.EncodeToString(encryptedData),
		}
	}

	return r.scheduler.RunJobs(ctx, jobGroupID, jobDefinitions)
}
// checkJobAlreadyRunning returns ErrJobRunning if any of the given jobs has
// not completed yet, and nil otherwise (including for an empty or nil list).
func checkJobAlreadyRunning(jobs []job.Progress) error {
	// NOTE: ranging over a nil slice performs zero iterations,
	// so no explicit nil check is needed.
	for _, j := range jobs {
		if !j.State.IsCompleted() {
			return ErrJobRunning
		}
	}
	return nil
}
// getJobGroupID derives the job group ID used for all export jobs of a space.
func getJobGroupID(spaceID int64) string {
	return fmt.Sprintf(exportSpaceJobUID, spaceID)
}
// Handle is repository export background job handler.
// The data argument is the base64-encoded, encrypted Input produced by
// RunManyForSpace. Handle creates the repository on Harness Code and pushes
// the local git data to it; on push failure the remote repo is deleted again
// (best effort). The SSE completion event is published on both success and
// failure so listening clients stop waiting.
func (r *Repository) Handle(ctx context.Context, data string, _ job.ProgressReporter) (string, error) {
	input, err := r.getJobInput(data)
	if err != nil {
		return "", err
	}
	harnessCodeInfo := input.HarnessCodeInfo
	client, err := newHarnessCodeClient(
		harnessCodeAPIURLRaw,
		harnessCodeInfo.AccountID,
		harnessCodeInfo.OrgIdentifier,
		harnessCodeInfo.ProjectIdentifier,
		harnessCodeInfo.Token,
	)
	if err != nil {
		return "", err
	}

	repository, err := r.repoStore.Find(ctx, input.ID)
	if err != nil {
		return "", err
	}
	remoteRepo, err := client.CreateRepo(ctx, repo.CreateInput{
		Identifier:    repository.Identifier,
		DefaultBranch: repository.DefaultBranch,
		Description:   repository.Description,
		IsPublic:      false, // TODO: replace with publicaccess service response once deployed on HC.
	})
	if err != nil {
		r.sseStreamer.Publish(ctx, repository.ParentID, enum.SSETypeRepositoryExportCompleted, repository)
		return "", err
	}

	// Embed the token into the remote URL so the push can authenticate.
	urlWithToken, err := modifyURL(remoteRepo.GitURL, harnessCodeInfo.Token)
	if err != nil {
		return "", err
	}

	err = r.git.PushRemote(ctx, &git.PushRemoteParams{
		ReadParams: git.ReadParams{RepoUID: repository.GitUID},
		RemoteURL:  urlWithToken,
	})
	// NOTE(review): pushing an empty repository is treated as success by
	// matching on the error text "empty" — confirm this matches the error
	// wording of the git layer.
	if err != nil && !strings.Contains(err.Error(), "empty") {
		// Roll back the partially-created remote repository (best effort).
		errDelete := client.DeleteRepo(ctx, remoteRepo.Identifier)
		if errDelete != nil {
			log.Ctx(ctx).Err(errDelete).Msgf("failed to delete repo '%s' on harness", remoteRepo.Identifier)
		}
		r.sseStreamer.Publish(ctx, repository.ParentID, enum.SSETypeRepositoryExportCompleted, repository)
		return "", err
	}

	log.Ctx(ctx).Info().Msgf("completed exporting repository '%s' to harness", repository.Identifier)

	r.sseStreamer.Publish(ctx, repository.ParentID, enum.SSETypeRepositoryExportCompleted, repository)
	return "", nil
}
// getJobInput decodes the job data (base64), decrypts it, and unmarshals the
// JSON payload into an Input. It is the inverse of the encoding performed in
// RunManyForSpace.
func (r *Repository) getJobInput(data string) (Input, error) {
	encrypted, err := base64.StdEncoding.DecodeString(data)
	if err != nil {
		return Input{}, fmt.Errorf("failed to base64 decode job input: %w", err)
	}

	decrypted, err := r.encrypter.Decrypt(encrypted)
	if err != nil {
		return Input{}, fmt.Errorf("failed to decrypt job input: %w", err)
	}

	var input Input
	// The payload is a single JSON document, so unmarshal it directly instead
	// of going through a decoder over a string reader.
	if err := json.Unmarshal([]byte(decrypted), &input); err != nil {
		return Input{}, fmt.Errorf("failed to unmarshal job input json: %w", err)
	}

	return input, nil
}
// GetProgressForSpace returns the progress of all export jobs scheduled for
// the given space, or ErrNotFound if the space has no export jobs.
func (r *Repository) GetProgressForSpace(ctx context.Context, spaceID int64) ([]job.Progress, error) {
	progress, err := r.scheduler.GetJobProgressForGroup(ctx, getJobGroupID(spaceID))
	switch {
	case err != nil:
		return nil, fmt.Errorf("failed to get job progress for group: %w", err)
	case len(progress) == 0:
		return nil, ErrNotFound
	default:
		return progress, nil
	}
}
func modifyURL(u string, token string) (string, error) {
parsedURL, err := url.Parse(u)
if err != nil {
return "", fmt.Errorf("failed to parse URL '%s': %w", u, err)
}
// Set the username and password in the URL
parsedURL.User = url.UserPassword("token", token)
return parsedURL.String(), nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/publickey/wire.go | app/services/publickey/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package publickey
import (
"github.com/harness/gitness/app/services/keyfetcher"
"github.com/harness/gitness/app/store"
"github.com/google/wire"
)
// WireSet provides the dependency-injection bindings of the publickey package.
var WireSet = wire.NewSet(
	ProvideSSHAuthService,
	ProvideSignatureVerifyService,
)
// ProvideSSHAuthService constructs the SSHAuthService used to authenticate
// SSH connections against stored public keys.
func ProvideSSHAuthService(
	publicKeyStore store.PublicKeyStore,
	pCache store.PrincipalInfoCache,
) SSHAuthService {
	svc := NewSSHAuthService(publicKeyStore, pCache)
	return svc
}
// ProvideSignatureVerifyService constructs the service that verifies git
// object signatures.
func ProvideSignatureVerifyService(
	principalStore store.PrincipalStore,
	keyFetcher keyfetcher.Service,
	gitSignatureResultStore store.GitSignatureResultStore,
) SignatureVerifyService {
	svc := NewSignatureVerifyService(principalStore, keyFetcher, gitSignatureResultStore)
	return svc
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/publickey/service_ssh.go | app/services/publickey/service_ssh.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package publickey
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/app/services/publickey/keyssh"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/errors"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/gliderlabs/ssh"
)
// SSHAuthService authenticates SSH public keys against the keys stored for
// the platform's principals.
type SSHAuthService interface {
	// ValidateKey matches the given SSH public key against stored keys and,
	// on success, returns the info of the principal owning the key.
	ValidateKey(ctx context.Context,
		username string,
		publicKey ssh.PublicKey,
	) (*types.PrincipalInfo, error)
}
// NewSSHAuthService returns an SSHAuthService backed by the given public key
// store and principal info cache.
func NewSSHAuthService(
	publicKeyStore store.PublicKeyStore,
	pCache store.PrincipalInfoCache,
) SSHAuthService {
	svc := sshAuthService{
		publicKeyStore: publicKeyStore,
		pCache:         pCache,
	}
	return svc
}
// sshAuthService implements SSHAuthService on top of the public key store.
type sshAuthService struct {
	publicKeyStore store.PublicKeyStore
	pCache         store.PrincipalInfoCache
}
// ValidateKey tries to match the provided SSH key to one of the keys in the database.
// It updates the verified timestamp of the matched key to mark it as used.
// Keys that are revoked as compromised, not yet valid, or expired are rejected.
func (s sshAuthService) ValidateKey(
	ctx context.Context,
	_ string,
	publicKey ssh.PublicKey,
) (*types.PrincipalInfo, error) {
	key := keyssh.FromSSH(publicKey)
	fingerprint := key.Fingerprint()

	// Candidate keys are looked up by fingerprint, restricted to SSH keys
	// usable for authentication.
	existingKeys, err := s.publicKeyStore.ListByFingerprint(
		ctx,
		fingerprint,
		nil,
		[]enum.PublicKeyUsage{enum.PublicKeyUsageAuth, enum.PublicKeyUsageAuthSign},
		[]enum.PublicKeyScheme{enum.PublicKeySchemeSSH},
	)
	if err != nil {
		return nil, fmt.Errorf("failed to read keys by fingerprint: %w", err)
	}

	// The fingerprint lookup is confirmed with a full key comparison.
	// Index the slice rather than taking the address of the loop variable.
	var selectedKey *types.PublicKey
	for i := range existingKeys {
		if key.Matches(existingKeys[i].Content) {
			selectedKey = &existingKeys[i]
			break
		}
	}
	if selectedKey == nil {
		return nil, errors.NotFound("Unrecognized key")
	}

	// A compromised key is rejected outright, regardless of validity window.
	if rev := selectedKey.RevocationReason; rev != nil && *rev == enum.RevocationReasonCompromised {
		return nil, errors.Forbidden("Key has been revoked")
	}

	now := time.Now().UnixMilli()

	if t := selectedKey.ValidFrom; t != nil && now < *t {
		return nil, errors.Forbidden("Key not valid")
	}
	if t := selectedKey.ValidTo; t != nil && now > *t {
		if selectedKey.RevocationReason != nil {
			return nil, errors.Forbidden("Key has been revoked")
		}
		return nil, errors.Forbidden("Key has expired")
	}

	pInfo, err := s.pCache.Get(ctx, selectedKey.PrincipalID)
	if err != nil {
		return nil, fmt.Errorf("failed to pull principal info by public key's principal ID: %w", err)
	}

	// Record the successful use of the key.
	err = s.publicKeyStore.MarkAsVerified(ctx, selectedKey.ID, now)
	if err != nil {
		return nil, fmt.Errorf("failed to mark key as verified: %w", err)
	}

	return pInfo, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/publickey/service_verify.go | app/services/publickey/service_verify.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package publickey
import (
"context"
"errors"
"fmt"
"maps"
"slices"
"time"
"github.com/harness/gitness/app/services/keyfetcher"
"github.com/harness/gitness/app/services/publickey/keypgp"
"github.com/harness/gitness/app/services/publickey/keyssh"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/git/sha"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/rs/zerolog/log"
)
// SignatureVerifyService verifies signatures of git objects (commits and
// annotated tags) and stores verification results in the git signature
// result store.
type SignatureVerifyService struct {
	principalStore          store.PrincipalStore
	keyFetcher              keyfetcher.Service
	gitSignatureResultStore store.GitSignatureResultStore
}
// NewSignatureVerifyService returns a SignatureVerifyService wired with the
// given principal store, key fetcher, and signature result store.
func NewSignatureVerifyService(
	principalStore store.PrincipalStore,
	keyFetcher keyfetcher.Service,
	gitSignatureResultStore store.GitSignatureResultStore,
) SignatureVerifyService {
	svc := SignatureVerifyService{
		principalStore:          principalStore,
		keyFetcher:              keyFetcher,
		gitSignatureResultStore: gitSignatureResultStore,
	}
	return svc
}
// NewVerifySession creates a new session for git object signature verification.
// The session holds a small cache for users and signing keys.
func (s SignatureVerifyService) NewVerifySession(repoID int64) *VerifySession {
	session := &VerifySession{
		SignatureVerifyService: s,
		repoID:                 repoID,
		principalIDCache:       map[string]int64{},
		keyCache:               map[personalKey]*types.PublicKey{},
	}
	return session
}
// VerifyCommitTags verifies the signatures of the given annotated tags of a
// repository using a fresh session, and persists the verification results.
func (s SignatureVerifyService) VerifyCommitTags(ctx context.Context, repoID int64, tags []*types.CommitTag) error {
	session := s.NewVerifySession(repoID)
	if err := verifyObjects(ctx, session, tags); err != nil {
		return err
	}
	session.StoreSignatures(ctx)
	return nil
}
// VerifyCommits verifies the signatures of the given commits of a repository
// using a fresh session, and persists the verification results.
func (s SignatureVerifyService) VerifyCommits(ctx context.Context, repoID int64, commits []*types.Commit) error {
	session := s.NewVerifySession(repoID)
	if err := verifyObjects(ctx, session, commits); err != nil {
		return err
	}
	session.StoreSignatures(ctx)
	return nil
}
// VerifyCommitTags verifies tag signatures within the session; results are
// accumulated on the session and persisted by StoreSignatures.
func (s *VerifySession) VerifyCommitTags(ctx context.Context, tags []*types.CommitTag) error {
	return verifyObjects(ctx, s, tags)
}

// VerifyCommits verifies commit signatures within the session; results are
// accumulated on the session and persisted by StoreSignatures.
func (s *VerifySession) VerifyCommits(ctx context.Context, commits []*types.Commit) error {
	return verifyObjects(ctx, s, commits)
}
// VerifySession holds short time caches for a single iteration of verifyObject function.
type VerifySession struct {
	SignatureVerifyService

	// repoID is the repository whose objects are being verified.
	repoID int64

	// principalIDCache is cache of principal IDs. The key is email address.
	// A value of 0 caches the fact that no principal exists for the email.
	principalIDCache map[string]int64

	// keyCache is cache of personal keys. The map key holds principalID, key ID and fingerprint.
	// A nil value caches the fact that no matching key exists.
	keyCache map[personalKey]*types.PublicKey

	// sigResults are git signature verification results that should be stored to the database.
	sigResults []*types.GitSignatureResult
}

// personalKey is cache key for the cache of public keys.
type personalKey struct {
	principalID    int64
	keyID          string
	keyFingerprint string
}
// principalByEmail resolves an email address to a principal ID, caching the
// result for the session. A missing principal is cached and reported as 0.
func (s *VerifySession) principalByEmail(ctx context.Context, email string) (int64, error) {
	if cachedID, ok := s.principalIDCache[email]; ok {
		return cachedID, nil
	}

	principal, err := s.principalStore.FindByEmail(ctx, email)
	switch {
	case err == nil:
		s.principalIDCache[email] = principal.ID
		return principal.ID, nil
	case errors.Is(err, gitness_store.ErrResourceNotFound):
		s.principalIDCache[email] = 0
		return 0, nil
	default:
		return 0, err
	}
}
// fetchKey returns the public key for the given cache key, consulting the
// session cache first and the verifier's key fetcher on a miss.
// The returned key may be nil when no matching key exists; that outcome is
// cached as well.
func (s *VerifySession) fetchKey(
	ctx context.Context,
	v verifier,
	k personalKey,
	principalID int64,
) (*types.PublicKey, error) {
	key, ok := s.keyCache[k]
	if ok {
		return key, nil
	}

	key, err := v.Key(ctx, s.keyFetcher, principalID)
	if err != nil {
		return nil, fmt.Errorf("failed to get public key from verifier: %w", err)
	}

	s.keyCache[k] = key // We also store nils here to avoid searching for a non-existing key multiple times.

	return key, nil
}
// StoreSignatures persists the signature verification results accumulated in
// the session. Failures are only logged: the results have already been
// attached to the verified objects by verifyObjects.
func (s *VerifySession) StoreSignatures(ctx context.Context) {
	err := s.gitSignatureResultStore.TryCreateAll(ctx, s.sigResults)
	if err != nil {
		log.Ctx(ctx).Warn().Err(err).
			Msg("failed to create git signature results")
	}
}
// verifyObjects verifies the signatures of the given git objects within a
// session. Objects whose result already exists in the DB are backfilled
// directly; the rest are verified and get their result attached. Results
// worth caching are queued on the session for StoreSignatures.
func verifyObjects[T signedObject](ctx context.Context, session *VerifySession, objects []T) error {
	// Fill objects' signature data from the DB,
	// and get a map of objects without signature data in the DB.
	objectMap, err := fillSignatureFromDB(ctx, &session.SignatureVerifyService, session.repoID, objects)
	if err != nil {
		return fmt.Errorf("failed to backfill object signature info from the DB: %w", err)
	}

	for _, object := range objectMap {
		sigResult, err := verifyGitObjectSignature(ctx, session, object)
		if err != nil {
			return fmt.Errorf("failed to verify object signature: %w", err)
		}

		object.SetSignature(sigResult)

		// These we don't store to the database: Invalid, Unsupported and Unverified.
		// An invalid signature can mean not just that the signature contains garbage data, but also that
		// we failed to verify it because of a bug. So, we deliberately don't store them to the DB.
		if result := sigResult.Result; result == enum.GitSignatureInvalid ||
			result == enum.GitSignatureUnsupported ||
			result == enum.GitSignatureUnverified {
			continue
		}

		session.sigResults = append(session.sigResults, sigResult)
	}

	return nil
}
// fillSignatureFromDB reads git object signatures from the DB,
// updates the elements of the provided slice,
// and return a map of objects that do not yet have a signature in the DB.
// Only objects that carry signature data (GetSignedData() != nil) are
// considered; unsigned objects are never part of the returned map.
func fillSignatureFromDB[T signedObject](
	ctx context.Context,
	s *SignatureVerifyService,
	repoID int64,
	objects []T,
) (map[sha.SHA]T, error) {
	objectMap := make(map[sha.SHA]T)
	for i := range objects {
		if objects[i].GetSignedData() != nil {
			objectMap[objects[i].GetSHA()] = objects[i]
		}
	}

	if len(objectMap) == 0 {
		return objectMap, nil
	}

	// Get slice of SHAs from the map.
	objectSHAs := slices.AppendSeq[[]sha.SHA](make([]sha.SHA, 0, len(objectMap)), maps.Keys(objectMap))

	// Read signature data from the tags from the DB.
	objectSignatureMap, err := s.gitSignatureResultStore.Map(ctx, repoID, objectSHAs)
	if err != nil {
		return nil, fmt.Errorf("failed to read commit signatures: %w", err)
	}

	// Update the objects found in the database and remove them from the map.
	for objectSHA, objectSignature := range objectSignatureMap {
		object := objectMap[objectSHA]
		object.SetSignature(&objectSignature)
		delete(objectMap, objectSHA)
	}

	return objectMap, nil
}
// verifyGitObjectSignature verifies the signature of a single git object
// (commit or annotated tag) and returns the verification result.
// It does not persist the result itself; callers decide what to store.
func verifyGitObjectSignature[T signedObject](
	ctx context.Context,
	s *VerifySession,
	object T,
) (*types.GitSignatureResult, error) {
	signedData := object.GetSignedData()
	if signedData == nil {
		// The object carries no signature data.
		return &sigVerUnverified, nil
	}

	// Pick the verifier implementation matching the signature type.
	var v verifier
	switch signedData.Type {
	case keyssh.SignatureType:
		v = &keyssh.Verify{}
	case keypgp.SignatureType:
		v = &keypgp.Verify{}
	default:
		return &sigVerUnsupported, nil // We mark unsupported signature types as unsupported.
	}

	// Get the object's signer email address - the committer for commits, the tagger for annotated tags.
	signer := object.GetSigner()
	if signer == nil {
		return &sigVerUnverified, nil
	}

	objectTime := signer.When.UnixMilli()
	email := signer.Identity.Email

	// Find the principal by the signer's email address.
	// If principal is not found the signature is unverified.
	principalID, err := s.principalByEmail(ctx, email)
	if err != nil {
		return nil, fmt.Errorf("failed to find principal by email: %w", err)
	}
	if principalID == 0 {
		return &sigVerUnverified, nil
	}

	// Find the key info from the signature.
	// An empty result means parsing succeeded; anything else aborts here.
	if result := v.Parse(ctx, object.GetSignedData().Signature, object.GetSHA()); result != "" {
		//nolint:exhaustive
		switch result {
		case enum.GitSignatureInvalid:
			return &sigVerInvalid, nil
		case enum.GitSignatureUnsupported:
			return &sigVerUnsupported, nil
		default:
			// Should not happen.
			return nil, fmt.Errorf("unexpected signature verification result=%q after signature parsing", result)
		}
	}

	// Fetch the key from the DB. If it's not there, the signature is unverified.
	key, err := s.fetchKey(ctx, v, personalKey{
		principalID:    principalID,
		keyID:          v.KeyID(),
		keyFingerprint: v.KeyFingerprint(),
	}, principalID)
	if err != nil {
		return nil, fmt.Errorf("failed to get public key: %w", err)
	}
	if key == nil {
		return &sigVerUnverified, nil
	}

	now := time.Now().UnixMilli()

	sigResult := &types.GitSignatureResult{
		RepoID:         s.repoID,
		ObjectSHA:      object.GetSHA(),
		ObjectTime:     objectTime,
		Created:        now,
		Updated:        now,
		Result:         "", // the result will be set later
		PrincipalID:    principalID,
		KeyScheme:      v.KeyScheme(),
		KeyID:          v.KeyID(),
		KeyFingerprint: v.KeyFingerprint(),
	}

	// Using the key's properties, if possible override the verification result.
	switch {
	case key.RevocationReason != nil:
		sigResult.Result = enum.GitSignatureRevoked
		return sigResult, nil
	case key.ValidFrom != nil && objectTime < *key.ValidFrom:
		// Object predates the key's validity window.
		sigResult.Result = enum.GitSignatureKeyExpired
		return sigResult, nil
	case key.ValidTo != nil && objectTime > *key.ValidTo:
		// Object postdates the key's validity window.
		sigResult.Result = enum.GitSignatureKeyExpired
		return sigResult, nil
	}

	// Verify the git object signature using the key from the database.
	sigResult.Result = v.Verify(
		ctx,
		[]byte(key.Content),
		object.GetSignedData().SignedContent,
		object.GetSHA(),
		*signer)

	return sigResult, nil
}
// verifier is interface to verify a git object signature.
// It's implemented by keypgp.Verify and keyssh.Verify.
// Usage order matters: Parse must be called before KeyID/KeyFingerprint.
type verifier interface {
	// Parse parses the provided signature and extracts info about the signing key (ID/Fingerprint).
	// An empty result means success.
	Parse(
		ctx context.Context,
		signature []byte,
		objectSHA sha.SHA,
	) enum.GitSignatureResult

	// Key fetches the key from the DB.
	Key(
		ctx context.Context,
		keyFetcher keyfetcher.Service,
		principalID int64,
	) (*types.PublicKey, error)

	// Verify checks if the signed content matches signature.
	Verify(
		ctx context.Context,
		key []byte,
		signedContent []byte,
		objectSHA sha.SHA,
		committer types.Signature,
	) enum.GitSignatureResult

	// KeyScheme returns the signing key's scheme.
	KeyScheme() enum.PublicKeyScheme

	// KeyID returns the signing key ID. Use after a call to the Parse method
	KeyID() string

	// KeyFingerprint returns the signing key fingerprint. Use after a call to the Parse method
	KeyFingerprint() string
}
// signedObject is interface used to verify signature.
// It's implemented by types.Commit and types.CommitTag.
type signedObject interface {
	GetSHA() sha.SHA
	SetSignature(sig *types.GitSignatureResult)
	GetSigner() *types.Signature
	GetSignedData() *types.SignedData
}

// Shared result values for outcomes that carry no further data.

// sigVerUnverified marks objects whose signature cannot be attributed to a
// known principal/key (or that carry no signature data).
var sigVerUnverified = types.GitSignatureResult{
	Result: enum.GitSignatureUnverified,
}

// sigVerUnsupported marks signature types that are not supported.
var sigVerUnsupported = types.GitSignatureResult{
	Result: enum.GitSignatureUnsupported,
}

// sigVerInvalid marks signatures that could not be parsed.
var sigVerInvalid = types.GitSignatureResult{
	Result: enum.GitSignatureInvalid,
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/publickey/parse.go | app/services/publickey/parse.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package publickey
import (
"encoding/json"
"fmt"
"strings"
"github.com/harness/gitness/app/services/publickey/keypgp"
"github.com/harness/gitness/app/services/publickey/keyssh"
"github.com/harness/gitness/errors"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
// KeyInfo describes a parsed public key (SSH or PGP) in a scheme-agnostic way.
type KeyInfo interface {
	// Matches reports whether the given key material matches this key.
	Matches(s string) bool
	// Fingerprint returns the key's fingerprint.
	Fingerprint() string
	// Type returns the key type.
	Type() string
	// Scheme returns the key scheme (SSH or PGP).
	Scheme() enum.PublicKeyScheme
	// Comment returns the key's comment, if any.
	Comment() string
	// ValidFrom returns the start of the key's validity, or nil if unbounded.
	ValidFrom() *int64
	// ValidTo returns the end of the key's validity, or nil if unbounded.
	ValidTo() *int64
	// Identities returns the identities bound to the key.
	Identities() []types.Identity
	// RevocationReason returns the revocation reason, or nil if not revoked.
	RevocationReason() *enum.RevocationReason
	// Metadata returns scheme-specific metadata as raw JSON.
	Metadata() json.RawMessage

	// KeyIDs returns all key IDs: the primary key ID and all signing sub-key IDs.
	KeyIDs() []string

	// CompromisedIDs returns all key IDs that are revoked with reason=compromised.
	CompromisedIDs() []string
}
// ParseString parses textual key data as either an armored PGP public key
// block or an SSH public key and returns the extracted key info.
func ParseString(keyData string, principal *types.Principal) (KeyInfo, error) {
	if len(keyData) == 0 {
		return nil, errors.InvalidArgument("empty key")
	}

	const (
		pgpHeader = "-----BEGIN PGP PUBLIC KEY BLOCK-----"
		pgpFooter = "-----END PGP PUBLIC KEY BLOCK-----"
	)

	// Anything not framed as an armored PGP block is treated as an SSH key.
	if !strings.HasPrefix(keyData, pgpHeader) || !strings.HasSuffix(keyData, pgpFooter) {
		key, err := keyssh.Parse([]byte(keyData))
		if err != nil {
			return nil, fmt.Errorf("failed to parse SSH key: %w", err)
		}
		return key, nil
	}

	key, err := keypgp.Parse(strings.NewReader(keyData), principal)
	if err != nil {
		return nil, fmt.Errorf("failed to parse PGP key: %w", err)
	}
	return key, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/publickey/validity/validity_test.go | app/services/publickey/validity/validity_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package validity
import (
"testing"
"time"
"github.com/ProtonMail/go-crypto/openpgp/packet"
)
// TestPeriodRevoke checks how Period.Revoke shortens or invalidates a
// validity period for various revocation signatures.
func TestPeriodRevoke(t *testing.T) {
	reasonRetired := packet.KeyRetired
	reasonCompromised := packet.KeyCompromised

	date0 := time.Date(2010, time.February, 14, 0, 0, 0, 0, time.UTC)
	date1 := time.Date(2010, time.February, 19, 0, 0, 0, 0, time.UTC)
	dateNeg := time.Date(2010, time.January, 1, 0, 0, 0, 0, time.UTC)

	tests := []struct {
		name        string
		validity    Period
		revocations []*packet.Signature
		expected    Period
	}{
		{
			name: "no-expiration",
			validity: Period{
				CreatedAt: date0,
			},
			revocations: []*packet.Signature{
				{
					CreationTime:     date1,
					RevocationReason: &reasonRetired,
				},
			},
			expected: Period{
				CreatedAt: date0,
				Duration:  date1.Sub(date0),
			},
		},
		{
			name: "7-day-expiration",
			validity: Period{
				CreatedAt: date0,
				Duration:  7 * 24 * time.Hour,
			},
			revocations: []*packet.Signature{
				{
					CreationTime:     date1,
					RevocationReason: &reasonRetired,
				},
			},
			expected: Period{
				CreatedAt: date0,
				Duration:  date1.Sub(date0),
			},
		},
		{
			name: "1-day-expiration",
			validity: Period{
				CreatedAt: date0,
				Duration:  24 * time.Hour,
			},
			revocations: []*packet.Signature{
				{
					CreationTime:     date1,
					RevocationReason: &reasonRetired,
				},
			},
			expected: Period{
				CreatedAt: date0,
				Duration:  24 * time.Hour,
			},
		},
		{
			name: "revocation-in-past",
			validity: Period{
				CreatedAt: date0,
				Duration:  24 * time.Hour,
			},
			revocations: []*packet.Signature{
				{
					CreationTime:     dateNeg,
					RevocationReason: &reasonRetired,
				},
			},
			expected: Period{
				Invalid:   true,
				CreatedAt: date0,
			},
		},
		{
			name: "compromised",
			validity: Period{
				CreatedAt: date0,
			},
			revocations: []*packet.Signature{
				{
					CreationTime:     date1,
					RevocationReason: &reasonCompromised,
				},
			},
			expected: Period{
				Invalid:   true,
				CreatedAt: date0,
			},
		},
		{
			// Renamed from the duplicated "compromised": this case actually
			// checks that an already-invalid period stays invalid.
			name: "already-invalid",
			validity: Period{
				Invalid: true,
			},
			revocations: []*packet.Signature{
				{
					CreationTime:     date1,
					RevocationReason: &reasonRetired,
				},
			},
			expected: Period{
				Invalid: true,
			},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			test.validity.Revoke(test.revocations)
			if want, got := test.expected, test.validity; want != got {
				t.Errorf("failed: want=%v got=%v", want, got)
			}
		})
	}
}
// TestPeriodIntersect checks Period.Intersect over overlapping,
// non-overlapping, unbounded, and invalid period combinations.
// In the inline diagrams a zero Duration ("no expiration") is drawn as a
// period that extends to the right edge.
func TestPeriodIntersect(t *testing.T) {
	date0 := time.Date(2010, time.October, 14, 0, 0, 0, 0, time.UTC)
	date1 := time.Date(2010, time.October, 19, 0, 0, 0, 0, time.UTC)
	date2 := time.Date(2010, time.October, 21, 0, 0, 0, 0, time.UTC)
	date3 := time.Date(2010, time.October, 28, 0, 0, 0, 0, time.UTC)

	tests := []struct {
		name      string
		validity  Period
		validity2 Period
		expected  Period
	}{
		{
			name:      "no-expiration-v2-after-v1",
			validity:  Period{CreatedAt: date0}, // v1:  1111111
			validity2: Period{CreatedAt: date1}, // v2:  ...2222
			expected:  Period{CreatedAt: date1}, // res: ...3333
		},
		{
			name:      "no-expiration-v1-after-v2",
			validity:  Period{CreatedAt: date1}, // v1:  ...1111
			validity2: Period{CreatedAt: date0}, // v2:  2222222
			expected:  Period{CreatedAt: date1}, // res: ...3333
		},
		{
			name:      "v2-overlaps-at-the-end",
			validity:  fromTimes(date0, date2), // v1:  11111..
			validity2: fromTimes(date1, date3), // v2:  ...2222
			expected:  fromTimes(date1, date2), // res: ...33..
		},
		{
			name:      "v2-overlaps-at-the-start",
			validity:  fromTimes(date1, date3), // v1:  ...1111
			validity2: fromTimes(date0, date2), // v2:  22222..
			expected:  fromTimes(date1, date2), // res: ...33..
		},
		{
			name:      "v1-no-exp;v2-after",
			validity:  Period{CreatedAt: date0}, // v1:  1111111
			validity2: fromTimes(date1, date3),  // v2:  ...2222
			expected:  fromTimes(date1, date3),  // res: ...3333
		},
		{
			name:      "v1-no-exp;v2-before",
			validity:  Period{CreatedAt: date1}, // v1:  ..11111
			validity2: fromTimes(date0, date3),  // v2:  2222222
			expected:  fromTimes(date1, date3),  // res: ..33333
		},
		{
			name:      "v2-no-exp;v2-after",
			validity:  fromTimes(date0, date3),  // v1:  1111111
			validity2: Period{CreatedAt: date1}, // v2:  ..22222
			expected:  fromTimes(date1, date3),  // res: ..33333
		},
		{
			name:      "v2-no-exp;v2-before",
			validity:  fromTimes(date1, date3),  // v1:  ..11111
			validity2: Period{CreatedAt: date0}, // v2:  2222222
			expected:  fromTimes(date1, date3),  // res: ..33333
		},
		{
			name:      "v2-is-subperiod",
			validity:  fromTimes(date0, date3), // v1:  1111111
			validity2: fromTimes(date1, date2), // v2:  ..222..
			expected:  fromTimes(date1, date2), // res: ..333..
		},
		{
			name:      "v2-is-superperiod",
			validity:  fromTimes(date1, date2), // v1:  ..111..
			validity2: fromTimes(date0, date3), // v2:  2222222
			expected:  fromTimes(date1, date2), // res: ..333..
		},
		{
			name:      "no-overlap",
			validity:  fromTimes(date0, date1), // v1:  111....
			validity2: fromTimes(date2, date3), // v2:  ....222
			expected:  Period{Invalid: true, CreatedAt: date0},
		},
		{
			name:      "v1-invalid",
			validity:  Period{Invalid: true, CreatedAt: date1},
			validity2: fromTimes(date0, date3),
			expected:  Period{Invalid: true, CreatedAt: date1},
		},
		{
			name:      "v2-invalid",
			validity:  fromTimes(date0, date3),
			validity2: Period{Invalid: true, CreatedAt: date1},
			expected:  Period{Invalid: true, CreatedAt: date0},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			test.validity.Intersect(test.validity2)
			if want, got := test.expected, test.validity; want != got {
				t.Errorf("failed: want=%v got=%v", want, got)
			}
		})
	}
}
// fromTimes builds a Period spanning from..to; a non-positive span yields an
// invalid Period.
func fromTimes(from, to time.Time) Period {
	if !to.After(from) {
		return Period{Invalid: true}
	}
	return Period{CreatedAt: from, Duration: to.Sub(from)}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/publickey/validity/validity.go | app/services/publickey/validity/validity.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package validity
import (
"strings"
"time"
"github.com/ProtonMail/go-crypto/openpgp/packet"
"github.com/gotidy/ptr"
)
// Period describes a validity time span starting at CreatedAt.
// A zero Duration means the period never expires.
// Invalid marks a period that is not valid at any point in time.
type Period struct {
	Invalid   bool
	CreatedAt time.Time
	Duration  time.Duration
}
// FromSignature derives a validity Period from a signature's creation time
// and optional lifetime.
func FromSignature(sig *packet.Signature) Period {
	p := Period{CreatedAt: sig.CreationTime}
	if lifetime := sig.SigLifetimeSecs; lifetime != nil && *lifetime != 0 {
		p.Duration = time.Duration(*lifetime) * time.Second
	}
	return p
}
// FromPublicKey derives a validity Period from a public key's creation time
// and the optional key lifetime recorded in its self-signature.
func FromPublicKey(key *packet.PublicKey, sig *packet.Signature) Period {
	period := Period{CreatedAt: key.CreationTime}
	if lifetime := sig.KeyLifetimeSecs; lifetime != nil && *lifetime != 0 {
		period.Duration = time.Duration(*lifetime) * time.Second
	}
	return period
}
// Invalidate marks the period as invalid and clears its duration.
// CreatedAt is deliberately left untouched.
func (v *Period) Invalidate() {
	v.Invalid = true
	v.Duration = 0
}
// Intersect narrows the period v to the overlap of v and v2.
// The intersection starts at the later of the two start times and ends at
// the earlier of the two end times (a zero Duration means "never expires").
// If either period is invalid, or the overlap is empty, v is invalidated.
func (v *Period) Intersect(v2 Period) {
	if v.Invalid {
		return
	}
	if v2.Invalid {
		v.Invalidate()
		return
	}
	// The intersection starts at the later of the two start times.
	createdAt := v.CreatedAt
	if createdAt.Before(v2.CreatedAt) {
		createdAt = v2.CreatedAt
	}
	// Neither period expires, so the intersection doesn't expire either.
	if v.Duration == 0 && v2.Duration == 0 {
		v.CreatedAt = createdAt
		return
	}
	// At least one period expires: the intersection ends at the earliest
	// of the defined end times.
	var duration time.Duration
	switch {
	case v.Duration == 0:
		duration = v2.CreatedAt.Add(v2.Duration).Sub(createdAt)
	case v2.Duration == 0:
		duration = v.CreatedAt.Add(v.Duration).Sub(createdAt)
	default:
		end1 := v.CreatedAt.Add(v.Duration)
		end2 := v2.CreatedAt.Add(v2.Duration)
		if end1.After(end2) {
			duration = end2.Sub(createdAt)
		} else {
			duration = end1.Sub(createdAt)
		}
	}
	// An empty or negative overlap is invalid. NOTE: the check must be <=0,
	// not <0 — a zero-length overlap stored as Duration==0 would otherwise
	// be reinterpreted as "never expires". This matches the <=0 handling
	// in Revoke.
	if duration <= 0 {
		v.Invalidate()
		return
	}
	v.CreatedAt = createdAt
	v.Duration = duration
}
// Revoke caps or invalidates the period based on revocation signatures.
// A revocation for a compromised key invalidates the period entirely.
// Any other revocation truncates the period at the revocation's creation
// time; if that time is not after the period's start, the period becomes
// invalid.
func (v *Period) Revoke(revocations []*packet.Signature) {
	if v.Invalid {
		return // The period is already invalid - nothing to do.
	}
	for _, rev := range revocations {
		if rev.RevocationReason != nil && *rev.RevocationReason == packet.KeyCompromised {
			// If the key is compromised, the key is considered revoked even before the revocation date.
			v.Invalidate()
			return
		}
		revokedFrom := rev.CreationTime // Note: Lifetime (rev.SigLifetimeSecs) isn't used in revocations.
		duration := revokedFrom.Sub(v.CreatedAt)
		if duration <= 0 {
			v.Invalidate()
			return
		}
		// Keep the earliest effective end time seen so far
		// (Duration==0 means the period previously had no expiry).
		if v.Duration == 0 {
			v.Duration = duration
		} else if v.Duration > duration {
			v.Duration = duration
		}
	}
}
// String renders the period for logging: "not-valid" for invalid periods,
// otherwise "from=<RFC3339>" with an optional " to=<RFC3339>" suffix when
// the period expires.
func (v *Period) String() string {
	if v.Invalid {
		return "not-valid"
	}
	var b strings.Builder
	b.WriteString("from=" + v.CreatedAt.Format(time.RFC3339))
	if v.Duration > 0 {
		b.WriteString(" to=" + v.CreatedAt.Add(v.Duration).Format(time.RFC3339))
	}
	return b.String()
}
// Milliseconds returns the period boundaries as unix time in milliseconds.
// The second return value is nil when the period never expires.
// For an invalid period both values equal the start time (a zero-length span).
func (v *Period) Milliseconds() (int64, *int64) {
	var (
		validFrom int64
		validTo *int64
	)
	validFrom = v.CreatedAt.UnixMilli()
	if v.Invalid {
		return validFrom, &validFrom // zero duration
	}
	if v.Duration > 0 {
		validTo = ptr.Int64(v.CreatedAt.Add(v.Duration).UnixMilli())
	}
	return validFrom, validTo
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/publickey/keyssh/verify_ssh.go | app/services/publickey/keyssh/verify_ssh.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package keyssh
import (
"bytes"
"context"
"crypto/sha256"
"crypto/sha512"
"encoding/pem"
"fmt"
"hash"
"github.com/harness/gitness/app/services/keyfetcher"
"github.com/harness/gitness/git/sha"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/rs/zerolog/log"
"golang.org/x/crypto/ssh"
)
const SignatureType = "SSH SIGNATURE"
// signatureBlob describes a lightweight SSH signature format.
// https://github.com/openssh/openssh-portable/blob/V_9_9_P2/PROTOCOL.sshsig#L34
type signatureBlob struct {
	// MagicPreamble must be the six bytes "SSHSIG".
	MagicPreamble [6]byte
	Version uint32
	// PublicKey is the wire-format public key that produced the signature.
	PublicKey []byte
	// Namespace is the interpretation domain; "git" for git signatures.
	Namespace string
	Reserved string
	// HashAlgorithm is the hash applied to the signed data ("sha256" or "sha512").
	HashAlgorithm string
	// Signature is the wire-format SSH signature.
	Signature []byte
}
// messageWrapper represents SSH signed data.
// https://github.com/openssh/openssh-portable/blob/V_9_9_P2/PROTOCOL.sshsig#L81
type messageWrapper struct {
	// Namespace is the interpretation domain; "git" for git signatures.
	Namespace string
	Reserved string
	// HashAlgorithm names the hash used to produce Hash.
	HashAlgorithm string
	// Hash is the digest of the signed content.
	Hash []byte
}
// hashFunc returns hash function used for SSH signature verification.
// Data to be signed is first hashed with the specified hash_algorithm.
// This is done to limit the amount of data presented to the signature
// operation, which may be of concern if the signing key is held in limited
// or slow hardware or on a remote ssh-agent. The supported hash algorithms
// are "sha256" and "sha512"; any other name yields nil.
// https://github.com/openssh/openssh-portable/blob/V_9_9_P2/PROTOCOL.sshsig#L63
func hashFunc(hashAlgorithm string) hash.Hash {
	constructors := map[string]func() hash.Hash{
		"sha256": sha256.New,
		"sha512": sha512.New,
	}
	if newHash, ok := constructors[hashAlgorithm]; ok {
		return newHash()
	}
	return nil
}
const (
	// sshMagicPreamble is the fixed preamble of every SSH signature blob.
	sshMagicPreamble = "SSHSIG"
	// sshNamespace is the SSH signature namespace used for git objects.
	sshNamespace = "git"
)
// Verify parses and verifies SSH signatures of git objects.
// Parse must be called first; it populates the fields used by Verify and
// the getter methods.
type Verify struct {
	// hashAlgorithm is the hash algorithm declared in the signature blob.
	hashAlgorithm string
	// signatureBytes is the wire-format signature from the blob.
	signatureBytes []byte
	// publicKey is the wire-format public key embedded in the signature.
	publicKey []byte
	// keyFingerprint is the SHA256 fingerprint of the embedded public key.
	keyFingerprint string
}
// Parse parses the provided ASCII-armored signature and returns fingerprint of the key used to sign it.
// Also, it updates the internal object fields required for key validation.
// It returns an empty result on success, or a non-empty enum value
// describing why the signature cannot be processed.
func (v *Verify) Parse(
	ctx context.Context,
	signature []byte,
	objectSHA sha.SHA,
) enum.GitSignatureResult {
	// The signature is expected to be a PEM block of type "SSH SIGNATURE".
	block, _ := pem.Decode(signature)
	if block == nil {
		log.Ctx(ctx).Warn().
			Str("object_sha", objectSHA.String()).
			Msg("failed to decode signature")
		return enum.GitSignatureInvalid
	}
	if block.Type != SignatureType {
		log.Ctx(ctx).Warn().
			Str("signature_type", block.Type).
			Str("object_sha", objectSHA.String()).
			Msg("unexpected SSH signature block type")
		return enum.GitSignatureInvalid
	}
	var blob signatureBlob
	if err := ssh.Unmarshal(block.Bytes, &blob); err != nil {
		log.Ctx(ctx).Warn().
			Err(err).
			Str("object_sha", objectSHA.String()).
			Msg("failed to unmarshal SSH signature")
		return enum.GitSignatureInvalid
	}
	// The preamble is the six-byte sequence "SSHSIG". It is included to
	// ensure that manual signatures can never be confused with any message
	// signed during SSH user or host authentication.
	// https://github.com/openssh/openssh-portable/blob/V_9_9_P2/PROTOCOL.sshsig#L89
	if !bytes.Equal(blob.MagicPreamble[:], []byte(sshMagicPreamble)) {
		log.Ctx(ctx).Warn().
			Str("object_sha", objectSHA.String()).
			Msg("invalid SSH signature magic preamble")
		return enum.GitSignatureInvalid
	}
	// Verifiers MUST reject signatures with versions greater than those they support.
	// https://github.com/openssh/openssh-portable/blob/V_9_9_P2/PROTOCOL.sshsig#L50
	if blob.Version > 1 {
		return enum.GitSignatureUnsupported
	}
	// The purpose of the namespace value is to specify a unambiguous
	// interpretation domain for the signature, e.g. file signing.
	// This prevents cross-protocol attacks caused by signatures
	// intended for one intended domain being accepted in another.
	// https://github.com/openssh/openssh-portable/blob/V_9_9_P2/PROTOCOL.sshsig#L53
	if blob.Namespace != sshNamespace {
		log.Ctx(ctx).Warn().
			Str("namespace", blob.Namespace).
			Str("object_sha", objectSHA.String()).
			Msg("SSH signature namespace mismatch")
		return enum.GitSignatureInvalid
	}
	// Parse the embedded public key only to derive its fingerprint.
	publicKey, err := ssh.ParsePublicKey(blob.PublicKey)
	if err != nil {
		log.Ctx(ctx).Warn().
			Err(err).
			Str("object_sha", objectSHA.String()).
			Msg("invalid SSH signature public key")
		return enum.GitSignatureInvalid
	}
	v.hashAlgorithm = blob.HashAlgorithm
	v.signatureBytes = blob.Signature
	v.publicKey = blob.PublicKey
	v.keyFingerprint = ssh.FingerprintSHA256(publicKey)
	return ""
}
// Key fetches the principal's stored SSH public key that matches the
// fingerprint extracted from the signature by Parse.
// It returns nil (and no error) when no matching key exists.
func (v *Verify) Key(
	ctx context.Context,
	keyFetcher keyfetcher.Service,
	principalID int64,
) (*types.PublicKey, error) {
	keys, err := keyFetcher.FetchByFingerprint(
		ctx,
		v.KeyFingerprint(),
		principalID,
		[]enum.PublicKeyUsage{enum.PublicKeyUsageSign, enum.PublicKeyUsageAuthSign},
		[]enum.PublicKeyScheme{enum.PublicKeySchemeSSH},
	)
	if err != nil {
		return nil, fmt.Errorf("failed to list SSH public keys by fingerprint: %w", err)
	}
	if len(keys) == 0 {
		//nolint:nilnil
		return nil, nil // No key is available and there is no error.
	}
	return &keys[0], nil
}
// Verify checks the SSH signature captured earlier by Parse against the
// provided public key (authorized_keys format) and the signed content.
// The last parameter (committer signature) is unused for SSH.
func (v *Verify) Verify(
	ctx context.Context,
	publicKeyRaw []byte,
	signedContent []byte,
	objectSHA sha.SHA,
	_ types.Signature,
) enum.GitSignatureResult {
	publicKey, _, _, _, err := ssh.ParseAuthorizedKey(publicKeyRaw)
	if err != nil {
		log.Ctx(ctx).Warn().
			Err(err).
			Str("object_sha", objectSHA.String()).
			Msg("failed to parse SSH key")
		return enum.GitSignatureUnverified
	}
	hashAlgorithm := v.hashAlgorithm
	signatureBytes := v.signatureBytes
	h := hashFunc(hashAlgorithm)
	if h == nil {
		log.Ctx(ctx).Warn().
			Str("hash_algorithm", v.hashAlgorithm).
			Str("object_sha", objectSHA.String()).
			Msg("unrecognized SSH signature algorithm")
		return enum.GitSignatureInvalid
	}
	// The content is hashed first; only the digest is part of the signed message.
	h.Write(signedContent)
	hashSum := h.Sum(nil)
	sig := ssh.Signature{}
	if err := ssh.Unmarshal(signatureBytes, &sig); err != nil {
		log.Ctx(ctx).Warn().
			Err(err).
			Str("object_sha", objectSHA.String()).
			Msg("failed to unmarshal SSH signature")
		return enum.GitSignatureInvalid
	}
	// Rebuild the signed message: the "SSHSIG" preamble followed by the
	// wire-encoded wrapper (namespace, hash algorithm, digest).
	signedMessage := ssh.Marshal(messageWrapper{
		Namespace:     sshNamespace,
		HashAlgorithm: hashAlgorithm,
		Hash:          hashSum,
	})
	buf := bytes.NewBuffer(nil)
	_, _ = buf.WriteString(sshMagicPreamble)
	_, _ = buf.Write(signedMessage)
	err = publicKey.Verify(buf.Bytes(), &sig)
	if err != nil {
		return enum.GitSignatureBad
	}
	return enum.GitSignatureGood
}
// KeyScheme returns the SSH public key scheme.
func (v *Verify) KeyScheme() enum.PublicKeyScheme {
	return enum.PublicKeySchemeSSH
}
// KeyID always returns an empty string: SSH signatures identify their key
// by fingerprint, not by key ID.
func (v *Verify) KeyID() string {
	return ""
}
// KeyFingerprint returns the SHA256 fingerprint of the public key embedded
// in the signature, as extracted by Parse.
func (v *Verify) KeyFingerprint() string {
	return v.keyFingerprint
}
// SignaturePublicKey returns the wire-format public key embedded in the
// signature, as extracted by Parse.
func (v *Verify) SignaturePublicKey() []byte {
	return v.publicKey
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/publickey/keyssh/parse_ssh.go | app/services/publickey/keyssh/parse_ssh.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package keyssh
import (
"crypto/sha256"
"encoding/base64"
"encoding/json"
"slices"
"github.com/harness/gitness/errors"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/gliderlabs/ssh"
gossh "golang.org/x/crypto/ssh"
)
// FromSSH wraps an already-parsed SSH public key in a KeyInfo.
// The key comment is left empty.
func FromSSH(key gossh.PublicKey) KeyInfo {
	return KeyInfo{PublicKey: key, KeyComment: ""}
}
// Parse parses an SSH public key in authorized_keys format and validates
// that its algorithm is acceptable (not disallowed, and in AllowedTypes).
func Parse(keyData []byte) (KeyInfo, error) {
	publicKey, comment, _, _, err := gossh.ParseAuthorizedKey(keyData)
	if err != nil {
		return KeyInfo{}, errors.InvalidArgumentf("invalid SSH key data: %s", err.Error())
	}
	keyType := publicKey.Type()
	switch {
	case slices.Contains(DisallowedTypes, keyType):
		// explicitly disallowed
		return KeyInfo{}, errors.InvalidArgumentf("keys of type %s are not allowed", keyType)
	case !slices.Contains(AllowedTypes, keyType):
		// only allowed
		return KeyInfo{}, errors.InvalidArgumentf("allowed key types are %v", AllowedTypes)
	}
	return KeyInfo{
		PublicKey:  publicKey,
		KeyComment: comment,
	}, nil
}
// AllowedTypes lists the SSH public key algorithms accepted by Parse.
var AllowedTypes = []string{
	gossh.KeyAlgoRSA,
	gossh.KeyAlgoECDSA256,
	gossh.KeyAlgoECDSA384,
	gossh.KeyAlgoECDSA521,
	gossh.KeyAlgoED25519,
	gossh.KeyAlgoSKECDSA256,
	gossh.KeyAlgoSKED25519,
}
// DisallowedTypes lists SSH public key algorithms that Parse explicitly
// rejects (DSA is deprecated, hence the staticcheck suppression).
//nolint:staticcheck
var DisallowedTypes = []string{
	gossh.KeyAlgoDSA,
}
// KeyInfo holds a parsed SSH public key and its optional comment.
type KeyInfo struct {
	PublicKey gossh.PublicKey
	KeyComment string
}
// Matches reports whether s, an authorized_keys-format key, denotes the
// same public key. Unparsable input never matches.
func (key KeyInfo) Matches(s string) bool {
	parsed, _, _, _, err := gossh.ParseAuthorizedKey([]byte(s))
	if err != nil {
		return false
	}
	return key.matchesKey(parsed)
}
// matchesKey compares the wrapped key with another parsed public key.
func (key KeyInfo) matchesKey(otherKey gossh.PublicKey) bool {
	return ssh.KeysEqual(key.PublicKey, otherKey)
}
// Fingerprint returns the key's fingerprint in the "SHA256:<base64>" form
// (unpadded standard base64 of the SHA-256 of the wire-format key).
func (key KeyInfo) Fingerprint() string {
	digest := sha256.Sum256(key.PublicKey.Marshal())
	return "SHA256:" + base64.RawStdEncoding.EncodeToString(digest[:])
}
// Type returns the SSH public key algorithm name.
func (key KeyInfo) Type() string {
	return key.PublicKey.Type()
}
// Scheme returns the SSH public key scheme.
func (key KeyInfo) Scheme() enum.PublicKeyScheme {
	return enum.PublicKeySchemeSSH
}
// Comment returns the key's comment from the authorized_keys line.
func (key KeyInfo) Comment() string {
	return key.KeyComment
}
// ValidFrom always returns nil.
func (key KeyInfo) ValidFrom() *int64 {
	return nil // SSH keys do not have validity period
}
// ValidTo always returns nil.
func (key KeyInfo) ValidTo() *int64 {
	return nil // SSH keys do not have validity period
}
// Identities always returns nil.
func (key KeyInfo) Identities() []types.Identity {
	return nil // SSH keys do not have identities
}
// RevocationReason always returns nil.
func (key KeyInfo) RevocationReason() *enum.RevocationReason {
	return nil // SSH keys do not have revocations
}
// Metadata returns an empty JSON object: SSH keys carry no extra metadata.
func (key KeyInfo) Metadata() json.RawMessage {
	return json.RawMessage("{}")
}
// KeyIDs always returns nil.
func (key KeyInfo) KeyIDs() []string {
	return nil // SSH keys do not have subkeys
}
// CompromisedIDs always returns nil.
func (key KeyInfo) CompromisedIDs() []string {
	return nil // SSH keys do not have revocations
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/publickey/keypgp/parse_pgp.go | app/services/publickey/keypgp/parse_pgp.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package keypgp
import (
"bytes"
"encoding/json"
"fmt"
"io"
"strings"
"github.com/harness/gitness/app/services/publickey/validity"
"github.com/harness/gitness/errors"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/ProtonMail/go-crypto/openpgp"
"github.com/ProtonMail/go-crypto/openpgp/packet"
"golang.org/x/exp/slices"
)
// KeyMetadata describes a single PGP key (primary key or subkey).
type KeyMetadata struct {
	// ID of the key.
	ID string `json:"id"`
	// Fingerprint is a hash of the key data.
	Fingerprint string `json:"fingerprint"`
	// RevocationReason is there only for information and displaying purposes.
	// To determine if the key can be used or not ValidFrom and ValidTo should be used.
	RevocationReason *enum.RevocationReason `json:"revocation_reason,omitempty"`
	// ValidFrom is timestamp (unix time millis) from which the key can be used.
	ValidFrom int64 `json:"valid_from,omitempty"`
	// ValidTo is timestamp (unix time millis) until the key can be used.
	// After this, the key should be considered expired or revoked (if RevocationReason has a value).
	// Nil value means that the key doesn't expire.
	ValidTo *int64 `json:"valid_to,omitempty"`
	// Algorithm is the public key algorithm name (e.g. "RSA", "EdDSA").
	Algorithm string `json:"algorithm"`
	// BitLength is the key size in bits.
	BitLength uint16 `json:"bit_length"`
}
// EntityMetadata describes a whole PGP entity: its identities, the primary
// key and all signing-capable subkeys.
type EntityMetadata struct {
	PrimaryIdentity *types.Identity `json:"primary_identity,omitempty"`
	Identities []types.Identity `json:"identities,omitempty"`
	PrimaryKey KeyMetadata `json:"primary_key"`
	SubKeys []KeyMetadata `json:"sub_keys,omitempty"`
}
// Parse reads a single armored PGP key ring from r and builds its KeyInfo.
// It rejects private keys and rings with more than one primary key.
// If principal is non-nil, at least one of the key's identities must match
// the principal's email address (case-insensitive); pass nil to skip this
// check. The resulting validity period is the intersection of the key's
// and the primary identity's validity periods, capped by any revocations.
func Parse(r io.Reader, principal *types.Principal) (KeyInfo, error) {
	keyRing, err := openpgp.ReadArmoredKeyRing(r)
	if err != nil {
		return KeyInfo{}, errors.InvalidArgumentf("failed to read PGP key ring: %s", err.Error())
	}
	if len(keyRing) == 0 {
		return KeyInfo{}, errors.InvalidArgument("PGP key ring contains no keys")
	}
	if len(keyRing) > 1 {
		return KeyInfo{}, errors.InvalidArgument("can't accept a PGP key ring with multiple primary keys")
	}
	keyEntity := keyRing[0]
	if keyEntity == nil || keyEntity.PrimaryKey == nil {
		// Should not happen.
		return KeyInfo{}, errors.InvalidArgument("PGP key ring entity is nil")
	}
	if keyEntity.PrivateKey != nil {
		return KeyInfo{}, errors.InvalidArgument("refusing to accept private key: please upload a public key")
	}
	primarySignature, primaryIdentity := keyEntity.PrimarySelfSignature()
	if primarySignature == nil {
		// Should not happen.
		return KeyInfo{}, errors.InvalidArgument("PGP key entity is missing primary signature")
	}
	// Extract the validity period from the key's primary signature.
	validityKey := validity.FromPublicKey(keyEntity.PrimaryKey, primarySignature)
	validityKey.Revoke(keyEntity.Revocations)
	var identity *types.Identity
	var comment string
	// If principal is nil, it means that no particular principal is needed.
	// By `foundPrincipal = true` we declare that we have "found" it.
	foundPrincipal := principal == nil
	// Process the primary identity (name and email address for the key) if it exists.
	// The identity can also have revocations. We ignore the revocation reason, but honor
	// the validity period. The final validity period for the key is intersection between
	// the validity period of the key and the validity period of the identity.
	if primaryIdentity != nil {
		validityIdent := validity.FromSignature(primarySignature)
		validityIdent.Revoke(primaryIdentity.Revocations)
		validityKey.Intersect(validityIdent)
		identity = &types.Identity{
			Name:  primaryIdentity.UserId.Name,
			Email: primaryIdentity.UserId.Email,
		}
		// Short-circuit keeps principal.Email safe when principal is nil:
		// foundPrincipal is already true in that case.
		foundPrincipal = foundPrincipal || strings.EqualFold(identity.Email, principal.Email)
		comment = primaryIdentity.UserId.Comment
	}
	var identities []types.Identity
	for _, ident := range keyEntity.Identities {
		identities = append(identities, types.Identity{
			Name:  ident.UserId.Name,
			Email: ident.UserId.Email,
		})
		foundPrincipal = foundPrincipal || strings.EqualFold(ident.UserId.Email, principal.Email)
	}
	// PGP keys can have multiple identities and one of those must match the current user's.
	// The email address must match, the name can be different.
	if !foundPrincipal {
		return KeyInfo{}, errors.InvalidArgument("key identities don't contain the user's email address")
	}
	var subKeys []KeyMetadata
	for _, subKey := range keyEntity.Subkeys {
		if subKey.PublicKey == nil || subKey.Sig == nil {
			return KeyInfo{}, errors.InvalidArgument("found a subkey without public key")
		}
		// We'll only consider keys than can be used for signing
		if !subKey.PublicKey.CanSign() {
			continue
		}
		// A subkey's validity is further capped by the primary key's validity.
		validitySubkey := validity.FromSignature(subKey.Sig)
		validitySubkey.Revoke(subKey.Revocations)
		validitySubkey.Intersect(validityKey)
		subKeyValidFrom, subKeyValidTo := validitySubkey.Milliseconds()
		bits, _ := subKey.PublicKey.BitLength()
		subKeys = append(subKeys, KeyMetadata{
			ID:               subKey.PublicKey.KeyIdString(),
			Fingerprint:      fmt.Sprintf("%X", subKey.PublicKey.Fingerprint),
			RevocationReason: getRevocationReason(subKey.Revocations),
			ValidFrom:        subKeyValidFrom,
			ValidTo:          subKeyValidTo,
			Algorithm:        pgpAlgo(subKey.PublicKey.PubKeyAlgo),
			BitLength:        bits,
		})
	}
	keyValidFrom, keyValidTo := validityKey.Milliseconds()
	bits, _ := keyEntity.PrimaryKey.BitLength()
	metadata := EntityMetadata{
		PrimaryIdentity: identity,
		Identities:      identities,
		PrimaryKey: KeyMetadata{
			ID:               keyEntity.PrimaryKey.KeyIdString(),
			Fingerprint:      fmt.Sprintf("%X", keyEntity.PrimaryKey.Fingerprint),
			RevocationReason: getRevocationReason(keyEntity.Revocations),
			ValidFrom:        keyValidFrom,
			ValidTo:          keyValidTo,
			Algorithm:        pgpAlgo(keyEntity.PrimaryKey.PubKeyAlgo),
			BitLength:        bits,
		},
		SubKeys: subKeys,
	}
	keyInfo := KeyInfo{
		entity:    keyEntity,
		metadata:  metadata,
		validFrom: keyValidFrom,
		validTo:   keyValidTo,
		comment:   comment,
	}
	return keyInfo, nil
}
// KeyInfo holds a parsed PGP entity together with derived metadata.
type KeyInfo struct {
	// entity holds the original PGP key
	entity *openpgp.Entity
	// metadata holds additional key info
	metadata EntityMetadata
	// validFrom/validTo are the primary key's validity bounds in unix millis;
	// a nil validTo means the key doesn't expire.
	validFrom int64
	validTo *int64
	// comment is the primary identity's comment, if any.
	comment string
}
// Matches reports whether s contains the same PGP key: the primary key IDs
// must match and the serialized entities must be byte-identical.
// Unparsable input never matches.
func (key KeyInfo) Matches(s string) bool {
	otherKey, err := Parse(strings.NewReader(s), nil)
	if err != nil {
		return false
	}
	if key.entity.PrimaryKey.KeyId != otherKey.entity.PrimaryKey.KeyId {
		return false
	}
	var thisSer, otherSer bytes.Buffer
	_ = key.entity.Serialize(&thisSer)
	_ = otherKey.entity.Serialize(&otherSer)
	return bytes.Equal(thisSer.Bytes(), otherSer.Bytes())
}
// Fingerprint returns the primary key's fingerprint (uppercase hex).
func (key KeyInfo) Fingerprint() string {
	return key.metadata.PrimaryKey.Fingerprint
}
// Type returns the primary key's public key algorithm name.
func (key KeyInfo) Type() string {
	return pgpAlgo(key.entity.PrimaryKey.PubKeyAlgo)
}
// Scheme returns the PGP public key scheme.
func (key KeyInfo) Scheme() enum.PublicKeyScheme {
	return enum.PublicKeySchemePGP
}
// Comment returns the primary identity's comment, if any.
func (key KeyInfo) Comment() string {
	return key.comment
}
// ValidFrom returns the start of the key's validity in unix millis.
func (key KeyInfo) ValidFrom() *int64 {
	return &key.validFrom
}
// ValidTo returns the end of the key's validity in unix millis;
// nil means the key doesn't expire.
func (key KeyInfo) ValidTo() *int64 {
	return key.validTo
}
// Identities returns all identities (name/email pairs) bound to the key.
func (key KeyInfo) Identities() []types.Identity {
	return key.metadata.Identities
}
// RevocationReason returns the primary key's revocation reason, or nil if
// the key isn't revoked.
func (key KeyInfo) RevocationReason() *enum.RevocationReason {
	return key.metadata.PrimaryKey.RevocationReason
}
// Metadata returns the entity metadata serialized as JSON.
func (key KeyInfo) Metadata() json.RawMessage {
	data, _ := json.Marshal(key.metadata)
	return data
}
// KeyIDs returns the IDs of the primary key and of every signing-capable
// subkey.
func (key KeyInfo) KeyIDs() []string {
	ids := make([]string, 0)
	ids = append(ids, key.entity.PrimaryKey.KeyIdString())
	for i := range key.entity.Subkeys {
		if !key.entity.Subkeys[i].PublicKey.CanSign() {
			continue
		}
		ids = append(ids, key.entity.Subkeys[i].PublicKey.KeyIdString())
	}
	return ids
}
// CompromisedIDs returns the IDs of all signing keys (primary key and
// subkeys) revoked as compromised. If the primary key itself is
// compromised, every signing subkey is reported as compromised as well.
func (key KeyInfo) CompromisedIDs() []string {
	var revokedIDs []string
	var primaryRevoked bool
	revocationReason := getRevocationReason(key.entity.Revocations)
	if revocationReason != nil && *revocationReason == enum.RevocationReasonCompromised {
		revokedIDs = append(revokedIDs, key.entity.PrimaryKey.KeyIdString())
		primaryRevoked = true
	}
	for i := range key.entity.Subkeys {
		if !key.entity.Subkeys[i].PublicKey.CanSign() {
			continue
		}
		// A compromised primary key taints every signing subkey.
		if primaryRevoked {
			revokedIDs = append(revokedIDs, key.entity.Subkeys[i].PublicKey.KeyIdString())
			continue
		}
		revocationReason = getRevocationReason(key.entity.Subkeys[i].Revocations)
		if revocationReason != nil && *revocationReason == enum.RevocationReasonCompromised {
			revokedIDs = append(revokedIDs, key.entity.Subkeys[i].PublicKey.KeyIdString())
		}
	}
	return revokedIDs
}
// pgpAlgo maps a PGP public key algorithm to its display name.
// Unknown algorithms map to the empty string.
func pgpAlgo(algorithm packet.PublicKeyAlgorithm) string {
	names := map[packet.PublicKeyAlgorithm]string{
		packet.PubKeyAlgoRSA:            "RSA",
		packet.PubKeyAlgoRSASignOnly:    "RSA",
		packet.PubKeyAlgoRSAEncryptOnly: "RSA",
		packet.PubKeyAlgoElGamal:        "ElGamal",
		packet.PubKeyAlgoDSA:            "DSA",
		packet.PubKeyAlgoECDH:           "ECDH",
		packet.PubKeyAlgoECDSA:          "ECDSA",
		packet.PubKeyAlgoEdDSA:          "EdDSA",
		packet.PubKeyAlgoX25519:         "X25519",
		packet.PubKeyAlgoX448:           "X448",
		packet.PubKeyAlgoEd25519:        "Ed25519",
		packet.PubKeyAlgoEd448:          "Ed448",
	}
	return names[algorithm]
}
// getRevocationReason condenses a list of revocation signatures into a
// single reason. It returns nil when there are no revocations. A key
// compromise wins immediately; otherwise the last recognized reason wins,
// defaulting to "unknown".
func getRevocationReason(revocations []*packet.Signature) *enum.RevocationReason {
	if len(revocations) == 0 {
		return nil
	}
	reason := enum.RevocationReasonUnknown
	for _, revocation := range revocations {
		if revocation == nil || revocation.RevocationReason == nil {
			continue
		}
		switch *revocation.RevocationReason {
		case packet.KeyCompromised:
			reason = enum.RevocationReasonCompromised
			return &reason
		case packet.KeyRetired:
			reason = enum.RevocationReasonRetired
		case packet.KeySuperseded:
			reason = enum.RevocationReasonSuperseded
		}
	}
	return &reason
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/publickey/keypgp/verify_pgp.go | app/services/publickey/keypgp/verify_pgp.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package keypgp
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"strings"
"time"
"github.com/harness/gitness/app/services/keyfetcher"
"github.com/harness/gitness/git/sha"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/ProtonMail/go-crypto/openpgp"
"github.com/ProtonMail/go-crypto/openpgp/armor"
pgperrors "github.com/ProtonMail/go-crypto/openpgp/errors"
"github.com/ProtonMail/go-crypto/openpgp/packet"
"github.com/rs/zerolog/log"
)
const (
	// SignatureType is the armor block type of a PGP signature.
	SignatureType = "PGP SIGNATURE"
)
// Verify parses and verifies PGP signatures of git objects.
// Parse must be called first; it populates the fields used by Verify and
// the getter methods.
type Verify struct {
	// signature is the raw armored signature as passed to Parse.
	signature []byte
	// keyID is the issuer key ID from the signature (16 hex chars).
	keyID string
	// keyFingerprint is the issuer fingerprint from the signature (hex).
	keyFingerprint string
}
// Parse decodes the armored PGP signature, extracts the issuer key ID and
// fingerprint, and stores them together with the raw signature for later
// verification. It returns an empty result on success, or a non-empty
// enum value describing why the signature cannot be processed.
func (v *Verify) Parse(
	ctx context.Context,
	signature []byte,
	objectSHA sha.SHA,
) enum.GitSignatureResult {
	block, err := armor.Decode(bytes.NewReader(signature))
	if err != nil || block == nil {
		log.Ctx(ctx).Warn().
			Err(err).
			Str("object_sha", objectSHA.String()).
			Msg("failed to decode signature")
		return enum.GitSignatureInvalid
	}
	if block.Type != openpgp.SignatureType {
		log.Ctx(ctx).Warn().
			Str("signature_type", block.Type).
			Str("object_sha", objectSHA.String()).
			Msg("unexpected PGP signature block type")
		return enum.GitSignatureInvalid
	}
	// Only the first packet is inspected here; it must be a signature
	// packet carrying an issuer key ID.
	reader := packet.NewReader(block.Body)
	sig, err := reader.Next()
	if err != nil {
		log.Ctx(ctx).Warn().
			Err(err).
			Str("object_sha", objectSHA.String()).
			Msg("failed to read PGP signature")
		return enum.GitSignatureInvalid
	}
	p, ok := sig.(*packet.Signature)
	if !ok {
		log.Ctx(ctx).Warn().
			Str("signature_type", fmt.Sprintf("%T", sig)).
			Str("object_sha", objectSHA.String()).
			Msg("signature type mismatch")
		return enum.GitSignatureInvalid
	}
	if p.IssuerKeyId == nil {
		log.Ctx(ctx).Warn().
			Str("object_sha", objectSHA.String()).
			Msg("no public key ID in PGP signature")
		return enum.GitSignatureInvalid
	}
	v.signature = signature
	v.keyID = fmt.Sprintf("%016X", *p.IssuerKeyId)
	v.keyFingerprint = fmt.Sprintf("%X", p.IssuerFingerprint)
	return ""
}
// Key fetches the principal's stored PGP public key whose (sub)key ID
// matches the issuer key ID extracted from the signature by Parse.
// It returns nil (and no error) when no matching key exists.
func (v *Verify) Key(
	ctx context.Context,
	keyFetcher keyfetcher.Service,
	principalID int64,
) (*types.PublicKey, error) {
	keys, err := keyFetcher.FetchBySubKeyID(
		ctx,
		v.KeyID(),
		principalID,
		[]enum.PublicKeyUsage{enum.PublicKeyUsageSign},
		[]enum.PublicKeyScheme{enum.PublicKeySchemePGP},
	)
	if err != nil {
		return nil, fmt.Errorf("failed to list PGP public keys by subkey ID: %w", err)
	}
	if len(keys) == 0 {
		//nolint:nilnil
		return nil, nil // No key is available and there is no error.
	}
	return &keys[0], nil
}
// Verify checks the armored detached PGP signature captured earlier by
// Parse against the given armored public key ring and signed content.
// The check is evaluated at the signature's own creation time, and the
// committer's email must match one of the signing entity's identities.
func (v *Verify) Verify(
	ctx context.Context,
	armoredPublicKey []byte,
	signedContent []byte,
	objectSHA sha.SHA,
	committer types.Signature,
) enum.GitSignatureResult {
	keyRingReader := bytes.NewReader(armoredPublicKey)
	keyRing, err := openpgp.ReadArmoredKeyRing(keyRingReader)
	if err != nil {
		log.Ctx(ctx).Warn().
			Err(err).
			Str("object_sha", objectSHA.String()).
			Msg("failed to read key ring")
		return enum.GitSignatureUnverified
	}
	// Re-decode the armored signature stored by Parse to get the raw packets.
	block, err := armor.Decode(bytes.NewReader(v.signature))
	if err != nil {
		return enum.GitSignatureInvalid
	}
	if block.Type != openpgp.SignatureType {
		return enum.GitSignatureInvalid
	}
	signatureRaw, err := io.ReadAll(block.Body)
	if err != nil {
		return enum.GitSignatureInvalid
	}
	// signingTime is the time when the signature has been created.
	// We use this time to verify the signature. We are checking for historical validity
	// (Was this entity valid at the time it was signed, regardless of what happened to the signing key later).
	// We shouldn't use committer time, because this time can be forged easily (GIT_COMMITTER_DATE).
	var signingTime time.Time
	packets := packet.NewReader(bytes.NewReader(signatureRaw))
	for {
		p, err := packets.Next()
		if errors.Is(err, io.EOF) {
			// signature packet not found while reading git signature
			return enum.GitSignatureBad
		}
		if err != nil {
			return enum.GitSignatureInvalid
		}
		sig, ok := p.(*packet.Signature)
		if !ok || sig.IssuerKeyId == nil {
			// we expect only signature packets in the packets of a git signature
			// and every signature must have key ID
			return enum.GitSignatureInvalid
		}
		// Skip signatures issued by keys that aren't part of the key ring.
		if !hasSigningKey(keyRing, *sig.IssuerKeyId) {
			continue
		}
		signingTime = sig.CreationTime
		break
	}
	// CheckArmoredDetachedSignature returns an error if:
	// - The signature (or one of the binding signatures mentioned below)
	//   has a unknown critical notation data subpacket
	// - The primary key of the signing entity is revoked
	// - The primary identity is revoked
	// - The signature is expired
	// - The primary key of the signing entity is expired according to the
	//   primary identity binding signature
	//
	// ... or, if the signature was signed by a subkey and:
	// - The signing subkey is revoked
	// - The signing subkey is expired according to the subkey binding signature
	// - The signing subkey binding signature is expired
	// - The signing subkey cross-signature is expired
	//
	// NOTE: The order of these checks is important, as the caller may choose to
	// ignore ErrSignatureExpired or ErrKeyExpired errors, but should never
	// ignore any other errors.
	// NOTE 2: The comment above is copied from the openpgp library.
	signer, err := openpgp.CheckDetachedSignature(
		keyRing,
		bytes.NewReader(signedContent),
		bytes.NewReader(signatureRaw),
		&packet.Config{
			Time: func() time.Time { return signingTime },
		},
	)
	// If error happened, try to convert it to one of the enum values.
	//nolint:nestif
	if err != nil {
		var errUnsupported pgperrors.UnsupportedError
		// errors.As fills errUnsupported on a match; a non-empty value
		// means the chain contained an UnsupportedError.
		if errors.As(err, &errUnsupported); errUnsupported != "" {
			return enum.GitSignatureUnsupported
		}
		if errors.Is(err, pgperrors.ErrKeyRevoked) {
			return enum.GitSignatureRevoked
		}
		if errors.Is(err, pgperrors.ErrUnknownIssuer) {
			// This shouldn't happen because we fetched the key by ID,
			// so we are using the correct key with the correct identity.
			return enum.GitSignatureBad
		}
		if errors.Is(err, pgperrors.ErrKeyExpired) {
			return enum.GitSignatureKeyExpired
		}
		if errors.Is(err, pgperrors.ErrSignatureExpired) {
			return enum.GitSignatureBad
		}
		log.Ctx(ctx).Warn().
			Err(err).
			Str("error_type", fmt.Sprintf("%T", err)).
			Str("object_sha", objectSHA.String()).
			Msg("unrecognized error")
		return enum.GitSignatureInvalid
	}
	// The committer's email must match one of the signer's identities
	// (case-insensitive), and that identity must not be revoked at
	// signing time.
	var signatureIdentity *openpgp.Identity
	for _, identity := range signer.Identities {
		if strings.EqualFold(committer.Identity.Email, identity.UserId.Email) {
			signatureIdentity = identity
		}
	}
	if signatureIdentity == nil {
		return enum.GitSignatureBad
	}
	if signatureIdentity.Revoked(signingTime) {
		return enum.GitSignatureRevoked
	}
	return enum.GitSignatureGood
}
// KeyScheme returns the PGP public key scheme.
func (v *Verify) KeyScheme() enum.PublicKeyScheme {
	return enum.PublicKeySchemePGP
}
// KeyID returns the issuer key ID extracted from the signature by Parse.
func (v *Verify) KeyID() string {
	return v.keyID
}
// KeyFingerprint returns the issuer fingerprint extracted from the
// signature by Parse.
func (v *Verify) KeyFingerprint() string {
	return v.keyFingerprint
}
// hasSigningKey returns true if the provided key ring contains a key with
// the provided ID that has signing capability. Both primary keys and
// subkeys of every entity in the ring are considered.
func hasSigningKey(keyRing openpgp.EntityList, issuerKeyID uint64) bool {
	for _, entity := range keyRing {
		if entity.PrimaryKey.KeyId == issuerKeyID {
			if selfSig, _ := entity.PrimarySelfSignature(); selfSig != nil && selfSig.FlagSign {
				return true
			}
		}
		for i := range entity.Subkeys {
			sub := entity.Subkeys[i]
			if sub.PublicKey.KeyId == issuerKeyID && sub.Sig != nil && sub.Sig.FlagSign {
				return true
			}
		}
	}
	return false
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/locker/wire.go | app/services/locker/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package locker
import (
"github.com/harness/gitness/lock"
"github.com/google/wire"
)
// WireSet provides the locker package's dependency injection bindings.
var WireSet = wire.NewSet(
	ProvideLocker,
)
// ProvideLocker constructs a Locker backed by the given mutex manager.
func ProvideLocker(mtxManager lock.MutexManager) *Locker {
	return NewLocker(mtxManager)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/locker/registryasynctask.go | app/services/locker/registryasynctask.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package locker
import (
"context"
"fmt"
"time"
)
// LockResource acquires a registry-namespaced lock for the given key.
// The returned function must be called to release the lock.
func (l Locker) LockResource(
	ctx context.Context,
	key string,
	expiry time.Duration,
) (func(), error) {
	unlockFn, err := l.lock(ctx, namespaceRegistry, key, expiry)
	if err != nil {
		return nil, fmt.Errorf("failed to lock mutex for key [%s]: %w", key, err)
	}
	return unlockFn, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/locker/locker.go | app/services/locker/locker.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package locker
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/contextutil"
"github.com/harness/gitness/lock"
"github.com/harness/gitness/logging"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
)
// Lock key namespaces, used to partition keys by the type of resource they guard.
const namespaceRepo = "repo"
const namespaceRegistry = "registry"
// Locker provides namespaced distributed locks backed by a lock.MutexManager.
type Locker struct {
	mtxManager lock.MutexManager
}
// NewLocker returns a Locker that creates its mutexes via the given manager.
func NewLocker(mtxManager lock.MutexManager) *Locker {
	return &Locker{
		mtxManager: mtxManager,
	}
}
// lock creates and acquires a mutex for the given key in the given namespace.
// On success it returns a function that releases the lock; the release uses its
// own timeout so it works even if the caller's context was already canceled.
func (l Locker) lock(
	ctx context.Context,
	namespace string,
	key string,
	expiry time.Duration,
) (func(), error) {
	// annotate logs for easier debugging of lock related issues
	ctx = logging.NewContext(ctx, func(zc zerolog.Context) zerolog.Context {
		return zc.
			Str("key", key).
			Str("namespace", namespace).
			Str("expiry", expiry.String())
	})

	mutex, err := l.mtxManager.NewMutex(
		key,
		lock.WithNamespace(namespace),
		lock.WithExpiry(expiry),
		lock.WithTimeoutFactor(4/expiry.Seconds()), // 4s
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create new mutex: %w", err)
	}

	log.Ctx(ctx).Debug().Msg("attempting to acquire lock")

	if err := mutex.Lock(ctx); err != nil {
		return nil, fmt.Errorf("failed to lock the mutex: %w", err)
	}

	log.Ctx(ctx).Debug().Msgf("successfully locked (expiry: %s)", expiry)

	return func() {
		// always unlock independent of whether source context got canceled or not
		unlockCtx, cancel := contextutil.WithNewTimeout(ctx, 30*time.Second)
		defer cancel()

		if unlockErr := mutex.Unlock(unlockCtx); unlockErr != nil {
			log.Ctx(unlockCtx).Warn().Err(unlockErr).Msg("failed to unlock")
		} else {
			log.Ctx(unlockCtx).Debug().Msg("successfully unlocked")
		}
	}, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/locker/pullreq.go | app/services/locker/pullreq.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package locker
import (
"context"
"fmt"
"strconv"
"time"
)
// LockPR acquires a repo-namespaced lock for a pull request.
// A prNum of 0 locks the repo's whole pull request space instead of a single PR.
// The returned function must be called to release the lock.
func (l Locker) LockPR(
	ctx context.Context,
	repoID int64,
	prNum int64,
	expiry time.Duration,
) (func(), error) {
	key := fmt.Sprintf("%d/pulls", repoID)
	// narrow the key to a single PR unless a repo-wide PR lock was requested (prNum == 0)
	if prNum != 0 {
		key += "/" + strconv.FormatInt(prNum, 10)
	}
	unlockFn, err := l.lock(ctx, namespaceRepo, key, expiry)
	if err != nil {
		return nil, fmt.Errorf("failed to lock mutex for pr %d in repo %d: %w", prNum, repoID, err)
	}
	return unlockFn, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/locker/repo.go | app/services/locker/repo.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package locker
import (
"context"
"fmt"
"strconv"
"time"
"github.com/rs/zerolog/log"
)
// LockDefaultBranch acquires a repo-namespaced lock used when updating the
// repository's default branch. The returned function must be called to release it.
func (l Locker) LockDefaultBranch(
	ctx context.Context,
	repoID int64,
	branchName string,
	expiry time.Duration,
) (func(), error) {
	key := strconv.FormatInt(repoID, 10) + "/defaultBranch"
	log.Ctx(ctx).Info().Msg("attempting to lock to update the repo default branch")
	unlockFn, err := l.lock(ctx, namespaceRepo, key, expiry)
	if err != nil {
		return nil, fmt.Errorf("failed to lock repo to update default branch to %s: %w", branchName, err)
	}
	return unlockFn, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/trigger/wire.go | app/services/trigger/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trigger
import (
"context"
gitevents "github.com/harness/gitness/app/events/git"
pullreqevents "github.com/harness/gitness/app/events/pullreq"
"github.com/harness/gitness/app/pipeline/commit"
"github.com/harness/gitness/app/pipeline/triggerer"
"github.com/harness/gitness/app/services/refcache"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/events"
"github.com/google/wire"
)
// WireSet provides the trigger package's dependency injection bindings.
var WireSet = wire.NewSet(
	ProvideService,
)
// ProvideService constructs the trigger Service and launches its event readers.
func ProvideService(
	ctx context.Context,
	config Config,
	triggerStore store.TriggerStore,
	commitSvc commit.Service,
	pullReqStore store.PullReqStore,
	repoFinder refcache.RepoFinder,
	pipelineStore store.PipelineStore,
	triggerSvc triggerer.Triggerer,
	gitReaderFactory *events.ReaderFactory[*gitevents.Reader],
	pullReqEvFactory *events.ReaderFactory[*pullreqevents.Reader],
) (*Service, error) {
	return New(ctx, config, triggerStore, pullReqStore, repoFinder, pipelineStore, triggerSvc,
		commitSvc, gitReaderFactory, pullReqEvFactory)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/trigger/handler_branch.go | app/services/trigger/handler_branch.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trigger
import (
"context"
"fmt"
"strings"
"github.com/harness/gitness/app/bootstrap"
gitevents "github.com/harness/gitness/app/events/git"
"github.com/harness/gitness/app/pipeline/triggerer"
"github.com/harness/gitness/events"
"github.com/harness/gitness/types/enum"
)
// TODO: This can be moved to SCM library
// ExtractBranch returns the branch name of a git reference by stripping the
// "refs/heads/" prefix; references without that prefix are returned unchanged.
func ExtractBranch(ref string) string {
	const prefix = "refs/heads/"
	if strings.HasPrefix(ref, prefix) {
		return ref[len(prefix):]
	}
	return ref
}
// handleEventBranchCreated fires matching pipeline triggers for a branch creation event.
func (s *Service) handleEventBranchCreated(ctx context.Context,
	event *events.Event[*gitevents.BranchCreatedPayload]) error {
	// for a new branch, source and target are the branch itself and After is its tip SHA
	hook := &triggerer.Hook{
		Trigger:     enum.TriggerHook,
		Action:      enum.TriggerActionBranchCreated,
		Ref:         event.Payload.Ref,
		Source:      ExtractBranch(event.Payload.Ref),
		TriggeredBy: bootstrap.NewSystemServiceSession().Principal.ID,
		Target:      ExtractBranch(event.Payload.Ref),
		After:       event.Payload.SHA,
	}
	err := s.augmentCommitInfo(ctx, hook, event.Payload.RepoID, event.Payload.SHA)
	if err != nil {
		return fmt.Errorf("could not augment commit info: %w", err)
	}
	return s.trigger(ctx, event.Payload.RepoID, enum.TriggerActionBranchCreated, hook)
}
// handleEventBranchUpdated fires matching pipeline triggers for a branch update event.
func (s *Service) handleEventBranchUpdated(ctx context.Context,
	event *events.Event[*gitevents.BranchUpdatedPayload]) error {
	hook := &triggerer.Hook{
		Trigger:     enum.TriggerHook,
		Action:      enum.TriggerActionBranchUpdated,
		Ref:         event.Payload.Ref,
		Before:      event.Payload.OldSHA,
		After:       event.Payload.NewSHA,
		TriggeredBy: bootstrap.NewSystemServiceSession().Principal.ID,
		Source:      ExtractBranch(event.Payload.Ref),
		Target:      ExtractBranch(event.Payload.Ref),
	}
	// commit info is taken from the new tip of the branch
	err := s.augmentCommitInfo(ctx, hook, event.Payload.RepoID, event.Payload.NewSHA)
	if err != nil {
		return fmt.Errorf("could not augment commit info: %w", err)
	}
	return s.trigger(ctx, event.Payload.RepoID, enum.TriggerActionBranchUpdated, hook)
}
// augmentCommitInfo adds information about the commit to the hook by interacting with
// the commit service.
func (s *Service) augmentCommitInfo(
	ctx context.Context,
	hook *triggerer.Hook,
	repoID int64,
	sha string,
) error {
	repo, err := s.repoFinder.FindByID(ctx, repoID)
	if err != nil {
		return fmt.Errorf("could not find repo: %w", err)
	}
	commit, err := s.commitSvc.FindCommit(ctx, repo, sha)
	if err != nil {
		// wrap the underlying error so callers can inspect the root cause
		// (previously the error was dropped here).
		return fmt.Errorf("could not find commit info: %w", err)
	}
	hook.AuthorName = commit.Author.Identity.Name
	hook.Title = commit.Title
	hook.Timestamp = commit.Committer.When.UnixMilli()
	// NOTE(review): login is populated with the author name — confirm no separate login/UID is available here.
	hook.AuthorLogin = commit.Author.Identity.Name
	hook.AuthorEmail = commit.Author.Identity.Email
	hook.Message = commit.Message
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/trigger/handler_pullreq.go | app/services/trigger/handler_pullreq.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trigger
import (
"context"
"fmt"
"github.com/harness/gitness/app/bootstrap"
pullreqevents "github.com/harness/gitness/app/events/pullreq"
"github.com/harness/gitness/app/pipeline/triggerer"
"github.com/harness/gitness/events"
"github.com/harness/gitness/types/enum"
)
// handleEventPullReqCreated fires matching pipeline triggers when a pull request is created.
func (s *Service) handleEventPullReqCreated(ctx context.Context,
	event *events.Event[*pullreqevents.CreatedPayload]) error {
	hook := &triggerer.Hook{
		Trigger:     enum.TriggerHook,
		Action:      enum.TriggerActionPullReqCreated,
		TriggeredBy: bootstrap.NewSystemServiceSession().Principal.ID,
		After:       event.Payload.SourceSHA,
	}
	err := s.augmentPullReqInfo(ctx, hook, event.Payload.PullReqID)
	if err != nil {
		return fmt.Errorf("could not augment pull request info: %w", err)
	}
	// triggers are configured on the target repo of the pull request
	return s.trigger(ctx, event.Payload.TargetRepoID, enum.TriggerActionPullReqCreated, hook)
}
// handleEventPullReqReopened fires matching pipeline triggers when a pull request is reopened.
func (s *Service) handleEventPullReqReopened(ctx context.Context,
	event *events.Event[*pullreqevents.ReopenedPayload]) error {
	hook := &triggerer.Hook{
		Trigger:     enum.TriggerHook,
		Action:      enum.TriggerActionPullReqReopened,
		TriggeredBy: bootstrap.NewSystemServiceSession().Principal.ID,
		After:       event.Payload.SourceSHA,
	}
	err := s.augmentPullReqInfo(ctx, hook, event.Payload.PullReqID)
	if err != nil {
		return fmt.Errorf("could not augment pull request info: %w", err)
	}
	return s.trigger(ctx, event.Payload.TargetRepoID, enum.TriggerActionPullReqReopened, hook)
}
// handleEventPullReqBranchUpdated fires matching pipeline triggers when the
// source branch of a pull request receives new commits.
func (s *Service) handleEventPullReqBranchUpdated(ctx context.Context,
	event *events.Event[*pullreqevents.BranchUpdatedPayload]) error {
	hook := &triggerer.Hook{
		Trigger:     enum.TriggerHook,
		Action:      enum.TriggerActionPullReqBranchUpdated,
		TriggeredBy: bootstrap.NewSystemServiceSession().Principal.ID,
		After:       event.Payload.NewSHA,
	}
	err := s.augmentPullReqInfo(ctx, hook, event.Payload.PullReqID)
	if err != nil {
		return fmt.Errorf("could not augment pull request info: %w", err)
	}
	return s.trigger(ctx, event.Payload.TargetRepoID, enum.TriggerActionPullReqBranchUpdated, hook)
}
// handleEventPullReqClosed fires matching pipeline triggers when a pull request is closed.
func (s *Service) handleEventPullReqClosed(ctx context.Context,
	event *events.Event[*pullreqevents.ClosedPayload]) error {
	hook := &triggerer.Hook{
		Trigger:     enum.TriggerHook,
		Action:      enum.TriggerActionPullReqClosed,
		TriggeredBy: bootstrap.NewSystemServiceSession().Principal.ID,
		After:       event.Payload.SourceSHA,
	}
	err := s.augmentPullReqInfo(ctx, hook, event.Payload.PullReqID)
	if err != nil {
		return fmt.Errorf("could not augment pull request info: %w", err)
	}
	return s.trigger(ctx, event.Payload.TargetRepoID, enum.TriggerActionPullReqClosed, hook)
}
// handleEventPullReqMerged fires matching pipeline triggers when a pull request is merged.
func (s *Service) handleEventPullReqMerged(
	ctx context.Context,
	event *events.Event[*pullreqevents.MergedPayload],
) error {
	hook := &triggerer.Hook{
		Trigger:     enum.TriggerHook,
		Action:      enum.TriggerActionPullReqMerged,
		TriggeredBy: bootstrap.NewSystemServiceSession().Principal.ID,
		After:       event.Payload.SourceSHA,
	}
	err := s.augmentPullReqInfo(ctx, hook, event.Payload.PullReqID)
	if err != nil {
		return fmt.Errorf("could not augment pull request info: %w", err)
	}
	return s.trigger(ctx, event.Payload.TargetRepoID, enum.TriggerActionPullReqMerged, hook)
}
// augmentPullReqInfo adds in information into the hook pertaining to the pull request
// by querying the database.
func (s *Service) augmentPullReqInfo(
	ctx context.Context,
	hook *triggerer.Hook,
	pullReqID int64,
) error {
	pullreq, err := s.pullReqStore.Find(ctx, pullReqID)
	if err != nil {
		return fmt.Errorf("could not find pull request: %w", err)
	}
	hook.Title = pullreq.Title
	hook.Timestamp = pullreq.Created
	hook.AuthorLogin = pullreq.Author.UID
	hook.AuthorName = pullreq.Author.DisplayName
	hook.AuthorEmail = pullreq.Author.Email
	hook.Message = pullreq.Description
	// Before is the merge base so the hook covers exactly the PR's changes.
	hook.Before = pullreq.MergeBaseSHA
	hook.Target = pullreq.TargetBranch
	hook.Source = pullreq.SourceBranch
	// expand the branch to a git reference.
	hook.Ref = fmt.Sprintf("refs/pullreq/%d/head", pullreq.Number)
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/trigger/handler_tag.go | app/services/trigger/handler_tag.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trigger
import (
"context"
"fmt"
"github.com/harness/gitness/app/bootstrap"
gitevents "github.com/harness/gitness/app/events/git"
"github.com/harness/gitness/app/pipeline/triggerer"
"github.com/harness/gitness/events"
"github.com/harness/gitness/types/enum"
)
// handleEventTagCreated fires matching pipeline triggers for a tag creation event.
func (s *Service) handleEventTagCreated(ctx context.Context,
	event *events.Event[*gitevents.TagCreatedPayload]) error {
	// for a new tag, Before and After are both the tag's SHA and
	// source/target are the full tag reference
	hook := &triggerer.Hook{
		Trigger:     enum.TriggerHook,
		Action:      enum.TriggerActionTagCreated,
		TriggeredBy: bootstrap.NewSystemServiceSession().Principal.ID,
		Ref:         event.Payload.Ref,
		Before:      event.Payload.SHA,
		After:       event.Payload.SHA,
		Source:      event.Payload.Ref,
		Target:      event.Payload.Ref,
	}
	err := s.augmentCommitInfo(ctx, hook, event.Payload.RepoID, event.Payload.SHA)
	if err != nil {
		return fmt.Errorf("could not augment commit info: %w", err)
	}
	return s.trigger(ctx, event.Payload.RepoID, enum.TriggerActionTagCreated, hook)
}
// handleEventTagUpdated fires matching pipeline triggers for a tag update event.
func (s *Service) handleEventTagUpdated(ctx context.Context,
	event *events.Event[*gitevents.TagUpdatedPayload]) error {
	hook := &triggerer.Hook{
		Trigger:     enum.TriggerHook,
		Action:      enum.TriggerActionTagUpdated,
		TriggeredBy: bootstrap.NewSystemServiceSession().Principal.ID,
		Ref:         event.Payload.Ref,
		Before:      event.Payload.OldSHA,
		After:       event.Payload.NewSHA,
		Source:      event.Payload.Ref,
		Target:      event.Payload.Ref,
	}
	// commit info is taken from the new SHA the tag points to
	err := s.augmentCommitInfo(ctx, hook, event.Payload.RepoID, event.Payload.NewSHA)
	if err != nil {
		return fmt.Errorf("could not augment commit info: %w", err)
	}
	return s.trigger(ctx, event.Payload.RepoID, enum.TriggerActionTagUpdated, hook)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/trigger/service.go | app/services/trigger/service.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trigger
import (
"context"
"errors"
"fmt"
"slices"
"time"
gitevents "github.com/harness/gitness/app/events/git"
pullreqevents "github.com/harness/gitness/app/events/pullreq"
"github.com/harness/gitness/app/pipeline/commit"
"github.com/harness/gitness/app/pipeline/triggerer"
"github.com/harness/gitness/app/services/refcache"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/events"
"github.com/harness/gitness/stream"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/hashicorp/go-multierror"
)
const (
	// eventsReaderGroupName is the consumer group under which the trigger
	// service's event readers are registered.
	eventsReaderGroupName = "gitness:trigger"
)
// Config holds the settings for the trigger service's event consumers.
type Config struct {
	// EventReaderName is the name used to register the event readers (required).
	EventReaderName string
	// Concurrency is the number of events processed in parallel (must be positive).
	Concurrency int
	// MaxRetries is the maximum number of retries for failed git event handlers.
	MaxRetries int
}
// Prepare validates the config and reports the first invalid field it finds.
func (c *Config) Prepare() error {
	switch {
	case c == nil:
		return errors.New("config is required")
	case c.EventReaderName == "":
		return errors.New("config.EventReaderName is required")
	case c.Concurrency < 1:
		return errors.New("config.Concurrency has to be a positive number")
	case c.MaxRetries < 0:
		return errors.New("config.MaxRetries can't be negative")
	default:
		return nil
	}
}
// Service consumes git and pull request events and fires pipeline triggers for them.
type Service struct {
	triggerStore  store.TriggerStore
	pullReqStore  store.PullReqStore
	repoFinder    refcache.RepoFinder
	pipelineStore store.PipelineStore
	triggerSvc    triggerer.Triggerer
	commitSvc     commit.Service
}
// New validates the config, creates the trigger Service, and launches event
// readers for git events (branch/tag created/updated) and pull request events.
func New(
	ctx context.Context,
	config Config,
	triggerStore store.TriggerStore,
	pullReqStore store.PullReqStore,
	repoFinder refcache.RepoFinder,
	pipelineStore store.PipelineStore,
	triggerSvc triggerer.Triggerer,
	commitSvc commit.Service,
	gitReaderFactory *events.ReaderFactory[*gitevents.Reader],
	pullreqEvReaderFactory *events.ReaderFactory[*pullreqevents.Reader],
) (*Service, error) {
	if err := config.Prepare(); err != nil {
		return nil, fmt.Errorf("provided trigger service config is invalid: %w", err)
	}
	service := &Service{
		triggerStore:  triggerStore,
		pullReqStore:  pullReqStore,
		repoFinder:    repoFinder,
		commitSvc:     commitSvc,
		pipelineStore: pipelineStore,
		triggerSvc:    triggerSvc,
	}
	// launch the reader for git events (branch and tag changes)
	_, err := gitReaderFactory.Launch(ctx, eventsReaderGroupName, config.EventReaderName,
		func(r *gitevents.Reader) error {
			const idleTimeout = 1 * time.Minute
			r.Configure(
				stream.WithConcurrency(config.Concurrency),
				stream.WithHandlerOptions(
					stream.WithIdleTimeout(idleTimeout),
					stream.WithMaxRetries(config.MaxRetries),
				))
			_ = r.RegisterBranchCreated(service.handleEventBranchCreated)
			_ = r.RegisterBranchUpdated(service.handleEventBranchUpdated)
			_ = r.RegisterTagCreated(service.handleEventTagCreated)
			_ = r.RegisterTagUpdated(service.handleEventTagUpdated)
			return nil
		})
	if err != nil {
		return nil, fmt.Errorf("failed to launch git events reader: %w", err)
	}
	// launch the reader for pull request lifecycle events
	_, err = pullreqEvReaderFactory.Launch(ctx, eventsReaderGroupName, config.EventReaderName,
		func(r *pullreqevents.Reader) error {
			const idleTimeout = 1 * time.Minute
			r.Configure(
				stream.WithConcurrency(config.Concurrency),
				stream.WithHandlerOptions(
					stream.WithIdleTimeout(idleTimeout),
					// retries not needed for builds which failed to trigger, can be adjusted when needed
					stream.WithMaxRetries(0),
				))
			_ = r.RegisterCreated(service.handleEventPullReqCreated)
			_ = r.RegisterBranchUpdated(service.handleEventPullReqBranchUpdated)
			_ = r.RegisterReopened(service.handleEventPullReqReopened)
			_ = r.RegisterClosed(service.handleEventPullReqClosed)
			_ = r.RegisterMerged(service.handleEventPullReqMerged)
			return nil
		})
	if err != nil {
		return nil, fmt.Errorf("failed to launch pr events reader: %w", err)
	}
	return service, nil
}
// trigger a build given an action on a repo and a hook.
// It tries to find all enabled triggers, see if the action is the same
// as the trigger action - and if so, find the pipeline for the trigger
// and fire an execution.
func (s *Service) trigger(ctx context.Context, repoID int64,
	action enum.TriggerAction, hook *triggerer.Hook) error {
	// Fetch every enabled trigger configured on the repo.
	allTriggers, err := s.triggerStore.ListAllEnabled(ctx, repoID)
	if err != nil {
		return fmt.Errorf("failed to list all enabled triggers: %w", err)
	}

	// Keep only the triggers that are registered for this action.
	matching := make([]*types.Trigger, 0, len(allTriggers))
	for _, trg := range allTriggers {
		if slices.Contains(trg.Actions, action) {
			matching = append(matching, trg)
		}
	}

	var errs error
	for _, trg := range matching {
		// TODO: We can make a minor optimization here to not fetch a pipeline each time
		// since there could be multiple triggers for a pipeline.
		pipeline, findErr := s.pipelineStore.Find(ctx, trg.PipelineID)
		if findErr != nil {
			errs = multierror.Append(errs, findErr)
			continue
		}

		// Don't fire triggers for disabled pipelines
		if pipeline.Disabled {
			continue
		}

		if _, triggerErr := s.triggerSvc.Trigger(ctx, pipeline, hook); triggerErr != nil {
			errs = multierror.Append(errs, triggerErr)
		}
	}

	return errs
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/remoteauth/wire.go | app/services/remoteauth/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package remoteauth
import (
"github.com/harness/gitness/app/store"
"github.com/google/wire"
)
// WireSet provides the remoteauth package's dependency injection bindings.
var WireSet = wire.NewSet(
	ProvideRemoteAuth,
)
// ProvideRemoteAuth constructs the remote auth Service implementation.
func ProvideRemoteAuth(
	tokenStore store.TokenStore,
	principalStore store.PrincipalStore,
) Service {
	return NewService(tokenStore, principalStore)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/remoteauth/user_jwt_provider.go | app/services/remoteauth/user_jwt_provider.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package remoteauth
import (
"context"
"fmt"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/app/token"
"github.com/harness/gitness/types/enum"
)
// Service issues tokens used to authenticate remote (git) operations.
type Service interface {
	// GenerateToken generates a jwt for the given principal to access the resource (for git-lfs-authenticate response)
	GenerateToken(
		ctx context.Context,
		principalID int64,
		principalType enum.PrincipalType,
		resource string,
	) (string, error)
}
// NewService returns a LocalService backed by the given token and principal stores.
func NewService(tokenStore store.TokenStore, principalStore store.PrincipalStore) LocalService {
	return LocalService{
		tokenStore:     tokenStore,
		principalStore: principalStore,
	}
}
// LocalService implements Service by issuing JWTs backed by the local token store.
type LocalService struct {
	tokenStore     store.TokenStore
	principalStore store.PrincipalStore
}
// GenerateToken creates a remote auth JWT for the given principal.
// The principal type and resource arguments are ignored by the local implementation.
func (s LocalService) GenerateToken(
	ctx context.Context,
	principalID int64,
	_ enum.PrincipalType,
	_ string,
) (string, error) {
	identifier := token.GenerateIdentifier("remoteAuth")
	principal, err := s.principalStore.Find(ctx, principalID)
	if err != nil {
		return "", fmt.Errorf("failed to find principal %d: %w", principalID, err)
	}
	_, jwt, err := token.CreateRemoteAuthToken(ctx, s.tokenStore, principal, identifier)
	if err != nil {
		return "", fmt.Errorf("failed to create a remote auth token: %w", err)
	}
	return jwt, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/space/wire.go | app/services/space/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package space
import (
"github.com/harness/gitness/app/api/controller/repo"
"github.com/harness/gitness/app/services/gitspace"
"github.com/harness/gitness/app/services/infraprovider"
"github.com/harness/gitness/app/services/refcache"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/encrypt"
"github.com/harness/gitness/job"
"github.com/harness/gitness/store/database/dbtx"
"github.com/google/wire"
)
// WireSet provides the space package's dependency injection bindings.
var WireSet = wire.NewSet(
	ProvideService,
)
// ProvideService constructs the space Service and registers its space move
// background job handler with the executor.
func ProvideService(
	tx dbtx.Transactor,
	scheduler *job.Scheduler,
	executor *job.Executor,
	encrypter encrypt.Encrypter,
	repoStore store.RepoStore,
	spaceStore store.SpaceStore,
	spacePathStore store.SpacePathStore,
	labelStore store.LabelStore,
	rulesStore store.RuleStore,
	webhookStore store.WebhookStore,
	spaceFinder refcache.SpaceFinder,
	gitspaceSvs *gitspace.Service,
	infraProviderSvc *infraprovider.Service,
	repoCtrl *repo.Controller,
) (*Service, error) {
	service := NewService(
		tx,
		scheduler,
		encrypter,
		repoStore,
		spaceStore,
		spacePathStore,
		labelStore,
		rulesStore,
		webhookStore,
		spaceFinder,
		gitspaceSvs,
		infraProviderSvc,
		repoCtrl,
	)
	// register the job handler so scheduled space move jobs get executed
	err := executor.Register(jobType, service)
	if err != nil {
		return nil, err
	}
	return service, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/space/move.go | app/services/space/move.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package space
import (
"context"
"encoding/json"
"errors"
"fmt"
"time"
"github.com/harness/gitness/app/auth"
"github.com/harness/gitness/app/bootstrap"
"github.com/harness/gitness/app/paths"
"github.com/harness/gitness/app/services/protection"
"github.com/harness/gitness/job"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/rs/zerolog/log"
)
// Settings for the space move background job.
const (
	moveJobMaxRetries  = 3
	moveJobMaxDuration = 10 * time.Minute
	jobType            = "space_move"
)

// Compile-time assertion that Service implements job.Handler.
var _ job.Handler = (*Service)(nil)
// Input is the payload of the space move job, serialized into the job data.
type Input struct {
	SourceSpacePath      string `json:"source_space_path"`
	DestinationSpacePath string `json:"destination_space_path"`
}
// Register registers the space move job handler with the executor.
func (s *Service) Register(executor *job.Executor) error {
	return executor.Register(jobType, s)
}
// Run schedules a background job that moves the space at srcIdentifier
// to dstIdentifier.
func (s *Service) Run(
	ctx context.Context,
	srcIdentifier string,
	dstIdentifier string,
) error {
	jobDef, err := s.getJobDef(s.JobUIDFromSpacePath(srcIdentifier), Input{
		SourceSpacePath:      srcIdentifier,
		DestinationSpacePath: dstIdentifier,
	})
	if err != nil {
		return err
	}
	return s.scheduler.RunJob(ctx, jobDef)
}
// Handle is space move background job handler.
func (s *Service) Handle(
ctx context.Context,
data string,
_ job.ProgressReporter,
) (string, error) {
input, err := s.getJobInput(data)
if err != nil {
return "", err
}
if input.SourceSpacePath == "" {
return "", fmt.Errorf("source space path is required")
}
if input.DestinationSpacePath == "" {
return "", fmt.Errorf("destination space path is required")
}
log.Ctx(ctx).Debug().Msgf("space move job started for source space '%s' to destination space '%s'",
input.SourceSpacePath, input.DestinationSpacePath)
srcSpace, err := s.spaceStore.FindByRef(ctx, input.SourceSpacePath)
if errors.Is(err, gitness_store.ErrResourceNotFound) {
log.Ctx(ctx).Info().Str("space.path", input.SourceSpacePath).
Msg("source space not found, nothing to move")
return "", nil
}
if err != nil {
return "", fmt.Errorf("failed to find source space '%s': %w", input.SourceSpacePath, err)
}
dstSpace, err := s.spaceStore.FindByRef(ctx, input.DestinationSpacePath)
// if dstSpace doesn't exist, update the srcSpace parent to match the dstSpace path
if errors.Is(err, gitness_store.ErrResourceNotFound) {
parentSpace, _, err := paths.DisectLeaf(input.DestinationSpacePath)
if err != nil {
return "", fmt.Errorf("failed to disect destination space path: %w", err)
}
log.Ctx(ctx).Info().Msgf("moving space %s by updating the parent space to %s", srcSpace.Identifier, parentSpace)
err = s.MoveNoAuth(
ctx,
bootstrap.NewSystemServiceSession(),
srcSpace,
nil,
parentSpace,
)
if err != nil {
return "", fmt.Errorf("failed to move space: %w", err)
}
log.Ctx(ctx).Info().
Msgf("space %s moved to %s", srcSpace.Identifier, parentSpace)
s.spaceFinder.MarkChanged(ctx, srcSpace.Core())
return "", nil
}
if err != nil {
return "", fmt.Errorf("failed to find destination space for move: %w", err)
}
// when dstSpace exists, update the srcSpace resources parent to the dstSpace
output, err := s.moveSpaceResourcesInTx(ctx, srcSpace, dstSpace)
if err != nil {
return "", fmt.Errorf("failed to move space resources: %w", err)
}
log.Ctx(ctx).Info().
Int64("repo_count", output.RepoCount).
Int64("label_count", output.LabelCount).
Int64("rule_count", output.RuleCount).
Int64("webhook_count", output.WebhookCount).
Msgf("space resources moved from %s to %s",
srcSpace.Identifier, dstSpace.Identifier)
return "", nil
}
func (s *Service) MoveNoAuth(
ctx context.Context,
session *auth.Session,
space *types.Space,
inIdentifier *string,
inParentRef string,
) error {
return s.tx.WithTx(ctx, func(ctx context.Context) error {
_, err := s.spaceStore.FindForUpdate(ctx, space.ID)
if err != nil {
return fmt.Errorf("failed to lock the space for update: %w", err)
}
parentSpace, err := s.spaceStore.FindByRef(ctx, inParentRef)
if err != nil {
return fmt.Errorf("failed to find space by ID: %w", err)
}
// delete old primary segment
err = s.spacePathStore.DeletePrimarySegment(ctx, space.ID)
if err != nil {
return fmt.Errorf("failed to delete primary path segment: %w", err)
}
// update space with move inputs
if inIdentifier != nil {
space.Identifier = *inIdentifier
}
space.ParentID = parentSpace.ID
// add new primary segment using updated space data
now := time.Now().UnixMilli()
newPrimarySegment := &types.SpacePathSegment{
ParentID: parentSpace.ID,
Identifier: space.Identifier,
SpaceID: space.ID,
IsPrimary: true,
CreatedBy: session.Principal.ID,
Created: now,
Updated: now,
}
err = s.spacePathStore.InsertSegment(ctx, newPrimarySegment)
if err != nil {
return fmt.Errorf("failed to create new primary path segment: %w", err)
}
if err := s.cleanUpStaleSpaceResources(ctx, space); err != nil {
return fmt.Errorf("failed to clean up stale space resources: %w", err)
}
// update space itself
err = s.spaceStore.Update(ctx, space)
if err != nil {
return fmt.Errorf("failed to update the space in the db: %w", err)
}
return nil
})
}
type MoveResourcesOutput struct {
RepoCount int64 `json:"repo_count"`
LabelCount int64 `json:"label_count"`
RuleCount int64 `json:"rule_count"`
WebhookCount int64 `json:"webhook_count"`
}
// MoveResources moves space resources to a new parent space individually and soft delete the source space.
func (s *Service) moveSpaceResourcesInTx(
ctx context.Context,
sourceSpace *types.Space,
targetSpace *types.Space,
) (MoveResourcesOutput, error) {
log.Ctx(ctx).Info().
Msgf("moving space resources individually as target space %s exists", targetSpace.Identifier)
var output MoveResourcesOutput
if sourceSpace.ID == targetSpace.ID {
return output, fmt.Errorf("source and target spaces cannot be the same")
}
if err := s.tx.WithTx(ctx, func(ctx context.Context) error {
var err error
_, err = s.spaceStore.FindForUpdate(ctx, sourceSpace.ID)
if err != nil {
return fmt.Errorf("failed to lock the space for update: %w", err)
}
_, err = s.spaceStore.FindForUpdate(ctx, targetSpace.ID)
if err != nil {
return fmt.Errorf("failed to lock the space for update: %w", err)
}
output.RepoCount, err = s.repoStore.UpdateParent(ctx, sourceSpace.ID, targetSpace.ID)
if err != nil {
return fmt.Errorf("failed to move repos: %w", err)
}
output.LabelCount, err = s.labelStore.UpdateParentSpace(ctx, sourceSpace.ID, targetSpace.ID)
if err != nil {
return fmt.Errorf("failed to update labels: %w", err)
}
output.RuleCount, err = s.rulesStore.UpdateParentSpace(ctx, sourceSpace.ID, targetSpace.ID)
if err != nil {
return fmt.Errorf("failed to update rules: %w", err)
}
output.WebhookCount, err = s.webhookStore.UpdateParentSpace(ctx, sourceSpace.ID, targetSpace.ID)
if err != nil {
return fmt.Errorf("failed to update webhooks: %w", err)
}
if err := s.cleanUpStaleSpaceResources(ctx, sourceSpace); err != nil {
return fmt.Errorf("failed to clean up parent space resources: %w", err)
}
if err := s.SoftDeleteInner(
ctx,
bootstrap.NewSystemServiceSession(),
sourceSpace,
time.Now().Unix(),
); err != nil {
return fmt.Errorf("failed to soft delete source space: %w", err)
}
return nil
}); err != nil {
return output, err
}
s.spaceFinder.MarkChanged(ctx, sourceSpace.Core())
return output, nil
}
// cleanUpStaleSpaceResources removes the resources of the parent space that will be moved.
func (s *Service) cleanUpStaleSpaceResources(ctx context.Context, space *types.Space) error {
ancestors, err := s.spaceStore.GetAncestors(ctx, space.ID)
if err != nil {
return fmt.Errorf("failed to get ancestors: %w", err)
}
// exclude the root space from cleanup
rootSpace, err := s.spaceStore.GetRootSpace(ctx, space.ID)
if err != nil {
return fmt.Errorf("failed to get root space: %w", err)
}
descendantSpaceIDs, err := s.spaceStore.GetDescendantsIDs(ctx, space.ID)
if err != nil {
return fmt.Errorf("failed to get descendant space IDs: %w", err)
}
descendantSpaceIDs = append(descendantSpaceIDs, space.ID)
descendantSpaceIDSet := make(map[int64]struct{}, len(descendantSpaceIDs))
for _, id := range descendantSpaceIDs {
descendantSpaceIDSet[id] = struct{}{}
}
for _, ancestor := range ancestors {
if ancestor.ID == rootSpace.ID || ancestor.ID == space.ID {
continue
}
rules, err := s.rulesStore.List(ctx, []types.RuleParentInfo{
{
Type: enum.RuleParentSpace,
ID: ancestor.ID,
},
}, &types.RuleFilter{})
if err != nil {
return fmt.Errorf("failed to list rules for space %d: %w", ancestor.ID, err)
}
for _, rule := range rules {
modified, err := s.cleanUpRuleRepoTargets(ctx, &rule, descendantSpaceIDSet)
if err != nil {
return fmt.Errorf("failed to clean up rule %d: %w", rule.ID, err)
}
if modified {
log.Ctx(ctx).Info().Msgf("cleaning up rule %d target repos due to moving space %d", rule.ID, space.ID)
if err := s.rulesStore.Update(ctx, &rule); err != nil {
return fmt.Errorf("failed to update rule %d: %w", rule.ID, err)
}
log.Ctx(ctx).Info().Msgf("updated rule %d target repos due to moving space %d", rule.ID, space.ID)
}
}
}
return nil
}
// cleanUpRuleRepoTargets removes repository IDs from a rule's RepoTarget if they belong to descendant spaces.
func (s *Service) cleanUpRuleRepoTargets(
ctx context.Context,
rule *types.Rule,
descendantSpaceIDSet map[int64]struct{},
) (bool, error) {
if len(rule.RepoTarget) == 0 {
return false, nil
}
var repoTarget protection.RepoTarget
if err := json.Unmarshal(rule.RepoTarget, &repoTarget); err != nil {
return false, fmt.Errorf("failed to unmarshal repo target: %w", err)
}
modified := false
if len(repoTarget.Include.IDs) > 0 {
filteredIncludeIDs, err := s.filterRepoIDs(ctx, repoTarget.Include.IDs, descendantSpaceIDSet)
if err != nil {
return false, fmt.Errorf("failed to filter include repo IDs: %w", err)
}
if len(filteredIncludeIDs) != len(repoTarget.Include.IDs) {
repoTarget.Include.IDs = filteredIncludeIDs
modified = true
}
}
if len(repoTarget.Exclude.IDs) > 0 {
filteredExcludeIDs, err := s.filterRepoIDs(ctx, repoTarget.Exclude.IDs, descendantSpaceIDSet)
if err != nil {
return false, fmt.Errorf("failed to filter exclude repo IDs: %w", err)
}
if len(filteredExcludeIDs) != len(repoTarget.Exclude.IDs) {
repoTarget.Exclude.IDs = filteredExcludeIDs
modified = true
}
}
if modified {
newRepoTarget, err := json.Marshal(repoTarget)
if err != nil {
return false, fmt.Errorf("failed to marshal updated repo target: %w", err)
}
rule.RepoTarget = newRepoTarget
}
return modified, nil
}
// filterRepoIDs filters out repository IDs that belong to descendant spaces.
func (s *Service) filterRepoIDs(
ctx context.Context,
repoIDs []int64,
descendantSpaceIDSet map[int64]struct{},
) ([]int64, error) {
filtered := make([]int64, 0, len(repoIDs))
for _, repoID := range repoIDs {
repo, err := s.repoStore.Find(ctx, repoID)
if err != nil {
if errors.Is(err, gitness_store.ErrResourceNotFound) {
continue
}
return nil, fmt.Errorf("failed to find repository %d: %w", repoID, err)
}
if _, isDescendant := descendantSpaceIDSet[repo.ParentID]; !isDescendant {
filtered = append(filtered, repoID)
}
}
return filtered, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/space/service.go | app/services/space/service.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package space
import (
"github.com/harness/gitness/app/api/controller/repo"
"github.com/harness/gitness/app/services/gitspace"
"github.com/harness/gitness/app/services/infraprovider"
"github.com/harness/gitness/app/services/refcache"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/encrypt"
"github.com/harness/gitness/job"
"github.com/harness/gitness/store/database/dbtx"
)
type Service struct {
tx dbtx.Transactor
scheduler *job.Scheduler
encrypter encrypt.Encrypter
repoStore store.RepoStore
spaceStore store.SpaceStore
spacePathStore store.SpacePathStore
labelStore store.LabelStore
rulesStore store.RuleStore
webhookStore store.WebhookStore
spaceFinder refcache.SpaceFinder
gitspaceSvs *gitspace.Service
infraProviderSvc *infraprovider.Service
repoCtrl *repo.Controller
}
func NewService(
tx dbtx.Transactor,
scheduler *job.Scheduler,
encrypter encrypt.Encrypter,
repoStore store.RepoStore,
spaceStore store.SpaceStore,
spacePathStore store.SpacePathStore,
labelStore store.LabelStore,
rulesStore store.RuleStore,
webhookStore store.WebhookStore,
spaceFinder refcache.SpaceFinder,
gitspaceSvs *gitspace.Service,
infraProviderSvc *infraprovider.Service,
repoCtrl *repo.Controller,
) *Service {
return &Service{
tx: tx,
scheduler: scheduler,
encrypter: encrypter,
repoStore: repoStore,
spaceStore: spaceStore,
spacePathStore: spacePathStore,
labelStore: labelStore,
rulesStore: rulesStore,
webhookStore: webhookStore,
spaceFinder: spaceFinder,
gitspaceSvs: gitspaceSvs,
infraProviderSvc: infraProviderSvc,
repoCtrl: repoCtrl,
}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/space/soft_delete.go | app/services/space/soft_delete.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package space
import (
"context"
"fmt"
"math"
"github.com/harness/gitness/app/auth"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
func (s *Service) SoftDeleteInner(
ctx context.Context,
session *auth.Session,
space *types.Space,
deletedAt int64,
) error {
filter := &types.SpaceFilter{
Page: 1,
Size: math.MaxInt,
Query: "",
Order: enum.OrderAsc,
Sort: enum.SpaceAttrCreated,
DeletedBeforeOrAt: nil, // only filter active subspaces
Recursive: true,
}
subSpaces, err := s.spaceStore.List(ctx, space.ID, filter)
if err != nil {
return fmt.Errorf("failed to list space %d sub spaces recursively: %w", space.ID, err)
}
allSpaces := []*types.Space{space}
allSpaces = append(allSpaces, subSpaces...)
if s.gitspaceSvs != nil {
err = s.gitspaceSvs.DeleteAllForSpaces(ctx, allSpaces)
if err != nil {
return fmt.Errorf("failed to soft delete gitspaces of space %d: %w", space.ID, err)
}
}
if s.infraProviderSvc != nil {
err = s.infraProviderSvc.DeleteAllForSpaces(ctx, allSpaces)
if err != nil {
return fmt.Errorf("failed to soft delete infra providers of space %d: %w", space.ID, err)
}
}
for _, space := range subSpaces {
_, err := s.spaceStore.FindForUpdate(ctx, space.ID)
if err != nil {
return fmt.Errorf("failed to lock the space for update: %w", err)
}
if err := s.spaceStore.SoftDelete(ctx, space, deletedAt); err != nil {
return fmt.Errorf("failed to soft delete subspace: %w", err)
}
}
if s.repoStore != nil && s.repoCtrl != nil {
err = s.softDeleteRepositoriesNoAuth(ctx, session, space.ID, deletedAt)
if err != nil {
return fmt.Errorf("failed to soft delete repositories of space %d: %w", space.ID, err)
}
}
if err = s.spaceStore.SoftDelete(ctx, space, deletedAt); err != nil {
return fmt.Errorf("spaceStore failed to soft delete space: %w", err)
}
err = s.spacePathStore.DeletePathsAndDescendandPaths(ctx, space.ID)
if err != nil {
return fmt.Errorf("spacePathStore failed to delete descendant paths of %d: %w", space.ID, err)
}
return nil
}
// softDeleteRepositoriesNoAuth soft deletes all repositories in a space - no authorization is verified.
// WARNING For internal calls only.
func (s *Service) softDeleteRepositoriesNoAuth(
ctx context.Context,
session *auth.Session,
spaceID int64,
deletedAt int64,
) error {
filter := &types.RepoFilter{
Page: 1,
Size: int(math.MaxInt),
Query: "",
Order: enum.OrderAsc,
Sort: enum.RepoAttrNone,
DeletedBeforeOrAt: nil, // only filter active repos
Recursive: true,
}
repos, err := s.repoStore.List(ctx, spaceID, filter)
if err != nil {
return fmt.Errorf("failed to list space repositories: %w", err)
}
for _, repo := range repos {
err = s.repoCtrl.SoftDeleteNoAuth(ctx, session, repo, deletedAt)
if err != nil {
return fmt.Errorf("failed to soft delete repository: %w", err)
}
}
return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/space/id.go | app/services/space/id.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package space
import (
"encoding/base64"
"encoding/json"
"fmt"
"strings"
"github.com/harness/gitness/job"
)
const jobUIDPrefix = "space-move-"
func (s *Service) getJobDef(jobUID string, input Input) (job.Definition, error) {
data, err := json.Marshal(input)
if err != nil {
return job.Definition{}, fmt.Errorf("failed to marshal job input json: %w", err)
}
strData := strings.TrimSpace(string(data))
encryptedData, err := s.encrypter.Encrypt(strData)
if err != nil {
return job.Definition{}, fmt.Errorf("failed to encrypt job input: %w", err)
}
return job.Definition{
UID: jobUID,
Type: jobType,
MaxRetries: moveJobMaxRetries,
Timeout: moveJobMaxDuration,
Data: base64.StdEncoding.EncodeToString(encryptedData),
}, nil
}
func (s *Service) getJobInput(data string) (Input, error) {
encrypted, err := base64.StdEncoding.DecodeString(data)
if err != nil {
return Input{}, fmt.Errorf("failed to base64 decode job input: %w", err)
}
decrypted, err := s.encrypter.Decrypt(encrypted)
if err != nil {
return Input{}, fmt.Errorf("failed to decrypt job input: %w", err)
}
var input Input
err = json.NewDecoder(strings.NewReader(decrypted)).Decode(&input)
if err != nil {
return Input{}, fmt.Errorf("failed to unmarshal job input json: %w", err)
}
return input, nil
}
func (s *Service) JobUIDFromSpacePath(srcSpacePath string) string {
return jobUIDPrefix + srcSpacePath
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/usage/mocks.go | app/services/usage/mocks.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package usage
import (
"context"
"github.com/harness/gitness/types"
)
const (
sampleText = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789 "
sampleLength = len(sampleText)
spaceRef = "space1%2fspace2%2fspace3"
)
type mockInterface struct {
SendFunc func(
ctx context.Context,
payload Metric,
) error
}
func (i *mockInterface) Send(
ctx context.Context,
payload Metric,
) error {
return i.SendFunc(ctx, payload)
}
type SpaceFinderMock struct {
FindByRefFn func(
ctx context.Context,
spaceRef string,
) (*types.SpaceCore, error)
}
func (s *SpaceFinderMock) FindByRef(
ctx context.Context,
spaceRef string,
) (*types.SpaceCore, error) {
return s.FindByRefFn(ctx, spaceRef)
}
type RepoFinderMock struct {
FindByIDFn func(
ctx context.Context,
id int64,
) (*types.RepositoryCore, error)
}
func (r *RepoFinderMock) FindByID(
ctx context.Context,
id int64,
) (*types.RepositoryCore, error) {
return r.FindByIDFn(ctx, id)
}
type MetricsMock struct {
UpsertOptimisticFn func(ctx context.Context, in *types.UsageMetric) error
GetMetricsFn func(
ctx context.Context,
rootSpaceID int64,
startDate int64,
endDate int64,
) (*types.UsageMetric, error)
ListFn func(
ctx context.Context,
start int64,
end int64,
) ([]types.UsageMetric, error)
}
func (m *MetricsMock) GetMetrics(
ctx context.Context,
rootSpaceID int64,
startDate int64,
endDate int64,
) (*types.UsageMetric, error) {
return m.GetMetricsFn(ctx, rootSpaceID, startDate, endDate)
}
func (m *MetricsMock) UpsertOptimistic(
ctx context.Context,
in *types.UsageMetric,
) error {
return m.UpsertOptimisticFn(ctx, in)
}
func (m *MetricsMock) List(
ctx context.Context,
start int64,
end int64,
) ([]types.UsageMetric, error) {
return m.ListFn(ctx, start, end)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/usage/wire.go | app/services/usage/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package usage
import (
"context"
"fmt"
repoevents "github.com/harness/gitness/app/events/repo"
"github.com/harness/gitness/app/services/refcache"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/events"
"github.com/harness/gitness/types"
"github.com/google/wire"
)
var WireSet = wire.NewSet(
ProvideMediator,
)
func ProvideMediator(
ctx context.Context,
config *types.Config,
spaceFinder refcache.SpaceFinder,
repoFinder refcache.RepoFinder,
metricsStore store.UsageMetricStore,
repoEvReaderFactory *events.ReaderFactory[*repoevents.Reader],
) (Sender, error) {
if !config.UsageMetrics.Enabled {
return &Noop{}, nil
}
m := NewMediator(
ctx,
spaceFinder,
metricsStore,
NewConfig(config),
)
if err := RegisterEventListeners(ctx, config.InstanceID, m, repoEvReaderFactory, repoFinder); err != nil {
return nil, fmt.Errorf("failed to register event listeners: %w", err)
}
return m, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/usage/middleware.go | app/services/usage/middleware.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package usage
import (
"net/http"
"github.com/harness/gitness/app/api/request"
"github.com/harness/gitness/app/paths"
"github.com/rs/zerolog/log"
)
func Middleware(intf Sender) func(http.Handler) http.Handler {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ref, err := request.GetRepoRefFromPath(r)
if err != nil {
log.Ctx(r.Context()).Warn().Err(err).Msg("unable to get space ref")
next.ServeHTTP(w, r)
return
}
rootSpace, _, err := paths.DisectRoot(ref)
if err != nil {
log.Ctx(r.Context()).Warn().Err(err).Msg("unable to get root space")
next.ServeHTTP(w, r)
return
}
writer := newWriter(w)
reader := newReader(r.Body)
r.Body = reader
next.ServeHTTP(writer, r)
// send usage metrics
m := Metric{
SpaceRef: rootSpace,
Bandwidth: Bandwidth{
Out: writer.n,
In: reader.n,
},
}
err = intf.Send(r.Context(), m)
if err != nil {
log.Ctx(r.Context()).Warn().Err(err).Msg("unable to send usage metric")
return
}
})
}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/usage/middleware_test.go | app/services/usage/middleware_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package usage
import (
"bytes"
"context"
"fmt"
"io"
"net/http"
"net/http/httptest"
"testing"
"github.com/harness/gitness/app/api/request"
"github.com/go-chi/chi/v5"
"github.com/stretchr/testify/require"
)
func TestMiddleware(t *testing.T) {
var m Metric
mock := &mockInterface{
SendFunc: func(_ context.Context, payload Metric) error {
m.Out += payload.Out
m.In += payload.In
return nil
},
}
r := chi.NewRouter()
r.Route(fmt.Sprintf("/testing/{%s}", request.PathParamRepoRef), func(r chi.Router) {
r.Use(Middleware(mock))
r.Post("/", func(w http.ResponseWriter, r *http.Request) {
// read from body
_, _ = io.Copy(io.Discard, r.Body)
// write to response
_, _ = w.Write([]byte(sampleText))
})
})
ts := httptest.NewServer(r)
defer ts.Close()
body := []byte(sampleText)
_ = testRequest(t, ts, http.MethodPost, "/testing/"+spaceRef, bytes.NewReader(body))
require.Equal(t, int64(sampleLength), m.Out)
require.Equal(t, int64(sampleLength), m.In)
}
func testRequest(t *testing.T, ts *httptest.Server, method, path string, body io.Reader) string {
t.Helper()
req, err := http.NewRequest(method, ts.URL+path, body) //nolint: noctx
if err != nil {
t.Fatal(err)
return ""
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
return ""
}
defer resp.Body.Close()
respBody, err := io.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
return ""
}
return string(respBody)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/usage/io_test.go | app/services/usage/io_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package usage
import (
"bytes"
"io"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/require"
)
func Test_writeCounter_Write(t *testing.T) {
size := 1 << 16
// Create a buffer to hold the payload.
buffer := httptest.NewRecorder()
writer := newWriter(buffer)
expected := &bytes.Buffer{}
for i := 0; i < size; i += sampleLength {
if size-i < sampleLength {
// Write only the remaining characters to reach the exact size.
_, _ = writer.Write([]byte(sampleText[:size-i]))
expected.WriteString(sampleText[:size-i])
break
}
_, _ = writer.Write([]byte(sampleText))
expected.WriteString(sampleText)
}
require.Equal(t, int64(size), writer.n, "expected %d, got %d", size, writer.n)
require.Equal(t, expected.Bytes(), buffer.Body.Bytes())
}
func Test_readCounter_Read(t *testing.T) {
size := 1 << 16
buffer := &bytes.Buffer{}
reader := newReader(io.NopCloser(buffer))
for i := 0; i < size; i += sampleLength {
if size-i < sampleLength {
// Write only the remaining characters to reach the exact size.
buffer.WriteString(sampleText[:size-i])
break
}
buffer.WriteString(sampleText)
}
expected := buffer.Bytes()
got := &bytes.Buffer{}
_, err := io.Copy(got, reader)
require.NoError(t, err)
require.Equal(t, int64(size), reader.n, "expected %d, got %d", size, reader.n)
require.Equal(t, expected, got.Bytes())
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/usage/usage_test.go | app/services/usage/usage_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package usage
import (
"context"
"fmt"
"sync"
"sync/atomic"
"testing"
"time"
repoevents "github.com/harness/gitness/app/events/repo"
"github.com/harness/gitness/events"
"github.com/harness/gitness/types"
"github.com/stretchr/testify/require"
)
func TestMediator_basic(t *testing.T) {
space := &types.SpaceCore{
ID: 1,
Identifier: "space",
}
spaceFinderMock := &SpaceFinderMock{
FindByRefFn: func(context.Context, string) (*types.SpaceCore, error) {
return space, nil
},
}
repo := &types.RepositoryCore{
ID: 2,
Path: "space/repo",
}
repoFinderMock := &RepoFinderMock{
FindByIDFn: func(_ context.Context, id int64) (*types.RepositoryCore, error) {
if id != repo.ID {
return nil, fmt.Errorf("expected id to be %d, got %d", repo.ID, id)
}
return repo, nil
},
}
eventSystem, err := events.ProvideSystem(events.Config{
Mode: events.ModeInMemory,
MaxStreamLength: 100,
}, nil)
if err != nil {
t.Fatalf("failed to create event system: %v", err)
}
repoEvReaderFactory, err := repoevents.NewReaderFactory(eventSystem)
if err != nil {
t.Fatalf("failed to create repo event reader factory: %v", err)
}
repoEvReporter, err := repoevents.NewReporter(eventSystem)
if err != nil {
t.Fatalf("failed to create repo event reporter: %v", err)
}
out := atomic.Int64{}
in := atomic.Int64{}
pushes := atomic.Int64{}
usageMock := &MetricsMock{
UpsertOptimisticFn: func(_ context.Context, metric *types.UsageMetric) error {
if metric.RootSpaceID != space.ID {
return fmt.Errorf("expected root space id to be %d, got %d", space.ID, metric.RootSpaceID)
}
out.Add(metric.BandwidthOut)
in.Add(metric.BandwidthIn)
pushes.Add(metric.Pushes)
return nil
},
GetMetricsFn: func(
context.Context,
int64, // spaceID
int64, // startDate
int64, // endDate
) (*types.UsageMetric, error) {
return &types.UsageMetric{
BandwidthOut: out.Load(),
BandwidthIn: in.Load(),
}, nil
},
ListFn: func(context.Context, int64, int64) ([]types.UsageMetric, error) {
return []types.UsageMetric{}, nil
},
}
numBandwidthRoutines := 10
numEventsCreated := 4
numEventsPushed := 5
defaultSize := 512
mediator := NewMediator(
context.Background(),
spaceFinderMock,
usageMock,
Config{
MaxWorkers: 5,
},
)
err = RegisterEventListeners(context.Background(), "test", mediator, repoEvReaderFactory, repoFinderMock)
if err != nil {
t.Fatalf("failed to register event listeners: %v", err)
}
wg := sync.WaitGroup{}
for range numBandwidthRoutines {
wg.Add(1)
go func() {
defer wg.Done()
_ = mediator.Send(context.Background(), Metric{
SpaceRef: space.Identifier,
Bandwidth: Bandwidth{
Out: int64(defaultSize),
In: int64(defaultSize),
},
})
}()
}
wg.Wait()
for range numEventsCreated {
repoEvReporter.Created(context.Background(), &repoevents.CreatedPayload{
Base: repoevents.Base{
RepoID: repo.ID,
},
})
}
for range numEventsPushed {
repoEvReporter.Pushed(context.Background(), &repoevents.PushedPayload{
Base: repoevents.Base{
RepoID: repo.ID,
},
})
}
// todo: add ability to wait for event system to complete
time.Sleep(200 * time.Millisecond)
mediator.Wait()
require.Equal(t, int64(numBandwidthRoutines*defaultSize), out.Load())
require.Equal(t, int64(numBandwidthRoutines*defaultSize), in.Load())
require.Equal(t, int64(numEventsCreated+numEventsPushed), pushes.Load())
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/usage/config.go | app/services/usage/config.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package usage
import (
"github.com/harness/gitness/types"
)
// Config holds the tunable settings for the usage-metrics service.
type Config struct {
	// MaxWorkers is the number of concurrent workers draining the metric queue.
	MaxWorkers int
}
// defaultMaxWorkers is the worker-pool size used when the global
// configuration does not specify one.
const defaultMaxWorkers = 5

// NewConfig builds the usage service Config from the global application
// config, falling back to defaultMaxWorkers when no worker count is set.
func NewConfig(global *types.Config) Config {
	cfg := Config{
		MaxWorkers: global.UsageMetrics.MaxWorkers,
	}
	if cfg.MaxWorkers == 0 {
		cfg.MaxWorkers = defaultMaxWorkers
	}
	return cfg
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/usage/interface.go | app/services/usage/interface.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package usage
import "context"
// Sender enqueues a usage Metric for processing.
type Sender interface {
	// Send submits the payload; the returned error reports submission
	// failures only, not processing failures.
	Send(ctx context.Context, payload Metric) error
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/usage/io.go | app/services/usage/io.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package usage
import (
"io"
"net/http"
)
type writeCounter struct {
w http.ResponseWriter
n int64
}
func newWriter(w http.ResponseWriter) *writeCounter {
return &writeCounter{
w: w,
}
}
func (c *writeCounter) Write(data []byte) (n int, err error) {
n, err = c.w.Write(data)
c.n += int64(n)
return n, err
}
func (c *writeCounter) Header() http.Header {
return c.w.Header()
}
func (c *writeCounter) WriteHeader(statusCode int) {
c.w.WriteHeader(statusCode)
}
type readCounter struct {
n int64
r io.ReadCloser
}
func newReader(r io.ReadCloser) *readCounter {
return &readCounter{
r: r,
}
}
func (c *readCounter) Read(p []byte) (int, error) {
n, err := c.r.Read(p)
c.n += int64(n)
return n, err
}
func (c *readCounter) Close() error {
return c.r.Close()
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/usage/middleware_int_test.go | app/services/usage/middleware_int_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build integration
// +build integration
package usage_test
import (
"bytes"
"context"
"fmt"
"io"
"mime/multipart"
"net/http"
"sync"
"testing"
"time"
"github.com/harness/gitness/app/api/request"
"github.com/harness/gitness/app/services/usage"
"github.com/harness/gitness/types"
"github.com/go-chi/chi/v5"
"github.com/stretchr/testify/assert"
)
// Tuning knobs for the integration run: request fan-out, upload size, and
// the address/space the local test server is reached at.
const (
	numRequests = 50
	fileSize    = 100 * 1024 * 1024 // 100 MB
	url         = "http://localhost:8080"
	spaceRef    = "root"
)
// httpClient is shared by all simulated requests so TCP connections are
// pooled and reused across the concurrent upload/download goroutines.
// NOTE(review): no Client.Timeout is set — 100MB uploads may run long;
// confirm that is intentional for this integration test.
var httpClient = &http.Client{
	Transport: &http.Transport{
		MaxIdleConns:      100,              // Allow up to 100 idle connections
		MaxConnsPerHost:   100,              // Maximum concurrent connections per host
		IdleConnTimeout:   30 * time.Second, // Keep idle connections open for reuse
		DisableKeepAlives: false,            // Allow connection reuse
	},
}
// generateRandomData produces size bytes of deterministic test data: each
// byte holds its own index modulo 256. (The name is historical — the content
// is repeatable, not random.)
func generateRandomData(size int) []byte {
	out := make([]byte, size)
	for i := 0; i < size; i++ {
		out[i] = byte(i) // byte(i) == byte(i % 256)
	}
	return out
}
// simulateUploadRequest uploads a multipart file to the test server and
// verifies the response.
//
// Failures are reported via t.Errorf followed by return (not t.Fatalf):
// this helper is invoked from spawned goroutines, and testing.T.FailNow —
// which Fatalf calls — must only be called from the goroutine running the
// test function.
func simulateUploadRequest(t *testing.T) {
	fileData := generateRandomData(fileSize)

	// Build the multipart form body; closing the writer finalizes the boundary.
	body := &bytes.Buffer{}
	writer := multipart.NewWriter(body)
	part, err := writer.CreateFormFile("file", "testing.dat")
	if err != nil {
		t.Errorf("Failed to create form file: %v", err)
		return
	}
	_, _ = part.Write(fileData)
	_ = writer.Close() // Must close the writer to finalize the boundary

	// Create request and set Content-Type (including the boundary) properly.
	req, err := http.NewRequest(http.MethodPost, url+"/testing/"+spaceRef, body)
	if err != nil {
		t.Errorf("Failed to create upload request: %v", err)
		return
	}
	req.Header.Set("Content-Type", writer.FormDataContentType())

	resp, err := httpClient.Do(req)
	if err != nil {
		t.Errorf("Upload request failed: %v", err)
		return
	}
	defer func() {
		_ = resp.Body.Close()
	}()
	bodyResp, _ := io.ReadAll(resp.Body)
	assert.Equal(t, http.StatusOK, resp.StatusCode, "Expected HTTP 200 for upload")
	assert.Contains(t, string(bodyResp), "File uploaded successfully", "Upload should be successful")
}
// simulateDownloadRequest downloads from the test server and verifies the
// response is 200 with a non-empty body.
//
// Failures are reported via t.Errorf followed by return (not t.Fatalf):
// this helper runs in spawned goroutines, and testing.T.FailNow — which
// Fatalf calls — must only be called from the test goroutine.
func simulateDownloadRequest(t *testing.T) {
	req, err := http.NewRequest(http.MethodGet, url+"/testing/"+spaceRef, nil)
	if err != nil {
		t.Errorf("Failed to create download request: %v", err)
		return
	}
	resp, err := httpClient.Do(req)
	if err != nil {
		t.Errorf("Failed to send download request: %v", err)
		return
	}
	defer func() {
		_ = resp.Body.Close()
	}()
	body, _ := io.ReadAll(resp.Body)
	assert.Equal(t, http.StatusOK, resp.StatusCode, "Expected HTTP 200 OK for download")
	assert.NotEmpty(t, body, "Expected non-empty response body")
}
// File upload handler
func uploadHandler(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
http.Error(w, "Invalid request method", http.StatusMethodNotAllowed)
return
}
// Simulate file processing
file, _, err := r.FormFile("file")
if err != nil {
http.Error(w, "Failed to read file", http.StatusBadRequest)
return
}
defer func() {
_ = file.Close()
}()
// For testing, we're just reading the file content (simulation)
_, err = io.ReadAll(file)
if err != nil {
http.Error(w, "Failed to process file", http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
_, _ = w.Write([]byte("File uploaded successfully"))
}
// File download handler (simulating a simple file download)
func downloadHandler(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
http.Error(w, "Invalid request method", http.StatusMethodNotAllowed)
return
}
w.Header().Set("Content-Type", "application/octet-stream")
_, _ = w.Write([]byte("This is a dummy file content"))
}
// TestUploadDownloadMiddleware starts a real HTTP server wrapped with the
// usage middleware and fires numRequests concurrent upload/download pairs
// at it, exercising the bandwidth-accounting path end to end with mocked
// space and metric stores.
//
// NOTE(review): the server goroutine (http.ListenAndServe) is never shut
// down and outlives the test — confirm this is acceptable for the suite.
func TestUploadDownloadMiddleware(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
	defer cancel()
	// Space store stub: every ref resolves to the root space with ID 1.
	spaceStore := &usage.SpaceStoreMock{
		FindByRefFn: func(ctx context.Context, spaceRef string) (*types.Space, error) {
			return &types.Space{
				ID:          1,
				Version:     1,
				ParentID:    0,
				Path:        "",
				Identifier:  "root",
				Description: "",
				CreatedBy:   0,
				Created:     0,
				Updated:     0,
				Deleted:     nil,
			}, nil
		},
		FindByIDsFn: func(ctx context.Context, spaceIDs ...int64) ([]*types.Space, error) {
			return []*types.Space{}, nil
		},
	}
	// Metrics store stub: the 100ms sleep simulates a slow upsert so the
	// mediator's worker pool is actually exercised under load.
	metricsMock := &usage.MetricsMock{
		UpsertOptimisticFn: func(ctx context.Context, in *types.UsageMetric) error {
			time.Sleep(100 * time.Millisecond)
			return nil
		},
		GetMetricsFn: func(ctx context.Context, rootSpaceID int64, startDate int64, endDate int64) (*types.UsageMetric, error) {
			return &types.UsageMetric{}, nil
		},
		ListFn: func(ctx context.Context, start int64, end int64) ([]types.UsageMetric, error) {
			return []types.UsageMetric{}, nil
		},
	}
	mediator := usage.NewMediator(ctx, spaceStore, metricsMock, usage.Config{})
	// Start the server in a goroutine
	go func() {
		r := chi.NewRouter()
		r.Get("/health", func(writer http.ResponseWriter, r *http.Request) {
			writer.WriteHeader(http.StatusOK)
		})
		// The route under test wraps the handlers with the usage middleware.
		r.Route(fmt.Sprintf("/testing/{%s}", request.PathParamRepoRef), func(r chi.Router) {
			r.Use(usage.Middleware(mediator, true))
			r.Post("/", uploadHandler)
			r.Get("/", downloadHandler)
		})
		t.Log(http.ListenAndServe(":8080", r))
	}()
	// Allow the server to start before running tests
	waitServer(t)
	// Run the upload and download requests in parallel
	t.Run("UploadDownloadTest", func(t *testing.T) {
		t.Parallel() // Run tests in parallel
		// Create a WaitGroup for syncing concurrent requests
		var wg sync.WaitGroup
		for i := 0; i < numRequests; i++ {
			wg.Add(2)
			// Simulate upload request
			go func() {
				defer wg.Done()
				simulateUploadRequest(t)
			}()
			// Simulate download request
			go func() {
				defer wg.Done()
				simulateDownloadRequest(t)
			}()
		}
		// Wait for all requests to finish
		wg.Wait()
	})
}
// waitServer polls the test server's /health endpoint until it responds with
// HTTP 200, retrying up to 5 times with a linear backoff after connection
// errors. It returns after the first success or after exhausting attempts.
func waitServer(t *testing.T) {
	t.Helper()
	req, err := http.NewRequest(http.MethodGet, url+"/health", nil)
	if err != nil {
		t.Fatalf("failed to create health request: %v", err)
		return
	}
	for attempt := 1; attempt <= 5; attempt++ {
		resp, err := httpClient.Do(req)
		if err != nil {
			t.Logf("Failed to send health request after %d attempt with error: %v", attempt, err)
			time.Sleep(time.Duration(attempt) * time.Second)
			continue
		}
		// Drain and close the body immediately instead of deferring inside
		// the loop (a deferred close would keep every response open until the
		// function returns); draining lets the transport reuse the connection.
		_, _ = io.Copy(io.Discard, resp.Body)
		_ = resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			t.Logf("Expected HTTP 200 OK, got %d, attempt=%d, retrying...", resp.StatusCode, attempt)
			continue
		}
		// If it's a success break out of the loop
		break
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/usage/event_handlers.go | app/services/usage/event_handlers.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package usage
import (
"context"
"fmt"
"time"
repoevents "github.com/harness/gitness/app/events/repo"
"github.com/harness/gitness/app/paths"
"github.com/harness/gitness/events"
"github.com/harness/gitness/stream"
"github.com/harness/gitness/types"
)
// RepoFinder resolves a repository's core data by its ID.
type RepoFinder interface {
	FindByID(ctx context.Context, id int64) (*types.RepositoryCore, error)
}
// RegisterEventListeners launches a consumer on the repo event stream that
// reports usage (push counts) for repository "created" and "pushed" events.
// instanceID identifies this consumer within the shared consumer group.
func RegisterEventListeners(
	ctx context.Context,
	instanceID string,
	sender Sender,
	repoEvReaderFactory *events.ReaderFactory[*repoevents.Reader],
	repoFinder RepoFinder,
) error {
	// repo events
	const groupRepo = "gitness:usage:repo"
	_, err := repoEvReaderFactory.Launch(ctx, groupRepo, instanceID,
		func(r *repoevents.Reader) error {
			const idleTimeout = 10 * time.Second
			// Single-threaded consumption with an idle timeout and a bounded
			// retry budget per event.
			r.Configure(
				stream.WithConcurrency(1),
				stream.WithHandlerOptions(
					stream.WithIdleTimeout(idleTimeout),
					stream.WithMaxRetries(2),
				))

			_ = r.RegisterCreated(repoCreateHandler(sender, repoFinder))
			_ = r.RegisterPushed(repoPushHandler(sender, repoFinder))

			return nil
		})
	if err != nil {
		return fmt.Errorf("failed to launch repo event reader: %w", err)
	}
	return nil
}
// repoCreateHandler returns an event handler that records one push usage
// metric for the repository referenced by a "created" event.
func repoCreateHandler(sender Sender, repoFinder RepoFinder) events.HandlerFunc[*repoevents.CreatedPayload] {
	handle := func(ctx context.Context, event *events.Event[*repoevents.CreatedPayload]) error {
		return sendRepoPushUsage(ctx, sender, repoFinder, event.Payload.RepoID)
	}
	return handle
}
// repoPushHandler returns an event handler that records one push usage
// metric for the repository referenced by a "pushed" event.
func repoPushHandler(sender Sender, repoFinder RepoFinder) events.HandlerFunc[*repoevents.PushedPayload] {
	handle := func(ctx context.Context, event *events.Event[*repoevents.PushedPayload]) error {
		return sendRepoPushUsage(ctx, sender, repoFinder, event.Payload.RepoID)
	}
	return handle
}
// sendRepoPushUsage resolves the repository's root space and submits a usage
// metric counting a single push against it.
func sendRepoPushUsage(ctx context.Context, sender Sender, repoFinder RepoFinder, repoID int64) error {
	repo, err := repoFinder.FindByID(ctx, repoID)
	if err != nil {
		return fmt.Errorf("failed to find repo with id %d: %w", repoID, err)
	}

	// Usage is attributed to the root space of the repo's path.
	rootSpace, _, err := paths.DisectRoot(repo.Path)
	if err != nil {
		return fmt.Errorf("failed to disect repo path %q: %w", repo.Path, err)
	}

	metric := Metric{
		SpaceRef: rootSpace,
		Pushes:   1,
	}
	if err := sender.Send(ctx, metric); err != nil {
		return fmt.Errorf("failed to send usage metric: %w", err)
	}

	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/usage/usage.go | app/services/usage/usage.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package usage
import (
"context"
"fmt"
"sync"
"time"
"github.com/harness/gitness/types"
"github.com/rs/zerolog/log"
)
var (
	// days30 is the rolling window over which bandwidth usage is aggregated.
	days30 = time.Duration(30*24) * time.Hour
)
// Bandwidth captures egress (Out) and ingress (In) byte counts.
type Bandwidth struct {
	Out int64
	In  int64
}

// Metric is a single usage sample attributed to the space identified by
// SpaceRef.
type Metric struct {
	Time     time.Time
	SpaceRef string
	Bandwidth
	StorageTotal    int64
	LFSStorageTotal int64
	Pushes          int64
}

// SpaceFinder resolves a space's core data by its reference.
type SpaceFinder interface {
	FindByRef(ctx context.Context, spaceRef string) (*types.SpaceCore, error)
}

// MetricStore persists and queries aggregated usage metrics.
type MetricStore interface {
	UpsertOptimistic(ctx context.Context, in *types.UsageMetric) error
	GetMetrics(
		ctx context.Context,
		rootSpaceID int64,
		startDate int64,
		endDate int64,
	) (*types.UsageMetric, error)
	List(
		ctx context.Context,
		start int64,
		end int64,
	) ([]types.UsageMetric, error)
}

// Mediator fans metric processing out to a pool of workers fed by a shared
// queue; wg tracks accepted-but-unprocessed metrics for Wait.
type Mediator struct {
	queue        *queue
	workers      []*worker
	spaceFinder  SpaceFinder
	metricsStore MetricStore
	wg           sync.WaitGroup
	config       Config
}
// NewMediator wires up a Mediator with config.MaxWorkers worker slots and
// immediately starts the workers on the given context.
func NewMediator(
	ctx context.Context,
	spaceFinder SpaceFinder,
	usageMetricsStore MetricStore,
	config Config,
) *Mediator {
	mediator := &Mediator{
		queue:        newQueue(),
		spaceFinder:  spaceFinder,
		metricsStore: usageMetricsStore,
		workers:      make([]*worker, config.MaxWorkers),
		config:       config,
	}
	mediator.Start(ctx)
	return mediator
}
// Start spawns one goroutine per worker slot; each worker consumes metrics
// from the shared queue via m.process until ctx is canceled or the worker
// is stopped.
func (m *Mediator) Start(ctx context.Context) {
	for i := range m.workers {
		w := newWorker(i, m.queue)
		go w.start(ctx, m.process)
		m.workers[i] = w
	}
}
// Stop signals every worker to shut down.
func (m *Mediator) Stop() {
	for _, w := range m.workers {
		w.stop()
	}
}
// Send enqueues a metric for asynchronous processing. If ctx is already
// canceled it returns ctx.Err() without enqueuing; otherwise the wait-group
// is incremented and matched by the deferred Done in process().
//
// NOTE(review): if ctx is canceled after this check but before the queue
// accepts the payload, queue.Add drops it and the wait-group count leaks,
// which would block Wait — confirm whether queue.Add should report back
// whether the payload was actually enqueued.
func (m *Mediator) Send(ctx context.Context, payload Metric) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	m.wg.Add(1)
	m.queue.Add(ctx, payload)
	return nil
}
// Wait blocks until every metric accepted by Send has been processed.
func (m *Mediator) Wait() {
	m.wg.Wait()
}
// Size returns the bandwidth consumed by the space identified by spaceRef
// over the trailing 30-day window.
func (m *Mediator) Size(ctx context.Context, spaceRef string) (Bandwidth, error) {
	space, err := m.spaceFinder.FindByRef(ctx, spaceRef)
	if err != nil {
		return Bandwidth{}, fmt.Errorf("could not find space: %w", err)
	}

	now := time.Now()
	windowStart := now.Add(-days30)
	metric, err := m.metricsStore.GetMetrics(ctx, space.ID, windowStart.UnixMilli(), now.UnixMilli())
	if err != nil {
		return Bandwidth{}, err
	}

	return Bandwidth{
		Out: metric.BandwidthOut,
		In:  metric.BandwidthIn,
	}, nil
}
// process resolves the metric's space and upserts the usage row. Errors are
// logged rather than returned because workers have no caller to report to.
// The deferred Done matches the Add performed in Send.
func (m *Mediator) process(ctx context.Context, payload *Metric) {
	defer m.wg.Done()
	space, err := m.spaceFinder.FindByRef(ctx, payload.SpaceRef)
	if err != nil {
		log.Ctx(ctx).Err(err).Msg("failed to find space")
		return
	}

	if err = m.metricsStore.UpsertOptimistic(ctx, &types.UsageMetric{
		Date:            payload.Time,
		RootSpaceID:     space.ID,
		BandwidthOut:    payload.Out,
		BandwidthIn:     payload.In,
		StorageTotal:    payload.StorageTotal,
		LFSStorageTotal: payload.LFSStorageTotal,
		Pushes:          payload.Pushes,
	}); err != nil {
		log.Ctx(ctx).Err(err).Msg("failed to upsert usage metrics")
	}
}
// worker consumes metrics from the shared queue until its context is
// canceled or stop is called.
type worker struct {
	id     int
	queue  *queue
	stopCh chan struct{} // signaled by stop() to end the run loop
}

// newWorker creates a worker with the given id reading from queue.
func newWorker(id int, queue *queue) *worker {
	return &worker{
		id:     id,
		queue:  queue,
		stopCh: make(chan struct{}),
	}
}
// start runs the worker loop: pop a metric from the queue and process it
// with fn, until ctx is canceled or stop is signaled.
//
// NOTE(review): the default branch blocks inside queue.Pop, so a stop signal
// is only observed once the in-flight Pop returns — confirm acceptable.
func (w *worker) start(ctx context.Context, fn func(context.Context, *Metric)) {
	log.Ctx(ctx).Info().Int("usage-worker", w.id).Msg("usage metrics starting worker")
	for {
		select {
		case <-ctx.Done():
			log.Ctx(ctx).Err(ctx.Err()).Msg("context canceled")
			return
		case <-w.stopCh:
			log.Ctx(ctx).Warn().Int("usage-worker", w.id).Msg("worker is stopped")
			return
		default:
			// Pop blocks until a payload arrives or ctx is canceled.
			payload, err := w.queue.Pop(ctx)
			if err != nil {
				log.Ctx(ctx).Err(err).Int("usage-worker", w.id).Msg("failed to consume the queue")
				return
			}
			fn(ctx, payload)
		}
	}
}
// stop signals the worker's run loop to exit by closing stopCh. Closing (as
// opposed to sending a value before closing) never blocks — even when the
// worker goroutine has already returned due to context cancellation, in
// which case an unbuffered send would block forever.
// stop must be called at most once: a second close panics.
func (w *worker) stop() {
	close(w.stopCh)
}
// Noop is a Sender that discards every metric.
type Noop struct{}

// Send implements Sender and does nothing.
func (n *Noop) Send(_ context.Context, _ Metric) error {
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/usage/queue.go | app/services/usage/queue.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package usage
import (
"context"
"github.com/rs/zerolog/log"
)
// queue is a buffered channel of metrics shared between producers (Send)
// and the worker pool.
type queue struct {
	ch chan Metric
}

// newQueue creates a queue with a buffer of 256 metrics.
func newQueue() *queue {
	return &queue{
		ch: make(chan Metric, 256),
	}
}
// Add enqueues payload without blocking the caller. If ctx is already done
// the payload is dropped; if the buffer is full the send is retried in a
// background goroutine.
func (q *queue) Add(ctx context.Context, payload Metric) {
	select {
	case <-ctx.Done():
		return
	case q.ch <- payload:
	default:
		// Queue is full: retry in a new goroutine until a consumer reads
		// from the channel, so the caller goroutine is not blocked. The
		// retry honors ctx as well — otherwise the goroutine (and its
		// payload) would leak forever if all consumers have exited.
		log.Ctx(ctx).Warn().Msg("usage metric queue full")
		go func() {
			select {
			case <-ctx.Done():
			case q.ch <- payload:
			}
		}()
	}
}
// Pop blocks until a metric is available or ctx is canceled, returning
// ctx.Err() in the latter case.
func (q *queue) Pop(ctx context.Context) (*Metric, error) {
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case payload := <-q.ch:
		return &payload, nil
	}
}

// Close closes the underlying channel; Add must not be called afterwards
// (a send on a closed channel panics).
func (q *queue) Close() {
	close(q.ch)
}

// Len reports the number of metrics currently buffered.
func (q *queue) Len() int {
	return len(q.ch)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/migrate/wire.go | app/services/migrate/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package migrate
import (
"github.com/harness/gitness/app/services/refcache"
"github.com/harness/gitness/app/services/webhook"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/app/url"
"github.com/harness/gitness/git"
"github.com/harness/gitness/lock"
"github.com/harness/gitness/store/database/dbtx"
"github.com/google/wire"
)
// WireSet provides the migration importers for dependency injection.
var WireSet = wire.NewSet(
	ProvidePullReqImporter,
	ProvideRuleImporter,
	ProvideWebhookImporter,
	ProvideLabelImporter,
)
// ProvidePullReqImporter constructs the pull request importer from its
// stores, git interface, and supporting services.
func ProvidePullReqImporter(
	urlProvider url.Provider,
	git git.Interface,
	principalStore store.PrincipalStore,
	spaceStore store.SpaceStore,
	repoStore store.RepoStore,
	pullReqStore store.PullReqStore,
	pullReqActStore store.PullReqActivityStore,
	labelStore store.LabelStore,
	labelValueStore store.LabelValueStore,
	pullReqLabelAssignmentStore store.PullReqLabelAssignmentStore,
	pullReqReviewerStore store.PullReqReviewerStore,
	pullReqReviewStore store.PullReqReviewStore,
	repoFinder refcache.RepoFinder,
	tx dbtx.Transactor,
	mtxManager lock.MutexManager,
) *PullReq {
	return NewPullReq(
		urlProvider, git, principalStore, spaceStore, repoStore, pullReqStore, pullReqActStore,
		labelStore, labelValueStore, pullReqLabelAssignmentStore, pullReqReviewerStore, pullReqReviewStore,
		repoFinder, tx, mtxManager)
}
// ProvideRuleImporter constructs the rule importer.
func ProvideRuleImporter(
	ruleStore store.RuleStore,
	tx dbtx.Transactor,
	principalStore store.PrincipalStore,
) *Rule {
	return NewRule(ruleStore, tx, principalStore)
}

// ProvideWebhookImporter constructs the webhook importer from the webhook
// service config and the webhook store.
func ProvideWebhookImporter(
	config webhook.Config,
	tx dbtx.Transactor,
	webhookStore store.WebhookStore,
) *Webhook {
	return NewWebhook(config, tx, webhookStore)
}

// ProvideLabelImporter constructs the label importer.
func ProvideLabelImporter(
	tx dbtx.Transactor,
	labelStore store.LabelStore,
	labelValueStore store.LabelValueStore,
	spaceStore store.SpaceStore,
) *Label {
	return NewLabel(labelStore, labelValueStore, spaceStore, tx)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/migrate/webhook_type.go | app/services/migrate/webhook_type.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package migrate
import migratetypes "github.com/harness/harness-migrate/types"
type (
	// ExternalWebhook is the webhook representation used by the
	// harness-migrate tooling.
	ExternalWebhook = migratetypes.Hook
)
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/migrate/webhook.go | app/services/migrate/webhook.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package migrate
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/app/services/webhook"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/check"
"github.com/harness/gitness/types/enum"
)
// Webhook is webhook migrate.
type Webhook struct {
	// webhook configs, copied from the webhook service Config
	allowLoopback       bool
	allowPrivateNetwork bool
	tx                  dbtx.Transactor
	webhookStore        store.WebhookStore
}

// NewWebhook creates a webhook importer from the webhook service config and
// the store it persists through.
func NewWebhook(
	config webhook.Config,
	tx dbtx.Transactor,
	webhookStore store.WebhookStore,
) *Webhook {
	return &Webhook{
		allowLoopback:       config.AllowLoopback,
		allowPrivateNetwork: config.AllowPrivateNetwork,
		tx:                  tx,
		webhookStore:        webhookStore,
	}
}
// Import converts the given external webhooks into gitness webhooks owned by
// repo and persists all of them in a single transaction. It returns the
// stored webhooks.
func (migrate Webhook) Import(
	ctx context.Context,
	migrator types.Principal,
	repo *types.RepositoryCore,
	extWebhooks []*ExternalWebhook,
) ([]*types.Webhook, error) {
	now := time.Now().UnixMilli()
	hooks := make([]*types.Webhook, len(extWebhooks))

	// Sanitize and convert each external webhook into the internal type.
	for i, extHook := range extWebhooks {
		triggers := webhook.ConvertTriggers(extHook.Events)
		if err := sanitizeWebhook(extHook, triggers, migrate.allowLoopback, migrate.allowPrivateNetwork); err != nil {
			return nil, fmt.Errorf("failed to sanitize external webhook input: %w", err)
		}

		hooks[i] = &types.Webhook{
			ID:         0, // the ID will be populated in the data layer
			Version:    0, // the Version will be populated in the data layer
			CreatedBy:  migrator.ID,
			Created:    now,
			Updated:    now,
			ParentID:   repo.ID,
			ParentType: enum.WebhookParentRepo,

			// user input
			Identifier:            extHook.Identifier,
			DisplayName:           extHook.Identifier,
			URL:                   extHook.Target,
			Enabled:               extHook.Active,
			Insecure:              extHook.SkipVerify,
			Triggers:              webhook.DeduplicateTriggers(triggers),
			LatestExecutionResult: nil,
		}
	}

	// Persist all webhooks atomically.
	if err := migrate.tx.WithTx(ctx, func(ctx context.Context) error {
		for _, hook := range hooks {
			if err := migrate.webhookStore.Create(ctx, hook); err != nil {
				return fmt.Errorf("failed to store webhook: %w", err)
			}
		}
		return nil
	}); err != nil {
		return nil, fmt.Errorf("failed to store external webhooks: %w", err)
	}

	return hooks, nil
}
// sanitizeWebhook validates the external webhook's identifier, target URL,
// and triggers against gitness constraints.
func sanitizeWebhook(
	in *ExternalWebhook,
	triggers []enum.WebhookTrigger,
	allowLoopback bool,
	allowPrivateNetwork bool,
) error {
	if err := check.Identifier(in.Identifier); err != nil {
		return err
	}
	if err := webhook.CheckURL(in.Target, allowLoopback, allowPrivateNetwork, false); err != nil {
		return err
	}
	return webhook.CheckTriggers(triggers)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/migrate/label_types.go | app/services/migrate/label_types.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package migrate
import migratetypes "github.com/harness/harness-migrate/types"
// ExternalLabel is the label representation used by the harness-migrate tooling.
type ExternalLabel = migratetypes.Label
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/migrate/pullreq_test.go | app/services/migrate/pullreq_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package migrate
import (
"testing"
"time"
"github.com/google/go-cmp/cmp"
)
// TestGenerateThreads verifies that a flat list of external comments is
// grouped into top-level threads with their replies, that replies appearing
// before their parent are still attached, and that self-referencing or
// cross-linked comments are excluded from the result.
func TestGenerateThreads(t *testing.T) {
	// comments with treelike structure
	t0 := time.Now()
	comments := []ExternalComment{
		/* 0 */ {ID: 1, Body: "A", ParentID: 0},
		/* 1 */ {ID: 2, Body: "B", ParentID: 0},
		/* 2 */ {ID: 3, Body: "A1", ParentID: 1},
		/* 3 */ {ID: 4, Body: "B1", ParentID: 2},
		/* 4 */ {ID: 5, Body: "A2", ParentID: 1},
		/* 5 */ {ID: 6, Body: "A2X", ParentID: 5},
		/* 6 */ {ID: 7, Body: "A1X", ParentID: 3},
		/* 7 */ {ID: 8, Body: "B1X", ParentID: 4},
		/* 8 */ {ID: 9, Body: "C", ParentID: 0},
		/* 9 */ {ID: 10, Body: "D1", ParentID: 11}, // Wrong order - a reply before its parent
		/* 10 */ {ID: 11, Body: "D", ParentID: 0},
		{ID: 20, Body: "Self-parent", ParentID: 20},   // Invalid
		{ID: 30, Body: "Crosslinked-X", ParentID: 31}, // Invalid
		{ID: 31, Body: "Crosslinked-Y", ParentID: 30}, // Invalid
	}
	// Give every comment a distinct, increasing creation time.
	for i := range comments {
		comments[i].Created = t0.Add(time.Duration(i) * time.Minute)
	}

	// flattened threads with top level comments and a list of replies to each of them
	wantThreads := []*externalCommentThread{
		{
			TopLevel: comments[0],                                                             // A
			Replies:  []ExternalComment{comments[2], comments[4], comments[5], comments[6]},   // A1, A2, A2X, A1X
		},
		{
			TopLevel: comments[1],                                   // B
			Replies:  []ExternalComment{comments[3], comments[7]},   // B1, B1X
		},
		{
			TopLevel: comments[8], // C
			Replies:  []ExternalComment{},
		},
		{
			TopLevel: comments[10],                     // D
			Replies:  []ExternalComment{comments[9]},   // D1
		},
	}

	gotThreads := generateThreads(comments)
	if diff := cmp.Diff(gotThreads, wantThreads); diff != "" {
		t.Error(diff)
	}
}
// TestTimestampMillis checks that timestampMillis returns the UnixMilli
// value of a valid time and falls back to the provided default for the
// zero time.
func TestTimestampMillis(t *testing.T) {
	tests := []struct {
		name     string
		input    time.Time
		fallback int64
		want     int64
	}{
		{
			name:     "valid time",
			input:    time.Date(2023, 1, 1, 12, 0, 0, 0, time.UTC),
			fallback: 0,
			want:     time.Date(2023, 1, 1, 12, 0, 0, 0, time.UTC).UnixMilli(),
		},
		{
			name:     "zero time",
			input:    time.Time{},
			fallback: 123456789,
			want:     123456789,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := timestampMillis(tt.input, tt.fallback)
			if got != tt.want {
				t.Errorf("timestampMillis() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestActivitySeqOrdering verifies that ActivitySeq is incremented across
// reviewer activities, review activities, and comments, so that migrated
// activities never collide on a UNIQUE constraint.
func TestActivitySeqOrdering(t *testing.T) {
	tests := []struct {
		name          string
		reviewerCount int
		reviewCount   int
		commentCount  int
		wantMinSeq    int64 // minimum ActivitySeq after all activities
	}{
		{
			name:          "single reviewer, single review, single comment",
			reviewerCount: 1,
			reviewCount:   1,
			commentCount:  1,
			wantMinSeq:    3, // 1 reviewer activity + 1 review activity + 1 comment
		},
		{
			name:          "multiple reviewers, multiple reviews, multiple comments",
			reviewerCount: 3,
			reviewCount:   2,
			commentCount:  5,
			wantMinSeq:    8, // 1 reviewer activity (batched) + 2 review activities + 5 comments
		},
		{
			name:          "no reviewers, multiple reviews and comments",
			reviewerCount: 0,
			reviewCount:   3,
			commentCount:  2,
			wantMinSeq:    5, // 0 reviewer activities + 3 review activities + 2 comments
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Simulate the ActivitySeq progression of a migration run.
			var seq int64

			// All reviewers share a single batched activity.
			if tt.reviewerCount > 0 {
				seq++
			}

			// Each review contributes one activity.
			seq += int64(tt.reviewCount)

			// Comments continue the sequence; the final top-level comment
			// lands at seq + commentCount.
			if tt.commentCount > 0 {
				seq += int64(tt.commentCount)
			}

			if seq < tt.wantMinSeq {
				t.Errorf("ActivitySeq ordering failed: got %d, want at least %d", seq, tt.wantMinSeq)
			}
		})
	}
}
// TestReviewerActivityPayloadStructure tests that reviewer activity payloads
// contain the expected fields to prevent marshaling/unmarshaling issues.
func TestReviewerActivityPayloadStructure(t *testing.T) {
	wantIDs := []int64{123, 456, 789}

	// Build the payload the same way createReviewerActivity does, so this test
	// breaks if the expected structure drifts.
	payload := struct {
		ReviewerType string  `json:"reviewer_type"`
		PrincipalIDs []int64 `json:"principal_ids"`
	}{
		ReviewerType: "requested",
		PrincipalIDs: wantIDs,
	}

	// The critical fields must be populated.
	if payload.ReviewerType == "" {
		t.Error("ReviewerType must not be empty")
	}
	if got, want := len(payload.PrincipalIDs), len(wantIDs); got != want {
		t.Errorf("PrincipalIDs length mismatch: got %d, want %d", got, want)
	}
	for i := range wantIDs {
		if got, want := payload.PrincipalIDs[i], wantIDs[i]; got != want {
			t.Errorf("PrincipalID[%d] mismatch: got %d, want %d", i, got, want)
		}
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/migrate/pullreq.go | app/services/migrate/pullreq.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package migrate
import (
"context"
"encoding/json"
"fmt"
"sort"
"strings"
"time"
"github.com/harness/gitness/app/services/refcache"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/app/url"
"github.com/harness/gitness/errors"
"github.com/harness/gitness/git"
"github.com/harness/gitness/git/parser"
"github.com/harness/gitness/lock"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/rs/zerolog/log"
)
// PullReq is pull request migrate.
// It bundles the stores and services needed to import externally exported
// pull requests - including comments, labels, reviewers and reviews - into a
// repository.
type PullReq struct {
	urlProvider                 url.Provider
	git                         git.Interface
	principalStore              store.PrincipalStore
	spaceStore                  store.SpaceStore
	repoStore                   store.RepoStore
	pullReqStore                store.PullReqStore
	pullReqActStore             store.PullReqActivityStore
	labelStore                  store.LabelStore
	labelValueStore             store.LabelValueStore
	pullReqLabelAssignmentStore store.PullReqLabelAssignmentStore
	pullReqReviewerStore        store.PullReqReviewerStore
	pullReqReviewStore          store.PullReqReviewStore
	repoFinder                  refcache.RepoFinder
	tx                          dbtx.Transactor
	mtxManager                  lock.MutexManager
}
// NewPullReq creates a new PullReq migration service.
// All dependencies are stored as-is; the constructor performs no validation
// or I/O.
func NewPullReq(
	urlProvider url.Provider,
	git git.Interface,
	principalStore store.PrincipalStore,
	spaceStore store.SpaceStore,
	repoStore store.RepoStore,
	pullReqStore store.PullReqStore,
	pullReqActStore store.PullReqActivityStore,
	labelStore store.LabelStore,
	labelValueStore store.LabelValueStore,
	pullReqLabelAssignmentStore store.PullReqLabelAssignmentStore,
	pullReqReviewerStore store.PullReqReviewerStore,
	pullReqReviewStore store.PullReqReviewStore,
	repoFinder refcache.RepoFinder,
	tx dbtx.Transactor,
	mtxManager lock.MutexManager,
) *PullReq {
	return &PullReq{
		urlProvider:                 urlProvider,
		git:                         git,
		principalStore:              principalStore,
		spaceStore:                  spaceStore,
		repoStore:                   repoStore,
		pullReqStore:                pullReqStore,
		pullReqActStore:             pullReqActStore,
		labelStore:                  labelStore,
		labelValueStore:             labelValueStore,
		pullReqLabelAssignmentStore: pullReqLabelAssignmentStore,
		pullReqReviewerStore:        pullReqReviewerStore,
		pullReqReviewStore:          pullReqReviewStore,
		repoFinder:                  repoFinder,
		tx:                          tx,
		mtxManager:                  mtxManager,
	}
}
// repoImportState carries the per-repository state used while importing pull
// requests: store handles, lookup caches (branches, principals, labels) and
// the fallback migrator principal used for authors that cannot be resolved.
type repoImportState struct {
	git                         git.Interface
	readParams                  git.ReadParams
	principalStore              store.PrincipalStore
	spaceStore                  store.SpaceStore
	pullReqStore                store.PullReqStore
	pullReqActivityStore        store.PullReqActivityStore
	labelStore                  store.LabelStore
	labelValueStore             store.LabelValueStore
	pullReqLabelAssignmentStore store.PullReqLabelAssignmentStore
	pullReqReviewerStore        store.PullReqReviewerStore
	pullReqReviewStore          store.PullReqReviewStore
	branchCheck                 map[string]*git.Branch      // cache of already resolved branches
	principals                  map[string]*types.Principal // cache of principals keyed by email
	unknownEmails               map[int]map[string]bool     // per PR number: emails that could not be resolved
	labels                      map[string]int64            // map for labels {"label.key":label.id,}
	labelValues                 map[int64]map[string]*int64 // map for label values {label.id:{"value-key":value-id,}}
	migrator                    types.Principal             // fallback principal for unknown authors
	scope                       int64                       // depth of space used for labels
}
// Import loads provided pull requests in go-scm format and imports them.
//
// The pull requests are first converted outside of any transaction (branch
// lookups for open PRs may hit git), then persisted in a single transaction
// together with their reviewers, reviews, comments and labels. Finally the
// repository counters are updated and the repo cache entry is invalidated.
//
//nolint:gocognit
func (migrate PullReq) Import(
	ctx context.Context,
	migrator types.Principal,
	repo *types.RepositoryCore,
	extPullReqs []*ExternalPullRequest,
) ([]*types.PullReq, error) {
	readParams := git.ReadParams{RepoUID: repo.GitUID}
	repoState := repoImportState{
		git:                         migrate.git,
		readParams:                  readParams,
		principalStore:              migrate.principalStore,
		spaceStore:                  migrate.spaceStore,
		pullReqStore:                migrate.pullReqStore,
		pullReqActivityStore:        migrate.pullReqActStore,
		labelStore:                  migrate.labelStore,
		labelValueStore:             migrate.labelValueStore,
		pullReqLabelAssignmentStore: migrate.pullReqLabelAssignmentStore,
		pullReqReviewerStore:        migrate.pullReqReviewerStore,
		pullReqReviewStore:          migrate.pullReqReviewStore,
		branchCheck:                 map[string]*git.Branch{},
		principals:                  map[string]*types.Principal{},
		unknownEmails:               map[int]map[string]bool{},
		labels:                      map[string]int64{},
		labelValues:                 map[int64]map[string]*int64{},
		migrator:                    migrator,
		scope:                       0,
	}

	pullReqUnique := map[int]ExternalPullRequest{}
	pullReqComments := map[*types.PullReq][]ExternalComment{}
	pullReqs := make([]*types.PullReq, 0, len(extPullReqs))

	// create the PR objects, one by one. Each pull request will mutate the repository object (to update the counters).
	for _, extPullReqData := range extPullReqs {
		extPullReq := &extPullReqData.PullRequest
		if _, exists := pullReqUnique[extPullReq.Number]; exists {
			return nil, errors.Conflictf("duplicate pull request number %d", extPullReq.Number)
		}
		pullReqUnique[extPullReq.Number] = *extPullReqData
		pr, err := repoState.convertPullReq(ctx, repo, extPullReqData)
		if err != nil {
			return nil, fmt.Errorf("failed to import pull request %d: %w", extPullReq.Number, err)
		}
		pullReqs = append(pullReqs, pr)
		pullReqComments[pr] = extPullReqData.Comments
	}

	if len(pullReqs) == 0 { // nothing to do: exit early to avoid accessing the database
		return nil, nil
	}

	err := migrate.tx.WithTx(ctx, func(ctx context.Context) error {
		var deltaOpen, deltaClosed, deltaMerged int
		var maxNumber int64

		// Create all pull requests first so reviewers/reviews/comments below
		// can reference their database IDs.
		for _, pullReq := range pullReqs {
			if err := migrate.pullReqStore.Create(ctx, pullReq); err != nil {
				return fmt.Errorf("failed to import the pull request %d: %w", pullReq.Number, err)
			}
		}

		for _, pr := range pullReqs {
			extPullReqData := pullReqUnique[int(pr.Number)]
			_, err := repoState.createReviewers(ctx, repo, pr, extPullReqData.Reviewers)
			if err != nil {
				return fmt.Errorf("failed to create reviewers for PR %d: %w", pr.Number, err)
			}
			_, err = repoState.createReviews(ctx, repo, pr, extPullReqData.Reviews)
			if err != nil {
				return fmt.Errorf("failed to create reviews for PR %d: %w", pr.Number, err)
			}
		}

		for _, pullReq := range pullReqs {
			// Track per-state counts so the repository counters can be updated once at the end.
			switch pullReq.State {
			case enum.PullReqStateOpen:
				deltaOpen++
			case enum.PullReqStateClosed:
				deltaClosed++
			case enum.PullReqStateMerged:
				deltaMerged++
			}
			if maxNumber < pullReq.Number {
				maxNumber = pullReq.Number
			}

			comments, err := repoState.createComments(ctx, repo, pullReq, pullReqComments[pullReq])
			if err != nil {
				return fmt.Errorf("failed to import pull request comments: %w", err)
			}

			// Add a comment if any principal (PR author or commenter) were replaced by the fallback migrator principal
			if prUnknownEmails, ok := repoState.unknownEmails[int(pullReq.Number)]; ok && len(prUnknownEmails) != 0 {
				infoComment, err := repoState.createInfoComment(ctx, repo, pullReq)
				if err != nil {
					// best-effort: a failed info comment must not abort the import
					log.Ctx(ctx).Warn().Err(err).Msg("failed to add an informational comment for replacing non-existing users")
				} else {
					comments = append(comments, infoComment)
				}
			}

			prLabels := pullReqUnique[int(pullReq.Number)].PullRequest.Labels
			err = repoState.assignLabels(ctx, repo.ParentID, pullReq, prLabels)
			if err != nil {
				return fmt.Errorf("failed to assign pull request %d labels: %w", pullReq.Number, err)
			}

			// no need to update the pull request object in the DB if there are no comments.
			if len(comments) == 0 && len(prLabels) == 0 {
				continue
			}

			// Persist the counter fields (CommentCount, ActivitySeq, ...) that
			// createComments/assignLabels mutated on the in-memory object.
			if err := migrate.pullReqStore.Update(ctx, pullReq); err != nil {
				return fmt.Errorf("failed to update pull request after importing of the comments: %w", err)
			}
		}

		// Update the repository
		repoUpdate, err := migrate.repoStore.Find(ctx, repo.ID)
		if err != nil {
			return fmt.Errorf("failed to fetch repo in pull request import: %w", err)
		}

		if repoUpdate.PullReqSeq < maxNumber {
			repoUpdate.PullReqSeq = maxNumber
		}
		repoUpdate.NumPulls += len(pullReqs)
		repoUpdate.NumOpenPulls += deltaOpen
		repoUpdate.NumClosedPulls += deltaClosed
		repoUpdate.NumMergedPulls += deltaMerged

		if err := migrate.repoStore.Update(ctx, repoUpdate); err != nil {
			return fmt.Errorf("failed to update repo in pull request import: %w", err)
		}

		return nil
	})
	if err != nil {
		return nil, err
	}

	migrate.repoFinder.MarkChanged(ctx, repo)

	return pullReqs, nil
}
// convertPullReq analyses external pull request object and creates types.PullReq object out of it.
// Merged and closed pull requests are converted without touching git; for open
// pull requests the source and target branches are resolved and their merge
// base is computed.
func (r *repoImportState) convertPullReq(
	ctx context.Context,
	repo *types.RepositoryCore,
	extPullReqData *ExternalPullRequest,
) (*types.PullReq, error) {
	extPullReq := extPullReqData.PullRequest
	log := log.Ctx(ctx).With().
		Str("repo.identifier", repo.Identifier).
		Int("pullreq.number", extPullReq.Number).
		Logger()

	author, err := r.getPrincipalByEmail(ctx, extPullReq.Author.Email, extPullReq.Number, false)
	if err != nil {
		return nil, fmt.Errorf("failed to get pull request author: %w", err)
	}

	// Fall back to "now" when the external data carries no timestamps.
	now := time.Now().UnixMilli()
	createdAt := timestampMillis(extPullReq.Created, now)
	updatedAt := timestampMillis(extPullReq.Updated, now)

	const maxTitleLen = 256
	const maxDescriptionLen = 100000 // This limit is deliberately higher than the limit in our API.

	// Truncate over-long titles/descriptions rather than failing the import.
	if len(extPullReq.Title) > maxTitleLen {
		extPullReq.Title = extPullReq.Title[:maxTitleLen]
	}
	if len(extPullReq.Body) > maxDescriptionLen {
		extPullReq.Body = extPullReq.Body[:maxDescriptionLen]
	}

	pr := &types.PullReq{
		ID:              0, // the ID will be populated in the data layer
		Version:         0,
		Number:          int64(extPullReq.Number),
		CreatedBy:       author.ID,
		Created:         createdAt,
		Updated:         updatedAt,
		Edited:          updatedAt,
		Closed:          nil,
		State:           enum.PullReqStateOpen,
		IsDraft:         extPullReq.Draft,
		CommentCount:    0,
		UnresolvedCount: 0,
		Title:           extPullReq.Title,
		Description:     extPullReq.Body,
		SourceRepoID:    &repo.ID,
		SourceBranch:    extPullReq.Head.Name,
		SourceSHA:       extPullReq.Head.SHA,
		TargetRepoID:    repo.ID,
		TargetBranch:    extPullReq.Base.Name,
		ActivitySeq:     0,
		// Merge related fields are all left unset and will be set depending on the PR state
	}

	params := git.ReadParams{RepoUID: repo.GitUID}

	// Set the state of the PR
	switch {
	case extPullReq.Merged:
		pr.State = enum.PullReqStateMerged
	case extPullReq.Closed:
		pr.State = enum.PullReqStateClosed
	default:
		pr.State = enum.PullReqStateOpen
	}

	// Update the PR depending on its state
	switch pr.State {
	case enum.PullReqStateMerged:
		// For merged PR's assume the Head.Sha and Base.Sha point to commits at the time of merging.
		pr.Merged = &pr.Updated
		pr.MergedBy = &author.ID             // Don't have real info for this - use the author.
		mergeMethod := enum.MergeMethodMerge // Don't know
		pr.MergeMethod = &mergeMethod
		pr.SourceSHA = extPullReq.Head.SHA
		pr.MergeTargetSHA = &extPullReq.Base.SHA // TODO: Check why target == base. Can it be nil?
		pr.MergeBaseSHA = extPullReq.Base.SHA
		pr.MergeSHA = nil // Don't have this.
		pr.MarkAsMerged()
	case enum.PullReqStateClosed:
		// For closed PR's it's not important to verify existence of branches and commits.
		// If these don't exist the PR will be impossible to open.
		pr.SourceSHA = extPullReq.Head.SHA
		pr.MergeTargetSHA = nil
		pr.MergeBaseSHA = extPullReq.Base.SHA
		pr.MergeSHA = nil
		pr.MergeConflicts = nil
		pr.MarkAsMergeUnchecked()
		pr.Closed = &pr.Updated
	case enum.PullReqStateOpen:
		// For open PR we need to verify existence of branches and find to merge base.
		sourceBranch, err := r.git.GetBranch(ctx, &git.GetBranchParams{
			ReadParams: params,
			BranchName: extPullReq.Head.Name,
		})
		if err != nil {
			return nil, fmt.Errorf("failed to fetch source branch of an open pull request: %w", err)
		}
		// TODO: Cache this in the repoImportState - it's very likely that it will be the same for other PRs
		targetBranch, err := r.git.GetBranch(ctx, &git.GetBranchParams{
			ReadParams: params,
			BranchName: extPullReq.Base.Name,
		})
		if err != nil {
			return nil, fmt.Errorf("failed to fetch target branch of an open pull request: %w", err)
		}
		mergeBase, err := r.git.MergeBase(ctx, git.MergeBaseParams{
			ReadParams: params,
			Ref1:       sourceBranch.Branch.SHA.String(),
			Ref2:       targetBranch.Branch.SHA.String(),
		})
		if err != nil {
			return nil, fmt.Errorf("failed to find merge base an open pull request: %w", err)
		}
		sourceSHA := sourceBranch.Branch.SHA.String()
		targetSHA := targetBranch.Branch.SHA.String()
		pr.SourceSHA = sourceSHA
		pr.MergeTargetSHA = &targetSHA
		pr.MergeBaseSHA = mergeBase.MergeBaseSHA.String()
		pr.MarkAsMergeUnchecked()
	}

	log.Debug().Str("pullreq.state", string(pr.State)).Msg("importing pull request")

	return pr, nil
}
// createComments analyses external pull request comment objects and stores types.PullReqActivity object to the DB.
// It will mutate the pull request object to update counter fields.
func (r *repoImportState) createComments(
	ctx context.Context,
	repo *types.RepositoryCore,
	pullReq *types.PullReq,
	extComments []ExternalComment,
) ([]*types.PullReqActivity, error) {
	logger := log.Ctx(ctx).With().
		Str("repo.id", repo.Identifier).
		Int("pullreq.number", int(pullReq.Number)).
		Logger()

	threads := generateThreads(extComments)

	activities := make([]*types.PullReqActivity, 0, len(extComments))
	for threadIdx, thread := range threads {
		order := int(pullReq.ActivitySeq) + threadIdx + 1

		// The top-level comment gets SubOrder 0 and carries the number of its
		// replies as ReplySeq.
		topLevel, err := r.createComment(ctx, repo, pullReq, nil,
			order, 0, len(thread.Replies), &thread.TopLevel)
		if err != nil {
			return nil, fmt.Errorf("failed to create top level comment: %w", err)
		}
		activities = append(activities, topLevel)

		for replyIdx, extReply := range thread.Replies {
			extReply := extReply // pass a copy; createComment may mutate the comment body

			// Replies share the parent's Order and are distinguished by SubOrder.
			//nolint:gosec
			reply, err := r.createComment(ctx, repo, pullReq, &topLevel.ID,
				order, replyIdx+1, 0, &extReply)
			if err != nil {
				return nil, fmt.Errorf("failed to create reply comment: %w", err)
			}
			activities = append(activities, reply)
		}
	}

	logger.Debug().Int("count", len(activities)).Msg("imported pull request comments")

	return activities, nil
}
// createComment analyses an external pull request comment object and creates types.PullReqActivity object out of it.
// It will mutate the pull request object to update counter fields
// (CommentCount, UnresolvedCount, ActivitySeq).
//
// parentID is nil for top-level comments; order, subOrder and replySeq
// position the comment within the pull request's activity stream.
func (r *repoImportState) createComment(
	ctx context.Context,
	repo *types.RepositoryCore,
	pullReq *types.PullReq,
	parentID *int64,
	order, subOrder, replySeq int,
	extComment *ExternalComment,
) (*types.PullReqActivity, error) {
	commenter, err := r.getPrincipalByEmail(ctx, extComment.Author.Email, int(pullReq.Number), false)
	if err != nil {
		return nil, fmt.Errorf("failed to get comment ID=%d author: %w", extComment.ID, err)
	}

	commentedAt := extComment.Created.UnixMilli()

	// Mark comments as resolved if the PR is merged, otherwise they are unresolved.
	var resolved, resolvedBy *int64
	if pullReq.State == enum.PullReqStateMerged {
		resolved = &commentedAt
		resolvedBy = &commenter.ID
	}

	const maxLenText = 64 << 10 // This limit is deliberately larger than the limit in our API.
	if len(extComment.Body) > maxLenText {
		// Truncate over-long comment bodies rather than failing the import.
		extComment.Body = extComment.Body[:maxLenText]
	}

	comment := &types.PullReqActivity{
		CreatedBy:   commenter.ID,
		Created:     commentedAt,
		Updated:     commentedAt,
		Edited:      commentedAt,
		Deleted:     nil,
		ParentID:    parentID,
		RepoID:      repo.ID,
		PullReqID:   pullReq.ID,
		Order:       int64(order),
		SubOrder:    int64(subOrder),
		ReplySeq:    int64(replySeq),
		Type:        enum.PullReqActivityTypeComment,
		Kind:        enum.PullReqActivityKindComment,
		Text:        extComment.Body,
		PayloadRaw:  json.RawMessage("{}"),
		Metadata:    nil,
		ResolvedBy:  resolvedBy,
		Resolved:    resolved,
		CodeComment: nil,
		Mentions:    nil,
	}

	if cc := extComment.CodeComment; cc != nil && cc.HunkHeader != "" && extComment.ParentID == 0 {
		// a code comment must have a valid HunkHeader and must not be a reply
		hunkHeader, ok := parser.ParseDiffHunkHeader(cc.HunkHeader)
		if !ok {
			return nil, errors.InvalidArgumentf("Invalid hunk header for code comment: %s", cc.HunkHeader)
		}

		comment.Kind = enum.PullReqActivityKindChangeComment
		comment.Type = enum.PullReqActivityTypeCodeComment
		comment.CodeComment = &types.CodeCommentFields{
			// A code comment is outdated if it was made against a different source commit.
			Outdated:     cc.SourceSHA != pullReq.SourceSHA,
			MergeBaseSHA: cc.MergeBaseSHA,
			SourceSHA:    cc.SourceSHA,
			Path:         cc.Path,
			LineNew:      hunkHeader.NewLine,
			SpanNew:      hunkHeader.NewSpan,
			LineOld:      hunkHeader.OldLine,
			SpanOld:      hunkHeader.OldSpan,
		}
		sideNew := !strings.EqualFold(cc.Side, "OLD") // cc.Side can be either OLD or NEW
		_ = comment.SetPayload(&types.PullRequestActivityPayloadCodeComment{
			Title:        cc.CodeSnippet.Header,
			Lines:        cc.CodeSnippet.Lines,
			LineStartNew: sideNew,
			LineEndNew:   sideNew,
		})
	}

	// store the comment
	if err := r.pullReqActivityStore.Create(ctx, comment); err != nil {
		return nil, fmt.Errorf("failed to store the external comment ID=%d author: %w", extComment.ID, err)
	}

	// update the pull request's counter fields
	pullReq.CommentCount++
	if comment.IsBlocking() {
		pullReq.UnresolvedCount++
	}
	if pullReq.ActivitySeq < comment.Order {
		pullReq.ActivitySeq = comment.Order
	}

	return comment, nil
}
// createInfoComment creates an informational comment on the PR
// if any of the principals were replaced with the migrator.
// It mutates the pull request object by bumping ActivitySeq and CommentCount.
func (r *repoImportState) createInfoComment(
	ctx context.Context,
	repo *types.RepositoryCore,
	pullReq *types.PullReq,
) (*types.PullReqActivity, error) {
	prUnknownEmails := r.unknownEmails[int(pullReq.Number)]
	unknownEmails := make([]string, 0, len(prUnknownEmails))
	for email := range prUnknownEmails {
		unknownEmails = append(unknownEmails, email)
	}
	// Map iteration order is random - sort the emails so the generated comment
	// text is deterministic across runs.
	sort.Strings(unknownEmails)

	now := time.Now().UnixMilli()
	text := fmt.Sprintf(InfoCommentMessage, r.migrator.UID, strings.Join(unknownEmails, ", "))

	comment := &types.PullReqActivity{
		CreatedBy:  r.migrator.ID,
		Created:    now,
		Updated:    now,
		Deleted:    nil,
		ParentID:   nil,
		RepoID:     repo.ID,
		PullReqID:  pullReq.ID,
		Order:      pullReq.ActivitySeq + 1,
		SubOrder:   0,
		ReplySeq:   0,
		Type:       enum.PullReqActivityTypeComment,
		Kind:       enum.PullReqActivityKindComment,
		Text:       text,
		PayloadRaw: json.RawMessage("{}"),
		Metadata:   nil,
		// The info comment is created pre-resolved so it does not show up as
		// an unresolved discussion.
		ResolvedBy:  &r.migrator.ID,
		Resolved:    &now,
		CodeComment: nil,
		Mentions:    nil,
	}

	if err := r.pullReqActivityStore.Create(ctx, comment); err != nil {
		return nil, fmt.Errorf("failed to store the info comment author: %w", err)
	}

	pullReq.ActivitySeq++
	pullReq.CommentCount++

	return comment, nil
}
// getPrincipalByEmail resolves a principal by email address, caching results
// in r.principals. If the email cannot be resolved and strict is false, the
// fallback migrator principal is returned and the email is recorded in
// r.unknownEmails for the given pull request number (capped at
// MaxNumberOfUnknownEmails per PR). If strict is true an unknown email is an
// error.
//
//nolint:unparam
func (r *repoImportState) getPrincipalByEmail(
	ctx context.Context,
	emailAddress string,
	prNumber int,
	strict bool,
) (*types.Principal, error) {
	if principal, exists := r.principals[emailAddress]; exists {
		return principal, nil
	}

	principal, err := r.principalStore.FindByEmail(ctx, emailAddress)
	if err != nil && !errors.Is(err, gitness_store.ErrResourceNotFound) {
		return nil, fmt.Errorf("failed to load principal by email: %w", err)
	}
	if err == nil {
		r.principals[emailAddress] = principal
		return principal, nil
	}

	if strict {
		return nil, fmt.Errorf(
			"could not find principal by email %s and automatic replacing unknown principals is disabled: %w",
			emailAddress, err)
	}

	// ignore not found emails if is not strict
	if _, exists := r.unknownEmails[prNumber]; !exists {
		r.unknownEmails[prNumber] = make(map[string]bool)
	}
	if _, ok := r.unknownEmails[prNumber][emailAddress]; !ok && len(r.unknownEmails[prNumber]) < MaxNumberOfUnknownEmails {
		r.unknownEmails[prNumber][emailAddress] = true
	}

	return &r.migrator, nil
}
// assignLabels assigns the given external labels (and their values) to the
// pull request, creating any labels or label values that do not yet exist in
// the given space. Resolved labels and values are cached on the import state.
// It mutates the pull request object by bumping ActivitySeq per assignment.
func (r *repoImportState) assignLabels(
	ctx context.Context,
	spaceID int64,
	pullreq *types.PullReq,
	labels []ExternalLabel,
) error {
	if len(labels) == 0 {
		return nil
	}

	now := time.Now().UnixMilli()
	for _, l := range labels {
		var label *types.Label
		var err error

		// Resolve the label ID, consulting the cache first.
		labelID, found := r.labels[l.Name]
		if !found {
			label, err = r.labelStore.Find(ctx, &spaceID, nil, l.Name)
			if errors.Is(err, gitness_store.ErrResourceNotFound) {
				label, err = r.defineLabel(ctx, spaceID, l)
				if err != nil {
					return fmt.Errorf("failed to define label: %w", err)
				}
			} else if err != nil {
				return fmt.Errorf("failed to find the label with key %s in space %d: %w", l.Name, spaceID, err)
			}
			r.labels[l.Name], labelID = label.ID, label.ID
		}

		// Resolve the label value ID, if the external label carries a value.
		var valueID *int64
		valueID, found = r.labelValues[labelID][l.Value]
		if !found && l.Value != "" {
			var labelValue *types.LabelValue
			labelValue, err = r.labelValueStore.FindByLabelID(ctx, labelID, l.Value)
			if errors.Is(err, gitness_store.ErrResourceNotFound) {
				labelValue, err = r.defineLabelValue(ctx, labelID, l.Value)
				if err != nil {
					return fmt.Errorf("failed to define label values: %w", err)
				}
			} else if err != nil {
				return fmt.Errorf("failed to find the label with value %s and key %s in space %d: %w",
					l.Value, l.Name, spaceID, err)
			}
			valueID = &labelValue.ID
		}

		pullReqLabel := &types.PullReqLabel{
			PullReqID: pullreq.ID,
			LabelID:   labelID,
			ValueID:   valueID,
			Created:   now,
			Updated:   now,
			CreatedBy: r.migrator.ID,
			UpdatedBy: r.migrator.ID,
		}

		err = r.pullReqLabelAssignmentStore.Assign(ctx, pullReqLabel)
		if err != nil {
			return fmt.Errorf("failed to assign label %s to pull request: %w", l.Name, err)
		}

		pullreq.ActivitySeq++
	}

	return nil
}
// defineLabel finds the label by key in the given space, creating it (after
// sanitization) when it does not exist yet. The label scope (depth of the
// space hierarchy) is resolved lazily on first use and cached on the import
// state.
func (r *repoImportState) defineLabel(
	ctx context.Context,
	spaceID int64,
	extLabel ExternalLabel,
) (*types.Label, error) {
	if r.scope == 0 {
		// Compute the space depth once and cache it for subsequent labels.
		spaceIDs, err := r.spaceStore.GetAncestorIDs(ctx, spaceID)
		if err != nil {
			return nil, fmt.Errorf("failed to get space ids hierarchy: %w", err)
		}
		r.scope = int64(len(spaceIDs))
	}

	labelIn, err := convertLabelWithSanitization(ctx, r.migrator, spaceID, r.scope, extLabel)
	if err != nil {
		return nil, fmt.Errorf("failed to sanitize and convert external label input: %w", err)
	}

	// Re-check for existence with the sanitized key before defining.
	label, err := r.labelStore.Find(ctx, &spaceID, nil, labelIn.Key)
	if errors.Is(err, gitness_store.ErrResourceNotFound) {
		err = r.labelStore.Define(ctx, labelIn)
		if err != nil {
			return nil, fmt.Errorf("failed to define and find the label: %w", err)
		}
		return labelIn, nil
	}
	if err != nil {
		return nil, fmt.Errorf("failed to define and find the label: %w", err)
	}

	return label, nil
}
// defineLabelValue finds the value for the given label, creating it when it
// does not exist yet. The result is cached in r.labelValues, and the label's
// value count is incremented when a new value is created.
func (r *repoImportState) defineLabelValue(
	ctx context.Context,
	labelID int64,
	value string,
) (*types.LabelValue, error) {
	valueIn := &types.DefineValueInput{
		Value: value,
		Color: defaultLabelValueColor,
	}
	if err := valueIn.Sanitize(); err != nil {
		return nil, fmt.Errorf("failed to sanitize external label value input: %w", err)
	}

	if _, exists := r.labelValues[labelID]; !exists {
		r.labelValues[labelID] = make(map[string]*int64)
	}

	labelValue, err := r.labelValueStore.FindByLabelID(ctx, labelID, valueIn.Value)
	if err == nil {
		// the value already exists - cache and return it
		r.labelValues[labelID][labelValue.Value] = &labelValue.ID
		return labelValue, nil
	}
	if !errors.Is(err, gitness_store.ErrResourceNotFound) {
		return nil, fmt.Errorf("failed to find label value: %w", err)
	}

	// define the label value if not exists
	now := time.Now().UnixMilli()
	labelValue = &types.LabelValue{
		LabelID:   labelID,
		Value:     valueIn.Value,
		Color:     defaultLabelValueColor,
		Created:   now,
		Updated:   now,
		CreatedBy: r.migrator.ID,
		UpdatedBy: r.migrator.ID,
	}
	err = r.labelValueStore.Define(ctx, labelValue)
	if err != nil {
		return nil, fmt.Errorf("failed to define label value: %w", err)
	}

	_, err = r.labelStore.IncrementValueCount(ctx, labelID, 1)
	if err != nil {
		return nil, fmt.Errorf("failed to update label value count: %w", err)
	}

	r.labelValues[labelID][labelValue.Value] = &labelValue.ID

	return labelValue, nil
}
func timestampMillis(t time.Time, def int64) int64 {
if t.IsZero() {
return def
}
return t.UnixMilli()
}
// generateThreads groups a flat list of external comments into threads: each
// thread is a top-level comment plus all of its (transitive) replies flattened
// to a single level. Threads are ordered by the creation time of the top-level
// comment, replies by their own creation time. Comments whose parent chain
// exceeds the supported depth (a cycle guard) are dropped.
func generateThreads(extComments []ExternalComment) []*externalCommentThread {
	extCommentParents := make(map[int]int, len(extComments))
	extCommentMap := make(map[int]ExternalComment, len(extComments))
	for _, extComment := range extComments {
		extCommentParents[extComment.ID] = extComment.ParentID
		extCommentMap[extComment.ID] = extComment
	}

	// Make flat list of reply comment IDs: create map[topLevelCommentID]->[]commentID
	extCommentIDReplyMap := make(map[int][]int)
	for _, extComment := range extComments {
		topLevelParentID := getTopLevelParentID(extComment.ID, extCommentParents)
		if topLevelParentID < 0 {
			// no top-level ancestor within the depth limit - drop the comment
			continue
		}

		if topLevelParentID == extComment.ID {
			// Ensure a top-level comment without any replies still gets an
			// entry in the map (with a nil reply slice).
			if _, ok := extCommentIDReplyMap[topLevelParentID]; !ok {
				extCommentIDReplyMap[topLevelParentID] = nil
			}
			continue
		}

		extCommentIDReplyMap[topLevelParentID] = append(extCommentIDReplyMap[topLevelParentID], extComment.ID)
	}

	countTopLevel := len(extCommentIDReplyMap)
	if countTopLevel == 0 {
		return nil
	}

	extCommentThreads := make([]*externalCommentThread, 0, countTopLevel)
	for topLevelID, replyIDs := range extCommentIDReplyMap {
		expReplyComments := make([]ExternalComment, len(replyIDs))
		for i, replyID := range replyIDs {
			expReplyComments[i] = extCommentMap[replyID]
		}
		thread := &externalCommentThread{
			TopLevel: extCommentMap[topLevelID],
			Replies:  expReplyComments,
		}
		extCommentThreads = append(extCommentThreads, thread)
	}

	// order top level comments
	sort.Slice(extCommentThreads, func(i, j int) bool {
		created1 := extCommentThreads[i].TopLevel.Created
		created2 := extCommentThreads[j].TopLevel.Created
		return created1.Before(created2)
	})

	// order reply comments
	for _, thread := range extCommentThreads {
		sort.Slice(thread.Replies, func(i, j int) bool {
			created1 := thread.Replies[i].Created
			created2 := thread.Replies[j].Created
			return created1.Before(created2)
		})
	}

	return extCommentThreads
}
// getTopLevelParentID walks up the parent chain of the comment with the given
// id and returns the ID of its top-level ancestor (the comment whose parent is
// 0). It returns -1 if no top-level ancestor is reached within the maximum
// supported depth, which guards against cycles in malformed input.
func getTopLevelParentID(id int, tree map[int]int) int {
	const maxDepth = 20

	curr := id
	for depth := 0; depth < maxDepth; depth++ {
		parent := tree[curr]
		if parent == 0 {
			return curr
		}
		curr = parent
	}

	return -1
}
// createReviewers processes external reviewer objects.
// It stores one PullReqReviewer row per external reviewer (skipping the PR
// author) and, when any reviewers were created, records a single batched
// reviewer-request activity for all of them.
func (r *repoImportState) createReviewers(
	ctx context.Context,
	repo *types.RepositoryCore,
	pullReq *types.PullReq,
	extReviewers []ExternalReviewer,
) ([]*types.PullReqReviewer, error) {
	log := log.Ctx(ctx).With().
		Str("repo.id", repo.Identifier).
		Int("pullreq.number", int(pullReq.Number)).
		Logger()

	reviewers := make([]*types.PullReqReviewer, 0, len(extReviewers))
	for _, extReviewer := range extReviewers {
		reviewer, err := r.getPrincipalByEmail(ctx, extReviewer.User.Email, int(pullReq.Number), false)
		if err != nil {
			return nil, fmt.Errorf("failed to get reviewer principal: %w", err)
		}

		// skip the PR author - they cannot review their own pull request
		if reviewer.ID == pullReq.CreatedBy {
			continue
		}

		// Use PR created timestamp for reviewer assignment
		assignedAt := pullReq.Created

		prReviewer := &types.PullReqReviewer{
			PullReqID:      pullReq.ID,
			PrincipalID:    reviewer.ID,
			CreatedBy:      r.migrator.ID,
			Created:        assignedAt,
			Updated:        assignedAt,
			RepoID:         repo.ID,
			Type:           enum.PullReqReviewerTypeRequested,
			LatestReviewID: nil,                               // Will be set when reviews are processed
			ReviewDecision: enum.PullReqReviewDecisionPending, // Will be updated when reviews are processed
			SHA:            pullReq.SourceSHA,
			Reviewer:       *reviewer.ToPrincipalInfo(),
			AddedBy:        *r.migrator.ToPrincipalInfo(),
		}

		if err := r.pullReqReviewerStore.Create(ctx, prReviewer); err != nil {
			return nil, fmt.Errorf("failed to store pull request reviewer: %w", err)
		}

		reviewers = append(reviewers, prReviewer)
	}

	log.Debug().Int("count", len(reviewers)).Msg("imported pull request reviewers")

	if len(reviewers) > 0 {
		reviewerIDs := make([]int64, 0, len(reviewers))
		for _, reviewer := range reviewers {
			reviewerIDs = append(reviewerIDs, reviewer.PrincipalID)
		}
		// NOTE(review): no result of createReviewerActivity is checked here -
		// confirm it handles/logs its own failures internally.
		r.createReviewerActivity(ctx, pullReq, reviewerIDs, enum.PullReqReviewerTypeRequested)
	}

	return reviewers, nil
}
// createReviews processes external review objects.
func (r *repoImportState) createReviews(
ctx context.Context,
repo *types.RepositoryCore,
pullReq *types.PullReq,
extReviews []ExternalReview,
) ([]*types.PullReqReview, error) {
log := log.Ctx(ctx).With().
Str("repo.id", repo.Identifier).
Int("pullreq.number", int(pullReq.Number)).
Logger()
reviews := make([]*types.PullReqReview, 0, len(extReviews))
for _, extReview := range extReviews {
reviewer, err := r.getPrincipalByEmail(ctx, extReview.Author.Email, int(pullReq.Number), false)
if err != nil {
return nil, fmt.Errorf("failed to get reviewer principal: %w", err)
}
if reviewer.ID == pullReq.CreatedBy {
continue
}
decision := enum.PullReqReviewDecision(extReview.Decision)
now := time.Now().UnixMilli()
createdAt := timestampMillis(extReview.Created, now)
updatedAt := timestampMillis(extReview.Updated, now)
prReview := &types.PullReqReview{
CreatedBy: reviewer.ID,
Created: createdAt,
Updated: updatedAt,
PullReqID: pullReq.ID,
Decision: decision,
SHA: extReview.SHA,
}
if err := r.pullReqReviewStore.Create(ctx, prReview); err != nil {
return nil, fmt.Errorf("failed to store pull request review: %w", err)
}
existingReviewer, err := r.pullReqReviewerStore.Find(ctx, pullReq.ID, reviewer.ID)
if err != nil && !errors.Is(err, gitness_store.ErrResourceNotFound) {
return nil, fmt.Errorf("failed to find reviewer from review author: %w", err)
}
if errors.Is(err, gitness_store.ErrResourceNotFound) {
reviewerFromReview := &types.PullReqReviewer{
PullReqID: pullReq.ID,
PrincipalID: reviewer.ID,
CreatedBy: r.migrator.ID,
Created: createdAt,
Updated: updatedAt,
RepoID: repo.ID,
Type: enum.PullReqReviewerTypeSelfAssigned,
LatestReviewID: &prReview.ID,
ReviewDecision: decision,
SHA: pullReq.SourceSHA,
Reviewer: *reviewer.ToPrincipalInfo(),
AddedBy: *reviewer.ToPrincipalInfo(),
}
if err := r.pullReqReviewerStore.Create(ctx, reviewerFromReview); err != nil {
return nil, fmt.Errorf("failed to create reviewer from review author: %w", err)
}
}
if existingReviewer != nil {
// Update existing reviewer with latest review
existingReviewer.LatestReviewID = &prReview.ID
existingReviewer.ReviewDecision = decision
existingReviewer.Updated = updatedAt
if err := r.pullReqReviewerStore.Update(ctx, existingReviewer); err != nil {
log.Warn().Err(err).Msg("failed to update reviewer with latest review")
}
}
reviews = append(reviews, prReview)
}
log.Debug().Int("count", len(reviews)).Msg("imported pull request reviews")
// Create activity entries for review submissions
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | true |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/migrate/rule_types.go | app/services/migrate/rule_types.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package migrate
import (
"bytes"
"context"
"encoding/json"
"fmt"
"github.com/harness/gitness/app/services/protection"
"github.com/harness/gitness/app/store"
migratetypes "github.com/harness/harness-migrate/types"
)
// ExternalRuleTypeBranch identifies branch protection rules produced by the
// external harness-migrate tooling.
const ExternalRuleTypeBranch = migratetypes.RuleTypeBranch

type (
	// Aliases for the external migrator types so the rest of this package
	// doesn't need to import harness-migrate directly.
	ExternalRuleType      = migratetypes.RuleType
	ExternalRule          = migratetypes.Rule
	ExternalDefinition    = migratetypes.Definition
	ExternalBranchPattern = migratetypes.BranchPattern

	// definitionDeserializer converts a raw (JSON) external rule definition
	// string into an internal protection.Definition.
	definitionDeserializer func(context.Context, string) (protection.Definition, error)
	// patternDeserializer converts a raw (JSON) external rule pattern string
	// into an internal protection.Pattern.
	patternDeserializer func(context.Context, string) (*protection.Pattern, error)
)
// registerDeserializers wires up the per-rule-type deserializers that
// Rule.Import uses to convert raw external rule definitions and patterns
// into their internal protection counterparts.
func (migrate *Rule) registerDeserializers(principalStore store.PrincipalStore) {
	// branch rules definition deserializer
	migrate.DefDeserializationMap[ExternalRuleTypeBranch] = func(
		ctx context.Context,
		rawDef string,
	) (protection.Definition, error) {
		// deserialize string into external branch rule type
		var extrDef ExternalDefinition
		decoder := json.NewDecoder(bytes.NewReader([]byte(rawDef)))
		if err := decoder.Decode(&extrDef); err != nil {
			return nil, fmt.Errorf("failed to decode external branch rule definition: %w", err)
		}

		// map the external definition onto the internal branch protection
		// rules, resolving bypass-list user emails to principal IDs.
		rule, err := mapToBranchRules(ctx, extrDef, principalStore)
		if err != nil {
			return nil, fmt.Errorf("failed to map external branch rule definition to internal: %w", err)
		}

		return rule, nil
	}

	// branch rules pattern deserializer
	migrate.PatternDeserializationMap[ExternalRuleTypeBranch] = func(
		_ context.Context,
		rawDef string,
	) (*protection.Pattern, error) {
		var extrPattern ExternalBranchPattern
		decoder := json.NewDecoder(bytes.NewReader([]byte(rawDef)))
		if err := decoder.Decode(&extrPattern); err != nil {
			return nil, fmt.Errorf("failed to decode external branch rule pattern: %w", err)
		}

		return &protection.Pattern{
			Default: extrPattern.Default,
			Include: extrPattern.Include,
			Exclude: extrPattern.Exclude,
		}, nil
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/migrate/pullreq_types.go | app/services/migrate/pullreq_types.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package migrate
import migratetypes "github.com/harness/harness-migrate/types"
// Aliases for the external migrator pull request types so the rest of this
// package doesn't need to import harness-migrate directly.
type ExternalPullRequest = migratetypes.PullRequestData
type ExternalComment = migratetypes.Comment
type ExternalReview = migratetypes.Review
type ExternalReviewer = migratetypes.Reviewer

// externalCommentThread groups a top-level external comment with its replies.
type externalCommentThread struct {
	TopLevel ExternalComment
	Replies  []ExternalComment
}

const (
	// InfoCommentMessage is posted on imported pull requests to explain that
	// non-existent authors/commenters were replaced by the migrating principal.
	// Format args: migrating principal identifier, list of unknown emails.
	InfoCommentMessage = "This pull request has been imported. Non-existent users who were originally listed " +
		"as the pull request author or commenter have been replaced by the principal '%s' which performed the migration.\n" +
		"Unknown emails: %v"

	MaxNumberOfUnknownEmails = 500 // limit keeping unknown users to avoid info comment text exceed ~1000 characters
)
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/migrate/rule.go | app/services/migrate/rule.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package migrate
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/harness/gitness/app/services/protection"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/errors"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/check"
"github.com/harness/gitness/types/enum"
migratetypes "github.com/harness/harness-migrate/types"
"github.com/rs/zerolog/log"
)
// Rule imports external protection rules into Gitness.
type Rule struct {
	ruleStore      store.RuleStore
	principalStore store.PrincipalStore
	tx             dbtx.Transactor

	// DefDeserializationMap maps an external rule type to the function that
	// converts its raw definition into an internal protection.Definition.
	DefDeserializationMap map[migratetypes.RuleType]definitionDeserializer
	// PatternDeserializationMap maps an external rule type to the function
	// that converts its raw pattern into an internal protection.Pattern.
	PatternDeserializationMap map[migratetypes.RuleType]patternDeserializer
}
// NewRule creates a new rule migrator and registers the deserializers for
// all supported external rule types.
func NewRule(
	ruleStore store.RuleStore,
	tx dbtx.Transactor,
	principalStore store.PrincipalStore,
) *Rule {
	r := &Rule{
		ruleStore:                 ruleStore,
		principalStore:            principalStore,
		tx:                        tx,
		DefDeserializationMap:     map[ExternalRuleType]definitionDeserializer{},
		PatternDeserializationMap: map[ExternalRuleType]patternDeserializer{},
	}

	r.registerDeserializers(principalStore)

	return r
}
// Import converts the provided external rules of the given type into internal
// rules and stores them all for the repository in a single transaction. The
// migrator principal is recorded as the creator of each rule.
func (migrate Rule) Import(
	ctx context.Context,
	migrator types.Principal,
	repo *types.RepositoryCore,
	typ ExternalRuleType,
	extRules []*ExternalRule,
) ([]*types.Rule, error) {
	rules := make([]*types.Rule, len(extRules))
	for i, extRule := range extRules {
		if err := check.Identifier(extRule.Identifier); err != nil {
			return nil, fmt.Errorf("branch rule identifier '%s' is invalid: %w", extRule.Identifier, err)
		}

		// deserialize, sanitize, and re-marshal the rule definition.
		// NOTE(review): assumes a deserializer is registered for typ — an
		// unknown type would yield a nil map entry and panic on call; confirm
		// callers only pass supported types.
		def, err := migrate.DefDeserializationMap[typ](ctx, string(extRule.Definition))
		if err != nil {
			return nil, fmt.Errorf("failed to deserialize rule definition: %w", err)
		}
		if err = def.Sanitize(); err != nil {
			return nil, fmt.Errorf("provided rule definition is invalid: %w", err)
		}
		definitionJSON, err := json.Marshal(def)
		if err != nil {
			return nil, fmt.Errorf("failed to marshal rule definition: %w", err)
		}

		// deserialize and validate the rule pattern.
		pattern, err := migrate.PatternDeserializationMap[typ](ctx, string(extRule.Pattern))
		if err != nil {
			return nil, fmt.Errorf("failed to deserialize rule pattern: %w", err)
		}
		if err = pattern.Validate(); err != nil {
			return nil, fmt.Errorf("provided rule pattern is invalid: %w", err)
		}

		now := time.Now().UnixMilli()
		r := &types.Rule{
			CreatedBy:  migrator.ID,
			Created:    now,
			Updated:    now,
			RepoID:     &repo.ID,
			SpaceID:    nil, // imported rules are repository-scoped only.
			Type:       protection.TypeBranch,
			State:      enum.RuleState(extRule.State),
			Identifier: extRule.Identifier,
			Pattern:    pattern.JSON(),
			Definition: json.RawMessage(definitionJSON),
		}

		rules[i] = r
	}

	// persist all rules atomically — either every rule is created or none.
	err := migrate.tx.WithTx(ctx, func(ctx context.Context) error {
		for _, rule := range rules {
			err := migrate.ruleStore.Create(ctx, rule)
			if err != nil {
				return fmt.Errorf("failed to create branch rule: %w", err)
			}
		}
		return nil
	})
	if err != nil {
		return nil, fmt.Errorf("failed to store external branch rules: %w", err)
	}

	return rules, nil
}
// mapToBranchRules converts an external branch rule definition into the
// internal branch protection definition. Bypass-list user emails are resolved
// to principal IDs; emails without a matching principal are skipped with a
// warning rather than failing the migration.
func mapToBranchRules(
	ctx context.Context,
	rule ExternalDefinition,
	principalStore store.PrincipalStore,
) (*protection.Branch, error) {
	// map users
	var userIDs []int64
	for _, email := range rule.Bypass.UserEmails {
		principal, err := principalStore.FindByEmail(ctx, email)
		if err != nil && !errors.Is(err, gitness_store.ErrResourceNotFound) {
			return nil, fmt.Errorf("failed to find principal by email for '%s': %w", email, err)
		}
		if errors.Is(err, gitness_store.ErrResourceNotFound) {
			// unknown principal — drop it from the bypass list.
			log.Ctx(ctx).Warn().Msgf("skipping principal '%s' on bypass list", email)
			continue
		}
		userIDs = append(userIDs, principal.ID)
	}

	return &protection.Branch{
		Bypass: protection.DefBypass{
			UserIDs:    userIDs,
			RepoOwners: rule.Bypass.RepoOwners,
		},
		PullReq: protection.DefPullReq{
			Approvals: protection.DefApprovals{
				RequireCodeOwners:      rule.PullReq.Approvals.RequireCodeOwners,
				RequireMinimumCount:    rule.PullReq.Approvals.RequireMinimumCount,
				RequireLatestCommit:    rule.PullReq.Approvals.RequireLatestCommit,
				RequireNoChangeRequest: rule.PullReq.Approvals.RequireNoChangeRequest,
				// default reviewers are not part of the external definition.
				RequireMinimumDefaultReviewerCount: 0,
			},
			Comments: protection.DefComments(rule.PullReq.Comments),
			Merge: protection.DefMerge{
				StrategiesAllowed: convertMergeMethods(rule.PullReq.Merge.StrategiesAllowed),
				DeleteBranch:      rule.PullReq.Merge.DeleteBranch,
				Block:             rule.PullReq.Merge.Block,
			},
		},
		Lifecycle: protection.DefBranchLifecycle{
			DefLifecycle: protection.DefLifecycle{
				CreateForbidden:      rule.Lifecycle.CreateForbidden,
				DeleteForbidden:      rule.Lifecycle.DeleteForbidden,
				UpdateForceForbidden: rule.Lifecycle.UpdateForceForbidden,
			},
			UpdateForbidden: rule.Lifecycle.UpdateForbidden,
		},
	}, nil
}
// convertMergeMethods maps the external merge strategy names onto the
// internal merge method enum values.
func convertMergeMethods(vals []string) []enum.MergeMethod {
	res := make([]enum.MergeMethod, 0, len(vals))
	for _, val := range vals {
		res = append(res, enum.MergeMethod(val))
	}
	return res
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/migrate/label.go | app/services/migrate/label.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package migrate
import (
"context"
"fmt"
"strings"
"time"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/app/store/database"
"github.com/harness/gitness/errors"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/lucasb-eyer/go-colorful"
"github.com/rs/zerolog/log"
)
// defaultLabelValueColor is the color assigned to imported label values.
const defaultLabelValueColor = enum.LabelColorGreen

// Label is label migrate.
type Label struct {
	labelStore      store.LabelStore
	labelValueStore store.LabelValueStore
	spaceStore      store.SpaceStore
	tx              dbtx.Transactor
}

// NewLabel creates a new label migrator.
func NewLabel(
	labelStore store.LabelStore,
	labelValueStore store.LabelValueStore,
	spaceStore store.SpaceStore,
	tx dbtx.Transactor,
) *Label {
	return &Label{
		labelStore:      labelStore,
		labelValueStore: labelValueStore,
		spaceStore:      spaceStore,
		tx:              tx,
	}
}
// Import converts the provided external labels into internal space-level
// labels (with optional values) and defines them all in a single transaction.
// Labels and values that already exist in the space are reused, not duplicated.
//nolint:gocognit
func (migrate Label) Import(
	ctx context.Context,
	migrator types.Principal,
	space *types.SpaceCore,
	extLabels []*ExternalLabel,
) ([]*types.Label, error) {
	labels := make([]*types.Label, len(extLabels))
	labelValues := make(map[string][]string)

	spaceIDs, err := migrate.spaceStore.GetAncestorIDs(ctx, space.ID)
	if err != nil {
		return nil, fmt.Errorf("failed to get space ids hierarchy: %w", err)
	}
	// the label scope is the depth of the space in the hierarchy.
	scope := int64(len(spaceIDs))

	for i, extLabel := range extLabels {
		label, err := convertLabelWithSanitization(ctx, migrator, space.ID, scope, *extLabel)
		if err != nil {
			return nil, fmt.Errorf("failed to sanitize and convert external label input: %w", err)
		}
		labels[i] = label

		// collect the sanitized label values keyed by label key.
		if extLabel.Value != "" {
			valueIn := &types.DefineValueInput{
				Value: extLabel.Value,
				Color: defaultLabelValueColor,
			}
			if err := valueIn.Sanitize(); err != nil {
				return nil, fmt.Errorf("failed to sanitize external label value input: %w", err)
			}
			labelValues[label.Key] = append(labelValues[label.Key], valueIn.Value)
		}
	}

	// define all labels and their values atomically.
	err = migrate.tx.WithTx(ctx, func(ctx context.Context) error {
		for _, label := range labels {
			err := migrate.defineLabelsAndValues(ctx, migrator.ID, space.ID, label, labelValues[label.Key])
			if err != nil {
				return fmt.Errorf("failed to define labels and/or values: %w", err)
			}
		}
		return nil
	})
	if err != nil {
		return nil, fmt.Errorf("failed to define external labels: %w", err)
	}

	return labels, nil
}
// defineLabelsAndValues defines the label in the space (unless it already
// exists) plus any of the provided values that aren't defined for it yet,
// and bumps the label's stored value count by the number of new values.
func (migrate Label) defineLabelsAndValues(
	ctx context.Context,
	migratorID int64,
	spaceID int64,
	labelIn *types.Label,
	extValues []string) error {
	var label *types.Label
	var err error

	// try to find the label first as it might have been defined already.
	label, err = migrate.labelStore.Find(ctx, &spaceID, nil, labelIn.Key)
	if errors.Is(err, gitness_store.ErrResourceNotFound) {
		err := migrate.labelStore.Define(ctx, labelIn)
		if err != nil {
			return fmt.Errorf("failed to define label: %w", err)
		}
		label = labelIn
	} else if err != nil {
		return fmt.Errorf("failed to find the label: %w", err)
	}

	// list the label's existing values so duplicates aren't redefined.
	values, err := migrate.labelValueStore.List(
		ctx,
		label.ID,
		types.ListQueryFilter{
			Pagination: types.Pagination{
				Size: database.MaxLabelValueSize,
			},
		},
	)
	if err != nil {
		return fmt.Errorf("failed to list label values: %w", err)
	}

	now := time.Now().UnixMilli()
	existingValues := make(map[string]bool)
	for _, val := range values {
		existingValues[val.Value] = true
	}

	var newValuesCount int
	for _, val := range extValues {
		if existingValues[val] {
			continue
		}

		// define new label values
		if err := migrate.labelValueStore.Define(ctx, &types.LabelValue{
			LabelID:   label.ID,
			Value:     val,
			Color:     defaultLabelValueColor,
			Created:   now,
			Updated:   now,
			CreatedBy: migratorID,
			UpdatedBy: migratorID,
		}); err != nil {
			return fmt.Errorf("failed to create label value: %w", err)
		}

		newValuesCount++
	}

	// keep the label's value count in sync with the newly added values.
	_, err = migrate.labelStore.IncrementValueCount(ctx, label.ID, newValuesCount)
	if err != nil {
		return fmt.Errorf("failed to update label value count: %w", err)
	}

	return nil
}
// convertLabelWithSanitization sanitizes the external label input and
// converts it into an internal space-level label owned by the migrator.
func convertLabelWithSanitization(
	ctx context.Context,
	migrator types.Principal,
	spaceID int64,
	scope int64,
	extLabel ExternalLabel,
) (*types.Label, error) {
	input := &types.DefineLabelInput{
		Key:         extLabel.Name,
		Type:        enum.LabelTypeStatic,
		Description: extLabel.Description,
		Color:       findClosestColor(ctx, extLabel.Color),
	}
	if err := input.Sanitize(); err != nil {
		return nil, fmt.Errorf("failed to sanitize external labels input: %w", err)
	}

	timestamp := time.Now().UnixMilli()

	return &types.Label{
		SpaceID:     &spaceID,
		RepoID:      nil,
		Scope:       scope,
		Key:         input.Key,
		Color:       input.Color,
		Description: input.Description,
		Type:        input.Type,
		Created:     timestamp,
		Updated:     timestamp,
		CreatedBy:   migrator.ID,
		UpdatedBy:   migrator.ID,
	}, nil
}
// findClosestColor finds the visually closest supported label color to the
// provided hex value using the go-colorful library. The input may omit the
// leading '#'. If the value cannot be parsed as a hex color, the default
// label color is returned.
func findClosestColor(ctx context.Context, extColor string) enum.LabelColor {
	supportedColors, defColor := enum.GetAllLabelColors()

	// tolerate values that omit the leading '#' (e.g. "C7292F").
	if extColor != "" && !strings.HasPrefix(extColor, "#") {
		extColor = "#" + extColor
	}

	targetColor, err := colorful.Hex(strings.ToUpper(extColor))
	if err != nil {
		log.Ctx(ctx).Warn().Err(err).Msg("failed to convert the color to hex. choosing default color instead.")
		return defColor
	}

	// pick the supported color with the smallest CIE-Lab distance.
	closestColor := supportedColors[0]
	minDistance := targetColor.DistanceLab(convertToColorful(supportedColors[0]))
	for _, labelColor := range supportedColors[1:] {
		if distance := targetColor.DistanceLab(convertToColorful(labelColor)); distance < minDistance {
			closestColor = labelColor
			minDistance = distance
		}
	}

	return closestColor
}
// convertToColorful converts Gitness supported label colors to the hex (text value) using web/src/utils:ColorDetails.
func convertToColorful(color enum.LabelColor) colorful.Color {
	// blue is the default color on Gitness.
	hex := "#236E93"

	switch color {
	case enum.LabelColorRed:
		hex = "#C7292F"
	case enum.LabelColorGreen:
		hex = "#16794C"
	case enum.LabelColorYellow:
		hex = "#92582D"
	case enum.LabelColorBlue:
		hex = "#236E93"
	case enum.LabelColorPink:
		hex = "#C41B87"
	case enum.LabelColorPurple:
		hex = "#9C2AAD"
	case enum.LabelColorViolet:
		hex = "#5645AF"
	case enum.LabelColorIndigo:
		hex = "#3250B2"
	case enum.LabelColorCyan:
		hex = "#0B7792"
	case enum.LabelColorOrange:
		hex = "#995137"
	case enum.LabelColorBrown:
		hex = "#805C43"
	case enum.LabelColorMint:
		hex = "#247469"
	case enum.LabelColorLime:
		hex = "#586729"
	}

	hexColor, _ := colorful.Hex(hex)
	return hexColor
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/refcache/wire.go | app/services/refcache/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package refcache
import (
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/app/store/cache"
"github.com/harness/gitness/types"
"github.com/google/wire"
)
// WireSet provides a wire set for this package.
var WireSet = wire.NewSet(
ProvideSpaceFinder,
ProvideRepoFinder,
)
// ProvideSpaceFinder provides a space finder backed by the given caches.
func ProvideSpaceFinder(
	spaceIDCache store.SpaceIDCache,
	spaceRefCache store.SpacePathCache,
	evictor cache.Evictor[*types.SpaceCore],
) SpaceFinder {
	return NewSpaceFinder(spaceIDCache, spaceRefCache, evictor)
}
// ProvideRepoFinder provides a repository finder backed by the given store and caches.
func ProvideRepoFinder(
	repoStore store.RepoStore,
	spaceRefCache store.SpacePathCache,
	repoIDCache store.RepoIDCache,
	repoRefCache store.RepoRefCache,
	evictor cache.Evictor[*types.RepositoryCore],
) RepoFinder {
	return NewRepoFinder(repoStore, spaceRefCache, repoIDCache, repoRefCache, evictor)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/refcache/repo_finder.go | app/services/refcache/repo_finder.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package refcache
import (
"context"
"fmt"
"strconv"
"github.com/harness/gitness/app/paths"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/app/store/cache"
"github.com/harness/gitness/types"
)
// RepoFinder resolves repositories by ID or by path reference, using caches
// to avoid repeated store lookups.
type RepoFinder struct {
	repoStore      store.RepoStore
	spacePathCache store.SpacePathCache
	repoIDCache    store.RepoIDCache
	repoRefCache   store.RepoRefCache
	evictor        cache.Evictor[*types.RepositoryCore]
}

// NewRepoFinder creates a new RepoFinder backed by the provided store and caches.
func NewRepoFinder(
	repoStore store.RepoStore,
	spacePathCache store.SpacePathCache,
	repoIDCache store.RepoIDCache,
	repoRefCache store.RepoRefCache,
	evictor cache.Evictor[*types.RepositoryCore],
) RepoFinder {
	return RepoFinder{
		repoStore:      repoStore,
		spacePathCache: spacePathCache,
		repoIDCache:    repoIDCache,
		repoRefCache:   repoRefCache,
		evictor:        evictor,
	}
}
// MarkChanged evicts the repository from the cache after it has been changed.
func (r RepoFinder) MarkChanged(ctx context.Context, repoCore *types.RepositoryCore) {
	r.evictor.Evict(ctx, repoCore)
}

// FindByID returns the repository core for the given ID from the cache.
func (r RepoFinder) FindByID(ctx context.Context, repoID int64) (*types.RepositoryCore, error) {
	return r.repoIDCache.Get(ctx, repoID)
}
// FindByRef returns the repository core for the given reference. The
// reference can either be a numeric repository ID or a repository path
// ("space/sub-space/repo-identifier").
func (r RepoFinder) FindByRef(ctx context.Context, repoRef string) (*types.RepositoryCore, error) {
	repoID, err := strconv.ParseInt(repoRef, 10, 64)
	if err != nil || repoID <= 0 {
		// not a valid numeric ID — resolve the reference as a repository path.
		spaceRef, repoIdentifier, err := paths.DisectLeaf(repoRef)
		if err != nil {
			// fix of previously garbled message ("disect extract repo idenfifier")
			return nil, fmt.Errorf("failed to dissect repo identifier from path: %w", err)
		}

		spacePath, err := r.spacePathCache.Get(ctx, spaceRef)
		if err != nil {
			return nil, fmt.Errorf("failed to get space from cache: %w", err)
		}

		key := types.RepoCacheKey{SpaceID: spacePath.SpaceID, RepoIdentifier: repoIdentifier}
		repoID, err = r.repoRefCache.Get(ctx, key)
		if err != nil {
			return nil, fmt.Errorf("failed to get repository ID by space ID and repo identifier: %w", err)
		}
	}

	repoCore, err := r.repoIDCache.Get(ctx, repoID)
	if err != nil {
		return nil, fmt.Errorf("failed to get repository by ID: %w", err)
	}

	return repoCore, nil
}
// FindDeletedByRef returns the deleted repository matching the given
// reference and deletion timestamp. The reference can either be a numeric
// repository ID or a repository path ("space/sub-space/repo-identifier").
func (r RepoFinder) FindDeletedByRef(ctx context.Context, repoRef string, deleted int64) (*types.Repository, error) {
	repoID, err := strconv.ParseInt(repoRef, 10, 64)
	// NOTE(review): FindByRef treats repoID <= 0 as a path reference, while
	// here ID zero takes the numeric branch — confirm whether 0 should also
	// fall through to the path lookup.
	if err == nil && repoID >= 0 {
		repo, err := r.repoStore.FindDeleted(ctx, repoID, &deleted)
		if err != nil {
			return nil, fmt.Errorf("failed to get repository by ID: %w", err)
		}
		return repo, nil
	}

	// resolve the reference as a repository path.
	spaceRef, repoIdentifier, err := paths.DisectLeaf(repoRef)
	if err != nil {
		// fix of previously garbled message ("disect extract repo idenfifier")
		return nil, fmt.Errorf("failed to dissect repo identifier from path: %w", err)
	}

	spacePath, err := r.spacePathCache.Get(ctx, spaceRef)
	if err != nil {
		return nil, fmt.Errorf("failed to get space ID by space ref from cache: %w", err)
	}

	repo, err := r.repoStore.FindDeletedByUID(ctx, spacePath.SpaceID, repoIdentifier, deleted)
	if err != nil {
		return nil, fmt.Errorf("failed to get deleted repository ID by space ID and repo identifier: %w", err)
	}

	return repo, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/refcache/space_finder.go | app/services/refcache/space_finder.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package refcache
import (
"context"
"fmt"
"strconv"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/app/store/cache"
"github.com/harness/gitness/types"
)
// SpaceFinder resolves spaces by ID or by path reference, using caches to
// avoid repeated store lookups.
type SpaceFinder struct {
	spaceIDCache   store.SpaceIDCache
	spacePathCache store.SpacePathCache
	evictor        cache.Evictor[*types.SpaceCore]
}

// NewSpaceFinder creates a new SpaceFinder backed by the provided caches.
func NewSpaceFinder(
	spaceIDCache store.SpaceIDCache,
	spacePathCache store.SpacePathCache,
	evictor cache.Evictor[*types.SpaceCore],
) SpaceFinder {
	s := SpaceFinder{
		spaceIDCache:   spaceIDCache,
		spacePathCache: spacePathCache,
		evictor:        evictor,
	}

	return s
}
// MarkChanged evicts the space from the cache after it has been changed.
func (s SpaceFinder) MarkChanged(ctx context.Context, spaceCore *types.SpaceCore) {
	s.evictor.Evict(ctx, spaceCore)
}

// FindByID returns the space core for the given space ID from the cache.
func (s SpaceFinder) FindByID(ctx context.Context, spaceID int64) (*types.SpaceCore, error) {
	spaceCore, err := s.spaceIDCache.Get(ctx, spaceID)
	if err != nil {
		return nil, fmt.Errorf("failed to get space by ID from cache: %w", err)
	}

	return spaceCore, nil
}
// FindByRef returns the space core for the given reference. The reference can
// either be a numeric space ID or a space path.
func (s SpaceFinder) FindByRef(ctx context.Context, spaceRef string) (*types.SpaceCore, error) {
	spaceID, err := strconv.ParseInt(spaceRef, 10, 64)
	if err != nil || spaceID <= 0 {
		// not a valid numeric ID — resolve the reference as a space path.
		spacePath, pathErr := s.spacePathCache.Get(ctx, spaceRef)
		if pathErr != nil {
			return nil, fmt.Errorf("failed to get space ID by space path from cache: %w", pathErr)
		}
		spaceID = spacePath.SpaceID
	}

	// FindByID performs the same ID-cache lookup and error wrapping as before.
	return s.FindByID(ctx, spaceID)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/gitspaceservice/wire.go | app/services/gitspaceservice/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gitspaceservice
import (
"github.com/harness/gitness/app/services/aitaskevent"
"github.com/harness/gitness/app/services/gitspace"
"github.com/harness/gitness/app/services/gitspaceinfraevent"
"github.com/harness/gitness/app/services/gitspaceoperationsevent"
"github.com/harness/gitness/app/services/infraprovider"
"github.com/google/wire"
)
// WireSet provides a wire set bundling all gitspace related service providers.
var WireSet = wire.NewSet(
	gitspace.WireSet,
	gitspaceinfraevent.WireSet,
	infraprovider.WireSet,
	gitspaceoperationsevent.WireSet,
	aitaskevent.WireSet,
)
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/codecomments/wire.go | app/services/codecomments/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package codecomments
import (
"github.com/harness/gitness/git"
"github.com/google/wire"
)
// WireSet provides a wire set for this package.
var WireSet = wire.NewSet(
	ProvideMigrator,
)

// ProvideMigrator provides a code comment migrator that uses git as the
// hunk header fetcher.
func ProvideMigrator(
	git git.Interface,
) *Migrator {
	return &Migrator{
		hunkHeaderFetcher: git,
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/codecomments/migrator_test.go | app/services/codecomments/migrator_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package codecomments
import (
"context"
"testing"
"github.com/harness/gitness/git"
"github.com/harness/gitness/types"
)
// TestMigrator verifies migration of code comment line positions after branch
// updates. Cases named "source:*" exercise MigrateNew (new commit on the
// source branch, moving the "+"/new side); cases named "merge-base:*" set
// rebase=true and exercise MigrateOld (merge-base change, moving the "-"/old
// side). Comments overlapped by a hunk must be marked outdated and keep their
// original SHAs; all others shift by the hunk delta and get the new SHA.
// nolint:gocognit // it's a unit test
func TestMigrator(t *testing.T) {
	const (
		repoUID    = "not-important"
		fileName   = "blah" // file name is fixed across all the tests.
		shaSrcOld  = "old"
		shaSrcNew  = "new"
		shaBaseOld = "base-old"
		shaBaseNew = "base-new"
	)

	// position describes one code comment's line range before/after migration,
	// plus the SHAs and outdated flag expected after the run.
	type position struct {
		lineOld, spanOld, lineNew, spanNew int
		mergeBaseSHA, sourceSHA            string
		outdated                           bool
	}

	tests := []struct {
		name      string
		headers   []git.HunkHeader // hunk headers returned by the fake fetcher
		rebase    bool             // true => MigrateOld, false => MigrateNew
		positions []position       // initial comment positions
		expected  []position       // expected positions after migration
	}{
		{
			name:    "source:no-hunks",
			headers: nil,
			positions: []position{
				{lineOld: 30, spanOld: 1, lineNew: 30, spanNew: 1},
				{lineOld: 50, spanOld: 1, lineNew: 50, spanNew: 1},
			},
			expected: []position{
				{lineOld: 30, spanOld: 1, lineNew: 30, spanNew: 1, mergeBaseSHA: shaBaseOld, sourceSHA: shaSrcNew},
				{lineOld: 50, spanOld: 1, lineNew: 50, spanNew: 1, mergeBaseSHA: shaBaseOld, sourceSHA: shaSrcNew},
			},
		},
		{
			name: "source:lines-added-before-two-comments",
			headers: []git.HunkHeader{
				{OldLine: 0, OldSpan: 0, NewLine: 10, NewSpan: 10},
			},
			positions: []position{
				{lineOld: 30, spanOld: 1, lineNew: 30, spanNew: 1},
				{lineOld: 50, spanOld: 1, lineNew: 50, spanNew: 1},
			},
			expected: []position{
				{lineOld: 30, spanOld: 1, lineNew: 40, spanNew: 1, mergeBaseSHA: shaBaseOld, sourceSHA: shaSrcNew},
				{lineOld: 50, spanOld: 1, lineNew: 60, spanNew: 1, mergeBaseSHA: shaBaseOld, sourceSHA: shaSrcNew},
			},
		},
		{
			name: "source:lines-added-between-two-comments",
			headers: []git.HunkHeader{
				{OldLine: 40, OldSpan: 0, NewLine: 40, NewSpan: 40},
			},
			positions: []position{
				{lineOld: 30, spanOld: 1, lineNew: 30, spanNew: 1},
				{lineOld: 50, spanOld: 1, lineNew: 50, spanNew: 1},
			},
			expected: []position{
				{lineOld: 30, spanOld: 1, lineNew: 30, spanNew: 1, mergeBaseSHA: shaBaseOld, sourceSHA: shaSrcNew},
				{lineOld: 50, spanOld: 1, lineNew: 90, spanNew: 1, mergeBaseSHA: shaBaseOld, sourceSHA: shaSrcNew},
			},
		},
		{
			name: "source:lines-added-after-two-comments",
			headers: []git.HunkHeader{
				{OldLine: 60, OldSpan: 0, NewLine: 60, NewSpan: 200},
			},
			positions: []position{
				{lineOld: 30, spanOld: 1, lineNew: 30, spanNew: 1},
				{lineOld: 50, spanOld: 1, lineNew: 50, spanNew: 1},
			},
			expected: []position{
				{lineOld: 30, spanOld: 1, lineNew: 30, spanNew: 1, mergeBaseSHA: shaBaseOld, sourceSHA: shaSrcNew},
				{lineOld: 50, spanOld: 1, lineNew: 50, spanNew: 1, mergeBaseSHA: shaBaseOld, sourceSHA: shaSrcNew},
			},
		},
		{
			// the hunk touches the second comment: it must become outdated and
			// keep the old source SHA, while its neighbors migrate normally.
			name: "source:modified-second-comment",
			headers: []git.HunkHeader{
				{OldLine: 50, OldSpan: 1, NewLine: 50, NewSpan: 1},
			},
			positions: []position{
				{lineOld: 30, spanOld: 1, lineNew: 30, spanNew: 1},
				{lineOld: 50, spanOld: 1, lineNew: 50, spanNew: 1},
				{lineOld: 70, spanOld: 1, lineNew: 70, spanNew: 1},
			},
			expected: []position{
				{lineOld: 30, spanOld: 1, lineNew: 30, spanNew: 1, mergeBaseSHA: shaBaseOld, sourceSHA: shaSrcNew},
				{lineOld: 50, spanOld: 1, lineNew: 50, spanNew: 1, mergeBaseSHA: shaBaseOld, sourceSHA: shaSrcOld,
					outdated: true},
				{lineOld: 70, spanOld: 1, lineNew: 70, spanNew: 1, mergeBaseSHA: shaBaseOld, sourceSHA: shaSrcNew},
			},
		},
		{
			name: "source:modified-second-comment;also-removed-10-lines-at-1",
			headers: []git.HunkHeader{
				{OldLine: 1, OldSpan: 10, NewLine: 0, NewSpan: 0},
				{OldLine: 50, OldSpan: 1, NewLine: 40, NewSpan: 1},
			},
			positions: []position{
				{lineOld: 30, spanOld: 1, lineNew: 30, spanNew: 1},
				{lineOld: 50, spanOld: 1, lineNew: 50, spanNew: 1},
				{lineOld: 70, spanOld: 1, lineNew: 70, spanNew: 1},
			},
			expected: []position{
				{lineOld: 30, spanOld: 1, lineNew: 20, spanNew: 1, mergeBaseSHA: shaBaseOld, sourceSHA: shaSrcNew},
				{lineOld: 50, spanOld: 1, lineNew: 50, spanNew: 1, mergeBaseSHA: shaBaseOld, sourceSHA: shaSrcOld,
					outdated: true},
				{lineOld: 70, spanOld: 1, lineNew: 60, spanNew: 1, mergeBaseSHA: shaBaseOld, sourceSHA: shaSrcNew},
			},
		},
		{
			name: "source:modified-second-comment;also-added-10-lines-at-1",
			headers: []git.HunkHeader{
				{OldLine: 0, OldSpan: 0, NewLine: 1, NewSpan: 10},
				{OldLine: 50, OldSpan: 1, NewLine: 60, NewSpan: 1},
			},
			positions: []position{
				{lineOld: 30, spanOld: 1, lineNew: 30, spanNew: 1},
				{lineOld: 50, spanOld: 1, lineNew: 50, spanNew: 1},
				{lineOld: 70, spanOld: 1, lineNew: 70, spanNew: 1},
			},
			expected: []position{
				{lineOld: 30, spanOld: 1, lineNew: 40, spanNew: 1, mergeBaseSHA: shaBaseOld, sourceSHA: shaSrcNew},
				{lineOld: 50, spanOld: 1, lineNew: 50, spanNew: 1, mergeBaseSHA: shaBaseOld, sourceSHA: shaSrcOld,
					outdated: true},
				{lineOld: 70, spanOld: 1, lineNew: 80, spanNew: 1, mergeBaseSHA: shaBaseOld, sourceSHA: shaSrcNew},
			},
		},
		{
			name:    "merge-base:no-hunks",
			headers: nil,
			rebase:  true,
			positions: []position{
				{lineOld: 30, spanOld: 1, lineNew: 30, spanNew: 1},
				{lineOld: 50, spanOld: 1, lineNew: 50, spanNew: 1},
			},
			expected: []position{
				{lineOld: 30, spanOld: 1, lineNew: 30, spanNew: 1, mergeBaseSHA: shaBaseNew, sourceSHA: shaSrcOld},
				{lineOld: 50, spanOld: 1, lineNew: 50, spanNew: 1, mergeBaseSHA: shaBaseNew, sourceSHA: shaSrcOld},
			},
		},
		{
			name: "merge-base:lines-added-before-two-comments",
			headers: []git.HunkHeader{
				{OldLine: 0, OldSpan: 0, NewLine: 10, NewSpan: 10},
			},
			rebase: true,
			positions: []position{
				{lineOld: 30, spanOld: 1, lineNew: 30, spanNew: 1},
				{lineOld: 50, spanOld: 1, lineNew: 50, spanNew: 1},
			},
			expected: []position{
				{lineOld: 40, spanOld: 1, lineNew: 30, spanNew: 1, mergeBaseSHA: shaBaseNew, sourceSHA: shaSrcOld},
				{lineOld: 60, spanOld: 1, lineNew: 50, spanNew: 1, mergeBaseSHA: shaBaseNew, sourceSHA: shaSrcOld},
			},
		},
		{
			name: "merge-base:lines-added-between-two-comments",
			headers: []git.HunkHeader{
				{OldLine: 40, OldSpan: 0, NewLine: 40, NewSpan: 40},
			},
			rebase: true,
			positions: []position{
				{lineOld: 30, spanOld: 1, lineNew: 30, spanNew: 1},
				{lineOld: 50, spanOld: 1, lineNew: 50, spanNew: 1},
			},
			expected: []position{
				{lineOld: 30, spanOld: 1, lineNew: 30, spanNew: 1, mergeBaseSHA: shaBaseNew, sourceSHA: shaSrcOld},
				{lineOld: 90, spanOld: 1, lineNew: 50, spanNew: 1, mergeBaseSHA: shaBaseNew, sourceSHA: shaSrcOld},
			},
		},
		{
			name: "merge-base:lines-added-after-two-comments",
			headers: []git.HunkHeader{
				{OldLine: 60, OldSpan: 0, NewLine: 60, NewSpan: 200},
			},
			rebase: true,
			positions: []position{
				{lineOld: 30, spanOld: 1, lineNew: 30, spanNew: 1},
				{lineOld: 50, spanOld: 1, lineNew: 50, spanNew: 1},
			},
			expected: []position{
				{lineOld: 30, spanOld: 1, lineNew: 30, spanNew: 1, mergeBaseSHA: shaBaseNew, sourceSHA: shaSrcOld},
				{lineOld: 50, spanOld: 1, lineNew: 50, spanNew: 1, mergeBaseSHA: shaBaseNew, sourceSHA: shaSrcOld},
			},
		},
		{
			name: "merge-base:modified-second-comment",
			headers: []git.HunkHeader{
				{OldLine: 50, OldSpan: 1, NewLine: 50, NewSpan: 1},
			},
			rebase: true,
			positions: []position{
				{lineOld: 30, spanOld: 1, lineNew: 30, spanNew: 1},
				{lineOld: 50, spanOld: 1, lineNew: 50, spanNew: 1},
				{lineOld: 70, spanOld: 1, lineNew: 70, spanNew: 1},
			},
			expected: []position{
				{lineOld: 30, spanOld: 1, lineNew: 30, spanNew: 1, mergeBaseSHA: shaBaseNew, sourceSHA: shaSrcOld},
				{lineOld: 50, spanOld: 1, lineNew: 50, spanNew: 1, mergeBaseSHA: shaBaseOld, sourceSHA: shaSrcOld,
					outdated: true},
				{lineOld: 70, spanOld: 1, lineNew: 70, spanNew: 1, mergeBaseSHA: shaBaseNew, sourceSHA: shaSrcOld},
			},
		},
		{
			name: "merge-base:modified-second-comment;also-removed-10-lines-at-1",
			headers: []git.HunkHeader{
				{OldLine: 1, OldSpan: 10, NewLine: 0, NewSpan: 0},
				{OldLine: 50, OldSpan: 1, NewLine: 40, NewSpan: 1},
			},
			rebase: true,
			positions: []position{
				{lineOld: 30, spanOld: 1, lineNew: 30, spanNew: 1},
				{lineOld: 50, spanOld: 1, lineNew: 50, spanNew: 1},
				{lineOld: 70, spanOld: 1, lineNew: 70, spanNew: 1},
			},
			expected: []position{
				{lineOld: 20, spanOld: 1, lineNew: 30, spanNew: 1, mergeBaseSHA: shaBaseNew, sourceSHA: shaSrcOld},
				{lineOld: 50, spanOld: 1, lineNew: 50, spanNew: 1, mergeBaseSHA: shaBaseOld, sourceSHA: shaSrcOld,
					outdated: true},
				{lineOld: 60, spanOld: 1, lineNew: 70, spanNew: 1, mergeBaseSHA: shaBaseNew, sourceSHA: shaSrcOld},
			},
		},
		{
			name: "merge-base:modified-second-comment;also-added-10-lines-at-1",
			headers: []git.HunkHeader{
				{OldLine: 0, OldSpan: 0, NewLine: 1, NewSpan: 10},
				{OldLine: 50, OldSpan: 1, NewLine: 60, NewSpan: 1},
			},
			rebase: true,
			positions: []position{
				{lineOld: 30, spanOld: 1, lineNew: 30, spanNew: 1},
				{lineOld: 50, spanOld: 1, lineNew: 50, spanNew: 1},
				{lineOld: 70, spanOld: 1, lineNew: 70, spanNew: 1},
			},
			expected: []position{
				{lineOld: 40, spanOld: 1, lineNew: 30, spanNew: 1, mergeBaseSHA: shaBaseNew, sourceSHA: shaSrcOld},
				{lineOld: 50, spanOld: 1, lineNew: 50, spanNew: 1, mergeBaseSHA: shaBaseOld, sourceSHA: shaSrcOld,
					outdated: true},
				{lineOld: 80, spanOld: 1, lineNew: 70, spanNew: 1, mergeBaseSHA: shaBaseNew, sourceSHA: shaSrcOld},
			},
		},
	}

	ctx := context.Background()

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			// fake fetcher returns the case's hunk headers for the fixed file.
			f := testHunkHeaderFetcher{
				fileName: fileName,
				headers:  test.headers,
			}

			m := &Migrator{
				hunkHeaderFetcher: f,
			}

			// build the code comments from the initial positions; all start
			// with the old SHAs.
			comments := make([]*types.CodeComment, len(test.positions))
			for i, pos := range test.positions {
				comments[i] = &types.CodeComment{
					ID: int64(i),
					CodeCommentFields: types.CodeCommentFields{
						Outdated:     pos.outdated,
						MergeBaseSHA: shaBaseOld,
						SourceSHA:    shaSrcOld,
						Path:         fileName,
						LineNew:      pos.lineNew,
						SpanNew:      pos.spanNew,
						LineOld:      pos.lineOld,
						SpanOld:      pos.spanOld,
					},
				}
			}

			if test.rebase {
				m.MigrateOld(ctx, repoUID, shaBaseNew, comments)
			} else {
				m.MigrateNew(ctx, repoUID, shaSrcNew, comments)
			}

			for i, expPos := range test.expected {
				if expPos.outdated != comments[i].Outdated {
					t.Errorf("comment=%d, outdated mismatch", i)
				}
				if want, got := expPos.lineNew, comments[i].LineNew; want != got {
					t.Errorf("comment=%d, line new, want=%d got=%d", i, want, got)
				}
				if want, got := expPos.spanNew, comments[i].SpanNew; want != got {
					t.Errorf("comment=%d, span new, want=%d got=%d", i, want, got)
				}
				if want, got := expPos.lineOld, comments[i].LineOld; want != got {
					t.Errorf("comment=%d, line old, want=%d got=%d", i, want, got)
				}
				if want, got := expPos.spanOld, comments[i].SpanOld; want != got {
					t.Errorf("comment=%d, span old, want=%d got=%d", i, want, got)
				}
				if want, got := expPos.mergeBaseSHA, comments[i].MergeBaseSHA; want != got {
					t.Errorf("comment=%d, merge base sha, want=%s got=%s", i, want, got)
				}
				if want, got := expPos.sourceSHA, comments[i].SourceSHA; want != got {
					t.Errorf("comment=%d, source sha, want=%s got=%s", i, want, got)
				}
			}
		})
	}
}
// testHunkHeaderFetcher is a fake hunkHeaderFetcher that always reports a
// single changed file (fileName) with the preconfigured hunk headers.
type testHunkHeaderFetcher struct {
	fileName string
	headers  []git.HunkHeader
}
// GetDiffHunkHeaders implements hunkHeaderFetcher: it ignores its parameters
// and returns a canned summary with one file carrying the fetcher's headers.
func (f testHunkHeaderFetcher) GetDiffHunkHeaders(
	_ context.Context,
	_ git.GetDiffHunkHeadersParams,
) (git.GetDiffHunkHeadersOutput, error) {
	file := git.DiffFileHunkHeaders{
		FileHeader: git.DiffFileHeader{
			OldName:    f.fileName,
			NewName:    f.fileName,
			Extensions: nil,
		},
		HunkHeaders: f.headers,
	}
	return git.GetDiffHunkHeadersOutput{
		Files: []git.DiffFileHunkHeaders{file},
	}, nil
}
// TestProcessCodeComment checks the outdated/move classification of a single
// code comment (spanning lines 20-24) against individual hunk headers:
// hunks strictly before the comment shift it, hunks overlapping it mark it
// outdated, and hunks after it leave it untouched.
func TestProcessCodeComment(t *testing.T) {
	// the code comment tested in this unit test spans five lines, from line 20 to line 24
	const ccStart = 20
	const ccEnd = 24

	tests := []struct {
		name         string
		hunk         git.HunkHeader
		expOutdated  bool
		expMoveDelta int
	}{
		// only added lines
		{
			name:        "three-lines-added-before-far",
			hunk:        git.HunkHeader{OldLine: 10, OldSpan: 0, NewLine: 11, NewSpan: 3},
			expOutdated: false, expMoveDelta: 3,
		},
		{
			name:        "three-lines-added-before-but-touching",
			hunk:        git.HunkHeader{OldLine: 19, OldSpan: 0, NewLine: 20, NewSpan: 3},
			expOutdated: false, expMoveDelta: 3,
		},
		{
			name:        "three-lines-added-overlap-at-start",
			hunk:        git.HunkHeader{OldLine: 20, OldSpan: 0, NewLine: 21, NewSpan: 3},
			expOutdated: true, expMoveDelta: 0,
		},
		{
			name:        "three-lines-added-inside",
			hunk:        git.HunkHeader{OldLine: 21, OldSpan: 0, NewLine: 22, NewSpan: 3},
			expOutdated: true, expMoveDelta: 0,
		},
		{
			name:        "three-lines-added-overlap-at-end",
			hunk:        git.HunkHeader{OldLine: 23, OldSpan: 0, NewLine: 24, NewSpan: 3},
			expOutdated: true, expMoveDelta: 0,
		},
		{
			name:        "three-lines-added-after-but-touching",
			hunk:        git.HunkHeader{OldLine: 24, OldSpan: 0, NewLine: 25, NewSpan: 3},
			expOutdated: false, expMoveDelta: 0,
		},
		{
			name:        "three-lines-added-after-far",
			hunk:        git.HunkHeader{OldLine: 30, OldSpan: 0, NewLine: 31, NewSpan: 3},
			expOutdated: false, expMoveDelta: 0,
		},
		// only removed lines
		{
			name:        "three-lines-removed-before-far",
			hunk:        git.HunkHeader{OldLine: 10, OldSpan: 3, NewLine: 9, NewSpan: 0},
			expOutdated: false, expMoveDelta: -3,
		},
		{
			name:        "three-lines-removed-before-but-touching",
			hunk:        git.HunkHeader{OldLine: 17, OldSpan: 3, NewLine: 16, NewSpan: 0},
			expOutdated: false, expMoveDelta: -3,
		},
		{
			name:        "three-lines-removed-overlap-at-start",
			hunk:        git.HunkHeader{OldLine: 18, OldSpan: 3, NewLine: 17, NewSpan: 0},
			expOutdated: true, expMoveDelta: 0,
		},
		{
			name:        "three-lines-removed-inside",
			hunk:        git.HunkHeader{OldLine: 21, OldSpan: 3, NewLine: 20, NewSpan: 0},
			expOutdated: true, expMoveDelta: 0,
		},
		{
			name:        "three-lines-removed-overlap-at-end",
			hunk:        git.HunkHeader{OldLine: 24, OldSpan: 3, NewLine: 23, NewSpan: 0},
			expOutdated: true, expMoveDelta: 0,
		},
		{
			name:        "three-lines-removed-after-but-touching",
			hunk:        git.HunkHeader{OldLine: 25, OldSpan: 3, NewLine: 24, NewSpan: 0},
			expOutdated: false, expMoveDelta: 0,
		},
		{
			name:        "three-lines-removed-after-far",
			hunk:        git.HunkHeader{OldLine: 30, OldSpan: 3, NewLine: 29, NewSpan: 0},
			expOutdated: false, expMoveDelta: 0,
		},
		// only changed lines
		{
			name:        "three-lines-changed-before-far",
			hunk:        git.HunkHeader{OldLine: 10, OldSpan: 3, NewLine: 10, NewSpan: 3},
			expOutdated: false, expMoveDelta: 0,
		},
		{
			name:        "three-lines-changed-before-but-touching",
			hunk:        git.HunkHeader{OldLine: 17, OldSpan: 3, NewLine: 17, NewSpan: 3},
			expOutdated: false, expMoveDelta: 0,
		},
		{
			name:        "three-lines-changed-overlap-at-start",
			hunk:        git.HunkHeader{OldLine: 18, OldSpan: 3, NewLine: 18, NewSpan: 3},
			expOutdated: true, expMoveDelta: 0,
		},
		{
			name:        "three-lines-changed-inside",
			hunk:        git.HunkHeader{OldLine: 21, OldSpan: 3, NewLine: 21, NewSpan: 3},
			expOutdated: true, expMoveDelta: 0,
		},
		{
			name:        "three-lines-changed-overlap-at-end",
			hunk:        git.HunkHeader{OldLine: 24, OldSpan: 3, NewLine: 24, NewSpan: 3},
			expOutdated: true, expMoveDelta: 0,
		},
		{
			name:        "three-lines-changed-after-but-touching",
			hunk:        git.HunkHeader{OldLine: 25, OldSpan: 3, NewLine: 25, NewSpan: 3},
			expOutdated: false, expMoveDelta: 0,
		},
		{
			name:        "three-lines-changed-after-far",
			hunk:        git.HunkHeader{OldLine: 30, OldSpan: 3, NewLine: 30, NewSpan: 3},
			expOutdated: false, expMoveDelta: 0,
		},
		// mixed tests
		{
			name:        "two-lines-added-one-changed-just-before",
			hunk:        git.HunkHeader{OldLine: 19, OldSpan: 1, NewLine: 19, NewSpan: 3},
			expOutdated: false, expMoveDelta: 2,
		},
		{
			name:        "two-lines-removed-one-added-just-after",
			hunk:        git.HunkHeader{OldLine: 25, OldSpan: 2, NewLine: 25, NewSpan: 1},
			expOutdated: false, expMoveDelta: 0,
		},
		{
			name:        "twenty-lines-added-at-line-15",
			hunk:        git.HunkHeader{OldLine: 14, OldSpan: 0, NewLine: 15, NewSpan: 20},
			expOutdated: false, expMoveDelta: 20,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			outdated, moveDelta := processCodeComment(ccStart, ccEnd, test.hunk)
			if want, got := test.expOutdated, outdated; want != got {
				t.Errorf("outdated mismatch; want=%t got=%t", want, got)
				return
			}
			if want, got := test.expMoveDelta, moveDelta; want != got {
				t.Errorf("moveDelta mismatch; want=%d got=%d", want, got)
			}
		})
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/codecomments/migrator.go | app/services/codecomments/migrator.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package codecomments
import (
"context"
"github.com/harness/gitness/errors"
"github.com/harness/gitness/git"
gitenum "github.com/harness/gitness/git/enum"
"github.com/harness/gitness/types"
"github.com/rs/zerolog/log"
)
// Migrator is a utility used to migrate code comments after update of the pull request's source branch.
type Migrator struct {
	// hunkHeaderFetcher provides diff hunk headers between two commits;
	// in production this is the git interface, in tests a fake.
	hunkHeaderFetcher hunkHeaderFetcher
}

// hunkHeaderFetcher abstracts the single git call the Migrator needs.
type hunkHeaderFetcher interface {
	GetDiffHunkHeaders(context.Context, git.GetDiffHunkHeadersParams) (git.GetDiffHunkHeadersOutput, error)
}
// MigrateNew updates the "+" (the added lines) part of code comments
// after a new commit on the pull request's source branch.
// The parameter newSHA should contain the latest commit SHA of the pull request's source branch.
func (migrator *Migrator) MigrateNew(
	ctx context.Context,
	repoGitUID string,
	newSHA string,
	comments []*types.CodeComment,
) {
	// accessors for the "new"/source side of the code comment.
	getSHA := func(cc *types.CodeComment) string { return cc.SourceSHA }
	setSHA := func(cc *types.CodeComment, sha string) { cc.SourceSHA = sha }
	startEnd := func(cc *types.CodeComment) (int, int) {
		return cc.LineNew, cc.LineNew + cc.SpanNew - 1
	}
	shift := func(cc *types.CodeComment, delta int) { cc.LineNew += delta }

	migrator.migrate(ctx, repoGitUID, newSHA, comments, getSHA, setSHA, startEnd, shift)
}
// MigrateOld updates the "-" (the removed lines) part of code comments
// after the pull request's change of the merge base commit.
func (migrator *Migrator) MigrateOld(
	ctx context.Context,
	repoGitUID string,
	newSHA string,
	comments []*types.CodeComment,
) {
	// accessors for the "old"/merge-base side of the code comment.
	getSHA := func(cc *types.CodeComment) string { return cc.MergeBaseSHA }
	setSHA := func(cc *types.CodeComment, sha string) { cc.MergeBaseSHA = sha }
	startEnd := func(cc *types.CodeComment) (int, int) {
		return cc.LineOld, cc.LineOld + cc.SpanOld - 1
	}
	shift := func(cc *types.CodeComment, delta int) { cc.LineOld += delta }

	migrator.migrate(ctx, repoGitUID, newSHA, comments, getSHA, setSHA, startEnd, shift)
}
// migrate re-anchors code comments from their stored commit SHA to newSHA.
// It groups comments by SHA and file, fetches the hunk headers of the diff
// between each stored SHA and newSHA, and then either moves each comment by
// the accumulated line delta or marks it outdated (reverting it to its
// original field values). Comments that survive migration get newSHA stored
// via setSHA. The four function parameters select which side of the comment
// ("+"/new or "-"/old) is being migrated.
//
//nolint:gocognit,funlen // refactor if needed
func (migrator *Migrator) migrate(
	ctx context.Context,
	repoGitUID string,
	newSHA string,
	comments []*types.CodeComment,
	getSHA func(codeComment *types.CodeComment) string,
	setSHA func(codeComment *types.CodeComment, sha string),
	getCommentStartEnd func(codeComment *types.CodeComment) (int, int),
	updateCommentLine func(codeComment *types.CodeComment, line int),
) {
	if len(comments) == 0 {
		return
	}

	commitMap, initialValuesMap := mapCodeComments(comments, getSHA)

	for commentSHA, fileMap := range commitMap {
		// get all hunk headers for the diff between the SHA that's stored in the comment and the new SHA.
		diffSummary, errDiff := migrator.hunkHeaderFetcher.GetDiffHunkHeaders(ctx, git.GetDiffHunkHeadersParams{
			ReadParams: git.ReadParams{
				RepoUID: repoGitUID,
			},
			SourceCommitSHA: commentSHA,
			TargetCommitSHA: newSHA,
		})
		if errors.AsStatus(errDiff) == errors.StatusNotFound {
			// Handle the commit SHA not found error and mark all code comments as outdated.
			for _, codeComments := range fileMap {
				for _, codeComment := range codeComments {
					codeComment.Outdated = true
				}
			}
			continue
		}
		if errDiff != nil {
			// log and skip this commit group; its comments keep their old SHA.
			log.Ctx(ctx).Err(errDiff).
				Msgf("failed to get git diff between comment's sha %s and the latest %s", commentSHA, newSHA)
			continue
		}

		// Traverse all the changed files
		for _, file := range diffSummary.Files {
			codeComments := fileMap[file.FileHeader.OldName]

			// Handle file renames
			if file.FileHeader.OldName != file.FileHeader.NewName {
				if len(codeComments) == 0 {
					// If the code comments are not found using the old name of the file, try with the new name.
					codeComments = fileMap[file.FileHeader.NewName]
				} else {
					// Update the code comment's path to the new file name
					for _, cc := range codeComments {
						cc.Path = file.FileHeader.NewName
					}
				}
			}

			// Handle file delete
			if _, isDeleted := file.FileHeader.Extensions[gitenum.DiffExtHeaderDeletedFileMode]; isDeleted {
				for _, codeComment := range codeComments {
					codeComment.Outdated = true
				}
				continue
			}

			// Handle new files - shouldn't happen because no code comments should exist for a non-existing file.
			if _, isAdded := file.FileHeader.Extensions[gitenum.DiffExtHeaderNewFileMode]; isAdded {
				for _, codeComment := range codeComments {
					codeComment.Outdated = true
				}
				continue
			}

			// Process hunks bottom-up so earlier (lower-line) hunks see
			// comment positions not yet shifted by hunks above them.
			for hunkIdx := len(file.HunkHeaders) - 1; hunkIdx >= 0; hunkIdx-- {
				hunk := file.HunkHeaders[hunkIdx]
				for _, cc := range codeComments {
					if cc.Outdated {
						continue
					}

					ccStart, ccEnd := getCommentStartEnd(cc)
					outdated, moveDelta := processCodeComment(ccStart, ccEnd, hunk)
					if outdated {
						cc.CodeCommentFields = initialValuesMap[cc.ID] // revert the CC to the original values
						cc.Outdated = true
						continue
					}

					updateCommentLine(cc, moveDelta)
				}
			}
		}

		// stamp the new SHA on every comment that survived migration.
		for _, codeComments := range fileMap {
			for _, codeComment := range codeComments {
				if codeComment.Outdated {
					continue
				}
				setSHA(codeComment, newSHA)
			}
		}
	}
}
// mapCodeComments groups code comments to maps, first by commit SHA and then by file name.
// It assumes the incoming list is already sorted.
// It also returns a snapshot of each comment's original fields keyed by
// comment ID, used to revert comments that turn out to be outdated.
func mapCodeComments(
	comments []*types.CodeComment,
	extractSHA func(*types.CodeComment) string,
) (map[string]map[string][]*types.CodeComment, map[int64]types.CodeCommentFields) {
	byCommit := make(map[string]map[string][]*types.CodeComment)
	initial := make(map[int64]types.CodeCommentFields, len(comments))

	for _, cc := range comments {
		sha := extractSHA(cc)
		if byCommit[sha] == nil {
			byCommit[sha] = make(map[string][]*types.CodeComment)
		}
		byCommit[sha][cc.Path] = append(byCommit[sha][cc.Path], cc)
		initial[cc.ID] = cc.CodeCommentFields
	}

	return byCommit, initial
}
// processCodeComment checks a single code comment (inclusive line range
// ccStart..ccEnd) against one diff hunk header and reports whether the
// comment became outdated and, if not, by how many lines it must move.
func processCodeComment(ccStart, ccEnd int, h git.HunkHeader) (outdated bool, moveDelta int) {
	// A code comment is marked as outdated if:
	// * The code lines covered by the code comment are changed
	//   (the range given by the OldLine/OldSpan overlaps the code comment's code range)
	// * There are new lines inside the line range covered by the code comment, don't care about how many
	//   (the NewLine is between the CC start and CC end; the value of the NewSpan is unimportant).
	changedInside := h.OldSpan > 0 && ccEnd >= h.OldLine && ccStart <= h.OldLine+h.OldSpan-1
	addedInside := h.NewSpan > 0 && h.NewLine > ccStart && h.NewLine <= ccEnd
	if changedInside || addedInside {
		return true, 0 // outdated comments aren't moved
	}

	// the change described by the hunk header is below the code comment, so it doesn't affect it
	if ccEnd <= h.OldLine {
		return false, 0
	}

	return false, h.NewSpan - h.OldSpan
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/keywordsearch/wire.go | app/services/keywordsearch/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package keywordsearch
import (
"context"
gitevents "github.com/harness/gitness/app/events/git"
repoevents "github.com/harness/gitness/app/events/repo"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/events"
"github.com/google/wire"
)
// WireSet provides a wire set for this package.
var WireSet = wire.NewSet(
	ProvideLocalIndexSearcher,
	ProvideIndexer,
	ProvideSearcher,
	ProvideService,
)
// ProvideService constructs the keyword search Service, wiring it to the git
// and repo event reader factories.
func ProvideService(ctx context.Context,
	config Config,
	gitReaderFactory *events.ReaderFactory[*gitevents.Reader],
	repoReaderFactory *events.ReaderFactory[*repoevents.Reader],
	repoStore store.RepoStore,
	indexer Indexer,
) (*Service, error) {
	return NewService(ctx, config, gitReaderFactory, repoReaderFactory, repoStore, indexer)
}
// ProvideLocalIndexSearcher constructs the local (no-op) index searcher.
func ProvideLocalIndexSearcher() *LocalIndexSearcher {
	return NewLocalIndexSearcher()
}
// ProvideIndexer exposes the local index searcher through the Indexer interface.
func ProvideIndexer(searcher *LocalIndexSearcher) Indexer {
	return searcher
}
// ProvideSearcher exposes the local index searcher through the Searcher interface.
func ProvideSearcher(searcher *LocalIndexSearcher) Searcher {
	return searcher
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/keywordsearch/handler_branch.go | app/services/keywordsearch/handler_branch.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package keywordsearch
import (
"context"
"fmt"
"strings"
gitevents "github.com/harness/gitness/app/events/git"
repoevents "github.com/harness/gitness/app/events/repo"
"github.com/harness/gitness/events"
)
// handleEventBranchCreated reindexes the repository when a branch is created.
func (s *Service) handleEventBranchCreated(ctx context.Context,
	event *events.Event[*gitevents.BranchCreatedPayload]) error {
	payload := event.Payload
	return s.indexRepo(ctx, payload.RepoID, payload.Ref)
}
// handleEventBranchUpdated reindexes the repository when a branch is updated.
func (s *Service) handleEventBranchUpdated(ctx context.Context,
	event *events.Event[*gitevents.BranchUpdatedPayload]) error {
	payload := event.Payload
	return s.indexRepo(ctx, payload.RepoID, payload.Ref)
}
// handleUpdateDefaultBranch reindexes the repository after its default branch
// changed, since the index is maintained for the default branch only.
func (s *Service) handleUpdateDefaultBranch(ctx context.Context,
	event *events.Event[*repoevents.DefaultBranchUpdatedPayload]) error {
	repo, err := s.repoStore.Find(ctx, event.Payload.RepoID)
	if err != nil {
		return fmt.Errorf("failed to find repository in db: %w", err)
	}

	if err := s.indexer.Index(ctx, repo); err != nil {
		return fmt.Errorf("index update failed for repo %d: %w", repo.ID, err)
	}
	return nil
}
// indexRepo refreshes the keyword search index for the repository, but only
// when the given ref points at the repository's default branch.
func (s *Service) indexRepo(
	ctx context.Context,
	repoID int64,
	ref string,
) error {
	repo, err := s.repoStore.Find(ctx, repoID)
	if err != nil {
		return fmt.Errorf("failed to find repository in db: %w", err)
	}

	branch, err := getBranchFromRef(ref)
	if err != nil {
		// a malformed ref won't become valid on retry - discard the event.
		return events.NewDiscardEventError(
			fmt.Errorf("failed to parse branch name from ref: %w", err))
	}

	// we only maintain the index on the default branch
	if branch != repo.DefaultBranch {
		return nil
	}

	if err := s.indexer.Index(ctx, repo); err != nil {
		return fmt.Errorf("index update failed for repo %d: %w", repo.ID, err)
	}
	return nil
}
// getBranchFromRef extracts the branch name from a full branch ref
// ("refs/heads/<branch>"). It returns an error for refs that don't carry the
// branch prefix or that have an empty branch name.
func getBranchFromRef(ref string) (string, error) {
	const refPrefix = "refs/heads/"

	branch := strings.TrimPrefix(ref, refPrefix)
	if branch == ref {
		// TrimPrefix returned the input unchanged => prefix was missing.
		return "", fmt.Errorf("failed to get branch name from branch ref %s", ref)
	}
	if branch == "" {
		return "", fmt.Errorf("got an empty branch name from branch ref %s", ref)
	}
	return branch, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/keywordsearch/index_searcher.go | app/services/keywordsearch/index_searcher.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package keywordsearch
import (
"context"
"github.com/harness/gitness/types"
)
// Indexer creates or updates the keyword search index for a repository.
type Indexer interface {
	Index(ctx context.Context, repo *types.Repository) error
}
// Searcher runs a keyword search query across the given repositories.
type Searcher interface {
	Search(ctx context.Context, repoIDs []int64, query string, enableRegex bool, maxResultCount int) (
		types.SearchResult, error)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/keywordsearch/service.go | app/services/keywordsearch/service.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package keywordsearch
import (
"context"
"errors"
"fmt"
"time"
gitevents "github.com/harness/gitness/app/events/git"
repoevents "github.com/harness/gitness/app/events/repo"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/events"
"github.com/harness/gitness/stream"
)
// groupGitEvents is the event consumer group used by this service for both
// the git and repo event streams.
const groupGitEvents = "gitness:keywordsearch"

// Config holds the event-reader settings of the keyword search service.
type Config struct {
	EventReaderName string // name of the event reader instance
	Concurrency     int    // number of concurrent event handlers
	MaxRetries      int    // max redelivery attempts per event
}
// Prepare validates the config and returns an error describing the first
// problem found, or nil when the config is usable.
func (c *Config) Prepare() error {
	switch {
	case c == nil:
		return errors.New("config is required")
	case c.EventReaderName == "":
		return errors.New("config.EventReaderName is required")
	case c.Concurrency < 1:
		return errors.New("config.Concurrency has to be a positive number")
	case c.MaxRetries < 0:
		return errors.New("config.MaxRetries can't be negative")
	default:
		return nil
	}
}
// Service is responsible for indexing of repository for keyword search.
type Service struct {
	config    Config          // validated event-reader configuration
	indexer   Indexer         // index backend used to (re)index repositories
	repoStore store.RepoStore // lookup of repository metadata by ID
}
// NewService validates the config, constructs the keyword search service, and
// launches event readers for git branch events and repo default-branch
// updates so the index tracks the default branch.
func NewService(
	ctx context.Context,
	config Config,
	gitReaderFactory *events.ReaderFactory[*gitevents.Reader],
	repoReaderFactory *events.ReaderFactory[*repoevents.Reader],
	repoStore store.RepoStore,
	indexer Indexer,
) (*Service, error) {
	if err := config.Prepare(); err != nil {
		return nil, fmt.Errorf("provided codesearch service config is invalid: %w", err)
	}
	service := &Service{
		config:    config,
		repoStore: repoStore,
		indexer:   indexer,
	}

	_, err := gitReaderFactory.Launch(ctx, groupGitEvents, config.EventReaderName,
		func(r *gitevents.Reader) error {
			const idleTimeout = 1 * time.Minute
			r.Configure(
				stream.WithConcurrency(config.Concurrency),
				stream.WithHandlerOptions(
					stream.WithIdleTimeout(idleTimeout),
					stream.WithMaxRetries(config.MaxRetries),
				))

			// register events
			_ = r.RegisterBranchCreated(service.handleEventBranchCreated)
			_ = r.RegisterBranchUpdated(service.handleEventBranchUpdated)

			return nil
		})
	if err != nil {
		// fixed copy-paste message: this is the keyword search service, not webhooks.
		return nil, fmt.Errorf("failed to launch git event reader for keyword search: %w", err)
	}

	_, err = repoReaderFactory.Launch(ctx, groupGitEvents, config.EventReaderName,
		func(r *repoevents.Reader) error {
			const idleTimeout = 1 * time.Minute
			r.Configure(
				stream.WithConcurrency(config.Concurrency),
				stream.WithHandlerOptions(
					stream.WithIdleTimeout(idleTimeout),
					stream.WithMaxRetries(config.MaxRetries),
				))

			// removed redundant double parentheses around the handler reference.
			_ = r.RegisterDefaultBranchUpdated(service.handleUpdateDefaultBranch)

			return nil
		})
	if err != nil {
		return nil, fmt.Errorf("failed to launch reader factory for repo git group: %w", err)
	}

	return service, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/keywordsearch/local_index_searcher.go | app/services/keywordsearch/local_index_searcher.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package keywordsearch
import (
"context"
"github.com/harness/gitness/types"
)
// LocalIndexSearcher is a no-op implementation of the keyword search
// index/searcher; presumably used when no search backend is configured —
// TODO(review): confirm against the wiring that selects it.
type LocalIndexSearcher struct {
}
// NewLocalIndexSearcher returns a ready-to-use no-op searcher.
func NewLocalIndexSearcher() *LocalIndexSearcher {
	searcher := &LocalIndexSearcher{}
	return searcher
}
// Search is a no-op: it ignores all arguments and reports an empty result
// with no error.
func (s *LocalIndexSearcher) Search(
	_ context.Context,
	_ []int64,
	_ string,
	_ bool,
	_ int,
) (types.SearchResult, error) {
	var empty types.SearchResult
	return empty, nil
}
// Index is a no-op and always succeeds; the repository argument is ignored.
func (s *LocalIndexSearcher) Index(_ context.Context, _ *types.Repository) error {
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/importer/wire.go | app/services/importer/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package importer
import (
"context"
"fmt"
repoevents "github.com/harness/gitness/app/events/repo"
"github.com/harness/gitness/app/services/keywordsearch"
"github.com/harness/gitness/app/services/publicaccess"
"github.com/harness/gitness/app/services/refcache"
"github.com/harness/gitness/app/services/settings"
"github.com/harness/gitness/app/sse"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/app/url"
"github.com/harness/gitness/audit"
"github.com/harness/gitness/encrypt"
"github.com/harness/gitness/git"
"github.com/harness/gitness/job"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/google/wire"
)
// WireSet provides the dependency-injection bindings for the importer
// package's providers.
var WireSet = wire.NewSet(
	ProvideImporter,
	ProvideJobRepositoryImport,
	ProvideJobRepositoryLink,
	ProvideJobReferenceSync,
)
// ProvideConnectorService returns the default no-op connector service
// implementation.
func ProvideConnectorService() ConnectorService {
	return connectorServiceNoop{}
}
// ProvideImporter constructs the repository Importer from its dependencies.
// It is a pure pass-through to NewImporter; the positional argument order
// must match NewImporter's parameter order exactly.
func ProvideImporter(
	config *types.Config,
	urlProvider url.Provider,
	git git.Interface,
	tx dbtx.Transactor,
	repoStore store.RepoStore,
	pipelineStore store.PipelineStore,
	triggerStore store.TriggerStore,
	repoFinder refcache.RepoFinder,
	sseStreamer sse.Streamer,
	indexer keywordsearch.Indexer,
	publicAccess publicaccess.Service,
	eventReporter *repoevents.Reporter,
	auditService audit.Service,
	settings *settings.Service,
) *Importer {
	return NewImporter(
		config.Git.DefaultBranch,
		urlProvider,
		git,
		tx,
		repoStore,
		pipelineStore,
		triggerStore,
		repoFinder,
		sseStreamer,
		indexer,
		publicAccess,
		eventReporter,
		auditService,
		settings,
	)
}
// ProvideJobRepositoryImport wires the repository-import background job and
// registers its handler with the job executor. Registration failure is
// returned to the caller unmodified.
func ProvideJobRepositoryImport(
	encrypter encrypt.Encrypter,
	scheduler *job.Scheduler,
	executor *job.Executor,
	importer *Importer,
) (*JobRepository, error) {
	jobHandler := &JobRepository{
		encrypter: encrypter,
		scheduler: scheduler,
		importer:  importer,
	}

	if err := executor.Register(jobTypeRepositoryImport, jobHandler); err != nil {
		return nil, err
	}

	return jobHandler, nil
}
// ProvideJobRepositoryLink wires the repository-link background job: it
// constructs the handler, registers it with the executor under
// jobTypeRepositoryLink, and additionally creates and registers the
// recurring linked-repositories sync job.
func ProvideJobRepositoryLink(
	ctx context.Context,
	config *types.Config,
	scheduler *job.Scheduler,
	executor *job.Executor,
	urlProvider url.Provider,
	git git.Interface,
	connectorService ConnectorService,
	repoStore store.RepoStore,
	linkedRepoStore store.LinkedRepoStore,
	repoFinder refcache.RepoFinder,
	sseStreamer sse.Streamer,
	indexer keywordsearch.Indexer,
	eventReporter *repoevents.Reporter,
) (*JobRepositoryLink, error) {
	j := NewJobRepositoryLink(
		scheduler,
		urlProvider,
		git,
		connectorService,
		repoStore,
		linkedRepoStore,
		repoFinder,
		sseStreamer,
		indexer,
		eventReporter,
	)

	if err := executor.Register(jobTypeRepositoryLink, j); err != nil {
		return nil, err
	}

	// Also schedule the periodic sync of all linked repositories.
	if err := CreateAndRegisterJobSyncLinkedRepositories(
		ctx,
		scheduler,
		executor,
		config.Git.DefaultBranch,
		urlProvider,
		git,
		repoFinder,
		linkedRepoStore,
		indexer,
		connectorService,
	); err != nil {
		return nil, fmt.Errorf("unable to register job sync linked repositories: %w", err)
	}

	return j, nil
}
// ProvideJobReferenceSync wires the reference-sync background job and
// registers its handler with the job executor under refSyncJobType.
func ProvideJobReferenceSync(
	config *types.Config,
	urlProvider url.Provider,
	git git.Interface,
	repoStore store.RepoStore,
	repoFinder refcache.RepoFinder,
	scheduler *job.Scheduler,
	executor *job.Executor,
	indexer keywordsearch.Indexer,
	eventReporter *repoevents.Reporter,
) (*JobReferenceSync, error) {
	// Note: the original local was called "importer"; renamed to reflect
	// the actual type being built.
	jobHandler := &JobReferenceSync{
		defaultBranch: config.Git.DefaultBranch,
		urlProvider:   urlProvider,
		git:           git,
		repoStore:     repoStore,
		repoFinder:    repoFinder,
		scheduler:     scheduler,
		indexer:       indexer,
		eventReporter: eventReporter,
	}

	if err := executor.Register(refSyncJobType, jobHandler); err != nil {
		return nil, err
	}

	return jobHandler, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/importer/job_sync_linked_repositories.go | app/services/importer/job_sync_linked_repositories.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package importer
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/app/bootstrap"
"github.com/harness/gitness/app/githook"
"github.com/harness/gitness/app/services/keywordsearch"
"github.com/harness/gitness/app/services/refcache"
"github.com/harness/gitness/app/store"
gitnessurl "github.com/harness/gitness/app/url"
"github.com/harness/gitness/git"
"github.com/harness/gitness/git/api"
"github.com/harness/gitness/job"
"github.com/harness/gitness/types"
"github.com/rs/zerolog/log"
)
// CreateAndRegisterJobSyncLinkedRepositories schedules the recurring job that
// keeps linked repositories in sync with their upstream sources and registers
// its handler with the job executor.
//
// The job runs every 4 hours at minute 45 and is capped just under 4 hours so
// consecutive runs cannot overlap.
func CreateAndRegisterJobSyncLinkedRepositories(
	ctx context.Context,
	scheduler *job.Scheduler,
	executor *job.Executor,
	defaultBranch string,
	urlProvider gitnessurl.Provider,
	git git.Interface,
	repoFinder refcache.RepoFinder,
	linkedRepoStore store.LinkedRepoStore,
	indexer keywordsearch.Indexer,
	connectorService ConnectorService,
) error {
	const (
		jobMaxDuration = 3*time.Hour + 55*time.Minute
		jobType        = "gitness:jobs:sync_linked_repositories"
		jobUID         = jobType
		jobCron        = "45 */4 * * *" // every 4 hours at 45 minutes
	)

	err := scheduler.AddRecurring(
		ctx,
		jobUID,
		jobType,
		jobCron,
		jobMaxDuration)
	if err != nil {
		return fmt.Errorf("failed to create recurring job linked repositories sync: %w", err)
	}

	handler := NewJobSyncLinkedRepositories(
		defaultBranch,
		urlProvider,
		git,
		repoFinder,
		linkedRepoStore,
		scheduler,
		indexer,
		connectorService,
	)

	// Wrap the registration error with the failing job type for context;
	// previously it was returned raw, unlike the scheduler error above.
	if err := executor.Register(jobType, handler); err != nil {
		return fmt.Errorf("failed to register linked repositories sync job handler: %w", err)
	}

	return nil
}
// NewJobSyncLinkedRepositories constructs the handler for the recurring
// linked-repositories sync job.
func NewJobSyncLinkedRepositories(
	defaultBranch string,
	urlProvider gitnessurl.Provider,
	git git.Interface,
	repoFinder refcache.RepoFinder,
	linkedRepoStore store.LinkedRepoStore,
	scheduler *job.Scheduler,
	indexer keywordsearch.Indexer,
	connectorService ConnectorService,
) *JobSyncLinkedRepositories {
	return &JobSyncLinkedRepositories{
		defaultBranch:    defaultBranch,
		urlProvider:      urlProvider,
		git:              git,
		repoFinder:       repoFinder,
		linkedRepoStore:  linkedRepoStore,
		scheduler:        scheduler,
		indexer:          indexer,
		connectorService: connectorService,
	}
}
// JobSyncLinkedRepositories is the job handler that mirrors refs from the
// connector-backed upstream of every linked repository into its local copy.
type JobSyncLinkedRepositories struct {
	defaultBranch    string
	urlProvider      gitnessurl.Provider
	git              git.Interface
	repoFinder       refcache.RepoFinder
	linkedRepoStore  store.LinkedRepoStore
	scheduler        *job.Scheduler
	indexer          keywordsearch.Indexer
	connectorService ConnectorService
}

// Compile-time check that the type satisfies the job handler contract.
var _ job.Handler = (*JobSyncLinkedRepositories)(nil)
// JobLinkedRepositoriesSyncInput is the serialized job input describing which
// refs to sync between a source and a target repository.
type JobLinkedRepositoriesSyncInput struct {
	SourceRepoID int64       `json:"source_repo_id"`
	TargetRepoID int64       `json:"target_repo_id"`
	RefSpecType  RefSpecType `json:"ref_spec_type"`
	SourceRef    string      `json:"source_ref"`
	TargetRef    string      `json:"target_ref"`
}
// Handle executes synchronization of linked repositories.
//
// It lists up to 1000 linked repositories and, for each one: resolves the
// connector credentials, mirrors all branches and tags from the upstream
// clone URL into the local repository, and records the sync timestamp.
// Per-repository failures (repo lookup, connector access, optimistic-lock
// update, progress reporting) are logged and skipped; failures creating
// write params or running the git sync abort the whole job run.
func (r *JobSyncLinkedRepositories) Handle(
	ctx context.Context,
	_ string,
	progress job.ProgressReporter,
) (string, error) {
	systemPrincipal := bootstrap.NewSystemServiceSession().Principal

	// Mirror every branch and tag from the upstream.
	refSpec := []string{
		api.BranchPrefix + "*:" + api.BranchPrefix + "*",
		api.TagPrefix + "*:" + api.TagPrefix + "*",
	}

	const limit = 1000
	linkedRepos, err := r.linkedRepoStore.List(ctx, limit)
	if err != nil {
		return "", fmt.Errorf("failed to list linked repositories: %w", err)
	}

	for linkedRepoIndex, linkedRepo := range linkedRepos {
		log := log.Ctx(ctx).With().
			Int64("repo.id", linkedRepo.RepoID).
			Logger()

		repo, err := r.repoFinder.FindByID(ctx, linkedRepo.RepoID)
		if err != nil {
			log.Warn().Err(err).Msg("failed to find repo")
			continue
		}

		connector := ConnectorDef{
			Path:       linkedRepo.ConnectorPath,
			Identifier: linkedRepo.ConnectorIdentifier,
		}

		accessInfo, err := r.connectorService.GetAccessInfo(ctx, connector)
		if err != nil {
			log.Warn().Err(err).Msg("failed to access info from connector")
			continue
		}

		cloneURLWithAuth, err := accessInfo.URLWithCredentials()
		if err != nil {
			log.Warn().Err(err).Msg("failed to get clone URL from connector's access info")
			continue
		}

		writeParams, err := r.createRPCWriteParams(ctx, systemPrincipal, repo.ID, repo.GitUID)
		if err != nil {
			return "", fmt.Errorf("failed to create rpc write params: %w", err)
		}

		_, err = r.git.SyncRepository(ctx, &git.SyncRepositoryParams{
			WriteParams:       writeParams,
			Source:            cloneURLWithAuth,
			CreateIfNotExists: false,
			RefSpecs:          refSpec,
		})
		if err != nil {
			return "", fmt.Errorf("failed to sync repository: %w", err)
		}

		_, err = r.linkedRepoStore.UpdateOptLock(ctx, &linkedRepo, func(l *types.LinkedRepo) error {
			l.LastFullSync = time.Now().UnixMilli()
			return nil
		})
		if err != nil {
			log.Warn().Err(err).Msg("failed to update linked repo")
			continue
		}

		log.Info().Msg("synced linked repository")

		// Report completed repos, not the index: the previous
		// 100*linkedRepoIndex/len(linkedRepos) reported 0% after the first
		// repo and topped out at 100*(n-1)/n, never reaching 100%.
		err = progress(100*(linkedRepoIndex+1)/len(linkedRepos), "")
		if err != nil {
			log.Warn().Err(err).Msg("failed to update job progress")
			continue
		}
	}

	return "", nil
}
// createRPCWriteParams assembles git write parameters (actor identity plus
// githook environment variables) for writing into the repository identified
// by repoGitUID on behalf of the given principal.
func (r *JobSyncLinkedRepositories) createRPCWriteParams(
	ctx context.Context,
	principal types.Principal,
	repoID int64,
	repoGitUID string,
) (git.WriteParams, error) {
	hookEnv, err := githook.GenerateEnvironmentVariables(
		ctx,
		r.urlProvider.GetInternalAPIURL(ctx),
		repoID,
		principal.ID,
		true,
		true,
	)
	if err != nil {
		return git.WriteParams{}, fmt.Errorf("failed to generate git hook environment variables: %w", err)
	}

	params := git.WriteParams{
		RepoUID: repoGitUID,
		Actor: git.Identity{
			Name:  principal.DisplayName,
			Email: principal.Email,
		},
		EnvVars: hookEnv,
	}
	return params, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.