repo stringlengths 6 47 | file_url stringlengths 77 269 | file_path stringlengths 5 186 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-07 08:35:43 2026-01-07 08:55:24 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/notification/reviewer_added.go | app/services/notification/reviewer_added.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package notification
import (
"context"
"fmt"
pullreqevents "github.com/harness/gitness/app/events/pullreq"
"github.com/harness/gitness/events"
"github.com/harness/gitness/types"
)
// ReviewerAddedPayload carries the data needed to notify recipients that a
// reviewer was added to a pull request.
type ReviewerAddedPayload struct {
	Base     *BasePullReqPayload
	Reviewer *types.PrincipalInfo
}
// notifyReviewerAdded handles a reviewer-added event: it resolves the
// notification payload and recipients, then dispatches the email.
func (s *Service) notifyReviewerAdded(
	ctx context.Context,
	event *events.Event[*pullreqevents.ReviewerAddedPayload],
) error {
	payload, recipients, err := s.processReviewerAddedEvent(ctx, event)
	if err != nil {
		return fmt.Errorf(
			"failed to process %s event for pullReqID %d: %w",
			pullreqevents.ReviewerAddedEvent,
			event.Payload.PullReqID,
			err,
		)
	}

	if err := s.notificationClient.SendReviewerAdded(ctx, recipients, payload); err != nil {
		return fmt.Errorf(
			"failed to send email for event %s for pullReqID %d: %w",
			pullreqevents.ReviewerAddedEvent,
			event.Payload.PullReqID,
			err,
		)
	}

	return nil
}
// processReviewerAddedEvent resolves the base payload and the reviewer
// principal, returning the notification payload together with the recipient
// list (the PR author and the newly added reviewer).
func (s *Service) processReviewerAddedEvent(
	ctx context.Context,
	event *events.Event[*pullreqevents.ReviewerAddedPayload],
) (*ReviewerAddedPayload, []*types.PrincipalInfo, error) {
	base, err := s.getBasePayload(ctx, event.Payload.Base)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to get base payload: %w", err)
	}

	reviewer, err := s.principalInfoCache.Get(ctx, event.Payload.ReviewerID)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to get reviewer from principalInfoCache: %w", err)
	}

	payload := &ReviewerAddedPayload{
		Base:     base,
		Reviewer: reviewer,
	}
	return payload, []*types.PrincipalInfo{base.Author, reviewer}, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/notification/pullreq_state.go | app/services/notification/pullreq_state.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package notification
import (
"context"
"fmt"
pullreqevents "github.com/harness/gitness/app/events/pullreq"
"github.com/harness/gitness/events"
"github.com/harness/gitness/types"
)
// PullReqState identifies the state transition a pull request underwent,
// used to select the wording of the state-change notification.
type PullReqState string

const (
	PullReqStateMerged   PullReqState = "merged"
	PullReqStateClosed   PullReqState = "closed"
	PullReqStateReopened PullReqState = "reopened"
)
// PullReqStateChangedPayload carries the data needed to notify recipients
// that a pull request was merged, closed, or reopened.
type PullReqStateChangedPayload struct {
	Base      *BasePullReqPayload
	ChangedBy *types.PrincipalInfo // the principal who triggered the state change
	State     PullReqState
}
// notifyPullReqStateMerged sends the state-change notification for a merged
// pull request.
func (s *Service) notifyPullReqStateMerged(
	ctx context.Context,
	event *events.Event[*pullreqevents.MergedPayload],
) error {
	payload, recipients, err := s.processPullReqStateChangedEvent(ctx, event.Payload.Base, PullReqStateMerged)
	if err != nil {
		return fmt.Errorf(
			"failed to process %s event for pullReqID %d: %w",
			pullreqevents.MergedEvent,
			event.Payload.PullReqID,
			err,
		)
	}

	err = s.notificationClient.SendPullReqStateChanged(ctx, recipients, payload)
	if err != nil {
		return fmt.Errorf(
			"failed to send email for event %s for pullReqID %d: %w",
			pullreqevents.MergedEvent,
			payload.Base.PullReq.ID,
			err,
		)
	}

	return nil
}
// notifyPullReqStateClosed sends the state-change notification for a closed
// pull request.
func (s *Service) notifyPullReqStateClosed(
	ctx context.Context,
	event *events.Event[*pullreqevents.ClosedPayload],
) error {
	payload, recipients, err := s.processPullReqStateChangedEvent(ctx, event.Payload.Base, PullReqStateClosed)
	if err != nil {
		return fmt.Errorf(
			"failed to process %s event for pullReqID %d: %w",
			pullreqevents.ClosedEvent,
			event.Payload.PullReqID,
			err,
		)
	}

	err = s.notificationClient.SendPullReqStateChanged(ctx, recipients, payload)
	if err != nil {
		return fmt.Errorf(
			"failed to send email for event %s for pullReqID %d: %w",
			pullreqevents.ClosedEvent,
			payload.Base.PullReq.ID,
			err,
		)
	}

	return nil
}
// notifyPullReqStateReOpened sends the state-change notification for a
// reopened pull request.
func (s *Service) notifyPullReqStateReOpened(
	ctx context.Context,
	event *events.Event[*pullreqevents.ReopenedPayload],
) error {
	payload, recipients, err := s.processPullReqStateChangedEvent(ctx, event.Payload.Base, PullReqStateReopened)
	if err != nil {
		return fmt.Errorf(
			"failed to process %s event for pullReqID %d: %w",
			pullreqevents.ReopenedEvent,
			event.Payload.PullReqID,
			err,
		)
	}

	err = s.notificationClient.SendPullReqStateChanged(ctx, recipients, payload)
	if err != nil {
		return fmt.Errorf(
			"failed to send email for event %s for pullReqID %d: %w",
			pullreqevents.ReopenedEvent,
			payload.Base.PullReq.ID,
			err,
		)
	}

	return nil
}
// processPullReqStateChangedEvent builds the state-change notification
// payload and the recipient list: every reviewer of the PR plus its author.
func (s *Service) processPullReqStateChangedEvent(
	ctx context.Context,
	baseEvent pullreqevents.Base,
	state PullReqState,
) (*PullReqStateChangedPayload, []*types.PrincipalInfo, error) {
	basePayload, err := s.getBasePayload(ctx, baseEvent)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to get base payload: %w", err)
	}

	author, err := s.principalInfoCache.Get(ctx, basePayload.PullReq.CreatedBy)
	if err != nil {
		return nil, nil, fmt.Errorf(
			"failed to get author from principalInfoCache for pullReqID %d: %w",
			baseEvent.PullReqID,
			err,
		)
	}

	changedBy, err := s.principalInfoCache.Get(ctx, baseEvent.PrincipalID)
	if err != nil {
		return nil, nil,
			fmt.Errorf(
				"failed to get principal information about principal that changed PR state for pullReqID %d: %w",
				baseEvent.PullReqID,
				err,
			)
	}

	reviewers, err := s.pullReqReviewersStore.List(ctx, baseEvent.PullReqID)
	if err != nil {
		return nil, nil, fmt.Errorf(
			"failed to get reviewers from pullReqReviewersStore for pullReqID %d: %w",
			baseEvent.PullReqID,
			err,
		)
	}

	// Recipients are all reviewers followed by the PR author.
	recipients := make([]*types.PrincipalInfo, 0, len(reviewers)+1)
	for i := range reviewers {
		recipients = append(recipients, &reviewers[i].Reviewer)
	}
	recipients = append(recipients, author)

	payload := &PullReqStateChangedPayload{
		Base:      basePayload,
		ChangedBy: changedBy,
		State:     state,
	}
	return payload, recipients, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/notification/client_interface.go | app/services/notification/client_interface.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package notification
import (
"context"
"github.com/harness/gitness/types"
)
// Client is an interface for sending notifications, such as emails, Slack messages etc.
// It is implemented by MailClient and in future we can have other implementations for other channels like Slack etc.
type Client interface {
	// SendCommentPRAuthor sends a new-comment notification to the PR author.
	SendCommentPRAuthor(
		ctx context.Context,
		recipients []*types.PrincipalInfo,
		payload *CommentPayload,
	) error
	// SendCommentMentions sends a notification to principals mentioned in a comment.
	SendCommentMentions(
		ctx context.Context,
		recipients []*types.PrincipalInfo,
		payload *CommentPayload,
	) error
	// SendCommentParticipants sends a notification to earlier authors of a comment thread.
	SendCommentParticipants(
		ctx context.Context,
		recipients []*types.PrincipalInfo,
		payload *CommentPayload,
	) error
	// SendReviewerAdded sends a notification that a reviewer was added to a PR.
	SendReviewerAdded(
		ctx context.Context,
		recipients []*types.PrincipalInfo,
		payload *ReviewerAddedPayload,
	) error
	// SendPullReqBranchUpdated sends the branch-updated notification.
	SendPullReqBranchUpdated(
		ctx context.Context,
		recipients []*types.PrincipalInfo,
		payload *PullReqBranchUpdatedPayload,
	) error
	// SendReviewSubmitted sends a notification that a review was submitted.
	SendReviewSubmitted(
		ctx context.Context,
		recipients []*types.PrincipalInfo,
		payload *ReviewSubmittedPayload,
	) error
	// SendPullReqStateChanged sends a notification that the PR state changed
	// (merged, closed, or reopened).
	SendPullReqStateChanged(
		ctx context.Context,
		recipients []*types.PrincipalInfo,
		payload *PullReqStateChangedPayload,
	) error
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/notification/comment_created.go | app/services/notification/comment_created.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package notification
import (
"context"
"fmt"
"strconv"
"strings"
pullreqevents "github.com/harness/gitness/app/events/pullreq"
"github.com/harness/gitness/events"
"github.com/harness/gitness/types"
gitnessenum "github.com/harness/gitness/types/enum"
"golang.org/x/exp/maps"
)
// CommentPayload carries the data needed to notify recipients about a new
// pull request comment.
type CommentPayload struct {
	Base      *BasePullReqPayload
	Commenter *types.PrincipalInfo
	Text      string // comment text with "@[<id>]" mention markers replaced by display names
}
// notifyCommentCreated handles a comment-created event. It resolves three
// disjoint recipient groups (mentions, thread participants, PR author) and
// sends each group its own notification, skipping empty groups.
func (s *Service) notifyCommentCreated(
	ctx context.Context,
	event *events.Event[*pullreqevents.CommentCreatedPayload],
) error {
	payload, mentions, participants, author, err := s.processCommentCreatedEvent(ctx, event)
	if err != nil {
		return fmt.Errorf(
			"failed to process %s event for pullReqID %d: %w",
			pullreqevents.CommentCreatedEvent,
			event.Payload.PullReqID,
			err,
		)
	}

	if len(mentions) > 0 {
		if err := s.notificationClient.SendCommentMentions(ctx, mentions, payload); err != nil {
			return fmt.Errorf(
				"failed to send notification to mentions for event %s for pullReqID %d: %w",
				pullreqevents.CommentCreatedEvent,
				event.Payload.PullReqID,
				err,
			)
		}
	}

	if len(participants) > 0 {
		if err := s.notificationClient.SendCommentParticipants(ctx, participants, payload); err != nil {
			return fmt.Errorf(
				"failed to send notification to participants for event %s for pullReqID %d: %w",
				pullreqevents.CommentCreatedEvent,
				event.Payload.PullReqID,
				err,
			)
		}
	}

	if author != nil {
		if err := s.notificationClient.SendCommentPRAuthor(
			ctx,
			[]*types.PrincipalInfo{author},
			payload,
		); err != nil {
			return fmt.Errorf(
				"failed to send notification to author for event %s for pullReqID %d: %w",
				pullreqevents.CommentCreatedEvent,
				event.Payload.PullReqID,
				err,
			)
		}
	}

	return nil
}
// processCommentCreatedEvent builds the comment notification payload and the
// three recipient groups: mentioned principals, thread participants, and the
// PR author. The seen map ensures each principal lands in at most one group;
// the commenter is seeded into it so they never get notified about their own
// comment. author is nil when the PR author is already covered by another
// group (or is the commenter).
func (s *Service) processCommentCreatedEvent(
	ctx context.Context,
	event *events.Event[*pullreqevents.CommentCreatedPayload],
) (
	payload *CommentPayload,
	mentions []*types.PrincipalInfo,
	participants []*types.PrincipalInfo,
	author *types.PrincipalInfo,
	err error,
) {
	base, err := s.getBasePayload(ctx, event.Payload.Base)
	if err != nil {
		return nil, nil, nil, nil, fmt.Errorf("failed to get base payload: %w", err)
	}
	activity, err := s.pullReqActivityStore.Find(ctx, event.Payload.ActivityID)
	if err != nil {
		return nil, nil, nil, nil, fmt.Errorf("failed to fetch activity from pullReqActivityStore: %w", err)
	}
	// Only plain comments are handled; code comments are explicitly rejected.
	if activity.Type != gitnessenum.PullReqActivityTypeComment {
		return nil, nil, nil, nil, fmt.Errorf("code-comments are not supported currently")
	}
	commenter, err := s.principalInfoView.Find(ctx, activity.CreatedBy)
	if err != nil {
		return nil, nil, nil, nil, fmt.Errorf("failed to fetch commenter from principalInfoView: %w", err)
	}
	payload = &CommentPayload{
		Base:      base,
		Commenter: commenter,
		Text:      activity.Text,
	}
	// seen tracks principals already assigned to a recipient group.
	seen := make(map[int64]bool)
	seen[commenter.ID] = true
	// process mentions
	mentionsMap, err := s.processMentions(ctx, activity.Metadata, seen)
	if err != nil {
		return nil, nil, nil, nil, err
	}
	// Replace "@[<id>]" markers in the comment text with display names.
	for i, mention := range mentionsMap {
		payload.Text = strings.ReplaceAll(
			payload.Text, "@["+strconv.FormatInt(i, 10)+"]", mention.DisplayName,
		)
	}
	// process participants (only populated for replies; see processParticipants)
	participants, err = s.processParticipants(
		ctx, event.Payload.IsReply, seen, event.Payload.PullReqID, activity.Order)
	if err != nil {
		return nil, nil, nil, nil, err
	}
	// process author: notified separately unless already in another group.
	if !seen[base.Author.ID] {
		author = base.Author
	}
	return payload, maps.Values(mentionsMap), participants, author, nil
}
// processMentions resolves the principals mentioned in a comment's metadata.
// IDs already present in seen are skipped; newly accepted IDs are recorded in
// seen so later recipient groups do not duplicate them. Returns a map keyed
// by principal ID (possibly empty, never nil on success).
func (s *Service) processMentions(
	ctx context.Context,
	metadata *types.PullReqActivityMetadata,
	seen map[int64]bool,
) (map[int64]*types.PrincipalInfo, error) {
	if metadata == nil || metadata.Mentions == nil {
		return map[int64]*types.PrincipalInfo{}, nil
	}
	var ids []int64
	for _, id := range metadata.Mentions.IDs {
		if !seen[id] {
			ids = append(ids, id)
			seen[id] = true
		}
	}
	if len(ids) == 0 {
		return map[int64]*types.PrincipalInfo{}, nil
	}
	mentions, err := s.principalInfoCache.Map(ctx, ids)
	if err != nil {
		// Fix: the lookup goes through principalInfoCache; the previous
		// message incorrectly blamed principalInfoView.
		return nil, fmt.Errorf("failed to fetch thread mentions from principalInfoCache: %w", err)
	}
	return mentions, nil
}
// processParticipants resolves earlier authors of the comment thread as
// notification recipients. Only replies have a thread; for top-level comments
// it returns an empty result. IDs already in seen are skipped and newly
// accepted IDs are recorded in seen.
func (s *Service) processParticipants(
	ctx context.Context,
	isReply bool,
	seen map[int64]bool,
	prID int64,
	order int64,
) ([]*types.PrincipalInfo, error) {
	if !isReply {
		return nil, nil
	}

	authorIDs, err := s.pullReqActivityStore.ListAuthorIDs(ctx, prID, order)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch thread participant IDs from pullReqActivityStore: %w", err)
	}

	ids := make([]int64, 0, len(authorIDs))
	for _, id := range authorIDs {
		if seen[id] {
			continue
		}
		seen[id] = true
		ids = append(ids, id)
	}
	if len(ids) == 0 {
		return nil, nil
	}

	participants, err := s.principalInfoView.FindMany(ctx, ids)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch thread participants from principalInfoView: %w", err)
	}
	return participants, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/notification/review_submitted.go | app/services/notification/review_submitted.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package notification
import (
"context"
"fmt"
pullreqevents "github.com/harness/gitness/app/events/pullreq"
"github.com/harness/gitness/events"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
// ReviewSubmittedPayload carries the data needed to notify the PR author
// that a review was submitted.
type ReviewSubmittedPayload struct {
	Base     *BasePullReqPayload
	Author   *types.PrincipalInfo
	Reviewer *types.PrincipalInfo
	Decision enum.PullReqReviewDecision
}
// notifyReviewSubmitted handles a review-submitted event: it builds the
// notification payload and recipients, then dispatches the notification.
func (s *Service) notifyReviewSubmitted(
	ctx context.Context,
	event *events.Event[*pullreqevents.ReviewSubmittedPayload],
) error {
	payload, recipients, err := s.processReviewSubmittedEvent(ctx, event)
	if err != nil {
		return fmt.Errorf(
			"failed to process %s event for pullReqID %d: %w",
			pullreqevents.ReviewSubmittedEvent,
			event.Payload.PullReqID,
			err,
		)
	}

	if err := s.notificationClient.SendReviewSubmitted(ctx, recipients, payload); err != nil {
		return fmt.Errorf(
			"failed to send notification for event %s for pullReqID %d: %w",
			pullreqevents.ReviewSubmittedEvent,
			event.Payload.PullReqID,
			err,
		)
	}

	return nil
}
// processReviewSubmittedEvent resolves the base payload plus the author and
// reviewer principals. Only the PR author is a notification recipient.
func (s *Service) processReviewSubmittedEvent(
	ctx context.Context,
	event *events.Event[*pullreqevents.ReviewSubmittedPayload],
) (*ReviewSubmittedPayload, []*types.PrincipalInfo, error) {
	base, err := s.getBasePayload(ctx, event.Payload.Base)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to get base payload: %w", err)
	}

	author, err := s.principalInfoCache.Get(ctx, base.PullReq.CreatedBy)
	if err != nil {
		return nil, nil, fmt.Errorf(
			"failed to get author from principalInfoCache on %s event for pullReqID %d: %w",
			pullreqevents.ReviewSubmittedEvent,
			event.Payload.PullReqID,
			err,
		)
	}

	reviewer, err := s.principalInfoCache.Get(ctx, event.Payload.ReviewerID)
	if err != nil {
		return nil, nil, fmt.Errorf(
			"failed to get reviewer from principalInfoCache on event %s for pullReqID %d: %w",
			pullreqevents.ReviewSubmittedEvent,
			event.Payload.PullReqID,
			err,
		)
	}

	payload := &ReviewSubmittedPayload{
		Base:     base,
		Author:   author,
		Reviewer: reviewer,
		Decision: event.Payload.Decision,
	}
	return payload, []*types.PrincipalInfo{author}, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/notification/mailer/mail_interface.go | app/services/notification/mailer/mail_interface.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mailer
import (
"context"
gomail "gopkg.in/mail.v2"
)
const (
	// mailContentType is the MIME type used for every outgoing mail body.
	mailContentType = "text/html"
)
// Mailer delivers a single mail message described by Payload.
type Mailer interface {
	Send(ctx context.Context, mailPayload Payload) error
}
// Payload is a channel-agnostic description of a single outgoing mail.
type Payload struct {
	CCRecipients []string // addresses for the Cc header
	ToRecipients []string // addresses for the To header
	Subject      string
	Body         string
	ContentType  string // NOTE(review): not consumed by ToGoMail, which always uses mailContentType — confirm intent
	RepoRef      string
}
// ToGoMail converts a notification Payload into a gomail message, setting
// the To, Cc, and Subject headers and the HTML body. The From header is the
// caller's responsibility.
func ToGoMail(dto Payload) *gomail.Message {
	msg := gomail.NewMessage()
	msg.SetHeader("Subject", dto.Subject)
	msg.SetHeader("To", dto.ToRecipients...)
	msg.SetHeader("Cc", dto.CCRecipients...)
	msg.SetBody(mailContentType, dto.Body)
	return msg
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/notification/mailer/wire.go | app/services/notification/mailer/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mailer
import (
"github.com/harness/gitness/types"
"github.com/google/wire"
)
// WireSet provides a wire set for this package.
var WireSet = wire.NewSet(
	ProvideMailClient,
)
// ProvideMailClient builds the Mailer from the SMTP settings in the app config.
func ProvideMailClient(config *types.Config) Mailer {
	return NewMailClient(
		config.SMTP.Host,
		config.SMTP.Port,
		config.SMTP.Username,
		config.SMTP.FromMail,
		config.SMTP.Password,
		config.SMTP.Insecure, // #nosec G402 (insecure skipVerify configuration)
	)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/notification/mailer/mail.go | app/services/notification/mailer/mail.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mailer
import (
"context"
"crypto/tls"
gomail "gopkg.in/mail.v2"
)
// GoMailClient sends mail via SMTP using the gomail library.
type GoMailClient struct {
	dialer   *gomail.Dialer // SMTP connection settings
	fromMail string         // value stamped into the From header of every mail
}
// NewMailClient constructs a GoMailClient that delivers mail through the
// given SMTP endpoint. When insecure is true, TLS certificate verification
// is disabled.
func NewMailClient(
	host string,
	port int,
	username string,
	fromMail string,
	password string,
	insecure bool,
) GoMailClient {
	dialer := gomail.NewDialer(host, port, username, password)
	dialer.TLSConfig = &tls.Config{InsecureSkipVerify: insecure} // #nosec G402 (insecure TLS configuration)

	client := GoMailClient{
		dialer:   dialer,
		fromMail: fromMail,
	}
	return client
}
// Send converts the payload to a gomail message, stamps the configured From
// address, and delivers it synchronously. The context is currently unused
// because DialAndSend offers no cancellation hook.
func (c GoMailClient) Send(_ context.Context, mailPayload Payload) error {
	mail := ToGoMail(mailPayload)
	mail.SetHeader("From", c.fromMail)
	return c.dialer.DialAndSend(mail)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/gitspacesettings/wire.go | app/services/gitspacesettings/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gitspacesettings
import (
"context"
"github.com/harness/gitness/app/store"
"github.com/google/wire"
)
// WireSet provides a wire set for this package.
var WireSet = wire.NewSet(
	ProvideService,
)
// ProvideService constructs the gitspace settings Service backed by the
// given settings store. It never returns an error.
func ProvideService(
	ctx context.Context,
	gitspaceSettingsStore store.GitspaceSettingsStore,
) (Service, error) {
	return NewSettingsService(
		ctx,
		gitspaceSettingsStore,
	), nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/gitspacesettings/service.go | app/services/gitspacesettings/service.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gitspacesettings
import (
"context"
"github.com/harness/gitness/app/gitspace/scm"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/types"
)
// Service exposes gitspace settings lookup and validation hooks.
// The settingsService implementation in this package stubs all methods
// (each returns nil); see the individual method implementations.
type Service interface {
	// GetGitspaceConfigSettings returns the gitspace config settings for a space.
	GetGitspaceConfigSettings(
		ctx context.Context,
		spaceID int64,
		criteria *types.GitspaceSettingsCriteria,
	) (*types.GitspaceConfigSettings, error)
	// GetInfraProviderSettings returns the infra-provider settings for a space.
	GetInfraProviderSettings(
		ctx context.Context,
		spaceID int64,
		criteria *types.GitspaceSettingsCriteria,
	) (*types.InfraProviderSettings, error)
	// ValidateGitspaceConfigCreate validates a gitspace config before creation.
	ValidateGitspaceConfigCreate(
		ctx context.Context,
		resource types.InfraProviderResource,
		gitspaceConfig types.GitspaceConfig,
	) error
	// ValidateResolvedSCMDetails validates the resolved SCM details for a gitspace.
	ValidateResolvedSCMDetails(
		ctx context.Context,
		gitspaceConfig types.GitspaceConfig,
		scmResolvedDetails *scm.ResolvedDetails,
	) *types.GitspaceError
}
// Existing SettingsService struct implements gitspacesettings.Service.
var _ Service = (*settingsService)(nil)

// settingsService is the default (no-op) Service implementation; the store
// is held but not used by any of the stub methods below.
type settingsService struct {
	gitspaceSettingsStore store.GitspaceSettingsStore
}
// GetInfraProviderSettings is a no-op in this implementation; it always
// returns nil settings and no error.
func (s *settingsService) GetInfraProviderSettings(
	_ context.Context,
	_ int64,
	_ *types.GitspaceSettingsCriteria,
) (*types.InfraProviderSettings, error) {
	return nil, nil // nolint: nilnil
}
// NewSettingsService creates the default settingsService over the given
// settings store. The context parameter is currently unused.
func NewSettingsService(
	_ context.Context,
	store store.GitspaceSettingsStore,
) Service {
	return &settingsService{
		gitspaceSettingsStore: store,
	}
}
// GetGitspaceConfigSettings is a no-op in this implementation; it always
// returns nil settings and no error.
func (s *settingsService) GetGitspaceConfigSettings(
	_ context.Context,
	_ int64,
	_ *types.GitspaceSettingsCriteria,
) (*types.GitspaceConfigSettings, error) {
	return nil, nil // nolint: nilnil
}
// ValidateGitspaceConfigCreate is a no-op in this implementation; every
// config is accepted (always returns nil).
func (s *settingsService) ValidateGitspaceConfigCreate(
	_ context.Context,
	_ types.InfraProviderResource,
	_ types.GitspaceConfig,
) error {
	return nil
}
// ValidateResolvedSCMDetails is a no-op in this implementation; every set of
// resolved SCM details is accepted (always returns nil).
func (s *settingsService) ValidateResolvedSCMDetails(
	_ context.Context,
	_ types.GitspaceConfig,
	_ *scm.ResolvedDetails,
) *types.GitspaceError {
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/gitspaceevent/wire.go | app/services/gitspaceevent/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gitspaceevent
import (
"context"
gitspaceevents "github.com/harness/gitness/app/events/gitspace"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/events"
"github.com/google/wire"
)
// WireSet provides a wire set for this package.
var WireSet = wire.NewSet(
	ProvideService,
)
// ProvideService constructs the gitspace event Service, wiring in the event
// reader factory and the backing event store. Construction fails if the
// config is invalid or the event reader cannot be launched.
func ProvideService(ctx context.Context,
	config *Config,
	gitspaceEventReaderFactory *events.ReaderFactory[*gitspaceevents.Reader],
	gitspaceEventStore store.GitspaceEventStore,
) (*Service, error) {
	return NewService(
		ctx,
		config,
		gitspaceEventReaderFactory,
		gitspaceEventStore,
	)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/gitspaceevent/service.go | app/services/gitspaceevent/service.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gitspaceevent
import (
"context"
"errors"
"fmt"
"time"
gitspaceevents "github.com/harness/gitness/app/events/gitspace"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/events"
"github.com/harness/gitness/stream"
)
// groupGitspaceEvents is the consumer-group name used when launching the
// gitspace event reader.
const groupGitspaceEvents = "gitness:gitspace"
// Config holds the tunables for the gitspace event reader.
type Config struct {
	EventReaderName string // unique name of this reader instance (required)
	Concurrency     int    // number of concurrent stream consumers (must be >= 1)
	MaxRetries      int    // max redelivery attempts per event (must be >= 0)
	TimeoutInMins   int    // idle timeout applied to the stream handler, in minutes
}

// Sanitize validates the configuration and reports the first problem found.
func (c *Config) Sanitize() error {
	switch {
	case c == nil:
		return errors.New("config is required")
	case c.EventReaderName == "":
		return errors.New("config.EventReaderName is required")
	case c.Concurrency < 1:
		return errors.New("config.Concurrency has to be a positive number")
	case c.MaxRetries < 0:
		return errors.New("config.MaxRetries can't be negative")
	}
	return nil
}
// Service consumes gitspace events from the stream and persists them to the
// gitspace event store.
type Service struct {
	config             *Config
	gitspaceEventStore store.GitspaceEventStore
}
// NewService validates the config, creates the event service, and launches
// the gitspace event reader that persists incoming events.
func NewService(
	ctx context.Context,
	config *Config,
	gitspaceEventReaderFactory *events.ReaderFactory[*gitspaceevents.Reader],
	gitspaceEventStore store.GitspaceEventStore,
) (*Service, error) {
	if err := config.Sanitize(); err != nil {
		return nil, fmt.Errorf("provided gitspace event service config is invalid: %w", err)
	}

	svc := &Service{
		config:             config,
		gitspaceEventStore: gitspaceEventStore,
	}

	setup := func(r *gitspaceevents.Reader) error {
		idleTimeout := time.Duration(config.TimeoutInMins) * time.Minute
		r.Configure(
			stream.WithConcurrency(config.Concurrency),
			stream.WithHandlerOptions(
				stream.WithIdleTimeout(idleTimeout),
				stream.WithMaxRetries(config.MaxRetries),
			))
		// register gitspace config events
		_ = r.RegisterGitspaceEvent(svc.handleGitspaceEvent)
		return nil
	}

	if _, err := gitspaceEventReaderFactory.Launch(ctx, groupGitspaceEvents, config.EventReaderName, setup); err != nil {
		return nil, fmt.Errorf("failed to launch gitspace event reader: %w", err)
	}
	return svc, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/gitspaceevent/handler.go | app/services/gitspaceevent/handler.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gitspaceevent
import (
"context"
"fmt"
"time"
gitspaceevents "github.com/harness/gitness/app/events/gitspace"
"github.com/harness/gitness/events"
"github.com/harness/gitness/types"
"github.com/rs/zerolog/log"
)
// handleGitspaceEvent persists an incoming gitspace event to the event store,
// stamping the current time as the creation instant.
func (s *Service) handleGitspaceEvent(
	ctx context.Context,
	event *events.Event[*gitspaceevents.GitspaceEventPayload],
) error {
	payload := event.Payload
	record := &types.GitspaceEvent{
		Event:      payload.EventType,
		EntityID:   payload.EntityID,
		QueryKey:   payload.QueryKey,
		EntityType: payload.EntityType,
		Timestamp:  payload.Timestamp,
		Created:    time.Now().UnixMilli(),
	}

	log.Debug().Msgf("received gitspace event, event type: %s, entity type: %s, entity id: %d",
		record.Event,
		record.EntityType,
		record.EntityID,
	)

	if err := s.gitspaceEventStore.Create(ctx, record); err != nil {
		return fmt.Errorf("failed to create gitspace event: %w", err)
	}
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/label/label_pullreq.go | app/services/label/label_pullreq.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package label
import (
"context"
"fmt"
"slices"
"sort"
"time"
"github.com/harness/gitness/errors"
"github.com/harness/gitness/store"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"golang.org/x/exp/maps"
)
type AssignToPullReqOut struct {
Label *types.Label
PullReqLabel *types.PullReqLabel
OldLabelValue *types.LabelValue
NewLabelValue *types.LabelValue
ActivityType enum.PullReqLabelActivityType
}
type WithValue struct {
Label *types.Label
Value *types.LabelValue
}
func (out *AssignToPullReqOut) ToLabelPullReqAssignmentInfo() *types.LabelPullReqAssignmentInfo {
var valueID *int64
var value *string
var valueColor *enum.LabelColor
if out.NewLabelValue != nil {
valueID = &out.NewLabelValue.ID
value = &out.NewLabelValue.Value
valueColor = &out.NewLabelValue.Color
}
assignmentInfo := &types.LabelPullReqAssignmentInfo{
PullReqID: out.PullReqLabel.PullReqID,
LabelID: out.Label.ID,
LabelKey: out.Label.Key,
LabelColor: out.Label.Color,
LabelScope: out.Label.Scope,
ValueCount: out.Label.ValueCount,
ValueID: valueID,
Value: value,
ValueColor: valueColor,
}
return assignmentInfo
}
func (s *Service) AssignToPullReq(
ctx context.Context,
principalID int64,
pullreqID int64,
repoID int64,
repoParentID int64,
in *types.PullReqLabelAssignInput,
) (*AssignToPullReqOut, error) {
label, err := s.labelStore.FindByID(ctx, in.LabelID)
if err != nil {
return nil, fmt.Errorf("failed to find label by id: %w", err)
}
if err := s.checkPullreqLabelInScope(ctx, repoParentID, repoID, label); err != nil {
return nil, err
}
oldPullreqLabel, err := s.pullReqLabelAssignmentStore.FindByLabelID(ctx, pullreqID, label.ID)
if err != nil && !errors.Is(err, store.ErrResourceNotFound) {
return nil, fmt.Errorf("failed to find label by id: %w", err)
}
// if the pullreq label did not have value
if oldPullreqLabel != nil && oldPullreqLabel.ValueID == nil &&
// and we don't assign it a new value
in.Value == "" && in.ValueID == nil {
return &AssignToPullReqOut{
Label: label,
PullReqLabel: oldPullreqLabel,
OldLabelValue: nil,
NewLabelValue: nil,
ActivityType: enum.LabelActivityNoop,
}, nil
}
var oldLabelValue *types.LabelValue
if oldPullreqLabel != nil && oldPullreqLabel.ValueID != nil {
oldLabelValue, err = s.labelValueStore.FindByID(ctx, *oldPullreqLabel.ValueID)
if err != nil {
return nil, fmt.Errorf("failed to find label value by id: %w", err)
}
}
// if the pullreq label had a value
if oldLabelValue != nil {
// and we reassign it the same value
if in.ValueID != nil && oldLabelValue.ID == *in.ValueID {
return &AssignToPullReqOut{
Label: label,
PullReqLabel: oldPullreqLabel,
OldLabelValue: oldLabelValue,
NewLabelValue: nil,
ActivityType: enum.LabelActivityNoop,
}, nil
}
// and we reassign it the same value
if in.Value != "" && oldLabelValue.Value == in.Value {
return &AssignToPullReqOut{
Label: label,
PullReqLabel: oldPullreqLabel,
OldLabelValue: oldLabelValue,
NewLabelValue: nil,
ActivityType: enum.LabelActivityNoop,
}, nil
}
}
var newLabelValue *types.LabelValue
if in.ValueID != nil {
newLabelValue, err = s.labelValueStore.FindByID(ctx, *in.ValueID)
if err != nil {
return nil, fmt.Errorf("failed to find label value by id: %w", err)
}
if label.ID != newLabelValue.LabelID {
return nil, errors.InvalidArgument("label value is not associated with label")
}
}
newPullreqLabel := newPullReqLabel(pullreqID, principalID, in)
if in.Value != "" {
newLabelValue, err = s.getOrDefineValue(ctx, principalID, label, in.Value)
if err != nil {
return nil, err
}
newPullreqLabel.ValueID = &newLabelValue.ID
}
err = s.pullReqLabelAssignmentStore.Assign(ctx, newPullreqLabel)
if err != nil {
return nil, fmt.Errorf("failed to assign label to pullreq: %w", err)
}
activityType := enum.LabelActivityAssign
if oldPullreqLabel != nil {
activityType = enum.LabelActivityReassign
}
return &AssignToPullReqOut{
Label: label,
PullReqLabel: newPullreqLabel,
OldLabelValue: oldLabelValue,
NewLabelValue: newLabelValue,
ActivityType: activityType,
}, nil
}
func (s *Service) PreparePullReqLabel(
ctx context.Context,
principalID int64,
repoID int64,
repoParentID int64,
in *types.PullReqLabelAssignInput,
) (WithValue, error) {
label, err := s.labelStore.FindByID(ctx, in.LabelID)
if err != nil {
return WithValue{}, fmt.Errorf("failed to find label by id: %w", err)
}
if err := s.checkPullreqLabelInScope(ctx, repoParentID, repoID, label); err != nil {
return WithValue{}, err
}
var value *types.LabelValue
if in.ValueID != nil {
value, err = s.labelValueStore.FindByID(ctx, *in.ValueID)
if err != nil {
return WithValue{}, fmt.Errorf("failed to find label value by id: %w", err)
}
if label.ID != value.LabelID {
return WithValue{}, errors.InvalidArgument("label value is not associated with label")
}
} else if in.Value != "" {
value, err = s.getOrDefineValue(ctx, principalID, label, in.Value)
if err != nil {
return WithValue{}, err
}
}
return WithValue{
Label: label,
Value: value,
}, nil
}
func (s *Service) AssignToPullReqOnCreation(
ctx context.Context,
pullreqID int64,
principalID int64,
labelWithValue *WithValue,
labelAssignInput *types.PullReqLabelAssignInput,
) (*AssignToPullReqOut, error) {
pullReqLabel := newPullReqLabel(pullreqID, principalID, labelAssignInput)
if labelWithValue.Value != nil {
pullReqLabel.ValueID = &labelWithValue.Value.ID
}
err := s.pullReqLabelAssignmentStore.Assign(ctx, pullReqLabel)
if err != nil {
return nil, fmt.Errorf("failed to assign label to pullreq: %w", err)
}
return &AssignToPullReqOut{
Label: labelWithValue.Label,
PullReqLabel: pullReqLabel,
NewLabelValue: labelWithValue.Value,
ActivityType: enum.LabelActivityAssign,
}, nil
}
func (s *Service) getOrDefineValue(
ctx context.Context,
principalID int64,
label *types.Label,
value string,
) (*types.LabelValue, error) {
if label.Type != enum.LabelTypeDynamic {
return nil, errors.InvalidArgument("label doesn't allow new value assignment")
}
labelValue, err := s.labelValueStore.FindByLabelID(ctx, label.ID, value)
if err == nil {
return labelValue, nil
}
if !errors.Is(err, store.ErrResourceNotFound) {
return nil, fmt.Errorf("failed to find label value: %w", err)
}
labelValue, err = s.DefineValue(
ctx,
principalID,
label.ID,
&types.DefineValueInput{
Value: value,
Color: label.Color,
},
)
if err != nil {
return nil, fmt.Errorf("failed to create label value: %w", err)
}
label.ValueCount++
return labelValue, nil
}
func (s *Service) UnassignFromPullReq(
ctx context.Context, repoID, repoParentID, pullreqID, labelID int64,
) (*types.Label, *types.LabelValue, error) {
label, err := s.labelStore.FindByID(ctx, labelID)
if err != nil {
return nil, nil, fmt.Errorf("failed to find label by id: %w", err)
}
if err := s.checkPullreqLabelInScope(ctx, repoParentID, repoID, label); err != nil {
return nil, nil, err
}
value, err := s.pullReqLabelAssignmentStore.FindValueByLabelID(ctx, pullreqID, labelID)
if err != nil && !errors.Is(err, store.ErrResourceNotFound) {
return nil, nil, fmt.Errorf("failed to find label value: %w", err)
}
return label, value, s.pullReqLabelAssignmentStore.Unassign(ctx, pullreqID, labelID)
}
func (s *Service) ListPullReqLabels(
ctx context.Context,
repo *types.RepositoryCore,
spaceID int64,
pullreqID int64,
filter *types.AssignableLabelFilter,
) (*types.ScopesLabels, int64, error) {
spaceIDs, err := s.spaceStore.GetAncestorIDs(ctx, spaceID)
if err != nil {
return nil, 0, fmt.Errorf("failed to get space hierarchy: %w", err)
}
spaces := make([]*types.SpaceCore, len(spaceIDs))
for i, id := range spaceIDs {
spaces[i], err = s.spaceFinder.FindByID(ctx, id)
if err != nil {
return nil, 0, fmt.Errorf("failed to find space by ID: %w", err)
}
}
scopeLabelsMap := make(map[int64]*types.ScopeData)
pullreqAssignments, err := s.pullReqLabelAssignmentStore.ListAssigned(ctx, pullreqID)
if err != nil {
return nil, 0, fmt.Errorf("failed to list labels assigned to pullreq: %w", err)
}
if !filter.Assignable {
sortedAssignments := maps.Values(pullreqAssignments)
sort.Slice(sortedAssignments, func(i, j int) bool {
if sortedAssignments[i].Key != sortedAssignments[j].Key {
return sortedAssignments[i].Key < sortedAssignments[j].Key
}
return sortedAssignments[i].Scope < sortedAssignments[j].Scope
})
populateScopeLabelsMap(sortedAssignments, scopeLabelsMap, repo, spaces)
return createScopeLabels(sortedAssignments, scopeLabelsMap), 0, nil
}
total, err := s.labelStore.CountInScopes(ctx, repo.ID, spaceIDs, &types.LabelFilter{
ListQueryFilter: types.ListQueryFilter{
Query: filter.Query,
},
})
if err != nil {
return nil, 0, fmt.Errorf("failed to count labels in scopes: %w", err)
}
labelInfos, err := s.labelStore.ListInfosInScopes(ctx, repo.ID, spaceIDs, filter)
if err != nil {
return nil, 0, fmt.Errorf("failed to list repo and spaces label infos: %w", err)
}
labelIDs := make([]int64, len(labelInfos))
for i, labelInfo := range labelInfos {
labelIDs[i] = labelInfo.ID
}
valueInfos, err := s.labelValueStore.ListInfosByLabelIDs(ctx, labelIDs)
if err != nil {
return nil, 0, fmt.Errorf("failed to list label value infos by label ids: %w", err)
}
allAssignments := make([]*types.LabelAssignment, len(labelInfos))
for i, labelInfo := range labelInfos {
assignment, ok := pullreqAssignments[labelInfo.ID]
if !ok {
assignment = &types.LabelAssignment{
LabelInfo: *labelInfo,
}
}
assignment.LabelInfo.Assigned = &ok
allAssignments[i] = assignment
allAssignments[i].Values = valueInfos[labelInfo.ID]
}
populateScopeLabelsMap(allAssignments, scopeLabelsMap, repo, spaces)
return createScopeLabels(allAssignments, scopeLabelsMap), total, nil
}
func (s *Service) Backfill(
ctx context.Context,
pullreq *types.PullReq,
) error {
pullreqAssignments, err := s.pullReqLabelAssignmentStore.ListAssignedByPullreqIDs(
ctx, []int64{pullreq.ID})
if err != nil {
return fmt.Errorf("failed to list labels assigned to pullreq: %w", err)
}
pullreq.Labels = pullreqAssignments[pullreq.ID]
return nil
}
func (s *Service) BackfillMany(
ctx context.Context,
pullreqs []*types.PullReq,
) error {
pullreqIDs := make([]int64, len(pullreqs))
for i, pr := range pullreqs {
pullreqIDs[i] = pr.ID
}
pullreqAssignments, err := s.pullReqLabelAssignmentStore.ListAssignedByPullreqIDs(
ctx, pullreqIDs)
if err != nil {
return fmt.Errorf("failed to list labels assigned to pullreq: %w", err)
}
for _, pullreq := range pullreqs {
pullreq.Labels = pullreqAssignments[pullreq.ID]
}
return nil
}
func populateScopeLabelsMap(
assignments []*types.LabelAssignment,
scopeLabelsMap map[int64]*types.ScopeData,
repo *types.RepositoryCore,
spaces []*types.SpaceCore,
) {
for _, assignment := range assignments {
_, ok := scopeLabelsMap[assignment.Scope]
if ok {
continue
}
scopeLabelsMap[assignment.Scope] = &types.ScopeData{Scope: assignment.Scope}
if assignment.Scope == 0 {
scopeLabelsMap[assignment.Scope].Repo = repo
} else {
for _, space := range spaces {
if space.ID == *assignment.SpaceID {
scopeLabelsMap[assignment.Scope].Space = space
}
}
}
}
}
func createScopeLabels(
assignments []*types.LabelAssignment,
scopeLabelsMap map[int64]*types.ScopeData,
) *types.ScopesLabels {
scopeData := make([]*types.ScopeData, len(scopeLabelsMap))
for i, scopeLabel := range maps.Values(scopeLabelsMap) {
scopeData[i] = scopeLabel
}
sort.Slice(scopeData, func(i, j int) bool {
return scopeData[i].Scope < scopeData[j].Scope
})
return &types.ScopesLabels{
LabelData: assignments,
ScopeData: scopeData,
}
}
func newPullReqLabel(
pullreqID int64,
principalID int64,
in *types.PullReqLabelAssignInput,
) *types.PullReqLabel {
now := time.Now().UnixMilli()
return &types.PullReqLabel{
PullReqID: pullreqID,
LabelID: in.LabelID,
ValueID: in.ValueID,
Created: now,
Updated: now,
CreatedBy: principalID,
UpdatedBy: principalID,
}
}
func (s *Service) checkPullreqLabelInScope(
ctx context.Context,
repoParentID, repoID int64,
label *types.Label,
) error {
if label.RepoID != nil && *label.RepoID != repoID {
return errors.InvalidArgumentf("label %d is not defined in current repo", label.ID)
}
if label.SpaceID != nil {
spaceIDs, err := s.spaceStore.GetAncestorIDs(ctx, repoParentID)
if err != nil {
return fmt.Errorf("failed to get parent space ids: %w", err)
}
if ok := slices.Contains(spaceIDs, *label.SpaceID); !ok {
return errors.InvalidArgumentf("label %d is not defined in current space tree path", label.ID)
}
}
return nil
}
func (s *Service) backfillPullreqCount(
ctx context.Context,
labels []*types.Label,
) error {
ids := make([]int64, len(labels))
for i, label := range labels {
ids[i] = label.ID
}
counts, err := s.pullReqLabelAssignmentStore.CountPullreqAssignments(ctx, ids)
if err != nil {
return fmt.Errorf("failed to count pullreq assignments: %w", err)
}
for _, label := range labels {
label.PullreqCount = counts[label.ID]
}
return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/label/wire.go | app/services/label/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package label
import (
"github.com/harness/gitness/app/services/refcache"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database/dbtx"
"github.com/google/wire"
)
var WireSet = wire.NewSet(
ProvideLabel,
)
func ProvideLabel(
tx dbtx.Transactor,
spaceStore store.SpaceStore,
labelStore store.LabelStore,
labelValueStore store.LabelValueStore,
pullReqLabelStore store.PullReqLabelAssignmentStore,
spaceFinder refcache.SpaceFinder,
) *Service {
return New(tx, spaceStore, labelStore, labelValueStore, pullReqLabelStore, spaceFinder)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/label/service.go | app/services/label/service.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package label
import (
"github.com/harness/gitness/app/services/refcache"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database/dbtx"
)
type Service struct {
tx dbtx.Transactor
spaceStore store.SpaceStore
labelStore store.LabelStore
labelValueStore store.LabelValueStore
pullReqLabelAssignmentStore store.PullReqLabelAssignmentStore
spaceFinder refcache.SpaceFinder
}
func New(
tx dbtx.Transactor,
spaceStore store.SpaceStore,
labelStore store.LabelStore,
labelValueStore store.LabelValueStore,
pullReqLabelAssignmentStore store.PullReqLabelAssignmentStore,
spaceFinder refcache.SpaceFinder,
) *Service {
return &Service{
tx: tx,
spaceStore: spaceStore,
labelStore: labelStore,
labelValueStore: labelValueStore,
pullReqLabelAssignmentStore: pullReqLabelAssignmentStore,
spaceFinder: spaceFinder,
}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/label/label_value.go | app/services/label/label_value.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package label
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/types"
)
func (s *Service) DefineValue(
ctx context.Context,
principalID int64,
labelID int64,
in *types.DefineValueInput,
) (*types.LabelValue, error) {
labelValue := newLabelValue(principalID, labelID, in)
err := s.tx.WithTx(ctx, func(ctx context.Context) error {
if err := s.labelValueStore.Define(ctx, labelValue); err != nil {
return err
}
if _, err := s.labelStore.IncrementValueCount(ctx, labelID, 1); err != nil {
return err
}
return nil
})
if err != nil {
return nil, err
}
return labelValue, nil
}
func applyValueChanges(
principalID int64,
value *types.LabelValue,
in *types.UpdateValueInput,
) (*types.LabelValue, bool) {
hasChanges := false
if value.UpdatedBy != principalID {
hasChanges = true
value.UpdatedBy = principalID
}
if in.Value != nil && value.Value != *in.Value {
hasChanges = true
value.Value = *in.Value
}
if in.Color != nil && value.Color != *in.Color {
hasChanges = true
value.Color = *in.Color
}
if hasChanges {
value.Updated = time.Now().UnixMilli()
}
return value, hasChanges
}
func (s *Service) UpdateValue(
ctx context.Context,
principalID int64,
labelID int64,
value string,
in *types.UpdateValueInput,
) (*types.LabelValue, error) {
labelValue, err := s.labelValueStore.FindByLabelID(ctx, labelID, value)
if err != nil {
return nil, fmt.Errorf("failed to find label value: %w", err)
}
return s.updateValue(ctx, principalID, labelValue, in)
}
func (s *Service) updateValue(
ctx context.Context,
principalID int64,
labelValue *types.LabelValue,
in *types.UpdateValueInput,
) (*types.LabelValue, error) {
labelValue, hasChanges := applyValueChanges(
principalID, labelValue, in)
if !hasChanges {
return labelValue, nil
}
if err := s.labelValueStore.Update(ctx, labelValue); err != nil {
return nil, fmt.Errorf("failed to update label value: %w", err)
}
return labelValue, nil
}
func (s *Service) ListValues(
ctx context.Context,
spaceID, repoID *int64,
labelKey string,
filter types.ListQueryFilter,
) ([]*types.LabelValue, int64, error) {
var count int64
var values []*types.LabelValue
err := s.tx.WithTx(ctx, func(ctx context.Context) error {
label, err := s.labelStore.Find(ctx, spaceID, repoID, labelKey)
if err != nil {
return fmt.Errorf("failed to find label: %w", err)
}
values, err = s.labelValueStore.List(ctx, label.ID, filter)
if err != nil {
return fmt.Errorf("failed to list label values: %w", err)
}
if filter.Page == 1 && len(values) < filter.Size {
count = int64(len(values))
return nil
}
count, err = s.labelValueStore.Count(ctx, label.ID, filter)
if err != nil {
return fmt.Errorf("failed to count label values: %w", err)
}
return nil
})
if err != nil {
return nil, 0, err
}
return values, count, nil
}
func (s *Service) DeleteValue(
ctx context.Context,
spaceID, repoID *int64,
labelKey string,
value string,
) error {
label, err := s.labelStore.Find(ctx, spaceID, repoID, labelKey)
if err != nil {
return err
}
err = s.tx.WithTx(ctx, func(ctx context.Context) error {
if err := s.labelValueStore.Delete(ctx, label.ID, value); err != nil {
return err
}
if _, err := s.labelStore.IncrementValueCount(ctx, label.ID, -1); err != nil {
return err
}
return nil
})
if err != nil {
return err
}
return nil
}
func newLabelValue(
principalID int64,
labelID int64,
in *types.DefineValueInput,
) *types.LabelValue {
now := time.Now().UnixMilli()
return &types.LabelValue{
LabelID: labelID,
Value: in.Value,
Color: in.Color,
Created: now,
Updated: now,
CreatedBy: principalID,
UpdatedBy: principalID,
}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/label/label.go | app/services/label/label.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package label
import (
"context"
"fmt"
"sort"
"time"
"github.com/harness/gitness/app/store/database"
"github.com/harness/gitness/errors"
"github.com/harness/gitness/store"
"github.com/harness/gitness/types"
)
const labelScopeRepo = int64(0)
func (s *Service) Define(
ctx context.Context,
principalID int64,
spaceID, repoID *int64,
in *types.DefineLabelInput,
) (*types.Label, error) {
scope := labelScopeRepo
if spaceID != nil {
var err error
scope, err = s.spaceStore.GetTreeLevel(ctx, *spaceID)
if err != nil {
return nil, fmt.Errorf("failed to get space tree level: %w", err)
}
}
label := newLabel(principalID, spaceID, repoID, scope, in)
if err := s.labelStore.Define(ctx, label); err != nil {
return nil, err
}
return label, nil
}
func (s *Service) Update(
ctx context.Context,
principalID int64,
spaceID, repoID *int64,
key string,
in *types.UpdateLabelInput,
) (*types.Label, error) {
label, err := s.labelStore.Find(ctx, spaceID, repoID, key)
if err != nil {
return nil, fmt.Errorf("failed to find repo label: %w", err)
}
return s.update(ctx, principalID, label, in)
}
func (s *Service) update(
ctx context.Context,
principalID int64,
label *types.Label,
in *types.UpdateLabelInput,
) (*types.Label, error) {
label, hasChanges := applyChanges(principalID, label, in)
if !hasChanges {
return label, nil
}
err := s.labelStore.Update(ctx, label)
if err != nil {
return nil, fmt.Errorf("failed to update label: %w", err)
}
return label, nil
}
//nolint:gocognit
func (s *Service) Save(
ctx context.Context,
principalID int64,
spaceID, repoID *int64,
in *types.SaveInput,
) (*types.LabelWithValues, error) {
var label *types.Label
var valuesToReturn []*types.LabelValue
var err error
err = s.tx.WithTx(ctx, func(ctx context.Context) error {
label, err = s.labelStore.FindByID(ctx, in.Label.ID)
if err != nil { //nolint:nestif
if !errors.Is(err, store.ErrResourceNotFound) {
return err
}
label, err = s.Define(ctx, principalID, spaceID, repoID, &in.Label.DefineLabelInput)
if err != nil {
return err
}
} else {
if err := checkLabelInScope(spaceID, repoID, label); err != nil {
return err
}
label, err = s.update(ctx, principalID, label, &types.UpdateLabelInput{
Key: &in.Label.Key,
Type: &in.Label.Type,
Description: &in.Label.Description,
Color: &in.Label.Color,
})
if err != nil {
return err
}
}
existingValues, err := s.labelValueStore.List(
ctx,
label.ID,
types.ListQueryFilter{
Pagination: types.Pagination{
Size: database.MaxLabelValueSize,
},
},
)
if err != nil {
return err
}
existingValuesMap := make(map[int64]*types.LabelValue, len(existingValues))
for _, value := range existingValues {
existingValuesMap[value.ID] = value
}
var valuesToCreate []*types.SaveLabelValueInput
valuesToUpdate := make(map[int64]*types.SaveLabelValueInput)
var valuesToDelete []string
for _, value := range in.Values {
if _, ok := existingValuesMap[value.ID]; ok {
valuesToUpdate[value.ID] = value
} else {
valuesToCreate = append(valuesToCreate, value)
}
}
for _, value := range existingValues {
if _, ok := valuesToUpdate[value.ID]; !ok {
valuesToDelete = append(valuesToDelete, value.Value)
}
}
valuesToReturn = make([]*types.LabelValue, len(valuesToCreate)+len(valuesToUpdate))
for i, value := range valuesToCreate {
valuesToReturn[i] = newLabelValue(principalID, label.ID, &value.DefineValueInput)
if err = s.labelValueStore.Define(ctx, valuesToReturn[i]); err != nil {
if errors.Is(err, store.ErrDuplicate) {
return errors.Conflictf("value %s already exists", valuesToReturn[i].Value)
}
return err
}
}
i := len(valuesToCreate)
for _, value := range valuesToUpdate {
if valuesToReturn[i], err = s.updateValue(ctx, principalID, existingValuesMap[value.ID], &types.UpdateValueInput{
Value: &value.Value,
Color: &value.Color,
}); err != nil {
return err
}
i++
}
if err = s.labelValueStore.DeleteMany(ctx, label.ID, valuesToDelete); err != nil {
return err
}
if label.ValueCount, err = s.labelStore.IncrementValueCount(
ctx, label.ID, len(valuesToCreate)-len(valuesToDelete)); err != nil {
return err
}
sort.Slice(valuesToReturn, func(i, j int) bool {
return valuesToReturn[i].Value < valuesToReturn[j].Value
})
return nil
})
if err != nil {
return nil, fmt.Errorf("failed to save label: %w", err)
}
return &types.LabelWithValues{
Label: *label,
Values: valuesToReturn,
}, nil
}
func (s *Service) Find(
ctx context.Context,
spaceID, repoID *int64,
key string,
) (*types.Label, error) {
return s.labelStore.Find(ctx, spaceID, repoID, key)
}
func (s *Service) FindWithValues(
ctx context.Context,
spaceID, repoID *int64,
key string,
) (*types.LabelWithValues, error) {
var label *types.Label
var values []*types.LabelValue
var err error
label, err = s.labelStore.Find(ctx, spaceID, repoID, key)
if err != nil {
return nil, fmt.Errorf("failed to find label: %w", err)
}
values, err = s.labelValueStore.List(
ctx,
label.ID,
types.ListQueryFilter{
Pagination: types.Pagination{
Size: database.MaxLabelValueSize,
},
},
)
if err != nil {
return nil, fmt.Errorf("failed to list label values: %w", err)
}
return &types.LabelWithValues{
Label: *label,
Values: values,
}, nil
}
func (s *Service) FindByID(ctx context.Context, labelID int64) (*types.Label, error) {
return s.labelStore.FindByID(ctx, labelID)
}
func (s *Service) List(
ctx context.Context,
spaceID, repoID *int64,
filter *types.LabelFilter,
) ([]*types.Label, int64, error) {
var labels []*types.Label
var count int64
var err error
if filter.Inherited {
labels, count, err = s.listInScopes(ctx, spaceID, repoID, filter)
} else {
labels, count, err = s.list(ctx, spaceID, repoID, filter)
}
if err != nil {
return nil, 0, err
}
if filter.IncludePullreqCount {
if err := s.backfillPullreqCount(ctx, labels); err != nil {
return nil, 0, err
}
}
return labels, count, nil
}
func (s *Service) list(
ctx context.Context,
spaceID, repoID *int64,
filter *types.LabelFilter,
) ([]*types.Label, int64, error) {
if repoID != nil {
total, err := s.labelStore.CountInRepo(ctx, *repoID, filter)
if err != nil {
return nil, 0, err
}
labels, err := s.labelStore.List(ctx, nil, repoID, filter)
if err != nil {
return nil, 0, err
}
return labels, total, nil
}
count, err := s.labelStore.CountInSpace(ctx, *spaceID, filter)
if err != nil {
return nil, 0, err
}
labels, err := s.labelStore.List(ctx, spaceID, nil, filter)
if err != nil {
return nil, 0, err
}
return labels, count, nil
}
func (s *Service) listInScopes(
ctx context.Context,
spaceID, repoID *int64,
filter *types.LabelFilter,
) ([]*types.Label, int64, error) {
var spaceIDs []int64
var repoIDVal int64
var err error
if repoID != nil {
spaceIDs, err = s.spaceStore.GetAncestorIDs(ctx, *spaceID)
if err != nil {
return nil, 0, err
}
repoIDVal = *repoID
} else {
spaceIDs, err = s.spaceStore.GetAncestorIDs(ctx, *spaceID)
if err != nil {
return nil, 0, err
}
}
total, err := s.labelStore.CountInScopes(ctx, repoIDVal, spaceIDs, filter)
if err != nil {
return nil, 0, err
}
labels, err := s.labelStore.ListInScopes(ctx, repoIDVal, spaceIDs, filter)
if err != nil {
return nil, 0, err
}
return labels, total, nil
}
func (s *Service) Delete(
ctx context.Context,
spaceID, repoID *int64,
key string,
) error {
return s.labelStore.Delete(ctx, spaceID, repoID, key)
}
func newLabel(
principalID int64,
spaceID, repoID *int64,
scope int64,
in *types.DefineLabelInput,
) *types.Label {
now := time.Now().UnixMilli()
return &types.Label{
RepoID: repoID,
SpaceID: spaceID,
Scope: scope,
Key: in.Key,
Type: in.Type,
Description: in.Description,
Color: in.Color,
Created: now,
Updated: now,
CreatedBy: principalID,
UpdatedBy: principalID,
}
}
func applyChanges(principalID int64, label *types.Label, in *types.UpdateLabelInput) (*types.Label, bool) {
hasChanges := false
if label.UpdatedBy != principalID {
hasChanges = true
label.UpdatedBy = principalID
}
if in.Key != nil && label.Key != *in.Key {
hasChanges = true
label.Key = *in.Key
}
if in.Description != nil && label.Description != *in.Description {
hasChanges = true
label.Description = *in.Description
}
if in.Color != nil && label.Color != *in.Color {
hasChanges = true
label.Color = *in.Color
}
if in.Type != nil && label.Type != *in.Type {
hasChanges = true
label.Type = *in.Type
}
if hasChanges {
label.Updated = time.Now().UnixMilli()
}
return label, hasChanges
}
func checkLabelInScope(
spaceID, repoID *int64,
label *types.Label,
) error {
if (repoID != nil && (label.RepoID == nil || *label.RepoID != *repoID)) ||
(spaceID != nil && (label.SpaceID == nil || *label.SpaceID != *spaceID)) {
return errors.InvalidArgument("label is not defined in requested scope")
}
return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/rules/wire.go | app/services/rules/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
ruleevents "github.com/harness/gitness/app/events/rule"
"github.com/harness/gitness/app/services/instrument"
"github.com/harness/gitness/app/services/protection"
"github.com/harness/gitness/app/services/usergroup"
"github.com/harness/gitness/app/sse"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/audit"
"github.com/harness/gitness/store/database/dbtx"
"github.com/google/wire"
)
// WireSet provides a wire set for this package.
var WireSet = wire.NewSet(
ProvideService,
)
func ProvideService(
tx dbtx.Transactor,
ruleStore store.RuleStore,
repoStore store.RepoStore,
spaceStore store.SpaceStore,
protectionManager *protection.Manager,
auditService audit.Service,
instrumentation instrument.Service,
principalInfoCache store.PrincipalInfoCache,
userGroupStore store.UserGroupStore,
userGroupService usergroup.Service,
eventReporter *ruleevents.Reporter,
sseStreamer sse.Streamer,
ruleValidator Validator,
repoIDCache store.RepoIDCache,
) *Service {
return NewService(
tx,
ruleStore,
repoStore,
spaceStore,
protectionManager,
auditService,
instrumentation,
principalInfoCache,
userGroupStore,
userGroupService,
eventReporter,
sseStreamer,
ruleValidator,
repoIDCache,
)
}
// ProvideValidator returns the default rule Validator implementation.
func ProvideValidator() Validator {
	var v validator
	return v
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/rules/create.go | app/services/rules/create.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/harness/gitness/app/api/usererror"
ruleevents "github.com/harness/gitness/app/events/rule"
"github.com/harness/gitness/app/paths"
"github.com/harness/gitness/app/services/instrument"
"github.com/harness/gitness/app/services/protection"
"github.com/harness/gitness/audit"
"github.com/harness/gitness/errors"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/check"
"github.com/harness/gitness/types/enum"
"github.com/rs/zerolog/log"
)
// ruleScopeRepo is the scope value stored for repository-level rules;
// space-level rules store the owning space's tree level instead (see Create).
const ruleScopeRepo = int64(0)
// CreateInput is the request payload for creating a protection rule.
type CreateInput struct {
	// Type of the rule; defaults to a branch rule when empty (see sanitize).
	Type  enum.RuleType  `json:"type"`
	State enum.RuleState `json:"state"`
	// TODO [CODE-1363]: remove after identifier migration.
	UID         string                `json:"uid" deprecated:"true"`
	Identifier  string                `json:"identifier"`
	Description string                `json:"description"`
	Pattern     protection.Pattern    `json:"pattern"`
	RepoTarget  protection.RepoTarget `json:"repo_target"`
	// Definition is the raw, type-specific rule definition; it is sanitized
	// by the protection manager before being persisted.
	Definition json.RawMessage `json:"definition"`
}
// sanitize validates and sanitizes the create rule input data.
func (in *CreateInput) sanitize() error {
	// TODO [CODE-1363]: remove after identifier migration.
	if in.Identifier == "" {
		in.Identifier = in.UID
	}

	if err := check.Identifier(in.Identifier); err != nil {
		return err
	}

	if err := in.Pattern.Validate(); err != nil {
		return usererror.BadRequestf("Invalid pattern: %s", err)
	}

	if err := in.RepoTarget.Validate(); err != nil {
		return usererror.BadRequestf("Invalid repo target: %s", err)
	}

	state, ok := in.State.Sanitize()
	if !ok {
		return usererror.BadRequest("Rule state is invalid")
	}
	in.State = state

	// Default to a branch rule when no type is supplied.
	if in.Type == "" {
		in.Type = protection.TypeBranch
	}

	if len(in.Definition) == 0 {
		return usererror.BadRequest("Rule definition missing")
	}

	return nil
}
// Create creates a new protection rule for a scope (repository or space).
// It sanitizes the input, persists the rule, writes an audit entry,
// tracks an instrumentation event, publishes an SSE message, and reports
// a rule-created event. Audit and instrumentation failures are logged
// but do not fail the operation.
func (s *Service) Create(ctx context.Context,
	principal *types.Principal,
	parentType enum.RuleParent,
	parentID int64,
	scopeIdentifier string,
	path string,
	in *CreateInput,
) (*types.Rule, error) {
	if err := in.sanitize(); err != nil {
		return nil, err
	}
	var err error
	// Normalize the raw definition JSON for the rule type.
	in.Definition, err = s.protectionManager.SanitizeJSON(in.Type, in.Definition)
	if err != nil {
		return nil, errors.InvalidArgument("Invalid rule definition.")
	}
	// Repo-level rules use the fixed repo scope; space-level rules record
	// the space's depth in the space tree.
	scope := ruleScopeRepo
	if parentType == enum.RuleParentSpace {
		scope, err = s.spaceStore.GetTreeLevel(ctx, parentID)
		if err != nil {
			return nil, fmt.Errorf("failed to get parent tree level: %w", err)
		}
	}
	now := time.Now().UnixMilli()
	rule := &types.Rule{
		CreatedBy:     principal.ID,
		Created:       now,
		Updated:       now,
		Type:          in.Type,
		State:         in.State,
		Identifier:    in.Identifier,
		Description:   in.Description,
		Pattern:       in.Pattern.JSON(),
		RepoTarget:    in.RepoTarget.JSON(),
		Definition:    in.Definition,
		Scope:         scope,
		CreatedByInfo: types.PrincipalInfo{},
	}
	// Audit path and parent linkage depend on the parent scope.
	spacePath := path
	nameKey := audit.RepoName
	switch parentType {
	case enum.RuleParentRepo:
		spacePath = paths.Parent(path)
		rule.RepoID = &parentID
	case enum.RuleParentSpace:
		nameKey = audit.SpaceName
		rule.SpaceID = &parentID
	}
	// Resolve and validate users referenced by the rule before persisting.
	userMap, ruleUserIDs, userGroupMap, _, err := s.getRuleUserAndUserGroups(ctx, rule)
	if err != nil {
		return nil, fmt.Errorf("failed to get rule users and user groups: %w", err)
	}
	if err := s.ruleValidator.Validate(ctx, ruleUserIDs, userMap); err != nil {
		return nil, fmt.Errorf("failed to validate users: %w", err)
	}
	err = s.ruleStore.Create(ctx, rule)
	if err != nil {
		return nil, fmt.Errorf("failed to create protection rule: %w", err)
	}
	// Best-effort audit write; a failure is only logged.
	err = s.auditService.Log(ctx,
		*principal,
		audit.NewResource(ruleTypeToResourceType(rule.Type), rule.Identifier, nameKey, scopeIdentifier),
		audit.ActionCreated,
		spacePath,
		audit.WithNewObject(rule),
	)
	if err != nil {
		log.Ctx(ctx).Warn().Msgf("failed to insert audit log for create rule operation: %s", err)
	}
	rule.Users = userMap
	rule.UserGroups = userGroupMap
	err = s.backfillRuleRepositories(ctx, rule)
	if err != nil {
		return nil, fmt.Errorf("failed to backfill rule repositories: %w", err)
	}
	var event instrument.Event
	switch parentType {
	case enum.RuleParentRepo:
		event = instrumentEventRepo(
			rule.ID, principal.ToPrincipalInfo(), parentID, scopeIdentifier, path,
		)
	case enum.RuleParentSpace:
		event = instrumentEventSpace(
			rule.ID, principal.ToPrincipalInfo(), parentID, scopeIdentifier, path,
		)
	}
	// Best-effort instrumentation; a failure is only logged.
	err = s.instrumentation.Track(ctx, event)
	if err != nil {
		log.Ctx(ctx).Warn().Msgf("failed to insert instrumentation record for create branch rule operation: %s", err)
	}
	s.sendSSE(ctx, parentID, parentType, enum.SSETypeRuleCreated, rule)
	s.eventReporter.Created(ctx, &ruleevents.CreatedPayload{
		Base: ruleevents.Base{
			RuleID:      rule.ID,
			SpaceID:     rule.SpaceID,
			RepoID:      rule.RepoID,
			PrincipalID: rule.CreatedBy,
		},
	})
	return rule, nil
}
// instrumentEventRepo builds the instrumentation event for a rule created
// at repository scope.
func instrumentEventRepo(
	ruleID int64,
	principalInfo *types.PrincipalInfo,
	scopeID int64,
	scopeIdentifier string,
	path string,
) instrument.Event {
	props := map[instrument.Property]any{
		instrument.PropertyRepositoryID:   scopeID,
		instrument.PropertyRepositoryName: scopeIdentifier,
		instrument.PropertyRuleID:         ruleID,
	}
	return instrument.Event{
		Type:       instrument.EventTypeCreateBranchRule,
		Principal:  principalInfo,
		Path:       path,
		Properties: props,
	}
}
// instrumentEventSpace builds the instrumentation event for a rule created
// at space scope.
// NOTE(review): the event type is EventTypeCreateBranchRule even at space
// scope — confirm a space-specific event type isn't intended.
func instrumentEventSpace(
	ruleID int64,
	principalInfo *types.PrincipalInfo,
	scopeID int64,
	scopeIdentifier string,
	path string,
) instrument.Event {
	props := map[instrument.Property]any{
		instrument.PropertySpaceID:   scopeID,
		instrument.PropertySpaceName: scopeIdentifier,
		instrument.PropertyRuleID:    ruleID,
	}
	return instrument.Event{
		Type:       instrument.EventTypeCreateBranchRule,
		Principal:  principalInfo,
		Path:       path,
		Properties: props,
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/rules/service.go | app/services/rules/service.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
ruleevents "github.com/harness/gitness/app/events/rule"
"github.com/harness/gitness/app/services/instrument"
"github.com/harness/gitness/app/services/protection"
"github.com/harness/gitness/app/services/usergroup"
"github.com/harness/gitness/app/sse"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/audit"
"github.com/harness/gitness/store/database/dbtx"
)
// Service is responsible for processing protection rules: create, read,
// update, delete, and list operations, plus auditing, instrumentation,
// SSE publishing, and rule event reporting.
type Service struct {
	tx                 dbtx.Transactor
	ruleStore          store.RuleStore
	repoStore          store.RepoStore
	spaceStore         store.SpaceStore
	protectionManager  *protection.Manager
	auditService       audit.Service
	instrumentation    instrument.Service
	principalInfoCache store.PrincipalInfoCache
	userGroupStore     store.UserGroupStore
	userGroupService   usergroup.Service
	eventReporter      *ruleevents.Reporter
	ruleValidator      Validator
	repoIDCache        store.RepoIDCache
	sseStreamer        sse.Streamer
}
// NewService creates a new rules Service wired with all its dependencies.
func NewService(
	tx dbtx.Transactor,
	ruleStore store.RuleStore,
	repoStore store.RepoStore,
	spaceStore store.SpaceStore,
	protectionManager *protection.Manager,
	auditService audit.Service,
	instrumentation instrument.Service,
	principalInfoCache store.PrincipalInfoCache,
	userGroupStore store.UserGroupStore,
	userGroupService usergroup.Service,
	eventReporter *ruleevents.Reporter,
	sseStreamer sse.Streamer,
	ruleValidator Validator,
	repoIDCache store.RepoIDCache,
) *Service {
	return &Service{
		tx:                 tx,
		ruleStore:          ruleStore,
		repoStore:          repoStore,
		spaceStore:         spaceStore,
		protectionManager:  protectionManager,
		auditService:       auditService,
		instrumentation:    instrumentation,
		principalInfoCache: principalInfoCache,
		userGroupStore:     userGroupStore,
		userGroupService:   userGroupService,
		eventReporter:      eventReporter,
		sseStreamer:        sseStreamer,
		ruleValidator:      ruleValidator,
		repoIDCache:        repoIDCache,
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/rules/delete.go | app/services/rules/delete.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
"context"
"fmt"
"github.com/harness/gitness/app/paths"
"github.com/harness/gitness/audit"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/rs/zerolog/log"
)
// Delete deletes a protection rule by identifier, writes a best-effort
// audit entry, and publishes a rule-deleted SSE message.
func (s *Service) Delete(ctx context.Context,
	principal *types.Principal,
	parentType enum.RuleParent,
	parentID int64,
	scopeIdentifier string,
	path string,
	identifier string,
) error {
	rule, err := s.ruleStore.FindByIdentifier(ctx, parentType, parentID, identifier)
	if err != nil {
		return fmt.Errorf("failed to find protection rule by identifier: %w", err)
	}

	if err := s.ruleStore.Delete(ctx, rule.ID); err != nil {
		return fmt.Errorf("failed to delete protection rule: %w", err)
	}

	// Resource name key depends on whether the rule belonged to a repo or a space.
	nameKey := audit.RepoName
	if parentType == enum.RuleParentSpace {
		nameKey = audit.SpaceName
	}

	// A failed audit write is logged but does not fail the deletion.
	if auditErr := s.auditService.Log(ctx,
		*principal,
		audit.NewResource(
			ruleTypeToResourceType(rule.Type),
			rule.Identifier,
			nameKey,
			scopeIdentifier,
		),
		audit.ActionDeleted,
		paths.Parent(path),
		audit.WithOldObject(rule),
	); auditErr != nil {
		log.Ctx(ctx).Warn().Msgf("failed to insert audit log for delete rule operation: %s", auditErr)
	}

	s.sendSSE(ctx, parentID, parentType, enum.SSETypeRuleDeleted, rule)

	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/rules/validator.go | app/services/rules/validator.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
"context"
"github.com/harness/gitness/app/api/usererror"
"github.com/harness/gitness/types"
)
// Validator checks the user IDs referenced by a protection rule against the
// resolved principal-info map (see ValidateUsers for the default behavior).
type Validator interface {
	Validate(context.Context, []int64, map[int64]*types.PrincipalInfo) error
}
// validator is the default Validator implementation.
type validator struct{}

// Validate delegates to ValidateUsers; the context is unused.
func (v validator) Validate(
	_ context.Context,
	ruleUserIDs []int64,
	userMap map[int64]*types.PrincipalInfo,
) error {
	return ValidateUsers(ruleUserIDs, userMap)
}
// ValidateUsers returns a bad-request error listing every distinct ID from
// ruleUserIDs that has no entry in userMap; it returns nil when all IDs resolve.
func ValidateUsers(
	ruleUserIDs []int64,
	userMap map[int64]*types.PrincipalInfo,
) error {
	seen := make(map[int64]struct{}, len(ruleUserIDs))
	var missing []int64
	for _, id := range ruleUserIDs {
		if _, dup := seen[id]; dup {
			// Each ID is checked (and reported) at most once.
			continue
		}
		seen[id] = struct{}{}
		if _, ok := userMap[id]; !ok {
			missing = append(missing, id)
		}
	}

	if len(missing) == 0 {
		return nil
	}

	return usererror.BadRequestf(
		"unknown users in bypass and/or reviewer list: %v", missing,
	)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/rules/find.go | app/services/rules/find.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
"context"
"fmt"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
// Find returns the protection rule by identifier, with its referenced users,
// user groups, and repositories backfilled.
func (s *Service) Find(ctx context.Context,
	parentType enum.RuleParent,
	parentID int64,
	identifier string,
) (*types.Rule, error) {
	rule, err := s.ruleStore.FindByIdentifier(ctx, parentType, parentID, identifier)
	if err != nil {
		return nil, fmt.Errorf("failed to find protection rule by identifier: %w", err)
	}

	users, _, userGroups, _, err := s.getRuleUserAndUserGroups(ctx, rule)
	if err != nil {
		return nil, fmt.Errorf("failed to get rule users and user groups: %w", err)
	}
	rule.Users = users
	rule.UserGroups = userGroups

	if err := s.backfillRuleRepositories(ctx, rule); err != nil {
		return nil, fmt.Errorf("failed to backfill rule repositories: %w", err)
	}

	return rule, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/rules/list.go | app/services/rules/list.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
"context"
"fmt"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
// List returns protection rules for a scope, optionally including rules
// inherited from ancestor spaces, with users, user groups, and repositories
// backfilled on each returned rule.
func (s *Service) List(ctx context.Context,
	parentID int64,
	parentType enum.RuleParent,
	inherited bool,
	filter *types.RuleFilter,
) ([]types.Rule, int64, error) {
	var parents []types.RuleParentInfo
	var err error

	// Resolve the set of parents to query (the repo/space itself plus its
	// ancestor spaces when inherited rules are requested).
	switch parentType {
	case enum.RuleParentRepo:
		parents, err = s.getParentInfoRepo(ctx, parentID, inherited)
		if err != nil {
			return nil, 0, err
		}
	case enum.RuleParentSpace:
		parents, err = s.getParentInfoSpace(ctx, parentID, inherited)
		if err != nil {
			return nil, 0, err
		}
	default:
		// Fix: the message previously said "webhook type" — a copy/paste
		// error; this service lists protection rules, not webhooks.
		return nil, 0, fmt.Errorf("rule parent type %s is not supported", parentType)
	}

	var list []types.Rule
	var count int64
	err = s.tx.WithTx(ctx, func(ctx context.Context) error {
		list, err = s.ruleStore.List(ctx, parents, filter)
		if err != nil {
			return fmt.Errorf("failed to list protection rules: %w", err)
		}

		// A non-full first page already determines the total count; skip
		// the extra count query.
		if filter.Page == 1 && len(list) < filter.Size {
			count = int64(len(list))
			return nil
		}

		count, err = s.ruleStore.Count(ctx, parents, filter)
		if err != nil {
			return fmt.Errorf("failed to count protection rules: %w", err)
		}

		return nil
	}, dbtx.TxDefaultReadOnly)
	if err != nil {
		return nil, 0, err
	}

	// Backfill user, user-group, and repository info on each rule.
	for i := range list {
		rule := &list[i]
		rule.Users, _, rule.UserGroups, _, err = s.getRuleUserAndUserGroups(ctx, rule)
		if err != nil {
			return nil, 0, err
		}

		err = s.backfillRuleRepositories(ctx, rule)
		if err != nil {
			return nil, 0, fmt.Errorf("failed to backfill rule repositories: %w", err)
		}
	}

	return list, count, nil
}
// getParentInfoRepo returns the repo itself as a rule parent and, when
// inherited is set, appends every ancestor space of the repo's parent space.
func (s *Service) getParentInfoRepo(
	ctx context.Context,
	repoID int64,
	inherited bool,
) ([]types.RuleParentInfo, error) {
	parents := []types.RuleParentInfo{{
		ID:   repoID,
		Type: enum.RuleParentRepo,
	}}

	if !inherited {
		return parents, nil
	}

	repo, err := s.repoStore.Find(ctx, repoID)
	if err != nil {
		return nil, fmt.Errorf("failed to get repo: %w", err)
	}

	ancestorIDs, err := s.spaceStore.GetAncestorIDs(ctx, repo.ParentID)
	if err != nil {
		return nil, fmt.Errorf("failed to get parent space ids: %w", err)
	}

	for _, ancestorID := range ancestorIDs {
		parents = append(parents, types.RuleParentInfo{
			Type: enum.RuleParentSpace,
			ID:   ancestorID,
		})
	}

	return parents, nil
}
// getParentInfoSpace returns the space itself as the sole rule parent, or —
// when inherited is set — the space's full ancestor chain (which includes it).
func (s *Service) getParentInfoSpace(
	ctx context.Context,
	spaceID int64,
	inherited bool,
) ([]types.RuleParentInfo, error) {
	if !inherited {
		return []types.RuleParentInfo{{
			Type: enum.RuleParentSpace,
			ID:   spaceID,
		}}, nil
	}

	ancestorIDs, err := s.spaceStore.GetAncestorIDs(ctx, spaceID)
	if err != nil {
		return nil, fmt.Errorf("failed to get parent space ids: %w", err)
	}

	parents := make([]types.RuleParentInfo, 0, len(ancestorIDs))
	for _, ancestorID := range ancestorIDs {
		parents = append(parents, types.RuleParentInfo{
			Type: enum.RuleParentSpace,
			ID:   ancestorID,
		})
	}

	return parents, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/rules/update.go | app/services/rules/update.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
"context"
"encoding/json"
"fmt"
"github.com/harness/gitness/app/api/usererror"
"github.com/harness/gitness/app/paths"
"github.com/harness/gitness/app/services/protection"
"github.com/harness/gitness/audit"
"github.com/harness/gitness/errors"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/check"
"github.com/harness/gitness/types/enum"
"github.com/rs/zerolog/log"
)
// UpdateInput is the request payload for updating a protection rule.
// All fields are optional pointers; a nil field leaves the corresponding
// rule field unchanged.
type UpdateInput struct {
	// TODO [CODE-1363]: remove after identifier migration.
	UID         *string                `json:"uid" deprecated:"true"`
	Identifier  *string                `json:"identifier"`
	State       *enum.RuleState        `json:"state"`
	Description *string                `json:"description"`
	Pattern     *protection.Pattern    `json:"pattern"`
	RepoTarget  *protection.RepoTarget `json:"repo_target"`
	Definition  *json.RawMessage       `json:"definition"`
}
// sanitize validates and sanitizes the update rule input data.
// Only fields that are present (non-nil) are validated.
func (in *UpdateInput) sanitize() error {
	// TODO [CODE-1363]: remove after identifier migration.
	if in.Identifier == nil {
		in.Identifier = in.UID
	}
	if in.Identifier != nil {
		if err := check.Identifier(*in.Identifier); err != nil {
			return err
		}
	}
	if in.State != nil {
		state, ok := in.State.Sanitize()
		if !ok {
			return usererror.BadRequest("Rule state is invalid")
		}
		in.State = &state
	}
	if in.Pattern != nil {
		if err := in.Pattern.Validate(); err != nil {
			return usererror.BadRequestf("Invalid pattern: %s", err)
		}
	}
	if in.RepoTarget != nil {
		if err := in.RepoTarget.Validate(); err != nil {
			return usererror.BadRequestf("Invalid repo target: %s", err)
		}
	}
	// A present-but-empty definition is rejected; nil means "unchanged".
	if in.Definition != nil && len(*in.Definition) == 0 {
		return usererror.BadRequest("Rule definition missing")
	}
	return nil
}
// isEmpty reports whether the update request carries no changes.
// UID is intentionally not checked: sanitize() folds it into Identifier
// before Update evaluates isEmpty.
func (in *UpdateInput) isEmpty() bool {
	// Fix: RepoTarget was previously missing from this check, so a request
	// updating only repo_target returned early and was never persisted.
	return in.Identifier == nil && in.State == nil && in.Description == nil &&
		in.Pattern == nil && in.RepoTarget == nil && in.Definition == nil
}
// Update updates an existing protection rule for a repository.
// Empty input returns the current rule unchanged; a no-op update (new state
// equals old state) skips the store write, audit, and SSE. Audit failures
// are logged but do not fail the operation.
func (s *Service) Update(ctx context.Context,
	principal *types.Principal,
	parentType enum.RuleParent,
	parentID int64,
	scopeIdentifier string,
	path string,
	identifier string,
	in *UpdateInput,
) (*types.Rule, error) {
	if err := in.sanitize(); err != nil {
		return nil, err
	}
	rule, err := s.ruleStore.FindByIdentifier(ctx, parentType, parentID, identifier)
	if err != nil {
		return nil, fmt.Errorf("failed to get a repository rule by its identifier: %w", err)
	}
	// Snapshot for the no-op comparison and the audit old/new diff.
	oldRule := rule.Clone()
	// Nothing to change: return the rule with users/groups backfilled.
	// NOTE(review): unlike Find, this early path does not backfill the
	// repositories map — confirm whether that is intentional.
	if in.isEmpty() {
		userMap, _, userGroupMap, _, err := s.getRuleUserAndUserGroups(ctx, rule)
		if err != nil {
			return nil, fmt.Errorf("failed to get rule users and user groups: %w", err)
		}
		rule.Users = userMap
		rule.UserGroups = userGroupMap
		return rule, nil
	}
	// Apply only the fields that are present in the input.
	if in.Identifier != nil {
		rule.Identifier = *in.Identifier
	}
	if in.State != nil {
		rule.State = *in.State
	}
	if in.Description != nil {
		rule.Description = *in.Description
	}
	if in.Pattern != nil {
		rule.Pattern = in.Pattern.JSON()
	}
	if in.RepoTarget != nil {
		rule.RepoTarget = in.RepoTarget.JSON()
	}
	if in.Definition != nil {
		rule.Definition, err = s.protectionManager.SanitizeJSON(rule.Type, *in.Definition)
		if err != nil {
			return nil, errors.InvalidArgument("Invalid rule definition.")
		}
	}
	// Resolve and validate referenced users before persisting.
	userMap, ruleUserIDs, userGroupMap, _, err := s.getRuleUserAndUserGroups(ctx, rule)
	if err != nil {
		return nil, fmt.Errorf("failed to get rule users and user groups: %w", err)
	}
	if err := s.ruleValidator.Validate(ctx, ruleUserIDs, userMap); err != nil {
		return nil, fmt.Errorf("failed to validate users: %w", err)
	}
	rule.Users = userMap
	rule.UserGroups = userGroupMap
	err = s.backfillRuleRepositories(ctx, rule)
	if err != nil {
		return nil, fmt.Errorf("failed to backfill rule repositories: %w", err)
	}
	// Skip the write (and audit/SSE) when nothing effectively changed.
	if rule.IsEqual(&oldRule) {
		return rule, nil
	}
	err = s.ruleStore.Update(ctx, rule)
	if err != nil {
		return nil, fmt.Errorf("failed to update repository-level protection rule: %w", err)
	}
	nameKey := audit.RepoName
	if parentType == enum.RuleParentSpace {
		nameKey = audit.SpaceName
	}
	// Best-effort audit write; a failure is only logged.
	err = s.auditService.Log(ctx,
		*principal,
		audit.NewResource(ruleTypeToResourceType(rule.Type), rule.Identifier, nameKey, scopeIdentifier),
		audit.ActionUpdated,
		paths.Parent(path),
		audit.WithOldObject(oldRule),
		audit.WithNewObject(rule),
	)
	if err != nil {
		log.Ctx(ctx).Warn().Msgf("failed to insert audit log for update rule operation: %s", err)
	}
	s.sendSSE(ctx, parentID, parentType, enum.SSETypeRuleUpdated, rule)
	return rule, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/rules/common.go | app/services/rules/common.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
"context"
"encoding/json"
"errors"
"fmt"
"github.com/harness/gitness/app/services/protection"
"github.com/harness/gitness/audit"
"github.com/harness/gitness/store"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/rs/zerolog/log"
)
// ruleTypeToResourceType maps a protection rule type to the correct
// audit.ResourceType. Unknown types fall back to the branch-rule resource.
func ruleTypeToResourceType(ruleType enum.RuleType) audit.ResourceType {
	switch ruleType {
	case protection.TypeTag:
		return audit.ResourceTypeTagRule
	case protection.TypePush:
		return audit.ResourceTypePushRule
	case protection.TypeBranch:
		return audit.ResourceTypeBranchRule
	default:
		return audit.ResourceTypeBranchRule
	}
}
// getRuleUserAndUserGroups parses the rule definition and resolves both the
// users and the user groups it references, returning the info maps together
// with the raw ID lists.
func (s *Service) getRuleUserAndUserGroups(
	ctx context.Context,
	rule *types.Rule,
) (
	map[int64]*types.PrincipalInfo, []int64,
	map[int64]*types.UserGroupInfo, []int64, //nolint:unparam
	error,
) {
	// "prot" avoids shadowing the protection package.
	prot, err := s.parseRule(rule)
	if err != nil {
		return nil, nil, nil, nil, fmt.Errorf("failed to parse rule: %w", err)
	}

	users, userIDs, err := s.getRuleUsers(ctx, prot)
	if err != nil {
		return nil, nil, nil, nil, fmt.Errorf("failed to get rule users: %w", err)
	}

	groups, groupIDs, err := s.getRuleUserGroups(ctx, prot)
	if err != nil {
		return nil, nil, nil, nil, fmt.Errorf("failed to get rule user groups: %w", err)
	}

	return users, userIDs, groups, groupIDs, nil
}
// getRuleUsers fetches principal info for every user ID referenced by the
// parsed rule.
func (s *Service) getRuleUsers(
	ctx context.Context,
	prot protection.Protection,
) (map[int64]*types.PrincipalInfo, []int64, error) {
	ids, err := prot.UserIDs()
	if err != nil {
		return nil, nil, fmt.Errorf("failed to get user IDs from rule: %w", err)
	}

	infos, err := s.principalInfoCache.Map(ctx, ids)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to get principal infos: %w", err)
	}

	return infos, ids, nil
}
// getRuleUserGroups fetches user-group info for every group ID referenced by
// the parsed rule. Returns an empty map when no groups are referenced.
func (s *Service) getRuleUserGroups(
	ctx context.Context,
	prot protection.Protection,
) (map[int64]*types.UserGroupInfo, []int64, error) {
	groupIDs, err := prot.UserGroupIDs()
	if err != nil {
		return nil, nil, fmt.Errorf("failed to get group IDs from rule: %w", err)
	}

	infos := make(map[int64]*types.UserGroupInfo, len(groupIDs))
	if len(groupIDs) == 0 {
		return infos, []int64{}, nil
	}

	groups, err := s.userGroupStore.Map(ctx, groupIDs)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to get userGroup infos: %w", err)
	}
	for id, group := range groups {
		infos[id] = group.ToUserGroupInfo()
	}

	return infos, groupIDs, nil
}
// parseRule deserializes the rule's JSON definition into a Protection
// implementation for the rule's type.
func (s *Service) parseRule(rule *types.Rule) (protection.Protection, error) {
	// "prot" avoids shadowing the protection package.
	prot, err := s.protectionManager.FromJSON(rule.Type, rule.Definition, false)
	if err != nil {
		return nil, fmt.Errorf("failed to parse json rule definition: %w", err)
	}
	return prot, nil
}
// sendSSE publishes a rule SSE message to the owning space. For repo-level
// rules the repo is looked up first to resolve its parent space; a lookup
// failure is logged and the message is dropped.
func (s *Service) sendSSE(
	ctx context.Context,
	parentID int64,
	parentType enum.RuleParent,
	sseType enum.SSEType,
	rule *types.Rule,
) {
	if parentType != enum.RuleParentRepo {
		s.sseStreamer.Publish(ctx, parentID, sseType, rule)
		return
	}

	repo, err := s.repoStore.Find(ctx, parentID)
	if err != nil {
		log.Ctx(ctx).Warn().Err(err).Msg("failed to find repo")
		return
	}
	s.sseStreamer.Publish(ctx, repo.ParentID, sseType, rule)
}
// backfillRuleRepositories populates the rule's Repositories field with the
// repositories referenced by the rule's RepoTarget (both include and exclude
// lists). Repositories missing from the cache are silently skipped.
func (s *Service) backfillRuleRepositories(
	ctx context.Context,
	rule *types.Rule,
) error {
	var repoTarget protection.RepoTarget
	if err := json.Unmarshal(rule.RepoTarget, &repoTarget); err != nil {
		return fmt.Errorf("failed to unmarshal rule.RepoTarget: %w", err)
	}

	// Collect the distinct repo IDs from both lists without mutating either.
	uniqueIDs := make(map[int64]struct{}, len(repoTarget.Include.IDs)+len(repoTarget.Exclude.IDs))
	for _, id := range repoTarget.Include.IDs {
		uniqueIDs[id] = struct{}{}
	}
	for _, id := range repoTarget.Exclude.IDs {
		uniqueIDs[id] = struct{}{}
	}

	rule.Repositories = make(map[int64]*types.RepositoryCore, len(uniqueIDs))
	for repoID := range uniqueIDs {
		repo, err := s.repoIDCache.Get(ctx, repoID)
		if err != nil {
			if errors.Is(err, store.ErrResourceNotFound) {
				// The rule may reference repos that were deleted; skip them.
				continue
			}
			return fmt.Errorf("failed to get repository from cache: %w", err)
		}
		rule.Repositories[repoID] = repo
	}

	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/protection/json.go | app/services/protection/json.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protection
import (
"bytes"
"encoding/json"
)
// ToJSON is utility function that converts types to a JSON message.
// It's used to sanitize protection definition data. HTML escaping is
// disabled and the trailing newline added by the encoder is trimmed.
func ToJSON(v any) (json.RawMessage, error) {
	var buf bytes.Buffer
	encoder := json.NewEncoder(&buf)
	encoder.SetEscapeHTML(false)
	if err := encoder.Encode(v); err != nil {
		return nil, err
	}
	return bytes.TrimSpace(buf.Bytes()), nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/protection/wire.go | app/services/protection/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protection
import (
"github.com/harness/gitness/app/store"
"github.com/google/wire"
)
// WireSet provides a wire set for this package.
var WireSet = wire.NewSet(
	ProvideManager,
)
// ProvideManager creates a protection Manager with the branch, tag, and
// push rule definition factories registered.
func ProvideManager(ruleStore store.RuleStore) (*Manager, error) {
	mgr := NewManager(ruleStore)
	if err := mgr.Register(TypeBranch, func() Definition { return &Branch{} }); err != nil {
		return nil, err
	}
	if err := mgr.Register(TypeTag, func() Definition { return &Tag{} }); err != nil {
		return nil, err
	}
	if err := mgr.Register(TypePush, func() Definition { return &Push{} }); err != nil {
		return nil, err
	}
	return mgr, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/protection/set_common.go | app/services/protection/set_common.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protection
import (
"context"
"fmt"
"github.com/harness/gitness/types"
"golang.org/x/exp/maps"
)
// forEachRule parses the protection definition of every rule in the list and
// invokes fn with the rule and its parsed Protection. It stops and returns on
// the first parse or callback error.
func forEachRule(
	manager *Manager,
	rules []types.RuleInfoInternal,
	fn func(r *types.RuleInfoInternal, p Protection) error,
) error {
	for idx := range rules {
		rule := rules[idx] // copy, so &rule below doesn't alias the slice element

		parsed, err := manager.FromJSON(rule.Type, rule.Definition, false)
		if err != nil {
			return fmt.Errorf("forEachRule: failed to parse protection definition ID=%d Type=%s: %w",
				rule.ID, rule.Type, err)
		}

		if err := fn(&rule, parsed); err != nil {
			return fmt.Errorf("forEachRule: failed to process rule ID=%d Type=%s: %w",
				rule.ID, rule.Type, err)
		}
	}

	return nil
}
// collectIDs runs extract against every rule's parsed protection definition
// and returns the union of all extracted IDs, deduplicated. The order of the
// returned IDs is unspecified (map iteration order).
func collectIDs(
	manager *Manager,
	rules []types.RuleInfoInternal,
	extract func(Protection) ([]int64, error),
) ([]int64, error) {
	unique := make(map[int64]bool)

	walkErr := forEachRule(manager, rules, func(_ *types.RuleInfoInternal, p Protection) error {
		extracted, err := extract(p)
		if err != nil {
			return fmt.Errorf("failed to extract IDs: %w", err)
		}

		for _, extractedID := range extracted {
			unique[extractedID] = true
		}

		return nil
	})
	if walkErr != nil {
		return nil, fmt.Errorf("failed to process each rule in ruleSet: %w", walkErr)
	}

	return maps.Keys(unique), nil
}
// refChangeVerifyFunc builds a callback (suitable for forEachRuleMatchRefs)
// that verifies the matched refs of a rule via its RefProtection and appends
// the resulting violations — annotated with the rule info — to violations.
func refChangeVerifyFunc(
	ctx context.Context,
	in RefChangeVerifyInput,
	violations *[]types.RuleViolations,
) func(r *types.RuleInfoInternal, p RefProtection, matched []string) error {
	return func(r *types.RuleInfoInternal, p RefProtection, matched []string) error {
		// Work on a per-rule copy of the input so only RefNames differs.
		scopedIn := in
		scopedIn.RefNames = matched

		ruleViolations, err := p.RefChangeVerify(ctx, scopedIn)
		if err != nil {
			return err
		}

		*violations = append(*violations, backFillRule(ruleViolations, r.RuleInfo)...)

		return nil
	}
}
// forEachRuleMatchRefs iterates over the rules, skipping any whose repo target
// doesn't cover the repo or whose pattern matches none of the refs, parses the
// remaining rules' protection definitions, and invokes fn with each rule, its
// RefProtection, and the refs it matched. It stops on the first error.
func forEachRuleMatchRefs(
	manager *Manager,
	rules []types.RuleInfoInternal,
	repoID int64,
	repoIdentifier string,
	defaultRef string,
	refNames []string,
	fn func(r *types.RuleInfoInternal, p RefProtection, matched []string) error,
) error {
	for idx := range rules {
		rule := rules[idx] // copy, so &rule below doesn't alias the slice element

		repoMatches, err := matchesRepo(rule.RepoTarget, repoID, repoIdentifier)
		if err != nil {
			return err
		}
		if !repoMatches {
			continue // rule doesn't target this repository
		}

		matchedRefs, err := matchesRefs(rule.Pattern, defaultRef, refNames...)
		if err != nil {
			return err
		}
		if len(matchedRefs) == 0 {
			continue // rule pattern matches none of the refs being changed
		}

		parsed, err := manager.FromJSON(rule.Type, rule.Definition, false)
		if err != nil {
			return fmt.Errorf(
				"forEachRuleMatchRefs: failed to parse protection definition ID=%d Type=%s: %w",
				rule.ID, rule.Type, err,
			)
		}

		refProtection, ok := parsed.(RefProtection)
		if !ok { // theoretically, should never happen
			return fmt.Errorf("unexpected type for protection: got %T, expected RefProtection", parsed)
		}

		if err := fn(&rule, refProtection, matchedRefs); err != nil {
			return fmt.Errorf(
				"forEachRuleMatchRefs: failed to process rule ID=%d Type=%s: %w",
				rule.ID, rule.Type, err,
			)
		}
	}

	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/protection/rule_push.go | app/services/protection/rule_push.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protection
import (
"context"
"fmt"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
// TypePush is the rule type identifier for push protection rules.
const TypePush enum.RuleType = "push"

// Push implements protection rules for the rule type TypePush.
type Push struct {
	// Bypass lists the principals allowed to bypass this rule's restrictions.
	Bypass DefBypass `json:"bypass"`
	// Push holds the push restrictions themselves (file size limit,
	// principal/committer match, secret scanning — see Violations).
	Push DefPush `json:"push"`
}

// Compile-time checks that Push satisfies the expected interfaces.
var (
	_ Definition     = (*Push)(nil)
	_ PushProtection = (*Push)(nil)
)
// PushVerify delegates push verification to the rule's push definition and
// marks all returned violations as bypassable (and bypassed) when the actor
// matches the rule's bypass configuration.
func (p *Push) PushVerify(
	ctx context.Context,
	in PushVerifyInput,
) (PushVerifyOutput, []types.RuleViolations, error) {
	out, violations, err := p.Push.PushVerify(ctx, in)
	if err != nil {
		// The delegated verification covers more than the file size limit
		// (committer match, secret scanning — see Violations), so the wrap
		// message stays generic instead of the old "file size limit" wording.
		return PushVerifyOutput{}, nil, fmt.Errorf("push verify error: %w", err)
	}

	bypassable := p.Bypass.matches(ctx, in.Actor, in.IsRepoOwner, in.ResolveUserGroupID)
	for i := range violations {
		violations[i].Bypassable = bypassable
		violations[i].Bypassed = bypassable
	}

	return out, violations, nil
}
// Violations converts the findings gathered during push verification
// (oversize files, committer mismatches, detected secrets) into a single
// types.RuleViolations entry, marked bypassable when the actor matches the
// rule's bypass configuration. It always returns exactly one entry, which may
// contain zero violations.
func (p *Push) Violations(
	ctx context.Context,
	in *PushViolationsInput,
) (PushViolationsOutput, error) {
	var violations types.RuleViolations

	// Report a single violation as soon as any scanned file exceeds the
	// configured limit; a limit of 0 means "no limit".
	if in.FindOversizeFilesOutput != nil {
		for _, fileInfos := range in.FindOversizeFilesOutput.FileInfos {
			if p.Push.FileSizeLimit > 0 && fileInfos.Size > p.Push.FileSizeLimit {
				violations.Addf(codePushFileSizeLimit,
					"Found file(s) exceeding the filesize limit of %d.",
					p.Push.FileSizeLimit,
				)
				break
			}
		}
	}

	// Committer mismatches count only when the check is enabled on both the
	// rule definition and the input (i.e. it actually ran).
	if p.Push.PrincipalCommitterMatch && in.PrincipalCommitterMatch &&
		in.CommitterMismatchCount > 0 {
		violations.Addf(codePushPrincipalCommitterMatch,
			"Committer verification failed for total of %d commit(s).",
			in.CommitterMismatchCount,
		)
	}

	// Same double-gating for secret scanning findings.
	if p.Push.SecretScanningEnabled && in.SecretScanningEnabled &&
		in.FoundSecretCount > 0 {
		violations.Addf(codeSecretScanningEnabled,
			"Found total of %d new secret(s)",
			in.FoundSecretCount,
		)
	}

	bypassable := p.Bypass.matches(ctx, in.Actor, in.IsRepoOwner, in.ResolveUserGroupID)
	violations.Bypassable = bypassable
	violations.Bypassed = bypassable

	return PushViolationsOutput{
		Violations: []types.RuleViolations{violations},
	}, nil
}
// UserIDs returns the IDs of the users allowed to bypass this rule.
func (p *Push) UserIDs() ([]int64, error) {
	return p.Bypass.UserIDs, nil
}
// UserGroupIDs returns the IDs of the user groups allowed to bypass this rule.
func (p *Push) UserGroupIDs() ([]int64, error) {
	return p.Bypass.UserGroupIDs, nil
}
// Sanitize validates and normalizes the push rule definition.
func (p *Push) Sanitize() error {
	err := p.Bypass.Sanitize()
	if err != nil {
		return fmt.Errorf("bypass: %w", err)
	}

	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/protection/verify_lifecycle.go | app/services/protection/verify_lifecycle.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protection
import (
"context"
"github.com/harness/gitness/types"
)
type (
	// RefChangeVerifier verifies a reference (branch/tag) change against a
	// protection rule and returns the resulting rule violations.
	RefChangeVerifier interface {
		RefChangeVerify(ctx context.Context, in RefChangeVerifyInput) ([]types.RuleViolations, error)
	}

	// RefChangeVerifyInput describes a reference change to be verified.
	RefChangeVerifyInput struct {
		// ResolveUserGroupID expands user group IDs into user IDs — used for
		// bypass-list evaluation.
		ResolveUserGroupID func(ctx context.Context, userGroupIDs []int64) ([]int64, error)
		Actor              *types.Principal
		AllowBypass        bool
		IsRepoOwner        bool
		Repo               *types.RepositoryCore
		RefAction          RefAction
		RefType            RefType
		// RefNames holds the names of the refs affected by the change.
		RefNames []string
	}

	// RefType is the kind of git reference the change applies to.
	RefType int

	// RefAction is the kind of change applied to the reference.
	RefAction int

	// DefLifecycle holds lifecycle restrictions common to branches and tags.
	DefLifecycle struct {
		CreateForbidden      bool `json:"create_forbidden,omitempty"`
		DeleteForbidden      bool `json:"delete_forbidden,omitempty"`
		UpdateForceForbidden bool `json:"update_force_forbidden,omitempty"`
	}

	// DefTagLifecycle is the lifecycle rule definition for tags.
	DefTagLifecycle struct {
		DefLifecycle
	}

	// DefBranchLifecycle is the lifecycle rule definition for branches; it
	// additionally allows forbidding non-force updates (direct pushes).
	DefBranchLifecycle struct {
		DefLifecycle
		UpdateForbidden bool `json:"update_forbidden,omitempty"`
	}
)
// Supported reference types.
const (
	RefTypeRaw RefType = iota
	RefTypeBranch
	RefTypeTag
)

// Supported reference change actions.
const (
	RefActionCreate RefAction = iota
	RefActionDelete
	RefActionUpdate
	RefActionUpdateForce
)

// ensures that the DefLifecycle type implements Sanitizer and RefChangeVerifier interfaces.
var (
	_ Sanitizer         = (*DefBranchLifecycle)(nil)
	_ RefChangeVerifier = (*DefBranchLifecycle)(nil)
)

// Violation codes reported by lifecycle rules.
const (
	codeLifecycleCreate      = "lifecycle.create"
	codeLifecycleDelete      = "lifecycle.delete"
	codeLifecycleUpdate      = "lifecycle.update"
	codeLifecycleUpdateForce = "lifecycle.update.force"
)
// RefChangeVerify checks a tag creation, deletion, or force-update against
// the lifecycle restrictions and returns the resulting violations, if any.
// Non-force updates are intentionally not handled here.
func (v *DefTagLifecycle) RefChangeVerify(
	_ context.Context,
	in RefChangeVerifyInput,
) ([]types.RuleViolations, error) {
	var result types.RuleViolations

	tagName := in.RefNames[0]

	//nolint:exhaustive
	switch in.RefAction {
	case RefActionCreate:
		if v.CreateForbidden {
			result.Addf(codeLifecycleCreate,
				"Creation of tag %q is not allowed.", tagName)
		}
	case RefActionDelete:
		if v.DeleteForbidden {
			result.Addf(codeLifecycleDelete,
				"Deletion of tag %q is not allowed.", tagName)
		}
	case RefActionUpdateForce:
		if v.UpdateForceForbidden {
			result.Addf(codeLifecycleUpdateForce,
				"Update of tag %q is not allowed.", tagName)
		}
	}

	if len(result.Violations) == 0 {
		return nil, nil
	}

	return []types.RuleViolations{result}, nil
}
// RefChangeVerify checks a branch creation, deletion, update, or force-update
// against the lifecycle restrictions and returns the resulting violations, if
// any. Forbidding updates implicitly forbids force updates as well.
func (v *DefBranchLifecycle) RefChangeVerify(
	_ context.Context,
	in RefChangeVerifyInput,
) ([]types.RuleViolations, error) {
	var result types.RuleViolations

	branchName := in.RefNames[0]

	switch in.RefAction {
	case RefActionCreate:
		if v.CreateForbidden {
			result.Addf(codeLifecycleCreate,
				"Creation of branch %q is not allowed.", branchName)
		}
	case RefActionDelete:
		if v.DeleteForbidden {
			result.Addf(codeLifecycleDelete,
				"Deletion of branch %q is not allowed.", branchName)
		}
	case RefActionUpdate:
		if v.UpdateForbidden {
			result.Addf(codeLifecycleUpdate,
				"Push to branch %q is not allowed. Please use pull requests.", branchName)
		}
	case RefActionUpdateForce:
		// A general update ban also covers force updates.
		if v.UpdateForceForbidden || v.UpdateForbidden {
			result.Addf(codeLifecycleUpdateForce,
				"Force push to branch %q is not allowed. Please use pull requests.", branchName)
		}
	}

	if len(result.Violations) == 0 {
		return nil, nil
	}

	return []types.RuleViolations{result}, nil
}
// Sanitize implements Sanitizer; tag lifecycle rules need no normalization.
func (*DefTagLifecycle) Sanitize() error {
	return nil
}

// Sanitize implements Sanitizer; branch lifecycle rules need no normalization.
func (*DefBranchLifecycle) Sanitize() error {
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/protection/repo_target_test.go | app/services/protection/repo_target_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protection
import "testing"
func TestRepoTarget_Matches(t *testing.T) {
tests := []struct {
name string
target RepoTarget
repoID int64
repoUID string
wantMatch bool
}{
{
name: "exclude multiple ids, id in list",
target: RepoTarget{
Exclude: RepoTargetFilter{IDs: []int64{1, 2, 3, 4, 5}},
},
repoID: 3,
repoUID: "whatever",
wantMatch: false,
},
{
name: "exclude multiple ids, id not in list",
target: RepoTarget{
Exclude: RepoTargetFilter{IDs: []int64{1, 2, 3, 4, 5}},
},
repoID: 9,
repoUID: "whatever",
wantMatch: true,
},
{
name: "include multiple ids, id in list",
target: RepoTarget{
Include: RepoTargetFilter{IDs: []int64{7, 8, 9, 10, 11}},
},
repoID: 10,
repoUID: "some-repo",
wantMatch: true,
},
{
name: "include multiple ids, id not in list",
target: RepoTarget{
Include: RepoTargetFilter{IDs: []int64{7, 8, 9, 10, 11}},
},
repoID: 6,
repoUID: "some-repo",
wantMatch: false,
},
{
name: "exclude ids and patterns, id match wins",
target: RepoTarget{
Exclude: RepoTargetFilter{
IDs: []int64{13, 14, 15},
Patterns: []string{"test-*"},
},
},
repoID: 14,
repoUID: "test-nothing",
wantMatch: false,
},
{
name: "exclude ids and patterns, pattern match wins",
target: RepoTarget{
Exclude: RepoTargetFilter{
IDs: []int64{13, 14, 15},
Patterns: []string{"test-*"},
},
},
repoID: 20,
repoUID: "test-nothing",
wantMatch: false,
},
{
name: "include multiple ids and patterns, id match wins",
target: RepoTarget{
Include: RepoTargetFilter{
IDs: []int64{21, 22, 23},
Patterns: []string{"cool-*"},
},
},
repoID: 21,
repoUID: "boring-repo",
wantMatch: true,
},
{
name: "include multiple ids and patterns, pattern match wins",
target: RepoTarget{
Include: RepoTargetFilter{
IDs: []int64{21, 22, 23},
Patterns: []string{"cool-*"},
},
},
repoID: 30,
repoUID: "cool-repo",
wantMatch: true,
},
{
name: "include multiple ids and patterns, match neither",
target: RepoTarget{
Include: RepoTargetFilter{
IDs: []int64{21, 22, 23},
Patterns: []string{"cool-*"},
},
},
repoID: 99,
repoUID: "boring-repo",
wantMatch: false,
},
{
name: "exclude and include multiple ids, exclude wins",
target: RepoTarget{
Exclude: RepoTargetFilter{IDs: []int64{1, 2, 3}},
Include: RepoTargetFilter{IDs: []int64{1, 2, 3, 4}},
},
repoID: 2,
repoUID: "match-any",
wantMatch: false,
},
{
name: "exclude and include multiple ids, include wins",
target: RepoTarget{
Exclude: RepoTargetFilter{IDs: []int64{5, 6}},
Include: RepoTargetFilter{IDs: []int64{7, 8, 9}},
},
repoID: 8,
repoUID: "match-any",
wantMatch: true,
},
{
name: "exclude and include multiple patterns, exclude wins",
target: RepoTarget{
Exclude: RepoTargetFilter{Patterns: []string{"foo-*", "bar-*"}},
Include: RepoTargetFilter{Patterns: []string{"foo-*", "baz-*"}},
},
repoID: 100,
repoUID: "bar-test",
wantMatch: false,
},
{
name: "exclude and include multiple patterns, include wins",
target: RepoTarget{
Exclude: RepoTargetFilter{Patterns: []string{"foo-*", "bar-*"}},
Include: RepoTargetFilter{Patterns: []string{"baz-*", "zoo-*"}},
},
repoID: 100,
repoUID: "zoo-special",
wantMatch: true,
},
{
name: "exclude and include patterns overlap, exclude wins",
target: RepoTarget{
Exclude: RepoTargetFilter{Patterns: []string{"common-*"}},
Include: RepoTargetFilter{Patterns: []string{"common-*", "rare-*"}},
},
repoID: 100,
repoUID: "common-42",
wantMatch: false,
},
{
name: "exclude and include patterns, include wins (not excluded)",
target: RepoTarget{
Exclude: RepoTargetFilter{Patterns: []string{"foo-*"}},
Include: RepoTargetFilter{Patterns: []string{"bar-*", "baz-*"}},
},
repoID: 100,
repoUID: "baz-42",
wantMatch: true,
},
{
name: "exclude and include patterns, neither matches",
target: RepoTarget{
Exclude: RepoTargetFilter{Patterns: []string{"foo-*"}},
Include: RepoTargetFilter{Patterns: []string{"bar-*", "baz-*"}},
},
repoID: 100,
repoUID: "other-42",
wantMatch: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := tt.target.Matches(tt.repoID, tt.repoUID)
if got != tt.wantMatch {
t.Errorf("Matches(%d, %q) = %v; want %v", tt.repoID, tt.repoUID, got, tt.wantMatch)
}
})
}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/protection/set_branch_test.go | app/services/protection/set_branch_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protection
import (
"context"
"reflect"
"testing"
"github.com/harness/gitness/app/services/codeowners"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
var emptyRepoTarget = []byte(`{"include": {}, "exclude": {}}`)
// nolint:gocognit // it's a unit test
func TestRuleSet_MergeVerify(t *testing.T) {
tests := []struct {
name string
rules []types.RuleInfoInternal
input MergeVerifyInput
expOut MergeVerifyOutput
expViol []types.RuleViolations
}{
{
name: "empty-with-merge-method",
rules: []types.RuleInfoInternal{},
input: MergeVerifyInput{
Actor: &types.Principal{ID: 1},
Method: enum.MergeMethodRebase,
TargetRepo: &types.RepositoryCore{ID: 1, DefaultBranch: "main"},
PullReq: &types.PullReq{ID: 1, SourceBranch: "pr", TargetBranch: "main"},
},
expOut: MergeVerifyOutput{
DeleteSourceBranch: false,
AllowedMethods: enum.MergeMethods,
},
expViol: nil,
},
{
name: "empty-no-merge-method-specified",
rules: []types.RuleInfoInternal{},
input: MergeVerifyInput{
Actor: &types.Principal{ID: 1},
TargetRepo: &types.RepositoryCore{ID: 1, DefaultBranch: "main"},
PullReq: &types.PullReq{ID: 1, SourceBranch: "pr", TargetBranch: "main"},
},
expOut: MergeVerifyOutput{
DeleteSourceBranch: false,
AllowedMethods: enum.MergeMethods,
},
expViol: nil,
},
{
name: "two-rules-delete-source-branch",
rules: []types.RuleInfoInternal{
{
RuleInfo: types.RuleInfo{
SpacePath: "",
RepoPath: "space/repo",
ID: 1,
Identifier: "rule1",
Type: TypeBranch,
State: enum.RuleStateActive,
},
Pattern: []byte(`{"default":true}`),
Definition: []byte(`{"pullreq":{"merge":{"strategies_allowed":["merge"],"delete_branch":true}}}`),
RepoTarget: emptyRepoTarget,
},
{
RuleInfo: types.RuleInfo{
SpacePath: "space",
RepoPath: "",
ID: 2,
Identifier: "rule2",
Type: TypeBranch,
State: enum.RuleStateActive,
},
Pattern: []byte(`{"default":true}`),
Definition: []byte(`{"pullreq":{"approvals":{"require_minimum_count":1}}}`),
RepoTarget: emptyRepoTarget,
},
},
input: MergeVerifyInput{
Actor: &types.Principal{ID: 1},
TargetRepo: &types.RepositoryCore{ID: 1, DefaultBranch: "main"},
PullReq: &types.PullReq{ID: 1, SourceBranch: "pr", TargetBranch: "main"},
Method: enum.MergeMethodRebase,
},
expOut: MergeVerifyOutput{
DeleteSourceBranch: true,
MinimumRequiredApprovalsCount: 1,
AllowedMethods: []enum.MergeMethod{enum.MergeMethodMerge},
},
expViol: []types.RuleViolations{
{
Rule: types.RuleInfo{
SpacePath: "",
RepoPath: "space/repo",
ID: 1,
Identifier: "rule1",
Type: TypeBranch,
State: enum.RuleStateActive,
},
Bypassed: false,
Violations: []types.Violation{
{Code: codePullReqMergeStrategiesAllowed},
},
},
{
Rule: types.RuleInfo{
SpacePath: "space",
RepoPath: "",
ID: 2,
Identifier: "rule2",
Type: TypeBranch,
State: enum.RuleStateActive,
},
Bypassed: false,
Violations: []types.Violation{
{Code: codePullReqApprovalReqMinCount},
},
},
},
},
{
name: "two-rules-merge-strategies",
rules: []types.RuleInfoInternal{
{
RuleInfo: types.RuleInfo{
SpacePath: "",
RepoPath: "space/repo",
ID: 1,
Identifier: "rule1",
Type: TypeBranch,
State: enum.RuleStateActive,
},
Pattern: []byte(`{"default":true}`),
Definition: []byte(`{"pullreq":{"merge":{"strategies_allowed":["merge","rebase"]}}}`),
RepoTarget: emptyRepoTarget,
},
{
RuleInfo: types.RuleInfo{
SpacePath: "space",
RepoPath: "",
ID: 2,
Identifier: "rule2",
Type: TypeBranch,
State: enum.RuleStateActive,
},
Pattern: []byte(`{"default":true}`),
Definition: []byte(`{"pullreq":{"merge":{"strategies_allowed":["rebase"]}}}`),
RepoTarget: emptyRepoTarget,
},
},
input: MergeVerifyInput{
Actor: &types.Principal{ID: 1},
TargetRepo: &types.RepositoryCore{ID: 1, DefaultBranch: "main"},
PullReq: &types.PullReq{ID: 1, SourceBranch: "pr", TargetBranch: "main"},
},
expOut: MergeVerifyOutput{
DeleteSourceBranch: false,
AllowedMethods: []enum.MergeMethod{enum.MergeMethodRebase},
},
expViol: []types.RuleViolations{},
},
{
name: "combine-definition-values",
rules: []types.RuleInfoInternal{
{
RuleInfo: types.RuleInfo{
SpacePath: "",
RepoPath: "space/repo",
ID: 1,
Identifier: "rule1",
Type: TypeBranch,
State: enum.RuleStateActive,
},
Pattern: []byte(`{"default":true}`),
Definition: []byte(`{
"pullreq": {
"approvals": {
"require_code_owners": false,
"require_minimum_count": 2,
"require_no_change_request": false,
"require_latest_commit": true
},
"comments":{
"require_resolve_all": false
},
"merge":{
"delete_branch": true,
"strategies_allowed": ["merge","rebase"]
}
}
}`),
RepoTarget: emptyRepoTarget,
},
{
RuleInfo: types.RuleInfo{
SpacePath: "space",
RepoPath: "",
ID: 2,
Identifier: "rule2",
Type: TypeBranch,
State: enum.RuleStateActive,
},
Pattern: []byte(`{"default":true}`),
Definition: []byte(`{
"pullreq": {
"approvals": {
"require_code_owners": true,
"require_minimum_count": 3,
"require_no_change_request": true,
"require_latest_commit": true
},
"comments":{
"require_resolve_all": true
},
"merge":{
"delete_branch": true,
"strategies_allowed": ["rebase","squash"]
}
}
}`),
RepoTarget: emptyRepoTarget,
},
{
RuleInfo: types.RuleInfo{
SpacePath: "",
RepoPath: "space/repo",
ID: 3,
Identifier: "rule3",
Type: TypeBranch,
State: enum.RuleStateActive,
},
Pattern: []byte(`{"default":true}`),
Definition: []byte(`{
"pullreq": {
"approvals": {
"require_code_owners": true,
"require_minimum_count": 2,
"require_no_change_request": false,
"require_latest_commit": false
},
"comments":{
"require_resolve_all": false
},
"merge":{
"delete_branch": false,
"strategies_allowed": ["rebase"]
}
}
}`),
RepoTarget: emptyRepoTarget,
},
},
input: MergeVerifyInput{
Actor: &types.Principal{ID: 1},
TargetRepo: &types.RepositoryCore{ID: 1, DefaultBranch: "main"},
PullReq: &types.PullReq{ID: 1, SourceBranch: "pr", TargetBranch: "main"},
CodeOwners: &codeowners.Evaluation{},
Reviewers: []*types.PullReqReviewer{},
},
expOut: MergeVerifyOutput{
AllowedMethods: []enum.MergeMethod{enum.MergeMethodRebase},
DeleteSourceBranch: true,
MinimumRequiredApprovalsCount: 2,
MinimumRequiredApprovalsCountLatest: 3,
RequiresCodeOwnersApproval: true,
RequiresCodeOwnersApprovalLatest: true,
RequiresCommentResolution: true,
RequiresNoChangeRequests: true,
},
expViol: []types.RuleViolations{
{
Rule: types.RuleInfo{
SpacePath: "",
RepoPath: "space/repo",
ID: 1,
Identifier: "rule1",
Type: TypeBranch,
State: enum.RuleStateActive,
},
Bypassed: false,
Violations: []types.Violation{
{Code: codePullReqApprovalReqMinCountLatest},
},
},
{
Rule: types.RuleInfo{
SpacePath: "space",
RepoPath: "",
ID: 2,
Identifier: "rule2",
Type: TypeBranch,
State: enum.RuleStateActive,
},
Bypassed: false,
Violations: []types.Violation{
{Code: codePullReqApprovalReqMinCountLatest},
},
},
{
Rule: types.RuleInfo{
SpacePath: "",
RepoPath: "space/repo",
ID: 3,
Identifier: "rule3",
Type: TypeBranch,
State: enum.RuleStateActive,
},
Bypassed: false,
Violations: []types.Violation{
{Code: codePullReqApprovalReqMinCount},
},
},
},
},
}
ctx := context.Background()
m := NewManager(nil)
_ = m.Register(TypeBranch, func() Definition {
return &Branch{}
})
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
set := branchRuleSet{
rules: test.rules,
manager: m,
}
out, violations, err := set.MergeVerify(ctx, test.input)
if err != nil {
t.Errorf("got error: %s", err.Error())
}
if want, got := test.expOut, out; !reflect.DeepEqual(want, got) {
t.Errorf("output: want=%+v got=%+v", want, got)
}
if want, got := len(test.expViol), len(violations); want != got {
t.Errorf("violations count: want=%d got=%d", want, got)
return
}
for i := range test.expViol {
if want, got := test.expViol[i].Rule, violations[i].Rule; want != got {
t.Errorf("violation %d rule: want=%+v got=%+v", i, want, got)
}
if want, got := test.expViol[i].Bypassed, violations[i].Bypassed; want != got {
t.Errorf("violation %d bypassed: want=%t got=%t", i, want, got)
}
if want, got := len(test.expViol[i].Violations), len(violations[i].Violations); want != got {
t.Errorf("violation %d violations count: want=%d got=%d", i, want, got)
continue
}
for j := range test.expViol[i].Violations {
if want, got := test.expViol[i].Violations[j].Code, violations[i].Violations[j].Code; want != got {
t.Errorf("violation %d violation %d code: want=%s got=%s", i, j, want, got)
}
}
}
})
}
}
func TestRuleSet_RequiredChecks(t *testing.T) {
tests := []struct {
name string
rules []types.RuleInfoInternal
input RequiredChecksInput
expOut RequiredChecksOutput
}{
{
name: "empty",
rules: []types.RuleInfoInternal{},
input: RequiredChecksInput{
Actor: &types.Principal{ID: 1},
Repo: &types.RepositoryCore{ID: 1, DefaultBranch: "main"},
PullReq: &types.PullReq{ID: 1, SourceBranch: "pr", TargetBranch: "main"},
},
expOut: RequiredChecksOutput{
RequiredIdentifiers: map[string]struct{}{},
BypassableIdentifiers: map[string]struct{}{},
},
},
{
name: "two-rules",
rules: []types.RuleInfoInternal{
{
RuleInfo: types.RuleInfo{
SpacePath: "",
RepoPath: "space/repo",
ID: 1,
Identifier: "rule1",
Type: TypeBranch,
State: enum.RuleStateActive,
},
Pattern: []byte(`{"default":true}`),
Definition: []byte(`{
"bypass":{"repo_owners":true},
"pullreq":{"status_checks":{"require_identifiers":["a", "b"]}}
}`),
RepoTarget: emptyRepoTarget,
},
{
RuleInfo: types.RuleInfo{
SpacePath: "space",
RepoPath: "",
ID: 2,
Identifier: "rule2",
Type: TypeBranch,
State: enum.RuleStateActive,
},
Pattern: []byte(`{"default":true}`),
Definition: []byte(`{"pullreq":{"status_checks":{"require_identifiers":["b","c"]}}}`),
RepoTarget: emptyRepoTarget,
},
},
input: RequiredChecksInput{
Actor: &types.Principal{ID: 1},
IsRepoOwner: true,
Repo: &types.RepositoryCore{ID: 1, DefaultBranch: "main"},
PullReq: &types.PullReq{ID: 1, SourceBranch: "pr", TargetBranch: "main"},
},
expOut: RequiredChecksOutput{
RequiredIdentifiers: map[string]struct{}{"b": {}, "c": {}},
BypassableIdentifiers: map[string]struct{}{"a": {}},
},
},
}
ctx := context.Background()
m := NewManager(nil)
_ = m.Register(TypeBranch, func() Definition {
return &Branch{}
})
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
set := branchRuleSet{
rules: test.rules,
manager: m,
}
out, err := set.RequiredChecks(ctx, test.input)
if err != nil {
t.Errorf("got error: %s", err.Error())
}
if want, got := test.expOut, out; !reflect.DeepEqual(want, got) {
t.Errorf("output: want=%+v got=%+v", want, got)
}
})
}
}
// TestIntersectSorted exercises intersectSorted with sorted int slices,
// covering empty inputs, duplicated elements, and partial overlaps. Inputs
// must already be sorted — that precondition is assumed, not tested here.
func TestIntersectSorted(t *testing.T) {
	tests := []struct {
		name string
		a, b []int
		exp  []int
	}{
		{
			name: "empty",
			a:    []int{},
			b:    []int{},
			exp:  []int{},
		},
		{
			name: "remove last",
			a:    []int{3, 4},
			b:    []int{2, 3},
			exp:  []int{3},
		},
		{
			name: "remove first",
			a:    []int{3, 4, 6},
			b:    []int{4, 5, 6},
			exp:  []int{4, 6},
		},
		{
			name: "remove all",
			a:    []int{3, 4},
			b:    []int{},
			exp:  []int{},
		},
		{
			name: "leave all",
			a:    []int{3, 4},
			b:    []int{1, 2, 3, 4, 5, 6},
			exp:  []int{3, 4},
		},
		{
			// duplicates in the first argument are preserved
			name: "remove first and last",
			a:    []int{3, 4, 4, 4, 5},
			b:    []int{4, 6},
			exp:  []int{4, 4, 4},
		},
		{
			// duplicates in the second argument don't duplicate output
			name: "remove duplicated",
			a:    []int{3, 4},
			b:    []int{3, 3, 3, 5, 5},
			exp:  []int{3},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			if want, got := test.exp, intersectSorted(test.a, test.b); !reflect.DeepEqual(want, got) {
				t.Errorf("want=%v got=%v", want, got)
			}
		})
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/protection/rule_branch_test.go | app/services/protection/rule_branch_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protection
import (
"context"
"reflect"
"testing"
"github.com/harness/gitness/app/services/codeowners"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
// nolint:gocognit // it's a unit test
func TestBranch_MergeVerify(t *testing.T) {
user := &types.Principal{ID: 42}
admin := &types.Principal{ID: 66, Admin: true}
tests := []struct {
name string
branch Branch
in MergeVerifyInput
expOut MergeVerifyOutput
expVs []types.RuleViolations
}{
{
name: "empty",
branch: Branch{},
in: MergeVerifyInput{
Actor: user,
},
expOut: MergeVerifyOutput{
DeleteSourceBranch: false,
AllowedMethods: enum.MergeMethods,
},
expVs: []types.RuleViolations{},
},
{
name: "admin-no-owner",
branch: Branch{
Bypass: DefBypass{},
PullReq: DefPullReq{
StatusChecks: DefStatusChecks{RequireIdentifiers: []string{"abc"}},
Comments: DefComments{RequireResolveAll: true},
Merge: DefMerge{DeleteBranch: true},
},
},
in: MergeVerifyInput{
Actor: admin,
IsRepoOwner: false,
AllowBypass: true,
PullReq: &types.PullReq{UnresolvedCount: 1},
},
expOut: MergeVerifyOutput{
DeleteSourceBranch: true,
AllowedMethods: enum.MergeMethods,
RequiresCommentResolution: true,
},
expVs: []types.RuleViolations{
{
Bypassable: false,
Bypassed: false,
Violations: []types.Violation{
{Code: codePullReqCommentsReqResolveAll},
{Code: codePullReqStatusChecksReqIdentifiers},
},
},
},
},
{
name: "user-bypass",
branch: Branch{
Bypass: DefBypass{UserIDs: []int64{user.ID}},
PullReq: DefPullReq{
StatusChecks: DefStatusChecks{RequireIdentifiers: []string{"abc"}},
Comments: DefComments{RequireResolveAll: true},
Merge: DefMerge{DeleteBranch: true},
},
},
in: MergeVerifyInput{
Actor: user,
AllowBypass: true,
PullReq: &types.PullReq{UnresolvedCount: 1},
},
expOut: MergeVerifyOutput{
DeleteSourceBranch: true,
AllowedMethods: enum.MergeMethods,
RequiresCommentResolution: true,
},
expVs: []types.RuleViolations{
{
Bypassable: true,
Bypassed: true,
Violations: []types.Violation{
{Code: codePullReqCommentsReqResolveAll},
{Code: codePullReqStatusChecksReqIdentifiers},
},
},
},
},
{
name: "user-no-bypass",
branch: Branch{
PullReq: DefPullReq{
StatusChecks: DefStatusChecks{RequireIdentifiers: []string{"abc"}},
Comments: DefComments{RequireResolveAll: true},
Merge: DefMerge{DeleteBranch: true},
},
},
in: MergeVerifyInput{
Actor: user,
AllowBypass: true,
PullReq: &types.PullReq{UnresolvedCount: 1},
},
expOut: MergeVerifyOutput{
DeleteSourceBranch: true,
AllowedMethods: enum.MergeMethods,
RequiresCommentResolution: true,
},
expVs: []types.RuleViolations{
{
Bypassable: false,
Bypassed: false,
Violations: []types.Violation{
{Code: codePullReqCommentsReqResolveAll},
{Code: codePullReqStatusChecksReqIdentifiers},
},
},
},
},
{
name: "merge-methods",
branch: Branch{
Bypass: DefBypass{},
PullReq: DefPullReq{
StatusChecks: DefStatusChecks{},
Comments: DefComments{},
Merge: DefMerge{
StrategiesAllowed: []enum.MergeMethod{enum.MergeMethodRebase, enum.MergeMethodSquash},
DeleteBranch: false,
},
},
},
in: MergeVerifyInput{
Actor: user,
},
expOut: MergeVerifyOutput{
DeleteSourceBranch: false,
AllowedMethods: []enum.MergeMethod{enum.MergeMethodRebase, enum.MergeMethodSquash},
},
expVs: []types.RuleViolations{},
},
{
name: "verify-output",
branch: Branch{
Bypass: DefBypass{},
PullReq: DefPullReq{
StatusChecks: DefStatusChecks{},
Comments: DefComments{
RequireResolveAll: true,
},
Approvals: DefApprovals{
RequireCodeOwners: true,
RequireMinimumCount: 2,
RequireNoChangeRequest: true,
},
Merge: DefMerge{
DeleteBranch: true,
StrategiesAllowed: []enum.MergeMethod{enum.MergeMethodSquash},
},
},
},
in: MergeVerifyInput{
Actor: user,
ResolveUserGroupIDs: mockUserGroupResolver,
CodeOwners: &codeowners.Evaluation{},
PullReq: &types.PullReq{},
Reviewers: []*types.PullReqReviewer{},
},
expOut: MergeVerifyOutput{
DeleteSourceBranch: true,
AllowedMethods: []enum.MergeMethod{enum.MergeMethodSquash},
RequiresCodeOwnersApproval: true,
RequiresNoChangeRequests: true,
RequiresCommentResolution: true,
MinimumRequiredApprovalsCount: 2,
},
expVs: []types.RuleViolations{
{
Violations: []types.Violation{
{Code: codePullReqApprovalReqMinCount},
},
},
},
},
{
name: "verify-output-latest",
branch: Branch{
Bypass: DefBypass{},
PullReq: DefPullReq{
StatusChecks: DefStatusChecks{},
Comments: DefComments{},
Approvals: DefApprovals{
RequireCodeOwners: true,
RequireMinimumCount: 2,
RequireNoChangeRequest: true,
RequireLatestCommit: true,
},
Merge: DefMerge{},
},
},
in: MergeVerifyInput{
Actor: user,
CodeOwners: &codeowners.Evaluation{},
PullReq: &types.PullReq{},
Reviewers: []*types.PullReqReviewer{},
},
expOut: MergeVerifyOutput{
AllowedMethods: []enum.MergeMethod{
enum.MergeMethodFastForward,
enum.MergeMethodMerge,
enum.MergeMethodRebase,
enum.MergeMethodSquash,
},
RequiresCodeOwnersApprovalLatest: true,
RequiresNoChangeRequests: true,
MinimumRequiredApprovalsCountLatest: 2,
},
expVs: []types.RuleViolations{
{
Violations: []types.Violation{
{Code: codePullReqApprovalReqMinCountLatest},
},
},
},
},
}
ctx := context.Background()
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
if err := test.branch.Sanitize(); err != nil {
t.Errorf("invalid: %s", err.Error())
return
}
out, results, err := test.branch.MergeVerify(ctx, test.in)
if err != nil {
t.Errorf("error: %s", err.Error())
return
}
if want, got := test.expOut, out; !reflect.DeepEqual(want, got) {
t.Errorf("output: want=%+v got=%+v", want, got)
}
if want, got := len(test.expVs), len(results); want != got {
t.Errorf("number of violations mismatch: want=%d got=%d", want, got)
return
}
for i := range results {
if want, got := test.expVs[i].Bypassable, results[i].Bypassable; want != got {
t.Errorf("rule result %d, bypassable mismatch: want=%t got=%t", i, want, got)
return
}
if want, got := test.expVs[i].Bypassed, results[i].Bypassed; want != got {
t.Errorf("rule result %d, bypassed mismatch: want=%t got=%t", i, want, got)
return
}
if want, got := len(test.expVs[i].Violations), len(results[i].Violations); want != got {
t.Errorf("rule result %d, violations count mismatch: want=%d got=%d", i, want, got)
return
}
for j := range results[i].Violations {
if want, got := test.expVs[i].Violations[j].Code, results[i].Violations[j].Code; want != got {
t.Errorf("rule result %d, violation %d, code mismatch: want=%s got=%s", i, j, want, got)
}
}
}
})
}
}
func TestBranch_RequiredChecks(t *testing.T) {
user := &types.Principal{ID: 42}
admin := &types.Principal{ID: 66, Admin: true}
tests := []struct {
name string
branch Branch
in RequiredChecksInput
expOut RequiredChecksOutput
}{
{
name: "empty",
branch: Branch{},
in: RequiredChecksInput{Actor: user},
expOut: RequiredChecksOutput{
RequiredIdentifiers: nil,
BypassableIdentifiers: nil,
},
},
{
name: "admin-no-owner",
branch: Branch{
Bypass: DefBypass{},
PullReq: DefPullReq{
StatusChecks: DefStatusChecks{RequireIdentifiers: []string{"abc"}},
},
},
in: RequiredChecksInput{
Actor: admin,
IsRepoOwner: false,
},
expOut: RequiredChecksOutput{
RequiredIdentifiers: map[string]struct{}{"abc": {}},
BypassableIdentifiers: nil,
},
},
{
name: "user-bypass",
branch: Branch{
Bypass: DefBypass{UserIDs: []int64{user.ID}},
PullReq: DefPullReq{
StatusChecks: DefStatusChecks{RequireIdentifiers: []string{"abc"}},
},
},
in: RequiredChecksInput{
Actor: user,
},
expOut: RequiredChecksOutput{
RequiredIdentifiers: nil,
BypassableIdentifiers: map[string]struct{}{"abc": {}},
},
},
{
name: "user-no-bypass",
branch: Branch{
PullReq: DefPullReq{
StatusChecks: DefStatusChecks{RequireIdentifiers: []string{"abc"}},
},
},
in: RequiredChecksInput{
Actor: user,
},
expOut: RequiredChecksOutput{
RequiredIdentifiers: map[string]struct{}{"abc": {}},
BypassableIdentifiers: nil,
},
},
}
ctx := context.Background()
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
if err := test.branch.Sanitize(); err != nil {
t.Errorf("invalid: %s", err.Error())
return
}
out, err := test.branch.RequiredChecks(ctx, test.in)
if err != nil {
t.Errorf("error: %s", err.Error())
return
}
if want, got := test.expOut, out; !reflect.DeepEqual(want, got) {
t.Errorf("output: want=%+v got=%+v", want, got)
}
})
}
}
// nolint:gocognit // it's a unit test
func TestBranch_RefChangeVerify(t *testing.T) {
user := &types.Principal{ID: 42}
admin := &types.Principal{ID: 66, Admin: true}
tests := []struct {
name string
branch Branch
in RefChangeVerifyInput
expVs []types.RuleViolations
}{
{
name: "empty",
branch: Branch{
Bypass: DefBypass{},
Lifecycle: DefBranchLifecycle{},
},
in: RefChangeVerifyInput{
Actor: user,
},
expVs: []types.RuleViolations{},
},
{
name: "admin-no-owner",
branch: Branch{
Bypass: DefBypass{},
Lifecycle: DefBranchLifecycle{
DefLifecycle: DefLifecycle{
DeleteForbidden: true},
},
},
in: RefChangeVerifyInput{
Actor: admin,
IsRepoOwner: false,
AllowBypass: true,
RefAction: RefActionDelete,
RefType: RefTypeBranch,
RefNames: []string{"abc"},
},
expVs: []types.RuleViolations{
{
Bypassable: false,
Bypassed: false,
Violations: []types.Violation{
{Code: codeLifecycleDelete},
},
},
},
},
{
name: "owner-bypass",
branch: Branch{
Bypass: DefBypass{RepoOwners: true},
Lifecycle: DefBranchLifecycle{
DefLifecycle: DefLifecycle{
DeleteForbidden: true},
},
},
in: RefChangeVerifyInput{
Actor: user,
AllowBypass: true,
IsRepoOwner: true,
RefAction: RefActionDelete,
RefType: RefTypeBranch,
RefNames: []string{"abc"},
},
expVs: []types.RuleViolations{
{
Bypassable: true,
Bypassed: true,
Violations: []types.Violation{
{Code: codeLifecycleDelete},
},
},
},
},
{
name: "user-no-bypass",
branch: Branch{
Bypass: DefBypass{RepoOwners: true},
Lifecycle: DefBranchLifecycle{
DefLifecycle: DefLifecycle{
DeleteForbidden: true},
},
},
in: RefChangeVerifyInput{
Actor: user,
AllowBypass: true,
IsRepoOwner: false,
RefAction: RefActionDelete,
RefType: RefTypeBranch,
RefNames: []string{"abc"},
},
expVs: []types.RuleViolations{
{
Bypassable: false,
Bypassed: false,
Violations: []types.Violation{
{Code: codeLifecycleDelete},
},
},
},
},
{
name: "usergroup-bypass",
branch: Branch{
Bypass: DefBypass{RepoOwners: true},
Lifecycle: DefBranchLifecycle{
DefLifecycle: DefLifecycle{
DeleteForbidden: true},
},
},
in: RefChangeVerifyInput{
Actor: &types.Principal{ID: 43},
ResolveUserGroupID: mockUserGroupResolver,
AllowBypass: true,
IsRepoOwner: false,
RefAction: RefActionDelete,
RefType: RefTypeBranch,
RefNames: []string{"abc"},
},
expVs: []types.RuleViolations{
{
Bypassable: true,
Bypassed: true,
Violations: []types.Violation{
{Code: codeLifecycleDelete},
},
},
},
},
}
ctx := context.Background()
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
if err := test.branch.Sanitize(); err != nil {
t.Errorf("invalid: %s", err.Error())
return
}
results, err := test.branch.RefChangeVerify(ctx, test.in)
if err != nil {
t.Errorf("error: %s", err.Error())
return
}
if want, got := len(test.expVs), len(results); want != got {
t.Errorf("number of violations mismatch: want=%d got=%d", want, got)
return
}
for i := range results {
if want, got := test.expVs[i].Bypassable, results[i].Bypassable; want != got {
t.Errorf("rule result %d, bypassable mismatch: want=%t got=%t", i, want, got)
return
}
if want, got := test.expVs[i].Bypassed, results[i].Bypassed; want != got {
t.Errorf("rule result %d, bypassed mismatch: want=%t got=%t", i, want, got)
return
}
if want, got := len(test.expVs[i].Violations), len(results[i].Violations); want != got {
t.Errorf("rule result %d, violations count mismatch: want=%d got=%d", i, want, got)
return
}
for j := range results[i].Violations {
if want, got := test.expVs[i].Violations[j].Code, results[i].Violations[j].Code; want != got {
t.Errorf("rule result %d, violation %d, code mismatch: want=%s got=%s", i, j, want, got)
}
}
}
})
}
}
func mockUserGroupResolver(_ context.Context, _ []int64) ([]int64, error) {
return []int64{43}, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/protection/verify_lifecycle_test.go | app/services/protection/verify_lifecycle_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protection
import (
"context"
"reflect"
"testing"
"github.com/harness/gitness/types"
)
// nolint:gocognit // it's a unit test
func TestDefLifecycle_RefChangeVerify(t *testing.T) {
const refName = "a"
tests := []struct {
name string
def DefBranchLifecycle
action RefAction
expCodes []string
expParams [][]any
}{
{
name: "empty",
},
{
name: "lifecycle.create-fail",
def: DefBranchLifecycle{
DefLifecycle: DefLifecycle{CreateForbidden: true},
},
action: RefActionCreate,
expCodes: []string{"lifecycle.create"},
expParams: [][]any{{refName}},
},
{
name: "lifecycle.delete-fail",
def: DefBranchLifecycle{
DefLifecycle: DefLifecycle{DeleteForbidden: true},
},
action: RefActionDelete,
expCodes: []string{"lifecycle.delete"},
expParams: [][]any{{refName}},
},
{
name: "lifecycle.update-fail",
def: DefBranchLifecycle{UpdateForbidden: true},
action: RefActionUpdate,
expCodes: []string{"lifecycle.update"},
expParams: [][]any{{refName}},
},
{
name: "lifecycle.update.force-fail",
def: DefBranchLifecycle{
DefLifecycle: DefLifecycle{UpdateForceForbidden: true},
},
action: RefActionUpdateForce,
expCodes: []string{"lifecycle.update.force"},
expParams: [][]any{{refName}},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
in := RefChangeVerifyInput{
RefNames: []string{refName},
RefAction: test.action,
RefType: RefTypeBranch,
}
if err := test.def.Sanitize(); err != nil {
t.Errorf("def invalid: %s", err.Error())
return
}
violations, err := test.def.RefChangeVerify(context.Background(), in)
if err != nil {
t.Errorf("got an error: %s", err.Error())
return
}
inspectBranchViolations(t, test.expCodes, test.expParams, violations)
})
}
}
func inspectBranchViolations(t *testing.T,
expCodes []string,
expParams [][]any,
violations []types.RuleViolations,
) {
if len(expCodes) == 0 &&
(len(violations) == 0 || len(violations) == 1 && len(violations[0].Violations) == 0) {
// no violations expected and no violations received
return
}
if len(violations) != 1 {
t.Error("expected size of violation should always be one")
return
}
if want, got := len(expCodes), len(violations[0].Violations); want != got {
t.Errorf("violation count: want=%d got=%d", want, got)
return
}
for i, violation := range violations[0].Violations {
if want, got := expCodes[i], violation.Code; want != got {
t.Errorf("violation %d code mismatch: want=%s got=%s", i, want, got)
}
if want, got := expParams[i], violation.Params; !reflect.DeepEqual(want, got) {
t.Errorf("violation %d params mismatch: want=%v got=%v", i, want, got)
}
}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/protection/service.go | app/services/protection/service.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protection
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
type (
Sanitizer interface {
// Sanitize validates if the definition is valid and automatically corrects minor issues.
Sanitize() error
}
Protection interface {
UserIDs() ([]int64, error)
UserGroupIDs() ([]int64, error)
}
RefProtection interface {
RefChangeVerifier
}
BranchProtection interface {
RefProtection
MergeVerifier
CreatePullReqVerifier
Protection
}
TagProtection interface {
RefProtection
Protection
}
PushProtection interface {
PushVerifier
Protection
}
Definition interface {
Sanitizer
Protection
}
// DefinitionGenerator is the function that creates blank rules.
DefinitionGenerator func() Definition
// Manager is used to enforce protection rules.
Manager struct {
defGenMap map[enum.RuleType]DefinitionGenerator
ruleStore store.RuleStore
}
)
var (
ErrUnrecognizedType = errors.New("unrecognized protection type")
ErrAlreadyRegistered = errors.New("protection type already registered")
ErrPatternEmpty = errors.New("name pattern can't be empty")
ErrInvalidGlobstarPattern = errors.New("invalid globstar pattern")
)
func IsCritical(violations []types.RuleViolations) bool {
for i := range violations {
if violations[i].IsCritical() {
return true
}
}
return false
}
func IsBypassed(violations []types.RuleViolations) bool {
for i := range violations {
if violations[i].IsBypassed() {
return true
}
}
return false
}
// NewManager creates new protection Manager.
func NewManager(ruleStore store.RuleStore) *Manager {
return &Manager{
defGenMap: make(map[enum.RuleType]DefinitionGenerator),
ruleStore: ruleStore,
}
}
// Register registers new enum.RuleType.
func (m *Manager) Register(ruleType enum.RuleType, gen DefinitionGenerator) error {
_, ok := m.defGenMap[ruleType]
if ok {
return ErrAlreadyRegistered
}
m.defGenMap[ruleType] = gen
return nil
}
func (m *Manager) FromJSON(
ruleType enum.RuleType, message json.RawMessage, strict bool,
) (Protection, error) {
gen := m.defGenMap[ruleType]
if gen == nil {
return nil, ErrUnrecognizedType
}
decoder := json.NewDecoder(bytes.NewReader(message))
if strict {
decoder.DisallowUnknownFields()
}
r := gen()
if err := decoder.Decode(&r); err != nil {
return nil, err
}
if err := r.Sanitize(); err != nil {
return nil, err
}
return r, nil
}
func (m *Manager) SanitizeJSON(
ruleType enum.RuleType,
message json.RawMessage,
) (json.RawMessage, error) {
r, err := m.FromJSON(ruleType, message, true)
if err != nil {
return nil, fmt.Errorf("failed to get rule protection from JSON: %w", err)
}
rawMsg, err := ToJSON(r)
if err != nil {
return nil, fmt.Errorf("failed to convert rule protection to JSON: %w", err)
}
return rawMsg, nil
}
func (m *Manager) ListRepoRules(
ctx context.Context,
repoID int64,
ruleTypes ...enum.RuleType,
) ([]types.RuleInfoInternal, error) {
ruleInfos, err := m.ruleStore.ListAllRepoRules(ctx, repoID, ruleTypes...)
if err != nil {
return nil, fmt.Errorf("failed to list rules for repository: %w", err)
}
return ruleInfos, nil
}
func (m *Manager) ListRepoBranchRules(
ctx context.Context,
repoID int64,
) (BranchProtection, error) {
ruleInfos, err := m.ListRepoRules(ctx, repoID, TypeBranch)
if err != nil {
return branchRuleSet{}, err
}
return branchRuleSet{
rules: ruleInfos,
manager: m,
}, nil
}
func (m *Manager) ListRepoTagRules(
ctx context.Context,
repoID int64,
) (TagProtection, error) {
ruleInfos, err := m.ListRepoRules(ctx, repoID, TypeTag)
if err != nil {
return tagRuleSet{}, err
}
return tagRuleSet{
rules: ruleInfos,
manager: m,
}, nil
}
func (m *Manager) ListRepoPushRules(
ctx context.Context,
repoID int64,
) (PushProtection, error) {
ruleInfos, err := m.ListRepoRules(ctx, repoID, TypePush)
if err != nil {
return pushRuleSet{}, err
}
return pushRuleSet{
rules: ruleInfos,
manager: m,
}, nil
}
func (m *Manager) FilterCreateBranchProtection(rules []types.RuleInfoInternal) BranchProtection {
var branchRules []types.RuleInfoInternal
for _, rule := range rules {
if rule.Type == TypeBranch {
branchRules = append(branchRules, rule)
}
}
return branchRuleSet{
rules: branchRules,
manager: m,
}
}
func (m *Manager) FilterCreateTagProtection(rules []types.RuleInfoInternal) TagProtection {
var tagRules []types.RuleInfoInternal
for _, rule := range rules {
if rule.Type == TypeTag {
tagRules = append(tagRules, rule)
}
}
return tagRuleSet{
rules: tagRules,
manager: m,
}
}
func (m *Manager) FilterCreatePushProtection(rules []types.RuleInfoInternal) PushProtection {
var pushRules []types.RuleInfoInternal
for _, rule := range rules {
if rule.Type == TypePush {
pushRules = append(pushRules, rule)
}
}
return pushRuleSet{
rules: pushRules,
manager: m,
}
}
func printRuleScope(r types.RuleInfo) string {
switch {
case r.RepoPath != "":
return fmt.Sprintf("repository %q", r.RepoPath)
case r.SpacePath != "":
return fmt.Sprintf("scope %q", r.SpacePath)
default:
return "unknown scope"
}
}
// GenerateErrorMessageForBlockingViolations generates an error message for a given slice of rule violations.
// It simply takes the first blocking rule that has a violation and prints that, with indication if further
// rules were violated.
func GenerateErrorMessageForBlockingViolations(ruleViolations []types.RuleViolations) string {
selectedIDX := -1
blockingRuleViolationCnt := 0
for i := range ruleViolations {
// we don't care about bypassed or non-active rules
if ruleViolations[i].Bypassed || ruleViolations[i].Rule.State != enum.RuleStateActive {
continue
}
blockingRuleViolationCnt++
// We take the first blocking rule violation we find, unless a later one has additional details.
if selectedIDX >= 0 &&
(len(ruleViolations[selectedIDX].Violations) > 0 || len(ruleViolations[i].Violations) == 0) {
continue
}
selectedIDX = i
}
if blockingRuleViolationCnt == 0 {
return "No blocking rule violations found."
}
var msg string
if blockingRuleViolationCnt == 1 {
msg = fmt.Sprintf(
"Operation violates %s protection rule %q in %s",
ruleViolations[selectedIDX].Rule.Type,
ruleViolations[selectedIDX].Rule.Identifier,
printRuleScope(ruleViolations[selectedIDX].Rule),
)
} else {
msg = fmt.Sprintf(
"Operation violates %d protection rules, including %s protection rule %q in %s",
blockingRuleViolationCnt,
ruleViolations[selectedIDX].Rule.Type,
ruleViolations[selectedIDX].Rule.Identifier,
printRuleScope(ruleViolations[selectedIDX].Rule),
)
}
if len(ruleViolations[selectedIDX].Violations) > 0 {
msg += " with violation: " + ruleViolations[selectedIDX].Violations[0].Message
}
return msg
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/protection/service_test.go | app/services/protection/service_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protection
import (
"encoding/json"
"errors"
"testing"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
func TestIsCritical(t *testing.T) {
tests := []struct {
name string
input []types.RuleViolations
exp bool
}{
{
name: "empty",
input: []types.RuleViolations{},
exp: false,
},
{
name: "non-critical",
input: []types.RuleViolations{
{
Rule: types.RuleInfo{State: enum.RuleStateMonitor},
Bypassed: false,
Violations: []types.Violation{{Code: "x"}, {Code: "x"}},
},
{
Rule: types.RuleInfo{State: enum.RuleStateActive},
Bypassed: true,
Violations: []types.Violation{{Code: "x"}, {Code: "x"}},
},
{
Rule: types.RuleInfo{State: enum.RuleStateActive},
Bypassed: false,
Violations: []types.Violation{},
},
},
exp: false,
},
{
name: "critical",
input: []types.RuleViolations{
{
Rule: types.RuleInfo{State: enum.RuleStateActive},
Bypassed: false,
Violations: []types.Violation{{Code: "x"}, {Code: "x"}},
},
},
exp: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
if want, got := test.exp, IsCritical(test.input); want != got {
t.Errorf("want=%t got=%t", want, got)
}
})
}
}
func TestManager_SanitizeJSON(t *testing.T) {
tests := []struct {
name string
ruleTypes []enum.RuleType
ruleType enum.RuleType
errReg error
errSan error
}{
{
name: "success branch",
ruleTypes: []enum.RuleType{TypeBranch},
ruleType: TypeBranch,
},
{
name: "success push",
ruleTypes: []enum.RuleType{TypePush},
ruleType: TypePush,
},
{
name: "success tag",
ruleTypes: []enum.RuleType{TypeTag},
ruleType: TypeTag,
},
{
name: "all rule types",
ruleTypes: []enum.RuleType{TypeBranch, TypePush, TypeTag},
ruleType: TypeBranch,
},
{
name: "duplicate",
ruleTypes: []enum.RuleType{TypeBranch, TypeBranch},
ruleType: TypeBranch,
errReg: ErrAlreadyRegistered,
},
{
name: "unregistered",
ruleTypes: []enum.RuleType{},
ruleType: TypeBranch,
errSan: ErrUnrecognizedType,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
m := NewManager(nil)
err := func() error {
for _, ruleType := range test.ruleTypes {
err := m.Register(ruleType, func() Definition { return &Branch{} })
if err != nil {
return err
}
}
return nil
}()
// nolint:errorlint // deliberately comparing errors with ==
if test.errReg != err {
t.Errorf("register type error mismatch: want=%v got=%v", test.errReg, err)
return
}
_, err = m.SanitizeJSON(test.ruleType, json.RawMessage("{}"))
if !errors.Is(err, test.errSan) {
t.Errorf("register type error mismatch: want error containing %v, got %v", test.errSan, err)
}
})
}
}
func TestGenerateErrorMessageForBlockingViolations(t *testing.T) {
type testCase struct {
name string
violations []types.RuleViolations
expected string
}
tests := []testCase{
{
name: "no violations",
violations: nil,
expected: "No blocking rule violations found.",
},
{
name: "no blocking violations",
violations: []types.RuleViolations{
{
Bypassed: true,
},
{
Rule: types.RuleInfo{
State: enum.RuleStateDisabled,
},
},
{
Rule: types.RuleInfo{
State: enum.RuleStateMonitor,
},
},
},
expected: "No blocking rule violations found.",
},
{
name: "single violation without details",
violations: []types.RuleViolations{
{
Rule: types.RuleInfo{
Identifier: "rule1",
State: enum.RuleStateActive,
Type: "branch",
SpacePath: "space/path1",
},
},
},
expected: `Operation violates branch protection rule "rule1" in scope "space/path1"`,
},
{
name: "multiple violations without details",
violations: []types.RuleViolations{
{
Rule: types.RuleInfo{
Identifier: "rule1",
State: enum.RuleStateActive,
Type: "branch",
SpacePath: "space/path1",
},
},
{
Rule: types.RuleInfo{
Identifier: "rule2",
State: enum.RuleStateActive,
Type: "other",
SpacePath: "space/path2",
},
},
},
expected: `Operation violates 2 protection rules, including branch protection rule "rule1" ` +
`in scope "space/path1"`,
}, {
name: "single violation with details",
violations: []types.RuleViolations{
{
Rule: types.RuleInfo{
Identifier: "rule1",
State: enum.RuleStateActive,
Type: "branch",
RepoPath: "repo/path1",
},
Violations: []types.Violation{
{
Message: "violation1.1",
},
{
Message: "violation1.2",
},
},
},
},
expected: `Operation violates branch protection rule "rule1" ` +
`in repository "repo/path1" with violation: violation1.1`,
},
{
name: "multiple violations with details",
violations: []types.RuleViolations{
{
Rule: types.RuleInfo{
Identifier: "rule1",
State: enum.RuleStateActive,
Type: "other",
RepoPath: "repo/path1",
},
},
{
Rule: types.RuleInfo{
Identifier: "rule2",
State: enum.RuleStateActive,
Type: "branch",
RepoPath: "repo/path2",
},
Violations: []types.Violation{
{
Message: "violation2.1",
},
{
Message: "violation2.2",
},
},
},
{
Rule: types.RuleInfo{
Identifier: "rule3",
State: enum.RuleStateActive,
Type: "other",
RepoPath: "repo/path3",
},
Violations: []types.Violation{
{
Message: "violation3.1",
},
},
},
},
expected: `Operation violates 3 protection rules, including branch protection rule "rule2" ` +
`in repository "repo/path2" with violation: violation2.1`,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := GenerateErrorMessageForBlockingViolations(tt.violations)
if got != tt.expected {
t.Errorf("Want error message %q, got %q", tt.expected, got)
}
})
}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/protection/validators.go | app/services/protection/validators.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protection
import (
"slices"
"github.com/harness/gitness/errors"
)
const maxElements = 100
func validateIDSlice(ids []int64) error {
if len(ids) > maxElements {
return errors.InvalidArgument("Too many IDs provided.")
}
for _, id := range ids {
if id <= 0 {
return errors.InvalidArgument("ID must be a positive integer.")
}
}
return nil
}
func validateIdentifierSlice(identifiers []string) error {
if len(identifiers) > maxElements {
return errors.InvalidArgument("Too many Identifiers provided.")
}
if slices.Contains(identifiers, "") {
return errors.InvalidArgument("Identifier mustn't be an empty string.")
}
return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/protection/rule_branch_infos.go | app/services/protection/rule_branch_infos.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protection
import (
"fmt"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
var RuleInfoFilterTypeBranch = func(r *types.RuleInfoInternal) (bool, error) {
return r.Type == TypeBranch, nil
}
var RuleInfoFilterStatusActive = func(r *types.RuleInfoInternal) (bool, error) {
return r.State == enum.RuleStateActive, nil
}
func GetBranchRuleInfos(
repoID int64,
repoIdentifier string,
protection BranchProtection,
defaultBranch string,
branchName string,
filterFns ...func(*types.RuleInfoInternal) (bool, error),
) (ruleInfos []types.RuleInfo, err error) {
v, ok := protection.(branchRuleSet)
if !ok {
return ruleInfos, nil
}
err = v.forEachRuleMatchBranch(
repoID,
repoIdentifier,
defaultBranch,
branchName,
func(r *types.RuleInfoInternal, _ BranchProtection) error {
for _, filterFn := range filterFns {
allow, err := filterFn(r)
if err != nil {
return fmt.Errorf("rule info filter function error: %w", err)
}
if !allow {
return nil
}
}
ruleInfos = append(ruleInfos, r.RuleInfo)
return nil
},
)
if err != nil {
return nil, fmt.Errorf("failed to process each rule in ruleSet: %w", err)
}
return ruleInfos, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/protection/pattern.go | app/services/protection/pattern.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protection
import (
"encoding/json"
"fmt"
"github.com/bmatcuk/doublestar/v4"
)
type Pattern struct {
Default bool `json:"default,omitempty"`
Include []string `json:"include,omitempty"`
Exclude []string `json:"exclude,omitempty"`
}
func (p *Pattern) JSON() json.RawMessage {
message, _ := ToJSON(p)
return message
}
func (p *Pattern) Validate() error {
for _, pattern := range p.Include {
if err := patternValidate(pattern); err != nil {
return err
}
}
for _, pattern := range p.Exclude {
if err := patternValidate(pattern); err != nil {
return err
}
}
return nil
}
func (p *Pattern) Matches(branchName, defaultName string) bool {
// Initially match everything, unless the default is set or the include patterns are defined.
matches := !p.Default && len(p.Include) == 0
// Apply the default branch.
matches = matches || p.Default && branchName == defaultName
// Apply the include patterns.
if !matches {
for _, include := range p.Include {
if matches = patternMatches(include, branchName); matches {
break
}
}
}
// Apply the exclude patterns.
for _, exclude := range p.Exclude {
matches = matches && !patternMatches(exclude, branchName)
}
return matches
}
func patternValidate(pattern string) error {
if pattern == "" {
return ErrPatternEmpty
}
_, err := doublestar.Match(pattern, "test")
if err != nil {
return ErrInvalidGlobstarPattern
}
return nil
}
// patternMatches matches a name against the provided file name pattern. From the doublestar library:
//
// The pattern syntax is:
//
// pattern:
//
// { term }
//
// term:
//
// '*' matches any sequence of non-path-separators
// '**' matches any sequence of characters, including
// path separators.
// '?' matches any single non-path-separator character
// '[' [ '^' ] { character-range } ']'
// character class (must be non-empty)
// '{' { term } [ ',' { term } ... ] '}'
// c matches character c (c != '*', '?', '\\', '[')
// '\\' c matches character c
//
// character-range:
//
// c matches character c (c != '\\', '-', ']')
// '\\' c matches character c
// lo '-' hi matches character c for lo <= c <= hi
func patternMatches(pattern, branchName string) bool {
ok, _ := doublestar.Match(pattern, branchName)
return ok
}
func matchesRef(rawPattern json.RawMessage, defaultRef, ref string) (bool, error) {
pattern := Pattern{}
if err := json.Unmarshal(rawPattern, &pattern); err != nil {
return false, fmt.Errorf("failed to parse ref pattern: %w", err)
}
return pattern.Matches(ref, defaultRef), nil
}
func matchesRefs(rawPattern json.RawMessage, defaultRef string, refs ...string) ([]string, error) {
pattern := Pattern{}
if err := json.Unmarshal(rawPattern, &pattern); err != nil {
return nil, fmt.Errorf("failed to parse ref pattern: %w", err)
}
matched := make([]string, 0, len(refs))
for _, ref := range refs {
if pattern.Matches(ref, defaultRef) {
matched = append(matched, ref)
}
}
return matched, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/protection/verify_pullreq.go | app/services/protection/verify_pullreq.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protection
import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/harness/gitness/app/services/codeowners"
"github.com/harness/gitness/errors"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
type (
MergeVerifier interface {
MergeVerify(ctx context.Context, in MergeVerifyInput) (MergeVerifyOutput, []types.RuleViolations, error)
RequiredChecks(ctx context.Context, in RequiredChecksInput) (RequiredChecksOutput, error)
}
MergeVerifyInput struct {
ResolveUserGroupIDs func(ctx context.Context, userGroupIDs []int64) ([]int64, error)
MapUserGroupIDs func(ctx context.Context, userGroupIDs []int64) (map[int64][]*types.Principal, error)
Actor *types.Principal
AllowBypass bool
IsRepoOwner bool
TargetRepo *types.RepositoryCore
SourceRepo *types.RepositoryCore
PullReq *types.PullReq
Reviewers []*types.PullReqReviewer
Method enum.MergeMethod // the method can be empty for dry run or dry run rules
CheckResults []types.CheckResult
CodeOwners *codeowners.Evaluation
}
MergeVerifyOutput struct {
AllowedMethods []enum.MergeMethod
DeleteSourceBranch bool
MinimumRequiredApprovalsCount int
MinimumRequiredApprovalsCountLatest int
RequiresCodeOwnersApproval bool
RequiresCodeOwnersApprovalLatest bool
RequiresCommentResolution bool
RequiresNoChangeRequests bool
DefaultReviewerApprovals []*types.DefaultReviewerApprovalsResponse
}
RequiredChecksInput struct {
ResolveUserGroupID func(ctx context.Context, userGroupIDs []int64) ([]int64, error)
Actor *types.Principal
IsRepoOwner bool
Repo *types.RepositoryCore
PullReq *types.PullReq
}
RequiredChecksOutput struct {
RequiredIdentifiers map[string]struct{}
BypassableIdentifiers map[string]struct{}
}
CreatePullReqVerifier interface {
CreatePullReqVerify(
ctx context.Context,
in CreatePullReqVerifyInput,
) (CreatePullReqVerifyOutput, []types.RuleViolations, error)
}
CreatePullReqVerifyInput struct {
ResolveUserGroupID func(ctx context.Context, userGroupIDs []int64) ([]int64, error)
Actor *types.Principal
AllowBypass bool
IsRepoOwner bool
DefaultBranch string
TargetBranch string
RepoID int64
RepoIdentifier string
}
CreatePullReqVerifyOutput struct {
RequestCodeOwners bool
DefaultReviewerIDs []int64
DefaultGroupReviewerIDs []int64
}
)
// Ensures that the DefPullReq type implements Sanitizer, MergeVerifier and CreatePullReqVerifier interface.
var (
_ Sanitizer = (*DefPullReq)(nil)
_ MergeVerifier = (*DefPullReq)(nil)
_ CreatePullReqVerifier = (*DefPullReq)(nil)
)
// Violation codes emitted by the pull request protection checks below.
const (
	codePullReqApprovalReqMinCount                      = "pullreq.approvals.require_minimum_count"
	codePullReqApprovalReqMinCountLatest                = "pullreq.approvals.require_minimum_count:latest_commit"
	codePullReqApprovalReqDefaultReviewerMinCount       = "pullreq.approvals.require_default_reviewer_minimum_count"
	codePullReqApprovalReqDefaultReviewerMinCountLatest = "pullreq.approvals.require_default_reviewer_minimum_count:latest_commit" //nolint:lll
	codePullReqApprovalReqLatestCommit                  = "pullreq.approvals.require_latest_commit"
	codePullReqApprovalReqChangeRequested               = "pullreq.approvals.require_change_requested"
	codePullReqApprovalReqChangeRequestedOldSHA         = "pullreq.approvals.require_change_requested_old_SHA"
	codePullReqApprovalReqCodeOwnersNoApproval          = "pullreq.approvals.require_code_owners:no_approval"
	codePullReqApprovalReqCodeOwnersChangeRequested     = "pullreq.approvals.require_code_owners:change_requested"
	codePullReqApprovalReqCodeOwnersNoLatestApproval    = "pullreq.approvals.require_code_owners:no_latest_approval"
	codePullReqMergeStrategiesAllowed                   = "pullreq.merge.strategies_allowed"
	codePullReqMergeDeleteBranch                        = "pullreq.merge.delete_branch"
	codePullReqMergeBlock                               = "pullreq.merge.blocked"
	codePullReqCommentsReqResolveAll                    = "pullreq.comments.require_resolve_all"
	codePullReqStatusChecksReqIdentifiers               = "pullreq.status_checks.required_identifiers"
)
// MergeVerify checks the pull request against every requirement of the rule
// definition (approvals, default reviewers, code owners, comment resolution,
// status checks and merge settings) and collects one violation per unmet
// requirement. It only returns an error on infrastructure failures (user
// group mapping); rule failures are reported as violations, not errors.
//
//nolint:gocognit,gocyclo,cyclop // well aware of this
func (v *DefPullReq) MergeVerify(
	ctx context.Context,
	in MergeVerifyInput,
) (MergeVerifyOutput, []types.RuleViolations, error) {
	var out MergeVerifyOutput
	var violations types.RuleViolations
	// set static merge verify output that comes from the PR definition
	out.DeleteSourceBranch = v.Merge.DeleteBranch
	out.RequiresCommentResolution = v.Comments.RequireResolveAll
	out.RequiresNoChangeRequests = v.Approvals.RequireNoChangeRequest
	// output that depends on approval of latest commit
	if v.Approvals.RequireLatestCommit {
		out.RequiresCodeOwnersApprovalLatest = v.Approvals.RequireCodeOwners
		out.MinimumRequiredApprovalsCountLatest = v.Approvals.RequireMinimumCount
	} else {
		out.RequiresCodeOwnersApproval = v.Approvals.RequireCodeOwners
		out.MinimumRequiredApprovalsCount = v.Approvals.RequireMinimumCount
	}
	// pullreq.approvals
	// Collect reviewers by ID and the set of reviewers whose approval counts
	// (when RequireLatestCommit is set, only approvals of the source SHA count).
	reviewerMap := make(map[int64]*types.PullReqReviewer)
	approvedBy := make(map[int64]struct{})
	for _, reviewer := range in.Reviewers {
		reviewerMap[reviewer.Reviewer.ID] = reviewer
		switch reviewer.ReviewDecision {
		case enum.PullReqReviewDecisionApproved:
			if v.Approvals.RequireLatestCommit && reviewer.SHA != in.PullReq.SourceSHA {
				continue
			}
			approvedBy[reviewer.Reviewer.ID] = struct{}{}
		case enum.PullReqReviewDecisionChangeReq:
			if v.Approvals.RequireNoChangeRequest {
				// A change request on the current SHA vs. an older SHA are
				// reported under different violation codes.
				if reviewer.SHA == in.PullReq.SourceSHA {
					violations.Addf(
						codePullReqApprovalReqChangeRequested,
						"Reviewer %s requested changes",
						reviewer.Reviewer.DisplayName,
					)
				} else {
					violations.Addf(
						codePullReqApprovalReqChangeRequestedOldSHA,
						"Reviewer %s requested changes for an older commit",
						reviewer.Reviewer.DisplayName,
					)
				}
			}
		case enum.PullReqReviewDecisionPending,
			enum.PullReqReviewDecisionReviewed:
		}
	}
	if len(approvedBy) < v.Approvals.RequireMinimumCount {
		if v.Approvals.RequireLatestCommit {
			violations.Addf(codePullReqApprovalReqMinCountLatest,
				"Insufficient number of approvals of the latest commit. Have %d but need at least %d.",
				len(approvedBy), v.Approvals.RequireMinimumCount)
		} else {
			violations.Addf(codePullReqApprovalReqMinCount,
				"Insufficient number of approvals. Have %d but need at least %d.",
				len(approvedBy), v.Approvals.RequireMinimumCount)
		}
	}
	// Build the effective set of default reviewers: directly configured ones
	// (excluding the PR author) plus members of configured user groups.
	defaultReviewerIDs := make([]int64, 0, len(v.Reviewers.DefaultReviewerIDs))
	uniqueDefaultReviewerIDs := make(map[int64]struct{})
	for _, id := range v.Reviewers.DefaultReviewerIDs {
		if id != in.PullReq.Author.ID {
			defaultReviewerIDs = append(defaultReviewerIDs, id)
			uniqueDefaultReviewerIDs[id] = struct{}{}
		}
	}
	if in.MapUserGroupIDs != nil {
		userGroupsMap, err := in.MapUserGroupIDs(ctx, v.Reviewers.DefaultUserGroupReviewerIDs)
		if err != nil {
			return MergeVerifyOutput{}, []types.RuleViolations{},
				fmt.Errorf("failed to map principals to user group ids: %w", err)
		}
		for _, principals := range userGroupsMap {
			for _, principal := range principals {
				uniqueDefaultReviewerIDs[principal.ID] = struct{}{}
			}
		}
	}
	effectiveDefaultReviewerIDs := maps.Keys(uniqueDefaultReviewerIDs)
	var evaluations []*types.ReviewerEvaluation
	for id := range uniqueDefaultReviewerIDs {
		if reviewer, ok := reviewerMap[id]; ok {
			evaluations = append(evaluations, &types.ReviewerEvaluation{
				Reviewer: reviewer.Reviewer,
				SHA:      reviewer.SHA,
				Decision: reviewer.ReviewDecision,
			})
		}
	}
	// if author is default reviewer and required minimum == number of default reviewers, reduce minimum by one.
	effectiveMinimumRequiredDefaultReviewerCount := v.Approvals.RequireMinimumDefaultReviewerCount
	if len(effectiveDefaultReviewerIDs) < len(v.Reviewers.DefaultReviewerIDs) &&
		len(v.Reviewers.DefaultReviewerIDs) == v.Approvals.RequireMinimumDefaultReviewerCount {
		effectiveMinimumRequiredDefaultReviewerCount--
	}
	//nolint:nestif
	if effectiveMinimumRequiredDefaultReviewerCount > 0 {
		var defaultReviewerApprovalCount int
		for _, id := range effectiveDefaultReviewerIDs {
			if _, ok := approvedBy[id]; ok {
				defaultReviewerApprovalCount++
			}
		}
		if defaultReviewerApprovalCount < effectiveMinimumRequiredDefaultReviewerCount {
			if v.Approvals.RequireLatestCommit {
				violations.Addf(codePullReqApprovalReqDefaultReviewerMinCountLatest,
					"Insufficient number of default reviewer approvals of the latest commit. Have %d but need at least %d.",
					defaultReviewerApprovalCount, effectiveMinimumRequiredDefaultReviewerCount)
			} else {
				violations.Addf(codePullReqApprovalReqDefaultReviewerMinCount,
					"Insufficient number of default reviewer approvals. Have %d but need at least %d.",
					defaultReviewerApprovalCount, effectiveMinimumRequiredDefaultReviewerCount)
			}
		}
		out.DefaultReviewerApprovals = []*types.DefaultReviewerApprovalsResponse{{
			PrincipalIDs: defaultReviewerIDs,
			UserGroupIDs: v.Reviewers.DefaultUserGroupReviewerIDs,
			CurrentCount: defaultReviewerApprovalCount,
			Evaluations:  evaluations,
		}}
		if v.Approvals.RequireLatestCommit {
			out.DefaultReviewerApprovals[0].MinimumRequiredCountLatest = effectiveMinimumRequiredDefaultReviewerCount
		} else {
			out.DefaultReviewerApprovals[0].MinimumRequiredCount = effectiveMinimumRequiredDefaultReviewerCount
		}
	}
	// Code owners: each evaluation entry (pattern) must be approved, and with
	// RequireLatestCommit the approval must be for the current source SHA.
	if v.Approvals.RequireCodeOwners {
		for _, entry := range in.CodeOwners.EvaluationEntries {
			reviewDecision, approvers := getCodeOwnerApprovalStatus(entry)
			if reviewDecision == enum.PullReqReviewDecisionPending {
				violations.Addf(codePullReqApprovalReqCodeOwnersNoApproval,
					"Code owners approval pending for %q", entry.Pattern)
				continue
			}
			if reviewDecision == enum.PullReqReviewDecisionChangeReq {
				violations.Addf(codePullReqApprovalReqCodeOwnersChangeRequested,
					"Code owners requested changes for %q", entry.Pattern)
				continue
			}
			// pull req approved. check other settings
			if !v.Approvals.RequireLatestCommit {
				continue
			}
			latestSHAApproved := slices.ContainsFunc(approvers, func(ev codeowners.UserEvaluation) bool {
				return ev.ReviewSHA == in.PullReq.SourceSHA
			})
			if !latestSHAApproved {
				violations.Addf(codePullReqApprovalReqCodeOwnersNoLatestApproval,
					"Code owners approval pending on latest commit for %q", entry.Pattern)
			}
		}
	}
	// pullreq.comments
	if v.Comments.RequireResolveAll && in.PullReq.UnresolvedCount > 0 {
		violations.Addf(codePullReqCommentsReqResolveAll,
			"All comments must be resolved. There are %d unresolved comments.",
			in.PullReq.UnresolvedCount)
	}
	// pullreq.status_checks
	// Every required identifier must have a successful check result.
	var violatingStatusCheckIdentifiers []string
	for _, requiredIdentifier := range v.StatusChecks.RequireIdentifiers {
		var succeeded bool
		for i := range in.CheckResults {
			if in.CheckResults[i].Identifier == requiredIdentifier {
				succeeded = in.CheckResults[i].Status.IsSuccess()
				break
			}
		}
		if !succeeded {
			violatingStatusCheckIdentifiers = append(violatingStatusCheckIdentifiers, requiredIdentifier)
		}
	}
	if len(violatingStatusCheckIdentifiers) > 0 {
		violations.Addf(
			codePullReqStatusChecksReqIdentifiers,
			"The following status checks are required to be completed successfully: %s",
			strings.Join(violatingStatusCheckIdentifiers, ", "),
		)
	}
	// pullreq.merge
	out.AllowedMethods = enum.MergeMethods
	// Note: Empty allowed strategies list means all are allowed
	if len(v.Merge.StrategiesAllowed) > 0 {
		// if the Method isn't provided return allowed strategies
		out.AllowedMethods = v.Merge.StrategiesAllowed
		if in.Method != "" {
			// if the Method is provided report violations if any
			if !slices.Contains(v.Merge.StrategiesAllowed, in.Method) {
				violations.Addf(codePullReqMergeStrategiesAllowed,
					"The requested merge strategy %q is not allowed. Allowed strategies are %v.",
					in.Method, v.Merge.StrategiesAllowed)
			}
		}
	}
	if v.Merge.Block {
		violations.Addf(
			codePullReqMergeBlock,
			"The merge for the branch %s is not allowed.", in.PullReq.TargetBranch)
	}
	if len(violations.Violations) > 0 {
		return out, []types.RuleViolations{violations}, nil
	}
	return out, nil, nil
}
// RequiredChecks returns the status check identifiers required by this
// definition as a set. The input is unused; the result depends only on the
// configured StatusChecks.RequireIdentifiers.
func (v *DefPullReq) RequiredChecks(
	_ context.Context,
	_ RequiredChecksInput,
) (RequiredChecksOutput, error) {
	identifiers := v.StatusChecks.RequireIdentifiers
	required := make(map[string]struct{}, len(identifiers))
	for _, identifier := range identifiers {
		required[identifier] = struct{}{}
	}
	return RequiredChecksOutput{RequiredIdentifiers: required}, nil
}
// CreatePullReqVerify returns the reviewer assignment settings of the rule.
// It never produces violations or errors; both inputs are unused.
func (v *DefPullReq) CreatePullReqVerify(
	context.Context,
	CreatePullReqVerifyInput,
) (CreatePullReqVerifyOutput, []types.RuleViolations, error) {
	out := CreatePullReqVerifyOutput{
		RequestCodeOwners:       v.Reviewers.RequestCodeOwners,
		DefaultReviewerIDs:      v.Reviewers.DefaultReviewerIDs,
		DefaultGroupReviewerIDs: v.Reviewers.DefaultUserGroupReviewerIDs,
	}
	return out, nil, nil
}
// DefApprovals defines the approval requirements enforced on a pull request
// by the rule (see DefPullReq.MergeVerify for how each field is applied).
type DefApprovals struct {
	RequireCodeOwners                  bool `json:"require_code_owners,omitempty"`
	RequireMinimumCount                int  `json:"require_minimum_count,omitempty"`
	RequireLatestCommit                bool `json:"require_latest_commit,omitempty"`
	RequireNoChangeRequest             bool `json:"require_no_change_request,omitempty"`
	RequireMinimumDefaultReviewerCount int  `json:"require_minimum_default_reviewer_count,omitempty"`
}
// Sanitize validates the approval definition: the minimum count must be
// non-negative, and "require latest commit" is only meaningful together with
// at least one approval requirement.
func (v *DefApprovals) Sanitize() error {
	if v.RequireMinimumCount < 0 {
		return errors.InvalidArgument("Require minimum count must be zero or a positive integer.")
	}
	hasApprovalRequirement := v.RequireCodeOwners ||
		v.RequireMinimumCount != 0 ||
		v.RequireMinimumDefaultReviewerCount != 0
	if v.RequireLatestCommit && !hasApprovalRequirement {
		return errors.InvalidArgument("Require latest commit can only be used with require code owners, " +
			"require minimum count or require default reviewer minimum count.")
	}
	return nil
}
// DefComments defines the comment resolution requirement of the rule.
type DefComments struct {
	RequireResolveAll bool `json:"require_resolve_all,omitempty"`
}

// Sanitize validates DefComments; a single boolean needs no validation.
func (DefComments) Sanitize() error {
	return nil
}
// DefStatusChecks defines which status check identifiers must succeed
// before a pull request can be merged.
type DefStatusChecks struct {
	RequireIdentifiers []string `json:"require_identifiers,omitempty"`
}
// MarshalJSON serializes the definition while duplicating RequireIdentifiers
// under the legacy "require_uids" key for backward compatibility.
// TODO [CODE-1363]: remove after identifier migration.
func (c DefStatusChecks) MarshalJSON() ([]byte, error) {
	// The local alias type drops this MarshalJSON method, so embedding it
	// below does not recurse back into this function.
	type alias DefStatusChecks
	wrapper := struct {
		alias
		RequireUIDs []string `json:"require_uids"`
	}{
		alias:       alias(c),
		RequireUIDs: c.RequireIdentifiers,
	}
	return json.Marshal(&wrapper)
}
// UnmarshalJSON deserializes the definition, falling back to the legacy
// "require_uids" field when "require_identifiers" is absent or empty.
// TODO [CODE-1363]: remove if we don't have any require_uids left in our DB.
func (c *DefStatusChecks) UnmarshalJSON(data []byte) error {
	// The local alias type drops this UnmarshalJSON method, so decoding the
	// embedded value does not recurse back into this function.
	type alias DefStatusChecks
	var res struct {
		alias
		RequireUIDs []string `json:"require_uids"`
	}
	if err := json.Unmarshal(data, &res); err != nil {
		return fmt.Errorf("failed to unmarshal to alias type with required uids: %w", err)
	}
	*c = DefStatusChecks(res.alias)
	// Legacy fallback: only used when the new field carried no values.
	if len(c.RequireIdentifiers) == 0 {
		c.RequireIdentifiers = res.RequireUIDs
	}
	return nil
}
// Sanitize validates that every required status check identifier is well formed.
func (c *DefStatusChecks) Sanitize() error {
	err := validateIdentifierSlice(c.RequireIdentifiers)
	if err != nil {
		return fmt.Errorf("required identifiers error: %w", err)
	}
	return nil
}
// DefMerge defines merge-time behavior: the permitted merge strategies,
// whether the source branch is deleted after merging, and whether merging
// is blocked outright.
type DefMerge struct {
	StrategiesAllowed []enum.MergeMethod `json:"strategies_allowed,omitempty"`
	DeleteBranch      bool               `json:"delete_branch,omitempty"`
	Block             bool               `json:"block,omitempty"`
}
// Sanitize validates the merge definition: every allowed strategy must be a
// recognized merge method and must appear at most once. On success the
// strategy list is sorted in place for deterministic output.
func (v *DefMerge) Sanitize() error {
	// Pre-size the duplicate-detection set; the original `make(map..., 0)`
	// size hint was useless.
	seen := make(map[enum.MergeMethod]struct{}, len(v.StrategiesAllowed))
	for _, strategy := range v.StrategiesAllowed {
		if _, ok := strategy.Sanitize(); !ok {
			return errors.InvalidArgumentf("Unrecognized merge strategy: %q.", strategy)
		}
		if _, ok := seen[strategy]; ok {
			return errors.InvalidArgumentf("Duplicate entry in merge strategy list: %q.", strategy)
		}
		seen[strategy] = struct{}{}
	}
	slices.Sort(v.StrategiesAllowed)
	return nil
}
// DefReviewers defines automatic reviewer assignment for new pull requests:
// code owners and/or a fixed set of default reviewers (users and user groups).
type DefReviewers struct {
	RequestCodeOwners           bool    `json:"request_code_owners,omitempty"`
	DefaultReviewerIDs          []int64 `json:"default_reviewer_ids,omitempty"`
	DefaultUserGroupReviewerIDs []int64 `json:"default_user_group_reviewer_ids,omitempty"`
}
// Sanitize validates the default reviewer user IDs and user group IDs.
func (v *DefReviewers) Sanitize() error {
	err := validateIDSlice(v.DefaultReviewerIDs)
	if err != nil {
		return fmt.Errorf("default reviewer IDs error: %w", err)
	}
	if err = validateIDSlice(v.DefaultUserGroupReviewerIDs); err != nil {
		return fmt.Errorf("default user group reviewer IDs error: %w", err)
	}
	return nil
}
// DefPullReq is the pull request part of a protection rule definition,
// grouping all pull-request-related sub-definitions.
type DefPullReq struct {
	Approvals    DefApprovals    `json:"approvals"`
	Comments     DefComments     `json:"comments"`
	StatusChecks DefStatusChecks `json:"status_checks"`
	Merge        DefMerge        `json:"merge"`
	Reviewers    DefReviewers    `json:"reviewers"`
}
// Sanitize validates every sub-definition and wraps the first failure with
// the name of the offending section.
func (v *DefPullReq) Sanitize() error {
	sections := []struct {
		name     string
		sanitize func() error
	}{
		{"approvals", v.Approvals.Sanitize},
		{"comments", v.Comments.Sanitize},
		{"status checks", v.StatusChecks.Sanitize},
		{"merge", v.Merge.Sanitize},
		{"reviewers", v.Reviewers.Sanitize},
	}
	for _, section := range sections {
		if err := section.sanitize(); err != nil {
			return fmt.Errorf("%s: %w", section.name, err)
		}
	}
	return nil
}
// getCodeOwnerApprovalStatus reduces a code owner evaluation entry to a single
// review decision plus the owners who approved it. A change request from any
// owner — directly or via a user group — wins immediately (with nil approvers);
// otherwise the entry is approved if at least one owner approved, and pending
// when nobody did.
func getCodeOwnerApprovalStatus(
	entry codeowners.EvaluationEntry,
) (enum.PullReqReviewDecision, []codeowners.UserEvaluation) {
	approvers := make([]codeowners.UserEvaluation, 0)
	// inspect records an evaluation; it reports false on a change request.
	inspect := func(ev codeowners.UserEvaluation) bool {
		if ev.ReviewDecision == enum.PullReqReviewDecisionChangeReq {
			return false
		}
		if ev.ReviewDecision == enum.PullReqReviewDecisionApproved {
			approvers = append(approvers, ev)
		}
		return true
	}
	// direct user evaluations
	for _, userEval := range entry.UserEvaluations {
		if !inspect(userEval) {
			return enum.PullReqReviewDecisionChangeReq, nil
		}
	}
	// evaluations coming from user group members
	for _, groupEval := range entry.UserGroupEvaluations {
		for _, userEval := range groupEval.Evaluations {
			if !inspect(userEval) {
				return enum.PullReqReviewDecisionChangeReq, nil
			}
		}
	}
	if len(approvers) > 0 {
		return enum.PullReqReviewDecisionApproved, approvers
	}
	return enum.PullReqReviewDecisionPending, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/protection/bypass.go | app/services/protection/bypass.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protection
import (
"context"
"fmt"
"github.com/harness/gitness/cache"
"github.com/harness/gitness/types"
"golang.org/x/exp/slices"
)
// DefBypass defines who may bypass a protection rule: explicitly listed
// users, members of listed user groups, and optionally repository owners.
type DefBypass struct {
	UserIDs      []int64 `json:"user_ids,omitempty"`
	UserGroupIDs []int64 `json:"user_group_ids,omitempty"`
	RepoOwners   bool    `json:"repo_owners,omitempty"`
}
// matches reports whether the actor may bypass this protection rule: either
// as a repository owner (when RepoOwners is set), as an explicitly listed
// user, or as a member of one of the listed user groups (resolved through
// userGroupResolverFn when one is provided).
func (v DefBypass) matches(
	ctx context.Context,
	actor *types.Principal,
	isRepoOwner bool,
	userGroupResolverFn func(context.Context, []int64) ([]int64, error),
) bool {
	userIDs := v.UserIDs
	if userGroupResolverFn != nil {
		// NOTE(review): a resolver failure silently disables the bypass,
		// including for explicitly listed users — confirm that failing
		// closed here is intended.
		groupUserIDs, err := userGroupResolverFn(ctx, v.UserGroupIDs)
		if err != nil {
			return false
		}
		// Merge into a fresh slice: appending to (and deduplicating)
		// v.UserIDs directly could mutate the backing array shared with the
		// caller's rule definition, since v is a shallow copy of the receiver.
		merged := make([]int64, 0, len(v.UserIDs)+len(groupUserIDs))
		merged = append(merged, v.UserIDs...)
		merged = append(merged, groupUserIDs...)
		userIDs = cache.Deduplicate(merged)
	}
	return actor != nil &&
		(v.RepoOwners && isRepoOwner ||
			slices.Contains(userIDs, actor.ID))
}
// Sanitize validates the bypass user ID list.
// NOTE(review): UserGroupIDs are not validated here, unlike the ID lists in
// DefReviewers.Sanitize — confirm this is intentional.
func (v DefBypass) Sanitize() error {
	err := validateIDSlice(v.UserIDs)
	if err != nil {
		return fmt.Errorf("user IDs error: %w", err)
	}
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/protection/bypass_test.go | app/services/protection/bypass_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protection
import (
"context"
"testing"
"github.com/harness/gitness/types"
)
// TestBranch_matches exercises DefBypass.matches with a nil user-group
// resolver, covering repo-owner based and user-ID based bypass.
// NOTE(review): the name says "Branch" but the subject under test is
// DefBypass.matches.
func TestBranch_matches(t *testing.T) {
	user := &types.Principal{ID: 42}
	admin := &types.Principal{ID: 66, Admin: true}
	tests := []struct {
		name   string
		bypass DefBypass // bypass definition under test
		actor  *types.Principal
		owner  bool // whether the actor is a repo owner
		exp    bool // expected match result
	}{
		{
			name:   "empty",
			bypass: DefBypass{UserIDs: nil, RepoOwners: false},
			actor:  user,
			exp:    false,
		},
		{
			// admin privileges alone grant no bypass without repo ownership
			name:   "admin-no-owner",
			bypass: DefBypass{UserIDs: nil, RepoOwners: true},
			actor:  admin,
			owner:  false,
			exp:    false,
		},
		{
			name:   "repo-owners-false",
			bypass: DefBypass{UserIDs: nil, RepoOwners: false},
			actor:  user,
			owner:  true,
			exp:    false,
		},
		{
			name:   "repo-owners-true",
			bypass: DefBypass{UserIDs: nil, RepoOwners: true},
			actor:  user,
			owner:  true,
			exp:    true,
		},
		{
			name:   "selected-false",
			bypass: DefBypass{UserIDs: []int64{1, 66}, RepoOwners: false},
			actor:  user,
			exp:    false,
		},
		{
			name:   "selected-true",
			bypass: DefBypass{UserIDs: []int64{1, 42, 66}, RepoOwners: false},
			actor:  user,
			exp:    true,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			// each case's definition must also pass sanitization
			if err := test.bypass.Sanitize(); err != nil {
				t.Errorf("invalid: %s", err.Error())
			}
			if want, got := test.exp, test.bypass.matches(context.TODO(), test.actor, test.owner, nil); want != got {
				t.Errorf("want=%t got=%t", want, got)
			}
		})
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/protection/rule_branch.go | app/services/protection/rule_branch.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protection
import (
"context"
"fmt"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
// TypeBranch is the rule type identifier for branch protection rules.
const TypeBranch enum.RuleType = "branch"

// Branch implements protection rules for the rule type TypeBranch.
// It combines bypass settings, pull request requirements, and branch
// lifecycle restrictions.
type Branch struct {
	Bypass    DefBypass          `json:"bypass"`
	PullReq   DefPullReq         `json:"pullreq"`
	Lifecycle DefBranchLifecycle `json:"lifecycle"`
}
var (
// ensures that the Branch type implements Definition interface.
_ Definition = (*Branch)(nil)
_ BranchProtection = (*Branch)(nil)
)
// MergeVerify delegates to the pull request definition and then marks every
// resulting violation with whether this actor could bypass the rule, and
// whether the bypass was actually applied.
func (v *Branch) MergeVerify(
	ctx context.Context,
	in MergeVerifyInput,
) (MergeVerifyOutput, []types.RuleViolations, error) {
	out, violations, err := v.PullReq.MergeVerify(ctx, in)
	if err != nil {
		return out, violations, fmt.Errorf("merge verify error: %w", err)
	}
	canBypass := v.Bypass.matches(ctx, in.Actor, in.IsRepoOwner, in.ResolveUserGroupIDs)
	for i := range violations {
		violations[i].Bypassable = canBypass
		violations[i].Bypassed = canBypass && in.AllowBypass
	}
	return out, violations, nil
}
// RequiredChecks returns the rule's required status check identifiers,
// classified as bypassable when the actor matches the bypass definition and
// as strictly required otherwise.
func (v *Branch) RequiredChecks(
	ctx context.Context,
	in RequiredChecksInput,
) (RequiredChecksOutput, error) {
	out, err := v.PullReq.RequiredChecks(ctx, in)
	if err != nil {
		return RequiredChecksOutput{}, err
	}
	identifiers := out.RequiredIdentifiers
	if len(identifiers) == 0 {
		return RequiredChecksOutput{}, nil
	}
	if v.Bypass.matches(ctx, in.Actor, in.IsRepoOwner, in.ResolveUserGroupID) {
		return RequiredChecksOutput{BypassableIdentifiers: identifiers}, nil
	}
	return RequiredChecksOutput{RequiredIdentifiers: identifiers}, nil
}
// CreatePullReqVerify delegates to the pull request definition and marks the
// resulting violations with the actor's bypass status.
func (v *Branch) CreatePullReqVerify(
	ctx context.Context,
	in CreatePullReqVerifyInput,
) (CreatePullReqVerifyOutput, []types.RuleViolations, error) {
	// The original declared `var out CreatePullReqVerifyOutput` only to
	// immediately reassign it via `:=` — the redundant declaration is removed.
	out, violations, err := v.PullReq.CreatePullReqVerify(ctx, in)
	if err != nil {
		return CreatePullReqVerifyOutput{}, nil, err
	}
	bypassable := v.Bypass.matches(ctx, in.Actor, in.IsRepoOwner, in.ResolveUserGroupID)
	bypassed := in.AllowBypass && bypassable
	for i := range violations {
		violations[i].Bypassable = bypassable
		violations[i].Bypassed = bypassed
	}
	return out, violations, nil
}
// RefChangeVerify checks branch ref changes against the lifecycle definition
// and marks the resulting violations with the actor's bypass status. Non-branch
// refs (and empty ref lists) are always allowed.
func (v *Branch) RefChangeVerify(
	ctx context.Context,
	in RefChangeVerifyInput,
) ([]types.RuleViolations, error) {
	if in.RefType != RefTypeBranch || len(in.RefNames) == 0 {
		return []types.RuleViolations{}, nil
	}
	violations, err := v.Lifecycle.RefChangeVerify(ctx, in)
	if err != nil {
		return nil, fmt.Errorf("lifecycle error: %w", err)
	}
	canBypass := v.Bypass.matches(ctx, in.Actor, in.IsRepoOwner, in.ResolveUserGroupID)
	for i := range violations {
		violations[i].Bypassable = canBypass
		violations[i].Bypassed = canBypass && in.AllowBypass
	}
	return violations, nil
}
// UserIDs returns the deduplicated union of all user IDs referenced by the
// rule: bypass users and default reviewers.
func (v *Branch) UserIDs() ([]int64, error) {
	unique := make(map[int64]struct{}, len(v.Bypass.UserIDs)+len(v.PullReq.Reviewers.DefaultReviewerIDs))
	for _, list := range [][]int64{v.Bypass.UserIDs, v.PullReq.Reviewers.DefaultReviewerIDs} {
		for _, id := range list {
			unique[id] = struct{}{}
		}
	}
	result := make([]int64, 0, len(unique))
	for id := range unique {
		result = append(result, id)
	}
	return result, nil
}
// UserGroupIDs returns the deduplicated union of all user group IDs
// referenced by the rule: bypass groups and default reviewer groups.
func (v *Branch) UserGroupIDs() ([]int64, error) {
	unique := make(
		map[int64]struct{},
		len(v.Bypass.UserGroupIDs)+len(v.PullReq.Reviewers.DefaultUserGroupReviewerIDs),
	)
	for _, list := range [][]int64{v.Bypass.UserGroupIDs, v.PullReq.Reviewers.DefaultUserGroupReviewerIDs} {
		for _, id := range list {
			unique[id] = struct{}{}
		}
	}
	result := make([]int64, 0, len(unique))
	for id := range unique {
		result = append(result, id)
	}
	return result, nil
}
// Sanitize validates every sub-definition of the branch rule and wraps the
// first failure with the name of the offending section.
func (v *Branch) Sanitize() error {
	sections := []struct {
		name     string
		sanitize func() error
	}{
		{"bypass", v.Bypass.Sanitize},
		{"pull request", v.PullReq.Sanitize},
		{"lifecycle", v.Lifecycle.Sanitize},
	}
	for _, section := range sections {
		if err := section.sanitize(); err != nil {
			return fmt.Errorf("%s: %w", section.name, err)
		}
	}
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/protection/set_push.go | app/services/protection/set_push.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protection
import (
"context"
"fmt"
"github.com/harness/gitness/types"
)
// pushRuleSet bundles all push protection rules that may apply to a
// repository and evaluates them as a unit.
type pushRuleSet struct {
	rules   []types.RuleInfoInternal
	manager *Manager // used to parse rule definitions from JSON
}

// Ensures that pushRuleSet implements the PushProtection interface.
var _ PushProtection = pushRuleSet{}
// PushVerify evaluates every rule in the set whose repo target matches the
// pushed-to repository. It returns the matching protections keyed by rule ID
// (used later by Violations), the accumulated rule violations, and the
// aggregated limits across all matching rules.
func (s pushRuleSet) PushVerify(
	ctx context.Context,
	in PushVerifyInput,
) (PushVerifyOutput, []types.RuleViolations, error) {
	var violations []types.RuleViolations
	var out PushVerifyOutput
	out.Protections = make(map[int64]PushProtection, len(s.rules))
	for _, r := range s.rules {
		matches, err := matchesRepo(r.RepoTarget, in.RepoID, in.RepoIdentifier)
		if err != nil {
			// Return nil violations on all error paths for consistency;
			// previously this path leaked the partially accumulated slice.
			return out, nil, fmt.Errorf(
				"error matching repo for protection definition ID=%d to repo identifier=%s: %w",
				r.ID, in.RepoIdentifier, err,
			)
		}
		if !matches {
			continue
		}
		protection, err := s.manager.FromJSON(r.Type, r.Definition, false)
		if err != nil {
			return out, nil, fmt.Errorf(
				"failed to parse protection definition ID=%d Type=%s: %w",
				r.ID, r.Type, err,
			)
		}
		pushProtection, ok := protection.(PushProtection)
		if !ok {
			return out, nil, fmt.Errorf(
				"unexpected type for protection: got %T, expected PushProtection",
				protection,
			)
		}
		out.Protections[r.ID] = pushProtection
		rOut, rViolations, err := pushProtection.PushVerify(ctx, in)
		if err != nil {
			return out, nil, fmt.Errorf("failed to process push rule in push rule set: %w", err)
		}
		violations = append(violations, rViolations...)
		// Keep the smallest non-zero file size limit across all rules.
		if out.FileSizeLimit == 0 ||
			(rOut.FileSizeLimit > 0 && out.FileSizeLimit > rOut.FileSizeLimit) {
			out.FileSizeLimit = rOut.FileSizeLimit
		}
		// Boolean requirements are OR-ed: any single rule demanding them wins.
		out.PrincipalCommitterMatch = out.PrincipalCommitterMatch || rOut.PrincipalCommitterMatch
		out.SecretScanningEnabled = out.SecretScanningEnabled || rOut.SecretScanningEnabled
	}
	return out, violations, nil
}
// Violations backfills detailed violations for each rule whose protection was
// produced by a preceding PushVerify call, tagging them with rule info.
func (s pushRuleSet) Violations(ctx context.Context, in *PushViolationsInput) (PushViolationsOutput, error) {
	output := PushViolationsOutput{}
	for _, r := range s.rules {
		// PushVerify only stores protections for rules whose repo target
		// matched. Skip the others instead of calling Violations through a
		// nil interface value (which would panic).
		protection, ok := in.Protections[r.ID]
		if !ok {
			continue
		}
		out, err := protection.Violations(ctx, in)
		if err != nil {
			return PushViolationsOutput{}, fmt.Errorf(
				"failed to backfill violations: %w", err,
			)
		}
		output.Violations = append(output.Violations, backFillRule(out.Violations, r.RuleInfo)...)
	}
	return output, nil
}
// UserIDs returns the user IDs referenced by all rules in the set.
func (s pushRuleSet) UserIDs() ([]int64, error) {
	return collectIDs(s.manager, s.rules, Protection.UserIDs)
}
// UserGroupIDs returns the user group IDs referenced by all rules in the set.
func (s pushRuleSet) UserGroupIDs() ([]int64, error) {
	return collectIDs(s.manager, s.rules, Protection.UserGroupIDs)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/protection/pattern_test.go | app/services/protection/pattern_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protection
import (
"errors"
"testing"
)
// TestPattern_Matches verifies branch name matching against Pattern's
// Default/Include/Exclude settings, including exclude-over-include and
// exclude-over-default precedence.
func TestPattern_Matches(t *testing.T) {
	const defBranch = "default"
	tests := []struct {
		name    string
		pattern Pattern
		input   string // branch name to match
		want    bool
	}{
		{
			// a pattern with no settings matches every branch
			name:    "empty-matches-all",
			pattern: Pattern{Default: false, Include: nil, Exclude: nil},
			input:   "blah",
			want:    true,
		},
		{
			name:    "default-matches-default",
			pattern: Pattern{Default: true, Include: nil, Exclude: nil},
			input:   defBranch,
			want:    true,
		},
		{
			name:    "default-mismatches-non-default",
			pattern: Pattern{Default: true, Include: nil, Exclude: nil},
			input:   "non-" + defBranch,
			want:    false,
		},
		{
			name:    "include-matches",
			pattern: Pattern{Default: false, Include: []string{"test*", "dev*"}, Exclude: nil},
			input:   "test123",
			want:    true,
		},
		{
			name:    "include-mismatches",
			pattern: Pattern{Default: false, Include: []string{"test*", "dev*"}, Exclude: nil},
			input:   "marko42",
			want:    false,
		},
		{
			name:    "exclude-matches",
			pattern: Pattern{Default: false, Include: nil, Exclude: []string{"dev*", "pr*"}},
			input:   "blah",
			want:    true,
		},
		{
			name:    "exclude-mismatches",
			pattern: Pattern{Default: false, Include: nil, Exclude: []string{"dev*", "pr*"}},
			input:   "pr_69",
			want:    false,
		},
		{
			// exclusion takes precedence over inclusion
			name: "complex:not-excluded",
			pattern: Pattern{
				Include: []string{"test/**/*"},
				Exclude: []string{"test/release/*"}},
			input: "test/dev/1",
			want:  true,
		},
		{
			name: "complex:excluded",
			pattern: Pattern{
				Include: []string{"test/**/*"},
				Exclude: []string{"test/release/*"}},
			input: "test/release/1",
			want:  false,
		},
		{
			// exclusion takes precedence even over the default-branch flag
			name: "complex:default-excluded",
			pattern: Pattern{
				Default: true,
				Exclude: []string{defBranch}},
			input: defBranch,
			want:  false,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			got := test.pattern.Matches(test.input, defBranch)
			if test.want != got {
				t.Errorf("want=%t got=%t", test.want, got)
			}
		})
	}
}
// TestPattern_Validate verifies Pattern.Validate: empty and malformed
// globstar patterns are rejected, well-formed configurations pass.
func TestPattern_Validate(t *testing.T) {
	tests := []struct {
		name    string
		pattern Pattern
		expect  error // expected sentinel error, nil when valid
	}{
		{
			name:    "empty",
			pattern: Pattern{Default: false, Include: nil, Exclude: nil},
			expect:  nil,
		},
		{
			name:    "default",
			pattern: Pattern{Default: true, Include: nil, Exclude: nil},
			expect:  nil,
		},
		{
			name:    "empty-include-globstar",
			pattern: Pattern{Default: false, Include: []string{""}, Exclude: nil},
			expect:  ErrPatternEmpty,
		},
		{
			name:    "empty-exclude-globstar",
			pattern: Pattern{Default: false, Include: nil, Exclude: []string{""}},
			expect:  ErrPatternEmpty,
		},
		{
			name:    "bad-include-pattern",
			pattern: Pattern{Default: false, Include: []string{"["}, Exclude: nil},
			expect:  ErrInvalidGlobstarPattern,
		},
		{
			name:    "bad-exclude-pattern",
			pattern: Pattern{Default: false, Include: nil, Exclude: []string{"good", "\\"}},
			expect:  ErrInvalidGlobstarPattern,
		},
		{
			name:    "good-pattern",
			pattern: Pattern{Default: true, Include: []string{"test*", "test/**"}, Exclude: []string{"release*"}},
			expect:  nil,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			err := test.pattern.Validate()
			if test.expect == nil && err == nil {
				return
			}
			// sentinel comparison tolerates wrapped errors
			if !errors.Is(err, test.expect) {
				t.Errorf("want=%v got=%v", test.expect, err)
			}
		})
	}
}
// TestPattern_patternMatches verifies the globstar matcher against positive
// and negative inputs for literal, wildcard, double-star, and character-class
// patterns.
func TestPattern_patternMatches(t *testing.T) {
	tests := []struct {
		pattern  string
		positive []string // inputs that must match
		negative []string // inputs that must not match
	}{
		{
			pattern:  "abc",
			positive: []string{"abc"},
			negative: []string{"abcd", "/abc"},
		},
		{
			// "*" does not cross path separators
			pattern:  "*abc",
			positive: []string{"abc", "test-abc"},
			negative: []string{"marko/abc", "abc-test"},
		},
		{
			pattern:  "abc*",
			positive: []string{"abc", "abc-test"},
			negative: []string{"abc/marko", "test-abc"},
		},
		{
			// "**" matches any number of path segments, including none
			pattern:  "**/abc",
			positive: []string{"abc", "test/abc", "some/other/test/abc"},
			negative: []string{"test/x-abc", "test/abc-x"},
		},
		{
			pattern:  "abc/**",
			positive: []string{"abc", "abc/test", "abc/some/other/test"},
			negative: []string{"test/abc", "x-abc/test"},
		},
		{
			// character class matches exactly one character in the range
			pattern:  "abc[d-e]f",
			positive: []string{"abcdf", "abcef"},
			negative: []string{"abcf", "abcdef"},
		},
	}
	for _, test := range tests {
		t.Run(test.pattern, func(t *testing.T) {
			for _, v := range test.positive {
				if ok := patternMatches(test.pattern, v); !ok {
					t.Errorf("pattern=%s positive=%s, got=%t", test.pattern, v, ok)
				}
			}
			for _, v := range test.negative {
				if ok := patternMatches(test.pattern, v); ok {
					t.Errorf("pattern=%s negative=%s, got=%t", test.pattern, v, ok)
				}
			}
		})
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/protection/verify_push.go | app/services/protection/verify_push.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protection
import (
"context"
"github.com/harness/gitness/git"
"github.com/harness/gitness/types"
)
// Violation codes reported by the push protection checks below.
const (
	codePushFileSizeLimit           = "push.file.size.limit"           // a pushed file exceeds the configured size limit
	codePushPrincipalCommitterMatch = "push.principal.committer.match" // committer identity does not match the pushing principal
	codeSecretScanningEnabled       = "push.secret.scanning.enabled"   // secret scanning detected secrets in the push
)
type (
	// PushVerifyInput carries the request context for evaluating which push
	// protections apply before a push is processed.
	PushVerifyInput struct {
		// ResolveUserGroupID expands user group IDs into principal IDs.
		ResolveUserGroupID func(ctx context.Context, userGroupIDs []int64) ([]int64, error)
		Actor              *types.Principal
		IsRepoOwner        bool
		RepoID             int64
		RepoIdentifier     string
	}

	// PushViolationsInput aggregates the results of the push checks (oversize
	// files, committer mismatches, found secrets) together with the protections
	// that produced them, so they can be turned into rule violations.
	PushViolationsInput struct {
		ResolveUserGroupID      func(ctx context.Context, userGroupIDs []int64) ([]int64, error)
		Actor                   *types.Principal
		IsRepoOwner             bool
		Protections             map[int64]PushProtection
		FileSizeLimit           int64
		FindOversizeFilesOutput *git.FindOversizeFilesOutput
		PrincipalCommitterMatch bool
		CommitterMismatchCount  int64
		SecretScanningEnabled   bool
		FoundSecretCount        int
	}

	// PushViolationsOutput is the set of rule violations raised for a push.
	PushViolationsOutput struct {
		Violations []types.RuleViolations
	}

	// PushVerifyOutput describes the effective push protection settings
	// resolved for a push.
	PushVerifyOutput struct {
		FileSizeLimit           int64
		PrincipalCommitterMatch bool
		SecretScanningEnabled   bool
		Protections             map[int64]PushProtection
	}

	// PushVerifier resolves the applicable push protections and converts
	// check results into rule violations.
	PushVerifier interface {
		PushVerify(
			ctx context.Context,
			in PushVerifyInput,
		) (PushVerifyOutput, []types.RuleViolations, error)
		Violations(context.Context, *PushViolationsInput) (PushViolationsOutput, error)
	}

	// DefPush is the JSON-serializable push protection rule definition.
	DefPush struct {
		FileSizeLimit           int64 `json:"file_size_limit"`
		PrincipalCommitterMatch bool  `json:"principal_committer_match"`
		SecretScanningEnabled   bool  `json:"secret_scanning_enabled"`
	}
)
// HasViolations reports whether any of the push checks found something to
// complain about: an oversize file, a committer mismatch, or a secret.
func (in *PushViolationsInput) HasViolations() bool {
	oversizeFound := in.FindOversizeFilesOutput != nil && in.FindOversizeFilesOutput.Total > 0
	return oversizeFound ||
		in.CommitterMismatchCount > 0 ||
		in.FoundSecretCount > 0
}
// PushVerify reports the push protection settings configured on this rule
// definition. It performs no checks itself and never returns violations.
func (v *DefPush) PushVerify(
	_ context.Context,
	_ PushVerifyInput,
) (PushVerifyOutput, []types.RuleViolations, error) {
	out := PushVerifyOutput{
		FileSizeLimit:           v.FileSizeLimit,
		PrincipalCommitterMatch: v.PrincipalCommitterMatch,
		SecretScanningEnabled:   v.SecretScanningEnabled,
	}
	return out, nil, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/protection/rule_tag.go | app/services/protection/rule_tag.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protection
import (
"context"
"fmt"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
// TypeTag is the rule type identifier for tag protection rules.
const TypeTag enum.RuleType = "tag"

// Tag implements protection rules for the rule type TypeTag.
type Tag struct {
	Bypass    DefBypass       `json:"bypass"`    // who may bypass violations of this rule
	Lifecycle DefTagLifecycle `json:"lifecycle"` // tag create/update/delete restrictions
}
var (
	// ensures that the Tag type implements the Definition and TagProtection interfaces.
	_ Definition    = (*Tag)(nil)
	_ TagProtection = (*Tag)(nil)
)
// RefChangeVerify applies the tag lifecycle checks to the given ref change and
// marks each resulting violation with the bypass state of the acting principal.
func (t *Tag) RefChangeVerify(
	ctx context.Context,
	in RefChangeVerifyInput,
) ([]types.RuleViolations, error) {
	// This rule only concerns tag refs; anything else passes without checks.
	if in.RefType != RefTypeTag || len(in.RefNames) == 0 {
		return []types.RuleViolations{}, nil
	}

	ruleViolations, err := t.Lifecycle.RefChangeVerify(ctx, in)
	if err != nil {
		return nil, fmt.Errorf("lifecycle error: %w", err)
	}

	canBypass := t.Bypass.matches(ctx, in.Actor, in.IsRepoOwner, in.ResolveUserGroupID)
	didBypass := in.AllowBypass && canBypass
	for idx := range ruleViolations {
		ruleViolations[idx].Bypassable = canBypass
		ruleViolations[idx].Bypassed = didBypass
	}

	return ruleViolations, nil
}
// UserIDs returns the IDs of the principals allowed to bypass this rule.
func (t *Tag) UserIDs() ([]int64, error) {
	ids := t.Bypass.UserIDs
	return ids, nil
}
// UserGroupIDs returns the IDs of the user groups allowed to bypass this rule.
func (t *Tag) UserGroupIDs() ([]int64, error) {
	groupIDs := t.Bypass.UserGroupIDs
	return groupIDs, nil
}
// Sanitize validates and normalizes the rule definition, delegating to the
// bypass and lifecycle sub-definitions in turn.
func (t *Tag) Sanitize() error {
	err := t.Bypass.Sanitize()
	if err != nil {
		return fmt.Errorf("bypass: %w", err)
	}

	if err = t.Lifecycle.Sanitize(); err != nil {
		return fmt.Errorf("lifecycle: %w", err)
	}

	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/protection/set_tag_test.go | app/services/protection/set_tag_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protection
import (
"context"
"testing"
"github.com/harness/gitness/types"
)
// TestTagRuleSet_SetRefChangeVerify exercises tagRuleSet.RefChangeVerify with a
// table of tag rules and ref-change inputs, asserting the returned violations
// (rule info, bypassed flag, and violation codes) for each case.
func TestTagRuleSet_SetRefChangeVerify(t *testing.T) {
	// Minimal repository stub; the rule set only reads ID and Identifier.
	dummyRepo := &types.RepositoryCore{ID: 1, Identifier: "dummy"}
	tests := []struct {
		name    string
		rules   []types.RuleInfoInternal
		input   RefChangeVerifyInput
		expViol []types.RuleViolations
	}{
		{
			name:  "empty-with-action-create",
			rules: []types.RuleInfoInternal{},
			input: RefChangeVerifyInput{
				Actor:     &types.Principal{ID: 1},
				RefAction: RefActionCreate,
				RefType:   RefTypeTag,
				RefNames:  []string{"feat-a"},
				Repo:      dummyRepo,
			},
			expViol: []types.RuleViolations{},
		},
		{
			name:  "empty-with-action-delete",
			rules: []types.RuleInfoInternal{},
			input: RefChangeVerifyInput{
				Actor:     &types.Principal{ID: 1},
				RefAction: RefActionDelete,
				RefType:   RefTypeTag,
				RefNames:  []string{"feat-a"},
				Repo:      dummyRepo,
			},
			expViol: []types.RuleViolations{},
		},
		{
			name: "create-forbidden-with-pattern-and-matching-ref",
			rules: []types.RuleInfoInternal{
				{
					RuleInfo:   types.RuleInfo{Type: TypeTag},
					Definition: []byte(`{"lifecycle": {"create_forbidden": true}}`),
					Pattern:    []byte(`{"include": ["feat-*"]}`),
					RepoTarget: emptyRepoTarget,
				},
			},
			input: RefChangeVerifyInput{
				Actor:     &types.Principal{ID: 1},
				RefAction: RefActionCreate,
				RefType:   RefTypeTag,
				RefNames:  []string{"feat-a"},
				Repo:      dummyRepo,
			},
			expViol: []types.RuleViolations{
				{
					Rule: types.RuleInfo{Type: "tag"},
					Violations: []types.Violation{
						{Code: codeLifecycleCreate},
					},
				},
			},
		},
		{
			name: "create-forbidden-with-pattern-and-mismatching-ref",
			rules: []types.RuleInfoInternal{
				{
					RuleInfo:   types.RuleInfo{Type: TypeTag},
					Definition: []byte(`{"lifecycle": {"create_forbidden": true}}`),
					Pattern:    []byte(`{"include": ["feat-*"]}`),
					RepoTarget: emptyRepoTarget,
				},
			},
			input: RefChangeVerifyInput{
				Actor:     &types.Principal{ID: 1},
				RefAction: RefActionCreate,
				RefType:   RefTypeTag,
				RefNames:  []string{"dev-a"},
				Repo:      dummyRepo,
			},
			expViol: []types.RuleViolations{},
		},
		{
			name: "delete-forbidden-with-pattern-and-matching-ref",
			rules: []types.RuleInfoInternal{
				{
					RuleInfo:   types.RuleInfo{Type: TypeTag},
					Definition: []byte(`{"lifecycle": {"delete_forbidden": true}}`),
					Pattern:    []byte(`{"include": ["feat-*"]}`),
					RepoTarget: emptyRepoTarget,
				},
			},
			input: RefChangeVerifyInput{
				Actor:     &types.Principal{ID: 1},
				RefAction: RefActionDelete,
				RefType:   RefTypeTag,
				RefNames:  []string{"feat-a"},
				Repo:      dummyRepo,
			},
			expViol: []types.RuleViolations{
				{
					Rule: types.RuleInfo{Type: "tag"},
					Violations: []types.Violation{
						{Code: codeLifecycleDelete},
					},
				},
			},
		},
		{
			name: "delete-forbidden-with-pattern-and-mismatching-ref",
			rules: []types.RuleInfoInternal{
				{
					RuleInfo: types.RuleInfo{
						Type: TypeTag,
					},
					Definition: []byte(`{"lifecycle": {"delete_forbidden": true}}`),
					Pattern:    []byte(`{"include": ["feat-*"]}`),
					RepoTarget: emptyRepoTarget,
				},
			},
			input: RefChangeVerifyInput{
				Actor:     &types.Principal{ID: 1},
				RefAction: RefActionDelete,
				RefType:   RefTypeTag,
				RefNames:  []string{"dev-a"},
				Repo:      dummyRepo,
			},
			expViol: []types.RuleViolations{},
		},
		{
			// Both rules match the ref, so each contributes a violation.
			name: "create-forbidden-with-two-rules-and-pattern-and-matching-ref",
			rules: []types.RuleInfoInternal{
				{
					RuleInfo:   types.RuleInfo{Type: TypeTag},
					Definition: []byte(`{"lifecycle": {"create_forbidden": true}}`),
					Pattern:    []byte(`{"include": ["feat-*"]}`),
					RepoTarget: emptyRepoTarget,
				},
				{
					RuleInfo:   types.RuleInfo{Type: TypeTag},
					Definition: []byte(`{"lifecycle": {"create_forbidden": true}}`),
					Pattern:    []byte(`{"include": ["*-experimental"]}`),
					RepoTarget: emptyRepoTarget,
				},
			},
			input: RefChangeVerifyInput{
				Actor:     &types.Principal{ID: 1},
				RefAction: RefActionCreate,
				RefType:   RefTypeTag,
				RefNames:  []string{"feat-experimental"},
				Repo:      dummyRepo,
			},
			expViol: []types.RuleViolations{
				{
					Rule: types.RuleInfo{Type: "tag"},
					Violations: []types.Violation{
						{Code: codeLifecycleCreate},
					},
				},
				{
					Rule: types.RuleInfo{Type: "tag"},
					Violations: []types.Violation{
						{Code: codeLifecycleCreate},
					},
				},
			},
		},
		{
			// Only the delete_forbidden rule triggers for a delete action.
			name: "delete-forbidden-with-two-rules-and-pattern-and-matching-ref",
			rules: []types.RuleInfoInternal{
				{
					RuleInfo:   types.RuleInfo{Type: TypeTag},
					Definition: []byte(`{"lifecycle": {"create_forbidden": true}}`),
					Pattern:    []byte(`{"include": ["feat-*"]}`),
					RepoTarget: emptyRepoTarget,
				},
				{
					RuleInfo:   types.RuleInfo{Type: TypeTag},
					Definition: []byte(`{"lifecycle": {"delete_forbidden": true}}`),
					Pattern:    []byte(`{"include": ["*-experimental"]}`),
					RepoTarget: emptyRepoTarget,
				},
			},
			input: RefChangeVerifyInput{
				Actor:     &types.Principal{ID: 1},
				RefAction: RefActionDelete,
				RefType:   RefTypeTag,
				RefNames:  []string{"feat-experimental"},
				Repo:      dummyRepo,
			},
			expViol: []types.RuleViolations{
				{
					Rule: types.RuleInfo{Type: "tag"},
					Violations: []types.Violation{
						{Code: codeLifecycleDelete},
					},
				},
			},
		},
	}
	ctx := context.Background()
	// Register the tag rule type so the manager can unmarshal the definitions.
	m := NewManager(nil)
	_ = m.Register(TypeTag, func() Definition {
		return &Tag{}
	})
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			set := tagRuleSet{
				rules:   test.rules,
				manager: m,
			}
			violations, err := set.RefChangeVerify(ctx, test.input)
			if err != nil {
				t.Errorf("got error: %s", err.Error())
			}
			if want, got := len(test.expViol), len(violations); want != got {
				t.Errorf("violations count: want=%d got=%d", want, got)
				return
			}
			// Compare each expected violation field-by-field against the result.
			for i := range test.expViol {
				if want, got := test.expViol[i].Rule, violations[i].Rule; want != got {
					t.Errorf("violation %d rule: want=%+v got=%+v", i, want, got)
				}
				if want, got := test.expViol[i].Bypassed, violations[i].Bypassed; want != got {
					t.Errorf("violation %d bypassed: want=%t got=%t", i, want, got)
				}
				if want, got := len(test.expViol[i].Violations), len(violations[i].Violations); want != got {
					t.Errorf("violation %d violations count: want=%d got=%d", i, want, got)
					continue
				}
				for j := range test.expViol[i].Violations {
					if want, got := test.expViol[i].Violations[j].Code, violations[i].Violations[j].Code; want != got {
						t.Errorf("violation %d violation %d code: want=%s got=%s", i, j, want, got)
					}
				}
			}
		})
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/protection/verify_pullreq_test.go | app/services/protection/verify_pullreq_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protection
import (
"context"
"reflect"
"testing"
"github.com/harness/gitness/app/services/codeowners"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"golang.org/x/exp/slices"
)
// Shared reviewer fixtures used across the merge-verify test cases below.
var (
	reviewer1 = types.PrincipalInfo{ID: 1, DisplayName: "Reviewer 1", UID: "reviewer-1"}
	reviewer2 = types.PrincipalInfo{ID: 2, DisplayName: "Reviewer 2", UID: "reviewer-2"}
	reviewer3 = types.PrincipalInfo{ID: 3, DisplayName: "Reviewer 3", UID: "reviewer-3"}
)
// TestDefPullReq_MergeVerify exercises DefPullReq.MergeVerify across a table of
// rule definitions and merge inputs, asserting the violation codes, violation
// params, and the MergeVerifyOutput produced for each case.
// nolint:gocognit // it's a unit test
func TestDefPullReq_MergeVerify(t *testing.T) {
	tests := []struct {
		name      string
		def       DefPullReq
		in        MergeVerifyInput
		expCodes  []string
		expParams [][]any
		expOut    MergeVerifyOutput
	}{
		{
			name: "empty-with-merge-method",
			in: MergeVerifyInput{
				Method: enum.MergeMethodMerge,
			},
			expOut: MergeVerifyOutput{
				DeleteSourceBranch: false,
				AllowedMethods:     enum.MergeMethods,
			},
		},
		{
			name: "empty-no-merge-method-specified",
			in:   MergeVerifyInput{},
			expOut: MergeVerifyOutput{
				AllowedMethods:     enum.MergeMethods,
				DeleteSourceBranch: false,
			},
		},
		{
			name: codePullReqApprovalReqMinCount + "-fail",
			def:  DefPullReq{Approvals: DefApprovals{RequireMinimumCount: 1}},
			in: MergeVerifyInput{
				PullReq: &types.PullReq{UnresolvedCount: 0, SourceSHA: "abc"},
				Reviewers: []*types.PullReqReviewer{
					{ReviewDecision: enum.PullReqReviewDecisionChangeReq, SHA: "abc", Reviewer: reviewer1},
				},
				Method: enum.MergeMethodMerge,
			},
			expCodes:  []string{codePullReqApprovalReqMinCount},
			expParams: [][]any{{0, 1}},
			expOut: MergeVerifyOutput{
				AllowedMethods:                enum.MergeMethods,
				MinimumRequiredApprovalsCount: 1,
			},
		},
		{
			name: codePullReqApprovalReqMinCount + "-success",
			def:  DefPullReq{Approvals: DefApprovals{RequireMinimumCount: 2}},
			in: MergeVerifyInput{
				PullReq: &types.PullReq{UnresolvedCount: 0, SourceSHA: "abc"},
				Reviewers: []*types.PullReqReviewer{
					{ReviewDecision: enum.PullReqReviewDecisionApproved, SHA: "abc", Reviewer: reviewer1},
					{ReviewDecision: enum.PullReqReviewDecisionApproved, SHA: "abc", Reviewer: reviewer2},
				},
				Method: enum.MergeMethodMerge,
			},
			expOut: MergeVerifyOutput{
				AllowedMethods:                enum.MergeMethods,
				MinimumRequiredApprovalsCount: 2,
			},
		},
		{
			name: codePullReqApprovalReqLatestCommit + "-fail",
			def:  DefPullReq{Approvals: DefApprovals{RequireMinimumCount: 2, RequireLatestCommit: true}},
			in: MergeVerifyInput{
				PullReq: &types.PullReq{UnresolvedCount: 0, SourceSHA: "abc"},
				Reviewers: []*types.PullReqReviewer{
					{ReviewDecision: enum.PullReqReviewDecisionApproved, SHA: "abc"},
					{ReviewDecision: enum.PullReqReviewDecisionApproved, SHA: "abd"},
				},
				Method: enum.MergeMethodMerge,
			},
			expCodes:  []string{codePullReqApprovalReqMinCountLatest},
			expParams: [][]any{{1, 2}},
			expOut: MergeVerifyOutput{
				AllowedMethods:                      enum.MergeMethods,
				MinimumRequiredApprovalsCountLatest: 2,
			},
		},
		{
			name: codePullReqApprovalReqLatestCommit + "-success",
			def:  DefPullReq{Approvals: DefApprovals{RequireMinimumCount: 2, RequireLatestCommit: true}},
			in: MergeVerifyInput{
				PullReq: &types.PullReq{UnresolvedCount: 0, SourceSHA: "abc"},
				Reviewers: []*types.PullReqReviewer{
					{ReviewDecision: enum.PullReqReviewDecisionPending, SHA: "abc", Reviewer: reviewer1},
					{ReviewDecision: enum.PullReqReviewDecisionApproved, SHA: "abc", Reviewer: reviewer2},
					{ReviewDecision: enum.PullReqReviewDecisionApproved, SHA: "abc", Reviewer: reviewer3},
				},
				Method: enum.MergeMethodMerge,
			},
			expOut: MergeVerifyOutput{
				AllowedMethods:                      enum.MergeMethods,
				MinimumRequiredApprovalsCountLatest: 2,
			},
		},
		{
			name: codePullReqApprovalReqDefaultReviewerMinCount + "-fail",
			def: DefPullReq{
				Approvals: DefApprovals{RequireMinimumDefaultReviewerCount: 1},
				Reviewers: DefReviewers{DefaultReviewerIDs: []int64{reviewer1.ID}},
			},
			in: MergeVerifyInput{
				PullReq: &types.PullReq{UnresolvedCount: 0, SourceSHA: "abc"},
				Reviewers: []*types.PullReqReviewer{
					{ReviewDecision: enum.PullReqReviewDecisionChangeReq, SHA: "abc", Reviewer: reviewer1},
					{ReviewDecision: enum.PullReqReviewDecisionApproved, SHA: "abc", Reviewer: reviewer2},
				},
				Method: enum.MergeMethodMerge,
			},
			expCodes:  []string{codePullReqApprovalReqDefaultReviewerMinCount},
			expParams: [][]any{{0, 1}},
			expOut: MergeVerifyOutput{
				AllowedMethods: enum.MergeMethods,
				DefaultReviewerApprovals: []*types.DefaultReviewerApprovalsResponse{{
					PrincipalIDs:         []int64{reviewer1.ID},
					CurrentCount:         0,
					MinimumRequiredCount: 1,
					Evaluations: []*types.ReviewerEvaluation{{
						Reviewer: reviewer1,
						SHA:      "abc",
						Decision: enum.PullReqReviewDecisionChangeReq,
					}},
				}},
			},
		},
		{
			name: codePullReqApprovalReqDefaultReviewerMinCount + "-success",
			def: DefPullReq{
				Approvals: DefApprovals{RequireMinimumDefaultReviewerCount: 1},
				Reviewers: DefReviewers{DefaultReviewerIDs: []int64{reviewer1.ID}},
			},
			in: MergeVerifyInput{
				PullReq: &types.PullReq{UnresolvedCount: 0, SourceSHA: "abc"},
				Reviewers: []*types.PullReqReviewer{
					{ReviewDecision: enum.PullReqReviewDecisionApproved, SHA: "abc", Reviewer: reviewer1},
					{ReviewDecision: enum.PullReqReviewDecisionChangeReq, SHA: "abc", Reviewer: reviewer2},
				},
				Method: enum.MergeMethodMerge,
			},
			expOut: MergeVerifyOutput{
				AllowedMethods: enum.MergeMethods,
				DefaultReviewerApprovals: []*types.DefaultReviewerApprovalsResponse{{
					PrincipalIDs:         []int64{reviewer1.ID},
					CurrentCount:         1,
					MinimumRequiredCount: 1,
					Evaluations: []*types.ReviewerEvaluation{{
						Reviewer: reviewer1,
						SHA:      "abc",
						Decision: enum.PullReqReviewDecisionApproved,
					}},
				}},
			},
		},
		{
			// The PR author is the only default reviewer, so the requirement is dropped.
			name: codePullReqApprovalReqDefaultReviewerMinCount + "-with-author-count-1-exact",
			def: DefPullReq{
				Approvals: DefApprovals{RequireMinimumDefaultReviewerCount: 1},
				Reviewers: DefReviewers{DefaultReviewerIDs: []int64{reviewer1.ID}},
			},
			in: MergeVerifyInput{
				PullReq:   &types.PullReq{UnresolvedCount: 0, SourceSHA: "abc", Author: reviewer1},
				Reviewers: nil,
				Method:    enum.MergeMethodMerge,
			},
			expOut: MergeVerifyOutput{
				AllowedMethods:           enum.MergeMethods,
				DefaultReviewerApprovals: nil,
			},
		},
		{
			name: codePullReqApprovalReqDefaultReviewerMinCount + "-with-author-count-1-more-fail",
			def: DefPullReq{
				Approvals: DefApprovals{RequireMinimumDefaultReviewerCount: 1},
				Reviewers: DefReviewers{DefaultReviewerIDs: []int64{reviewer1.ID, reviewer2.ID}},
			},
			in: MergeVerifyInput{
				PullReq:   &types.PullReq{UnresolvedCount: 0, SourceSHA: "abc", Author: reviewer1},
				Reviewers: nil,
				Method:    enum.MergeMethodMerge,
			},
			expCodes:  []string{codePullReqApprovalReqDefaultReviewerMinCount},
			expParams: [][]any{{0, 1}},
			expOut: MergeVerifyOutput{
				AllowedMethods: enum.MergeMethods,
				DefaultReviewerApprovals: []*types.DefaultReviewerApprovalsResponse{{
					PrincipalIDs:         []int64{reviewer2.ID},
					CurrentCount:         0,
					MinimumRequiredCount: 1,
				}},
			},
		},
		{
			name: codePullReqApprovalReqDefaultReviewerMinCount + "-with-author-count-1-more-success",
			def: DefPullReq{
				Approvals: DefApprovals{RequireMinimumDefaultReviewerCount: 1},
				Reviewers: DefReviewers{DefaultReviewerIDs: []int64{reviewer1.ID, reviewer2.ID}},
			},
			in: MergeVerifyInput{
				PullReq: &types.PullReq{UnresolvedCount: 0, SourceSHA: "abc", Author: reviewer1},
				Reviewers: []*types.PullReqReviewer{
					{ReviewDecision: enum.PullReqReviewDecisionApproved, SHA: "abc", Reviewer: reviewer2},
				},
				Method: enum.MergeMethodMerge,
			},
			expOut: MergeVerifyOutput{
				AllowedMethods: enum.MergeMethods,
				DefaultReviewerApprovals: []*types.DefaultReviewerApprovalsResponse{{
					PrincipalIDs:         []int64{reviewer2.ID},
					CurrentCount:         1,
					MinimumRequiredCount: 1,
					Evaluations: []*types.ReviewerEvaluation{{
						Reviewer: reviewer2,
						SHA:      "abc",
						Decision: enum.PullReqReviewDecisionApproved,
					}},
				}},
			},
		},
		{
			// Required count exceeds the available (non-author) default reviewers,
			// so it is capped at the remaining reviewer count.
			name: codePullReqApprovalReqDefaultReviewerMinCount + "-with-author-count-2-exact-fail",
			def: DefPullReq{
				Approvals: DefApprovals{RequireMinimumDefaultReviewerCount: 2},
				Reviewers: DefReviewers{DefaultReviewerIDs: []int64{reviewer1.ID, reviewer2.ID}},
			},
			in: MergeVerifyInput{
				PullReq:   &types.PullReq{UnresolvedCount: 0, SourceSHA: "abc", Author: reviewer1},
				Reviewers: []*types.PullReqReviewer{},
				Method:    enum.MergeMethodMerge,
			},
			expCodes:  []string{codePullReqApprovalReqDefaultReviewerMinCount},
			expParams: [][]any{{0, 1}},
			expOut: MergeVerifyOutput{
				AllowedMethods: enum.MergeMethods,
				DefaultReviewerApprovals: []*types.DefaultReviewerApprovalsResponse{{
					PrincipalIDs:         []int64{reviewer2.ID},
					CurrentCount:         0,
					MinimumRequiredCount: 1,
				}},
			},
		},
		{
			name: codePullReqApprovalReqDefaultReviewerMinCount + "-with-author-count-2-exact-success",
			def: DefPullReq{
				Approvals: DefApprovals{RequireMinimumDefaultReviewerCount: 2},
				Reviewers: DefReviewers{DefaultReviewerIDs: []int64{reviewer1.ID, reviewer2.ID}},
			},
			in: MergeVerifyInput{
				PullReq: &types.PullReq{UnresolvedCount: 0, SourceSHA: "abc", Author: reviewer1},
				Reviewers: []*types.PullReqReviewer{
					{ReviewDecision: enum.PullReqReviewDecisionApproved, SHA: "abc", Reviewer: reviewer2},
				},
				Method: enum.MergeMethodMerge,
			},
			expOut: MergeVerifyOutput{
				AllowedMethods: enum.MergeMethods,
				DefaultReviewerApprovals: []*types.DefaultReviewerApprovalsResponse{{
					PrincipalIDs:         []int64{reviewer2.ID},
					CurrentCount:         1,
					MinimumRequiredCount: 1,
					Evaluations: []*types.ReviewerEvaluation{{
						Reviewer: reviewer2,
						SHA:      "abc",
						Decision: enum.PullReqReviewDecisionApproved,
					}},
				}},
			},
		},
		{
			name: codePullReqApprovalReqDefaultReviewerMinCount + "-with-author-count-2-more-fail",
			def: DefPullReq{
				Approvals: DefApprovals{RequireMinimumDefaultReviewerCount: 2},
				Reviewers: DefReviewers{DefaultReviewerIDs: []int64{reviewer1.ID, reviewer2.ID, reviewer3.ID}},
			},
			in: MergeVerifyInput{
				PullReq: &types.PullReq{UnresolvedCount: 0, SourceSHA: "abc", Author: reviewer1},
				Reviewers: []*types.PullReqReviewer{
					{ReviewDecision: enum.PullReqReviewDecisionApproved, SHA: "abc", Reviewer: reviewer2},
					{ReviewDecision: enum.PullReqReviewDecisionChangeReq, SHA: "abc", Reviewer: reviewer3},
				},
				Method: enum.MergeMethodMerge,
			},
			expCodes:  []string{codePullReqApprovalReqDefaultReviewerMinCount},
			expParams: [][]any{{1, 2}},
			expOut: MergeVerifyOutput{
				AllowedMethods: enum.MergeMethods,
				DefaultReviewerApprovals: []*types.DefaultReviewerApprovalsResponse{{
					PrincipalIDs:         []int64{reviewer2.ID, reviewer3.ID},
					CurrentCount:         1,
					MinimumRequiredCount: 2,
					Evaluations: []*types.ReviewerEvaluation{{
						Reviewer: reviewer2,
						SHA:      "abc",
						Decision: enum.PullReqReviewDecisionApproved,
					}, {
						Reviewer: reviewer3,
						SHA:      "abc",
						Decision: enum.PullReqReviewDecisionChangeReq,
					}},
				}},
			},
		},
		{
			name: codePullReqApprovalReqDefaultReviewerMinCount + "-with-author-count-2-more-success",
			def: DefPullReq{
				Approvals: DefApprovals{RequireMinimumDefaultReviewerCount: 2},
				Reviewers: DefReviewers{DefaultReviewerIDs: []int64{reviewer1.ID, reviewer2.ID, reviewer3.ID}},
			},
			in: MergeVerifyInput{
				PullReq: &types.PullReq{UnresolvedCount: 0, SourceSHA: "abc", Author: reviewer1},
				Reviewers: []*types.PullReqReviewer{
					{ReviewDecision: enum.PullReqReviewDecisionApproved, SHA: "abc", Reviewer: reviewer2},
					{ReviewDecision: enum.PullReqReviewDecisionApproved, SHA: "abc", Reviewer: reviewer3},
				},
				Method: enum.MergeMethodMerge,
			},
			expOut: MergeVerifyOutput{
				AllowedMethods: enum.MergeMethods,
				DefaultReviewerApprovals: []*types.DefaultReviewerApprovalsResponse{{
					PrincipalIDs:         []int64{reviewer2.ID, reviewer3.ID},
					CurrentCount:         2,
					MinimumRequiredCount: 2,
					Evaluations: []*types.ReviewerEvaluation{{
						Reviewer: reviewer2,
						SHA:      "abc",
						Decision: enum.PullReqReviewDecisionApproved,
					}, {
						Reviewer: reviewer3,
						SHA:      "abc",
						Decision: enum.PullReqReviewDecisionApproved,
					}},
				}},
			},
		},
		{
			// Approval exists but is for an older commit while RequireLatestCommit is set.
			name: codePullReqApprovalReqDefaultReviewerMinCountLatest + "-fail",
			def: DefPullReq{
				Approvals: DefApprovals{RequireMinimumDefaultReviewerCount: 1, RequireLatestCommit: true},
				Reviewers: DefReviewers{DefaultReviewerIDs: []int64{reviewer1.ID}},
			},
			in: MergeVerifyInput{
				PullReq: &types.PullReq{UnresolvedCount: 0, SourceSHA: "abc"},
				Reviewers: []*types.PullReqReviewer{
					{ReviewDecision: enum.PullReqReviewDecisionApproved, SHA: "def", Reviewer: reviewer1},
				},
				Method: enum.MergeMethodMerge,
			},
			expCodes:  []string{codePullReqApprovalReqDefaultReviewerMinCountLatest},
			expParams: [][]any{{0, 1}},
			expOut: MergeVerifyOutput{
				AllowedMethods: enum.MergeMethods,
				DefaultReviewerApprovals: []*types.DefaultReviewerApprovalsResponse{{
					PrincipalIDs:               []int64{reviewer1.ID},
					CurrentCount:               0,
					MinimumRequiredCountLatest: 1,
					Evaluations: []*types.ReviewerEvaluation{{
						Reviewer: reviewer1,
						SHA:      "def",
						Decision: enum.PullReqReviewDecisionApproved,
					}},
				}},
			},
		},
		{
			name: codePullReqApprovalReqDefaultReviewerMinCountLatest + "-success",
			def: DefPullReq{
				Approvals: DefApprovals{RequireMinimumDefaultReviewerCount: 1, RequireLatestCommit: true},
				Reviewers: DefReviewers{DefaultReviewerIDs: []int64{reviewer1.ID}},
			},
			in: MergeVerifyInput{
				PullReq: &types.PullReq{UnresolvedCount: 0, SourceSHA: "abc"},
				Reviewers: []*types.PullReqReviewer{
					{ReviewDecision: enum.PullReqReviewDecisionApproved, SHA: "abc", Reviewer: reviewer1},
				},
				Method: enum.MergeMethodMerge,
			},
			expOut: MergeVerifyOutput{
				AllowedMethods: enum.MergeMethods,
				DefaultReviewerApprovals: []*types.DefaultReviewerApprovalsResponse{{
					PrincipalIDs:               []int64{reviewer1.ID},
					CurrentCount:               1,
					MinimumRequiredCountLatest: 1,
					Evaluations: []*types.ReviewerEvaluation{{
						Reviewer: reviewer1,
						SHA:      "abc",
						Decision: enum.PullReqReviewDecisionApproved,
					}},
				}},
			},
		},
		{
			name: codePullReqApprovalReqCodeOwnersNoApproval + "-fail",
			def:  DefPullReq{Approvals: DefApprovals{RequireCodeOwners: true}},
			in: MergeVerifyInput{
				PullReq: &types.PullReq{UnresolvedCount: 0, SourceSHA: "abc"},
				CodeOwners: &codeowners.Evaluation{
					EvaluationEntries: []codeowners.EvaluationEntry{
						{
							Pattern: "app",
							UserEvaluations: []codeowners.UserEvaluation{
								{ReviewDecision: enum.PullReqReviewDecisionPending, ReviewSHA: "abc"},
							},
						},
						{
							Pattern: "doc",
							UserEvaluations: []codeowners.UserEvaluation{
								{ReviewDecision: enum.PullReqReviewDecisionApproved, ReviewSHA: "abc"},
							},
						},
						{
							Pattern:         "data",
							UserEvaluations: []codeowners.UserEvaluation{},
						},
					},
					FileSha: "xyz",
				},
				Method: enum.MergeMethodMerge,
			},
			expCodes: []string{
				codePullReqApprovalReqCodeOwnersNoApproval,
				codePullReqApprovalReqCodeOwnersNoApproval,
			},
			expParams: [][]any{{"app"}, {"data"}},
			expOut: MergeVerifyOutput{
				AllowedMethods:             enum.MergeMethods,
				RequiresCodeOwnersApproval: true,
			},
		},
		{
			name: codePullReqApprovalReqCodeOwnersNoApproval + "-success",
			def:  DefPullReq{Approvals: DefApprovals{RequireCodeOwners: true}},
			in: MergeVerifyInput{
				PullReq: &types.PullReq{UnresolvedCount: 0, SourceSHA: "abc"},
				CodeOwners: &codeowners.Evaluation{
					EvaluationEntries: []codeowners.EvaluationEntry{
						{
							Pattern: "app",
							UserEvaluations: []codeowners.UserEvaluation{
								{ReviewDecision: enum.PullReqReviewDecisionApproved, ReviewSHA: "abc"},
							},
						},
						{
							Pattern: "doc",
							UserEvaluations: []codeowners.UserEvaluation{
								{ReviewDecision: enum.PullReqReviewDecisionApproved, ReviewSHA: "abc"},
							},
						},
					},
					FileSha: "xyz",
				},
				Method: enum.MergeMethodMerge,
			},
			expOut: MergeVerifyOutput{
				AllowedMethods:             enum.MergeMethods,
				RequiresCodeOwnersApproval: true,
			},
		},
		{
			name: codePullReqApprovalReqCodeOwnersChangeRequested + "-fail",
			def:  DefPullReq{Approvals: DefApprovals{RequireCodeOwners: true}},
			in: MergeVerifyInput{
				PullReq: &types.PullReq{UnresolvedCount: 0, SourceSHA: "abc"},
				CodeOwners: &codeowners.Evaluation{
					EvaluationEntries: []codeowners.EvaluationEntry{
						{
							Pattern: "app",
							UserEvaluations: []codeowners.UserEvaluation{
								{ReviewDecision: enum.PullReqReviewDecisionApproved, ReviewSHA: "abc"},
								{ReviewDecision: enum.PullReqReviewDecisionChangeReq, ReviewSHA: "abc"},
								{ReviewDecision: enum.PullReqReviewDecisionPending, ReviewSHA: "abc"},
							},
						},
						{
							Pattern: "data",
							UserEvaluations: []codeowners.UserEvaluation{
								{ReviewDecision: enum.PullReqReviewDecisionApproved, ReviewSHA: "abc"},
							},
						},
					},
					FileSha: "xyz",
				},
				Method: enum.MergeMethodMerge,
			},
			expCodes:  []string{codePullReqApprovalReqCodeOwnersChangeRequested},
			expParams: [][]any{{"app"}},
			expOut: MergeVerifyOutput{
				AllowedMethods:             enum.MergeMethods,
				RequiresCodeOwnersApproval: true,
			},
		},
		{
			name: codePullReqApprovalReqCodeOwnersNoLatestApproval + "-fail",
			def:  DefPullReq{Approvals: DefApprovals{RequireCodeOwners: true, RequireLatestCommit: true}},
			in: MergeVerifyInput{
				PullReq: &types.PullReq{UnresolvedCount: 0, SourceSHA: "abc"},
				CodeOwners: &codeowners.Evaluation{
					EvaluationEntries: []codeowners.EvaluationEntry{
						{
							Pattern: "data",
							UserEvaluations: []codeowners.UserEvaluation{
								{ReviewDecision: enum.PullReqReviewDecisionApproved, ReviewSHA: "old"},
							},
						},
						{
							Pattern: "app",
							UserEvaluations: []codeowners.UserEvaluation{
								{ReviewDecision: enum.PullReqReviewDecisionApproved, ReviewSHA: "abc"},
								{ReviewDecision: enum.PullReqReviewDecisionApproved, ReviewSHA: "old"},
							},
						},
					},
					FileSha: "xyz",
				},
				Method: enum.MergeMethodMerge,
			},
			expCodes:  []string{codePullReqApprovalReqCodeOwnersNoLatestApproval},
			expParams: [][]any{{"data"}},
			expOut: MergeVerifyOutput{
				AllowedMethods:                   enum.MergeMethods,
				RequiresCodeOwnersApprovalLatest: true,
			},
		},
		{
			name: codePullReqCommentsReqResolveAll + "-fail",
			def:  DefPullReq{Comments: DefComments{RequireResolveAll: true}},
			in: MergeVerifyInput{
				PullReq: &types.PullReq{UnresolvedCount: 6},
				Method:  enum.MergeMethodMerge,
			},
			expCodes:  []string{"pullreq.comments.require_resolve_all"},
			expParams: [][]any{{6}},
			expOut: MergeVerifyOutput{
				AllowedMethods:            enum.MergeMethods,
				RequiresCommentResolution: true,
			},
		},
		{
			name: codePullReqCommentsReqResolveAll + "-success",
			def:  DefPullReq{Comments: DefComments{RequireResolveAll: true}},
			in: MergeVerifyInput{
				PullReq: &types.PullReq{UnresolvedCount: 0},
				Method:  enum.MergeMethodMerge,
			},
			expOut: MergeVerifyOutput{
				AllowedMethods:            enum.MergeMethods,
				RequiresCommentResolution: true,
			},
		},
		{
			name: codePullReqStatusChecksReqIdentifiers + "-fail",
			def:  DefPullReq{StatusChecks: DefStatusChecks{RequireIdentifiers: []string{"check1"}}},
			in: MergeVerifyInput{
				CheckResults: []types.CheckResult{
					{Identifier: "check1", Status: enum.CheckStatusFailure},
					{Identifier: "check2", Status: enum.CheckStatusSuccess},
				},
				Method: enum.MergeMethodMerge,
			},
			expCodes:  []string{codePullReqStatusChecksReqIdentifiers},
			expParams: [][]any{{"check1"}},
			expOut: MergeVerifyOutput{
				AllowedMethods: enum.MergeMethods,
			},
		},
		{
			name: codePullReqStatusChecksReqIdentifiers + "-missing",
			def:  DefPullReq{StatusChecks: DefStatusChecks{RequireIdentifiers: []string{"check1"}}},
			in: MergeVerifyInput{
				CheckResults: []types.CheckResult{
					{Identifier: "check2", Status: enum.CheckStatusSuccess},
				},
				Method: enum.MergeMethodMerge,
			},
			expCodes:  []string{codePullReqStatusChecksReqIdentifiers},
			expParams: [][]any{{"check1"}},
			expOut: MergeVerifyOutput{
				AllowedMethods: enum.MergeMethods,
			},
		},
		{
			name: codePullReqStatusChecksReqIdentifiers + "-success",
			def:  DefPullReq{StatusChecks: DefStatusChecks{RequireIdentifiers: []string{"check1"}}},
			in: MergeVerifyInput{
				CheckResults: []types.CheckResult{
					{Identifier: "check1", Status: enum.CheckStatusSuccess},
					{Identifier: "check2", Status: enum.CheckStatusFailure},
				},
				Method: enum.MergeMethodMerge,
			},
			expOut: MergeVerifyOutput{
				AllowedMethods: enum.MergeMethods,
			},
		},
		{
			name: codePullReqMergeStrategiesAllowed + "-fail",
			def: DefPullReq{Merge: DefMerge{StrategiesAllowed: []enum.MergeMethod{
				enum.MergeMethodRebase,
				enum.MergeMethodSquash,
			}}},
			in: MergeVerifyInput{
				Method: enum.MergeMethodMerge,
			},
			expCodes: []string{codePullReqMergeStrategiesAllowed},
			expParams: [][]any{{
				enum.MergeMethodMerge,
				[]enum.MergeMethod{
					enum.MergeMethodRebase,
					enum.MergeMethodSquash,
				}},
			},
			expOut: MergeVerifyOutput{
				AllowedMethods: []enum.MergeMethod{enum.MergeMethodRebase, enum.MergeMethodSquash},
			},
		},
		{
			name: codePullReqMergeStrategiesAllowed + "-success",
			def: DefPullReq{Merge: DefMerge{StrategiesAllowed: []enum.MergeMethod{
				enum.MergeMethodRebase,
				enum.MergeMethodSquash,
			}}},
			in: MergeVerifyInput{
				Method: enum.MergeMethodSquash,
			},
			expOut: MergeVerifyOutput{
				AllowedMethods: []enum.MergeMethod{enum.MergeMethodRebase, enum.MergeMethodSquash},
			},
		},
		{
			name: codePullReqMergeDeleteBranch,
			def:  DefPullReq{Merge: DefMerge{DeleteBranch: true}},
			in: MergeVerifyInput{
				Method: enum.MergeMethodMerge,
			},
			expOut: MergeVerifyOutput{
				AllowedMethods:     enum.MergeMethods,
				DeleteSourceBranch: true,
			},
		},
		{
			name: codePullReqApprovalReqChangeRequested + "-true",
			def: DefPullReq{
				Approvals: DefApprovals{RequireNoChangeRequest: true},
			},
			in: MergeVerifyInput{
				PullReq: &types.PullReq{SourceSHA: "abc"},
				Method:  enum.MergeMethodMerge,
			},
			expOut: MergeVerifyOutput{
				AllowedMethods:           enum.MergeMethods,
				RequiresNoChangeRequests: true,
			},
		},
		{
			name: codePullReqApprovalReqChangeRequested + "-false",
			def: DefPullReq{
				Approvals: DefApprovals{RequireNoChangeRequest: false},
			},
			in: MergeVerifyInput{
				PullReq: &types.PullReq{SourceSHA: "abc"},
				Reviewers: []*types.PullReqReviewer{
					{ReviewDecision: enum.PullReqReviewDecisionChangeReq, SHA: "abc", Reviewer: reviewer1},
				},
				Method: enum.MergeMethodMerge,
			},
			expOut: MergeVerifyOutput{
				AllowedMethods: enum.MergeMethods,
			},
		},
		{
			name: codePullReqApprovalReqChangeRequested + "-sameSHA",
			def: DefPullReq{
				Approvals: DefApprovals{RequireNoChangeRequest: true},
			},
			in: MergeVerifyInput{
				PullReq: &types.PullReq{SourceSHA: "abc"},
				Reviewers: []*types.PullReqReviewer{
					{
						ReviewDecision: enum.PullReqReviewDecisionChangeReq,
						Reviewer:       reviewer1,
						SHA:            "abc",
					},
				},
				Method: enum.MergeMethodMerge,
			},
			expCodes:  []string{codePullReqApprovalReqChangeRequested},
			expParams: [][]any{{reviewer1.DisplayName}},
			expOut: MergeVerifyOutput{
				AllowedMethods:           enum.MergeMethods,
				RequiresNoChangeRequests: true,
			},
		},
		{
			name: codePullReqApprovalReqChangeRequested + "-diffSHA",
			def: DefPullReq{
				Approvals: DefApprovals{RequireNoChangeRequest: true},
			},
			in: MergeVerifyInput{
				PullReq: &types.PullReq{SourceSHA: "abc"},
				Reviewers: []*types.PullReqReviewer{
					{
						ReviewDecision: enum.PullReqReviewDecisionChangeReq,
						Reviewer:       reviewer1,
						SHA:            "def",
					},
				},
				Method: enum.MergeMethodMerge,
			},
			expCodes:  []string{codePullReqApprovalReqChangeRequestedOldSHA},
			expParams: [][]any{{reviewer1.DisplayName}},
			expOut: MergeVerifyOutput{
				AllowedMethods:           enum.MergeMethods,
				RequiresNoChangeRequests: true,
			},
		},
		{
			name: codePullReqMergeBlock,
			def: DefPullReq{
				Merge: DefMerge{
					Block: true,
				},
			},
			in: MergeVerifyInput{
				Method: enum.MergeMethodMerge,
				PullReq: &types.PullReq{
					TargetBranch: "abc",
				},
			},
			expCodes:  []string{codePullReqMergeBlock},
			expParams: [][]any{{"abc"}},
			expOut: MergeVerifyOutput{
				AllowedMethods: enum.MergeMethods,
			},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			if err := test.def.Sanitize(); err != nil {
				t.Errorf("def invalid: %s", err.Error())
				return
			}
			out, violations, err := test.def.MergeVerify(context.Background(), test.in)
			if err != nil {
				t.Errorf("got an error: %s", err.Error())
				return
			}
			// Normalize the evaluation order so reflect.DeepEqual is deterministic.
			sortEvaluations(out.DefaultReviewerApprovals)
			if want, got := test.expOut, out; !reflect.DeepEqual(want, got) {
				t.Errorf("output mismatch: want=%+v got=%+v", want, got)
			}
			inspectBranchViolations(t, test.expCodes, test.expParams, violations)
		})
	}
}
// sortEvaluations sorts the evaluations in DefaultReviewerApprovals by reviewer ID
// for consistent comparison.
func sortEvaluations(approvals []*types.DefaultReviewerApprovalsResponse) {
	// Ranging over a nil slice is a no-op, and SortFunc on a nil slice is a
	// no-op too, so no explicit nil guards are needed for those.
	for _, approval := range approvals {
		if approval == nil {
			continue
		}
		slices.SortFunc(approval.Evaluations, func(a, b *types.ReviewerEvaluation) int {
			// Compare explicitly instead of returning int(a.ID - b.ID):
			// the subtraction can overflow and the conversion to int can
			// truncate on 32-bit platforms, yielding a wrong sort order.
			switch {
			case a.Reviewer.ID < b.Reviewer.ID:
				return -1
			case a.Reviewer.ID > b.Reviewer.ID:
				return 1
			default:
				return 0
			}
		})
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/protection/set_tag.go | app/services/protection/set_tag.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protection
import (
"context"
"fmt"
"github.com/harness/gitness/types"
)
// tagRuleSet is the Protection implementation that applies a set of
// tag protection rules to a repository.
type tagRuleSet struct {
	rules   []types.RuleInfoInternal // the rules to apply, in evaluation order
	manager *Manager                 // used to deserialize rule definitions
}
var _ Protection = tagRuleSet{} // ensure that ruleSet implements the Protection interface.
// RefChangeVerify runs ref change verification for every tag rule that
// matches the repository and the changed refs, and returns the accumulated
// rule violations.
func (s tagRuleSet) RefChangeVerify(ctx context.Context, in RefChangeVerifyInput) ([]types.RuleViolations, error) {
	var violations []types.RuleViolations
	if err := forEachRuleMatchRefs(
		s.manager,
		s.rules,
		in.Repo.ID,
		in.Repo.Identifier,
		"", // no default branch applies to tag refs
		in.RefNames,
		refChangeVerifyFunc(ctx, in, &violations),
	); err != nil {
		return nil, fmt.Errorf("failed to process each rule in ruleSet: %w", err)
	}
	return violations, nil
}
// UserIDs returns the user IDs collected from all rules in the set.
func (s tagRuleSet) UserIDs() ([]int64, error) {
	return collectIDs(s.manager, s.rules, Protection.UserIDs)
}
// UserGroupIDs returns the user group IDs collected from all rules in the set.
func (s tagRuleSet) UserGroupIDs() ([]int64, error) {
	return collectIDs(s.manager, s.rules, Protection.UserGroupIDs)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/protection/repo_target.go | app/services/protection/repo_target.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protection
import (
"encoding/json"
"fmt"
"slices"
)
// RepoTargetFilter selects repositories either by their numeric IDs or by
// identifier patterns.
type RepoTargetFilter struct {
	IDs      []int64  `json:"ids,omitempty"`
	Patterns []string `json:"patterns,omitempty"`
}
// RepoTarget describes which repositories a rule applies to, as a
// combination of an include and an exclude filter (exclusion always wins,
// see Matches).
type RepoTarget struct {
	Include RepoTargetFilter `json:"include"`
	Exclude RepoTargetFilter `json:"exclude"`
}
// JSON returns the JSON representation of the repo target.
// NOTE(review): the marshalling error is deliberately discarded, so a
// failure yields a nil message — confirm callers tolerate that.
func (p *RepoTarget) JSON() json.RawMessage {
	message, _ := ToJSON(p)
	return message
}
// Validate checks that every ID and every pattern in both the include and
// the exclude filter is well formed, returning the first error encountered
// (include IDs, include patterns, exclude patterns, exclude IDs — in that order).
func (p *RepoTarget) Validate() error {
	checkPatterns := func(patterns []string) error {
		for _, pattern := range patterns {
			if err := patternValidate(pattern); err != nil {
				return err
			}
		}
		return nil
	}
	if err := validateIDSlice(p.Include.IDs); err != nil {
		return err
	}
	if err := checkPatterns(p.Include.Patterns); err != nil {
		return err
	}
	if err := checkPatterns(p.Exclude.Patterns); err != nil {
		return err
	}
	return validateIDSlice(p.Exclude.IDs)
}
// Matches reports whether the repository (given by ID and identifier) is
// targeted. Exclusion always wins; when no includes are specified, every
// non-excluded repository matches; otherwise the repository must match at
// least one include entry.
func (p *RepoTarget) Matches(repoID int64, repoIdentifier string) bool {
	anyPatternMatches := func(patterns []string) bool {
		for _, pattern := range patterns {
			if patternMatches(pattern, repoIdentifier) {
				return true
			}
		}
		return false
	}
	// An excluded repo can never match, regardless of the includes.
	if slices.Contains(p.Exclude.IDs, repoID) || anyPatternMatches(p.Exclude.Patterns) {
		return false
	}
	// Empty include filters mean "match all".
	if len(p.Include.IDs) == 0 && len(p.Include.Patterns) == 0 {
		return true
	}
	return slices.Contains(p.Include.IDs, repoID) || anyPatternMatches(p.Include.Patterns)
}
// matchesRepo parses rawPattern as a RepoTarget and reports whether the
// given repository is targeted by it.
func matchesRepo(rawPattern json.RawMessage, repoID int64, repoIdentifier string) (bool, error) {
	var target RepoTarget
	if err := json.Unmarshal(rawPattern, &target); err != nil {
		return false, fmt.Errorf("failed to parse repo target: %w", err)
	}
	return target.Matches(repoID, repoIdentifier), nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/protection/set_branch.go | app/services/protection/set_branch.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protection
import (
"context"
"errors"
"fmt"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/rs/zerolog/log"
"golang.org/x/exp/constraints"
"golang.org/x/exp/slices"
)
// branchRuleSet is the Protection implementation that applies a set of
// branch protection rules to a repository.
type branchRuleSet struct {
	rules   []types.RuleInfoInternal // the rules to apply, in evaluation order
	manager *Manager                 // used to deserialize rule definitions
}
var _ Protection = branchRuleSet{} // ensure that ruleSet implements the Protection interface.
// MergeVerify runs merge verification for every branch rule that matches the
// pull request's target branch and combines the per-rule outputs into one:
// allowed merge methods are intersected, boolean requirements are OR-ed,
// minimum approval counts take the maximum, and violations plus default
// reviewer approvals are concatenated.
func (s branchRuleSet) MergeVerify(
	ctx context.Context,
	in MergeVerifyInput,
) (MergeVerifyOutput, []types.RuleViolations, error) {
	var out MergeVerifyOutput
	var violations []types.RuleViolations
	// Start from the full set of merge methods; each rule can only narrow it.
	out.AllowedMethods = slices.Clone(enum.MergeMethods)
	err := s.forEachRuleMatchBranch(
		in.TargetRepo.ID,
		in.TargetRepo.Identifier,
		in.TargetRepo.DefaultBranch,
		in.PullReq.TargetBranch,
		func(r *types.RuleInfoInternal, p BranchProtection) error {
			rOut, rVs, err := p.MergeVerify(ctx, in)
			if err != nil {
				return err
			}
			// combine output across rules
			violations = append(violations, backFillRule(rVs, r.RuleInfo)...)
			out.AllowedMethods = intersectSorted(out.AllowedMethods, rOut.AllowedMethods)
			out.DeleteSourceBranch = out.DeleteSourceBranch || rOut.DeleteSourceBranch
			out.MinimumRequiredApprovalsCount = maxInt(out.MinimumRequiredApprovalsCount, rOut.MinimumRequiredApprovalsCount)
			out.MinimumRequiredApprovalsCountLatest = maxInt(out.MinimumRequiredApprovalsCountLatest, rOut.MinimumRequiredApprovalsCountLatest) //nolint:lll
			out.RequiresCodeOwnersApproval = out.RequiresCodeOwnersApproval || rOut.RequiresCodeOwnersApproval
			out.RequiresCodeOwnersApprovalLatest = out.RequiresCodeOwnersApprovalLatest || rOut.RequiresCodeOwnersApprovalLatest
			out.RequiresCommentResolution = out.RequiresCommentResolution || rOut.RequiresCommentResolution
			out.RequiresNoChangeRequests = out.RequiresNoChangeRequests || rOut.RequiresNoChangeRequests
			out.DefaultReviewerApprovals = append(out.DefaultReviewerApprovals, rOut.DefaultReviewerApprovals...)
			return nil
		})
	if err != nil {
		return out, nil, fmt.Errorf("failed to process each rule in ruleSet: %w", err)
	}
	return out, violations, nil
}
// RequiredChecks collects the status-check identifiers demanded by every
// branch rule matching the pull request's target branch. A check required by
// any rule lands in RequiredIdentifiers; a check that is only bypassable
// stays in BypassableIdentifiers — "required" wins over "bypassable"
// regardless of the order in which rules are processed.
func (s branchRuleSet) RequiredChecks(
	ctx context.Context,
	in RequiredChecksInput,
) (RequiredChecksOutput, error) {
	requiredIDMap := map[string]struct{}{}
	bypassableIDMap := map[string]struct{}{}
	err := s.forEachRuleMatchBranch(
		in.Repo.ID,
		in.Repo.Identifier,
		in.Repo.DefaultBranch,
		in.PullReq.TargetBranch,
		func(_ *types.RuleInfoInternal, p BranchProtection) error {
			out, err := p.RequiredChecks(ctx, in)
			if err != nil {
				return err
			}
			for reqCheckID := range out.RequiredIdentifiers {
				// A hard requirement overrides any earlier bypassable entry.
				requiredIDMap[reqCheckID] = struct{}{}
				delete(bypassableIDMap, reqCheckID)
			}
			for reqCheckID := range out.BypassableIdentifiers {
				// Don't downgrade a check some other rule already requires.
				if _, ok := requiredIDMap[reqCheckID]; ok {
					continue
				}
				bypassableIDMap[reqCheckID] = struct{}{}
			}
			return nil
		})
	if err != nil {
		return RequiredChecksOutput{}, fmt.Errorf("failed to process each rule in ruleSet: %w", err)
	}
	return RequiredChecksOutput{
		RequiredIdentifiers:   requiredIDMap,
		BypassableIdentifiers: bypassableIDMap,
	}, nil
}
// CreatePullReqVerify runs pull request creation verification for every
// branch rule matching the target branch. Outputs are combined across rules:
// RequestCodeOwners is OR-ed, and the default reviewer / group reviewer ID
// lists are concatenated and de-duplicated.
func (s branchRuleSet) CreatePullReqVerify(
	ctx context.Context,
	in CreatePullReqVerifyInput,
) (CreatePullReqVerifyOutput, []types.RuleViolations, error) {
	var out CreatePullReqVerifyOutput
	var violations []types.RuleViolations
	err := s.forEachRuleMatchBranch(
		in.RepoID,
		in.RepoIdentifier,
		in.DefaultBranch,
		in.TargetBranch,
		func(r *types.RuleInfoInternal, p BranchProtection) error {
			rOut, rVs, err := p.CreatePullReqVerify(ctx, in)
			if err != nil {
				return err
			}
			// combine output across rules
			violations = append(violations, backFillRule(rVs, r.RuleInfo)...)
			out.RequestCodeOwners = out.RequestCodeOwners || rOut.RequestCodeOwners
			out.DefaultReviewerIDs = append(out.DefaultReviewerIDs, rOut.DefaultReviewerIDs...)
			out.DefaultGroupReviewerIDs = append(out.DefaultGroupReviewerIDs, rOut.DefaultGroupReviewerIDs...)
			return nil
		})
	if err != nil {
		return out, nil, fmt.Errorf("failed to process each rule in ruleSet: %w", err)
	}
	// Multiple rules may name the same reviewers; report each ID only once.
	out.DefaultReviewerIDs = deduplicateInt64Slice(out.DefaultReviewerIDs)
	out.DefaultGroupReviewerIDs = deduplicateInt64Slice(out.DefaultGroupReviewerIDs)
	return out, violations, nil
}
// RefChangeVerify runs ref change verification for every branch rule that
// matches the repository and the changed refs, and returns the accumulated
// rule violations.
func (s branchRuleSet) RefChangeVerify(ctx context.Context, in RefChangeVerifyInput) ([]types.RuleViolations, error) {
	var violations []types.RuleViolations
	if err := forEachRuleMatchRefs(
		s.manager,
		s.rules,
		in.Repo.ID,
		in.Repo.Identifier,
		in.Repo.DefaultBranch,
		in.RefNames,
		refChangeVerifyFunc(ctx, in, &violations),
	); err != nil {
		return nil, fmt.Errorf("failed to process each rule in ruleSet: %w", err)
	}
	return violations, nil
}
// UserIDs returns the user IDs collected from all rules in the set.
func (s branchRuleSet) UserIDs() ([]int64, error) {
	return collectIDs(s.manager, s.rules, Protection.UserIDs)
}
// UserGroupIDs returns the user group IDs collected from all rules in the set.
func (s branchRuleSet) UserGroupIDs() ([]int64, error) {
	return collectIDs(s.manager, s.rules, Protection.UserGroupIDs)
}
// forEachRuleMatchBranch invokes fn for every rule in the set whose repo
// target and ref pattern match the given repository and branch name.
// A rule whose parsed protection does not implement BranchProtection
// (theoretically impossible) is logged and skipped.
func (s branchRuleSet) forEachRuleMatchBranch(
	repoID int64,
	repoIdentifier string,
	defaultBranch string,
	branchName string,
	fn func(r *types.RuleInfoInternal, p BranchProtection) error,
) error {
	for i := range s.rules {
		r := s.rules[i]
		matchedRepo, err := matchesRepo(r.RepoTarget, repoID, repoIdentifier)
		if err != nil {
			return err
		}
		if !matchedRepo {
			continue
		}
		matchedRef, err := matchesRef(r.Pattern, defaultBranch, branchName)
		if err != nil {
			return err
		}
		if !matchedRef {
			continue
		}
		protection, err := s.manager.FromJSON(r.Type, r.Definition, false)
		if err != nil {
			return fmt.Errorf("forEachRuleMatchBranch: failed to parse protection definition ID=%d Type=%s: %w",
				r.ID, r.Type, err)
		}
		branchProtection, ok := protection.(BranchProtection)
		if !ok { // theoretically, should never happen
			// BUGFIX: previously this returned nil, silently aborting all
			// remaining rules, and the zerolog event was never emitted
			// because Msg/Send was not called. Log properly and skip only
			// the offending rule.
			log.Warn().
				Err(errors.New("failed to type assert Protection to BranchProtection")).
				Msgf("skipping rule ID=%d Type=%s", r.ID, r.Type)
			continue
		}
		err = fn(&r, branchProtection)
		if err != nil {
			return fmt.Errorf(
				"forEachRuleMatchBranch: failed to process rule ID=%d Type=%s: %w",
				r.ID, r.Type, err,
			)
		}
	}
	return nil
}
// backFillRule stamps the given rule info onto every violation in the slice
// and returns the same slice for convenient chaining.
func backFillRule(vs []types.RuleViolations, rule types.RuleInfo) []types.RuleViolations {
	for i := 0; i < len(vs); i++ {
		vs[i].Rule = rule
	}
	return vs
}
// intersectSorted removes all elements of "sliceA" that are not also in the
// "sliceB" slice. Assumes both slices are sorted in ascending order.
// Like the original implementation it reuses sliceA's backing array (the
// caller's slice is modified in place), but it compacts kept elements in a
// single pass, replacing the O(n^2) element-shifting deletes with an
// O(len(sliceA)+len(sliceB)) two-pointer sweep.
func intersectSorted[T constraints.Ordered](sliceA, sliceB []T) []T {
	var n, idxA, idxB int
	for idxA < len(sliceA) && idxB < len(sliceB) {
		switch {
		case sliceA[idxA] == sliceB[idxB]:
			// Keep the element; don't advance idxB so duplicates in sliceA
			// that match a single sliceB element are all retained (matches
			// the original behavior).
			sliceA[n] = sliceA[idxA]
			n++
			idxA++
		case sliceA[idxA] < sliceB[idxB]:
			idxA++ // not present in sliceB — drop it
		default:
			idxB++
		}
	}
	return sliceA[:n]
}
// maxInt returns the larger of the two int arguments.
func maxInt(a int, b int) int {
	if b > a {
		return b
	}
	return a
}
// deduplicateInt64Slice returns the values of slice with duplicates removed,
// preserving first-occurrence order. The result is never nil, even for an
// empty input.
func deduplicateInt64Slice(slice []int64) []int64 {
	result := make([]int64, 0, len(slice))
	seen := make(map[int64]struct{}, len(slice))
	for _, v := range slice {
		if _, dup := seen[v]; dup {
			continue
		}
		seen[v] = struct{}{}
		result = append(result, v)
	}
	return result
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/cleanup/wire.go | app/services/cleanup/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cleanup
import (
"github.com/harness/gitness/app/api/controller/repo"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/job"
"github.com/google/wire"
)
// WireSet provides the cleanup Service for dependency injection via wire.
var WireSet = wire.NewSet(
	ProvideService,
)
// ProvideService constructs the cleanup Service from its dependencies.
// It fails if the provided config is invalid (see NewService).
func ProvideService(
	config Config,
	scheduler *job.Scheduler,
	executor *job.Executor,
	webhookExecutionStore store.WebhookExecutionStore,
	tokenStore store.TokenStore,
	repoStore store.RepoStore,
	repoCtrl *repo.Controller,
) (*Service, error) {
	return NewService(
		config,
		scheduler,
		executor,
		webhookExecutionStore,
		tokenStore,
		repoStore,
		repoCtrl,
	)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/cleanup/service.go | app/services/cleanup/service.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cleanup
import (
"context"
"errors"
"fmt"
"time"
"github.com/harness/gitness/app/api/controller/repo"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/job"
)
type Config struct {
WebhookExecutionsRetentionTime time.Duration
DeletedRepositoriesRetentionTime time.Duration
}
func (c *Config) Prepare() error {
if c == nil {
return errors.New("config is required")
}
if c.WebhookExecutionsRetentionTime <= 0 {
return errors.New("config.WebhookExecutionsRetentionTime has to be provided")
}
if c.DeletedRepositoriesRetentionTime <= 0 {
return errors.New("config.DeletedRepositoriesRetentionTime has to be provided")
}
return nil
}
// Service is responsible for cleaning up data in db / git / ...
type Service struct {
	config                Config
	scheduler             *job.Scheduler
	executor              *job.Executor
	webhookExecutionStore store.WebhookExecutionStore
	tokenStore            store.TokenStore
	repoStore             store.RepoStore
	repoCtrl              *repo.Controller
}
// NewService validates the given config and returns a new cleanup Service.
// The service does nothing until Register is called.
func NewService(
	config Config,
	scheduler *job.Scheduler,
	executor *job.Executor,
	webhookExecutionStore store.WebhookExecutionStore,
	tokenStore store.TokenStore,
	repoStore store.RepoStore,
	repoCtrl *repo.Controller,
) (*Service, error) {
	if err := config.Prepare(); err != nil {
		return nil, fmt.Errorf("provided cleanup config is invalid: %w", err)
	}
	return &Service{
		config:                config,
		scheduler:             scheduler,
		executor:              executor,
		webhookExecutionStore: webhookExecutionStore,
		tokenStore:            tokenStore,
		repoStore:             repoStore,
		repoCtrl:              repoCtrl,
	}, nil
}
// Register wires up the cleanup service: it registers the job handlers with
// the executor and then schedules the recurring cleanup jobs.
func (s *Service) Register(ctx context.Context) error {
	if err := s.registerJobHandlers(); err != nil {
		return fmt.Errorf("failed to register cleanup job handlers: %w", err)
	}
	if err := s.scheduleRecurringCleanupJobs(ctx); err != nil {
		return fmt.Errorf("failed to schedule cleanup jobs: %w", err)
	}
	return nil
}
// scheduleRecurringCleanupJobs schedules the cleanup jobs.
// Each job uses its job type as its unique recurring-job identifier.
func (s *Service) scheduleRecurringCleanupJobs(ctx context.Context) error {
	recurring := []struct {
		jobType     string
		cron        string
		maxDuration time.Duration
		errMsg      string
	}{
		{jobTypeWebhookExecutions, jobCronWebhookExecutions, jobMaxDurationWebhookExecutions, "failed to schedule webhook executions job"},
		{jobTypeTokens, jobCronTokens, jobMaxDurationTokens, "failed to schedule token job"},
		{jobTypeDeletedRepos, jobCronDeletedRepos, jobMaxDurationDeletedRepos, "failed to schedule deleted repo cleanup job"},
	}
	for _, r := range recurring {
		err := s.scheduler.AddRecurring(ctx, r.jobType, r.jobType, r.cron, r.maxDuration)
		if err != nil {
			return fmt.Errorf("%s: %w", r.errMsg, err)
		}
	}
	return nil
}
// registerJobHandlers registers handlers for all cleanup jobs.
// Each handler is keyed by its job type so the scheduler can dispatch to it.
func (s *Service) registerJobHandlers() error {
	// webhook executions: purge executions older than the configured retention.
	if err := s.executor.Register(
		jobTypeWebhookExecutions,
		newWebhookExecutionsCleanupJob(
			s.config.WebhookExecutionsRetentionTime,
			s.webhookExecutionStore,
		),
	); err != nil {
		return fmt.Errorf("failed to register job handler for webhook executions cleanup: %w", err)
	}
	// tokens: purge expired session tokens.
	if err := s.executor.Register(
		jobTypeTokens,
		newTokensCleanupJob(
			s.tokenStore,
		),
	); err != nil {
		return fmt.Errorf("failed to register job handler for token cleanup: %w", err)
	}
	// deleted repos: purge soft-deleted repositories past retention.
	if err := s.executor.Register(
		jobTypeDeletedRepos,
		newDeletedReposCleanupJob(
			s.config.DeletedRepositoriesRetentionTime,
			s.repoStore,
			s.repoCtrl,
		),
	); err != nil {
		return fmt.Errorf("failed to register job handler for deleted repos cleanup: %w", err)
	}
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/cleanup/deleted_repos.go | app/services/cleanup/deleted_repos.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cleanup
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/app/api/controller/repo"
"github.com/harness/gitness/app/bootstrap"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/job"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/rs/zerolog/log"
)
const (
	jobTypeDeletedRepos        = "gitness:cleanup:deleted-repos"
	jobCronDeletedRepos        = "50 0 * * *" // At minute 50 past midnight every day.
	jobMaxDurationDeletedRepos = 1 * time.Hour
	// NOTE(review): identifier misspells "Retrieval" — rename when convenient.
	maxDeletedRepoRetrival = 1000 // to avoid loading all deleted repos in memory at once
)
// deletedReposCleanupJob purges soft-deleted repositories that have been
// deleted longer ago than the retention time.
type deletedReposCleanupJob struct {
	retentionTime time.Duration
	repoStore     store.RepoStore
	repoCtrl      *repo.Controller
}
// newDeletedReposCleanupJob returns a job handler that purges deleted
// repositories past the given retention time.
func newDeletedReposCleanupJob(
	retentionTime time.Duration,
	repoStore store.RepoStore,
	repoCtrl *repo.Controller,
) *deletedReposCleanupJob {
	return &deletedReposCleanupJob{
		retentionTime: retentionTime,
		repoStore:     repoStore,
		repoCtrl:      repoCtrl,
	}
}
// Handle purges old deleted repositories that are past the retention time.
// It fetches candidates in batches (most recently deleted first) and uses
// deletedBeforeOrAt as a moving cursor instead of page numbers, so the loop
// always makes progress even when purging an individual repository fails.
func (j *deletedReposCleanupJob) Handle(ctx context.Context, _ string, _ job.ProgressReporter) (string, error) {
	olderThan := time.Now().Add(-j.retentionTime)
	log.Ctx(ctx).Info().Msgf(
		"start purging deleted repositories older than %s (aka created before %s)",
		j.retentionTime,
		olderThan.Format(time.RFC3339Nano))
	deletedBeforeOrAt := olderThan.UnixMilli()
	// Purging goes through the controller, so act as the system service principal.
	session := bootstrap.NewSystemServiceSession()
	purgedRepos := 0
	for {
		// Page stays 1 on purpose: the shrinking deletedBeforeOrAt cursor
		// (updated below) is what moves the window forward.
		filter := &types.RepoFilter{
			Page:              1,
			Size:              maxDeletedRepoRetrival,
			Query:             "",
			Order:             enum.OrderDesc,
			Sort:              enum.RepoAttrDeleted,
			DeletedBeforeOrAt: &deletedBeforeOrAt,
		}
		toBePurgedRepos, err := j.repoStore.ListAll(ctx, filter)
		if err != nil {
			return "", fmt.Errorf("failed to list ready-to-delete repositories: %w", err)
		}
		if len(toBePurgedRepos) == 0 {
			break
		}
		log.Ctx(ctx).Info().Msgf("found %d deleted repositories ready to be purged.", len(toBePurgedRepos))
		for _, r := range toBePurgedRepos {
			deletedBeforeOrAt = *r.Deleted - 1 // to avoid infinite loop if last repo wasn't purged successfully
			err := j.repoCtrl.PurgeNoAuth(ctx, session, r)
			if err != nil {
				// Best effort: log and keep going with the remaining repos.
				log.Ctx(ctx).Warn().Err(err).Msgf("failed to purge repo with identifier: %s, path: %s, deleted at: %d",
					r.Identifier, r.Path, *r.Deleted)
				continue
			}
			log.Ctx(ctx).Info().Msgf("successfully purged repo with identifier: %s, path: %s, deleted at: %d",
				r.Identifier, r.Path, *r.Deleted)
			purgedRepos++
		}
	}
	result := "no old deleted repositories found"
	if purgedRepos > 0 {
		result = fmt.Sprintf("purged %d deleted repositories", purgedRepos)
	}
	log.Ctx(ctx).Info().Msg(result)
	return result, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/cleanup/tokens.go | app/services/cleanup/tokens.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cleanup
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/job"
"github.com/harness/gitness/types/enum"
"github.com/rs/zerolog/log"
)
const (
	jobTypeTokens = "gitness:cleanup:tokens"
	//nolint:gosec
	jobCronTokens        = "42 */4 * * *" // At minute 42 past every 4th hour.
	jobMaxDurationTokens = 1 * time.Minute
	// tokenRetentionTime specifies the time for which session tokens are kept even after they expired.
	// This ensures that users can still trace them after expiry for some time.
	// NOTE: I don't expect this to change much, so make it a constant instead of exposing it via config.
	tokenRetentionTime = 72 * time.Hour // 3d
)
// tokensCleanupJob deletes expired session tokens past the retention window.
type tokensCleanupJob struct {
	tokenStore store.TokenStore
}
// newTokensCleanupJob returns a job handler that purges expired session tokens.
func newTokensCleanupJob(
	tokenStore store.TokenStore,
) *tokensCleanupJob {
	return &tokensCleanupJob{
		tokenStore: tokenStore,
	}
}
// Handle deletes session tokens whose expiry lies further in the past than
// the retention window. PAT / SAT tokens are never removed here because they
// were explicitly created and are managed by the user.
func (j *tokensCleanupJob) Handle(ctx context.Context, _ string, _ job.ProgressReporter) (string, error) {
	cutoff := time.Now().Add(-tokenRetentionTime)
	log.Ctx(ctx).Info().Msgf(
		"start purging expired tokens (expired before: %s)",
		cutoff.Format(time.RFC3339Nano),
	)
	deleted, err := j.tokenStore.DeleteExpiredBefore(ctx, cutoff, []enum.TokenType{enum.TokenTypeSession})
	if err != nil {
		return "", fmt.Errorf("failed to delete expired tokens: %w", err)
	}
	result := "no expired tokens found"
	if deleted > 0 {
		result = fmt.Sprintf("deleted %d tokens", deleted)
	}
	log.Ctx(ctx).Info().Msg(result)
	return result, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/cleanup/webhook_executions.go | app/services/cleanup/webhook_executions.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cleanup
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/job"
"github.com/rs/zerolog/log"
)
const (
	jobTypeWebhookExecutions        = "gitness:cleanup:webhook-executions"
	jobCronWebhookExecutions        = "21 */4 * * *" // At minute 21 past every 4th hour.
	jobMaxDurationWebhookExecutions = 1 * time.Minute
)
// webhookExecutionsCleanupJob deletes webhook executions older than the
// configured retention time.
type webhookExecutionsCleanupJob struct {
	retentionTime         time.Duration
	webhookExecutionStore store.WebhookExecutionStore
}
// newWebhookExecutionsCleanupJob returns a job handler that purges webhook
// executions past the given retention time.
func newWebhookExecutionsCleanupJob(
	retentionTime time.Duration,
	webhookExecutionStore store.WebhookExecutionStore,
) *webhookExecutionsCleanupJob {
	return &webhookExecutionsCleanupJob{
		retentionTime:         retentionTime,
		webhookExecutionStore: webhookExecutionStore,
	}
}
// Handle purges old webhook executions that are past the retention time.
// It returns a short human-readable summary used as the job result.
func (j *webhookExecutionsCleanupJob) Handle(ctx context.Context, _ string, _ job.ProgressReporter) (string, error) {
	olderThan := time.Now().Add(-j.retentionTime)
	log.Ctx(ctx).Info().Msgf(
		"start purging webhook executions older than %s (aka created before %s)",
		j.retentionTime,
		olderThan.Format(time.RFC3339Nano))
	n, err := j.webhookExecutionStore.DeleteOld(ctx, olderThan)
	if err != nil {
		return "", fmt.Errorf("failed to delete old webhook executions: %w", err)
	}
	result := "no old webhook executions found"
	if n > 0 {
		result = fmt.Sprintf("deleted %d webhook executions", n)
	}
	log.Ctx(ctx).Info().Msg(result)
	return result, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/gitspace/actions.go | app/services/gitspace/actions.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gitspace
import (
"context"
"fmt"
"net/http"
"strings"
"time"
"github.com/harness/gitness/app/api/usererror"
events "github.com/harness/gitness/app/events/gitspace"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
gonanoid "github.com/matoous/go-nanoid"
"github.com/rs/zerolog/log"
)
// defaultPasswordRef is the access-key ref used when the gitspace config has
// no SSH token identifier (user-credentials access).
const defaultPasswordRef = "harness_password"
// defaultMachineUser is the machine user assigned to VS Code (web/desktop) instances.
const defaultMachineUser = "harness"
// AllowedUIDAlphabet is the character set used to generate gitspace instance
// identifier suffixes.
const AllowedUIDAlphabet = "abcdefghijklmnopqrstuvwxyz0123456789"
// gitspaceInstanceCleaningTimedOutMins is timeout for which a gitspace instance can be in cleaning state.
const gitspaceInstanceCleaningTimedOutMins = 10
// gitspaceBusyOperation guards against concurrent operations on a gitspace.
// If the latest instance is in a busy state and was updated within the
// configured busy-action window it returns a 403 user error; when the busy
// state has outlived the window, the instance is assumed stuck and is moved
// to the error state instead, allowing new actions to proceed.
func (c *Service) gitspaceBusyOperation(
	ctx context.Context,
	config types.GitspaceConfig,
) error {
	// Nothing to guard when there is no instance or it isn't busy.
	if config.GitspaceInstance == nil || !config.GitspaceInstance.State.IsBusyStatus() {
		return nil
	}
	busyStateTimeoutInMillis := int64(c.config.Gitspace.BusyActionInMins * 60 * 1000)
	if time.Since(time.UnixMilli(config.GitspaceInstance.Updated)).Milliseconds() <= busyStateTimeoutInMillis {
		return usererror.NewWithPayload(http.StatusForbidden, fmt.Sprintf(
			"Last session for this gitspace is still %s", config.GitspaceInstance.State))
	}
	// Busy for too long: mark the instance as errored.
	config.GitspaceInstance.State = enum.GitspaceInstanceStateError
	if err := c.UpdateInstance(ctx, config.GitspaceInstance); err != nil {
		return fmt.Errorf("failed to update gitspace config for %s: %w", config.Identifier, err)
	}
	return nil
}
// submitAsyncOps marks the instance with the in-progress state for the
// requested action and kicks off the orchestrator asynchronously. The work
// runs on a detached (non-cancelable) copy of ctx bounded by the configured
// infra timeout; failures are logged, persisted on the instance, and
// reported as gitspace events. The caller returns immediately.
func (c *Service) submitAsyncOps(
	ctx context.Context,
	config types.GitspaceConfig,
	action enum.GitspaceActionType,
) {
	// Reflect the in-progress state for the action before going async.
	switch action {
	case enum.GitspaceActionTypeStart:
		config.GitspaceInstance.State = enum.GitspaceInstanceStateStarting
	case enum.GitspaceActionTypeStop:
		config.GitspaceInstance.State = enum.GitspaceInstanceStateStopping
	case enum.GitspaceActionTypeReset:
		config.GitspaceInstance.State = enum.GitSpaceInstanceStateResetting
	}
	if updateErr := c.UpdateInstance(ctx, config.GitspaceInstance); updateErr != nil {
		log.Err(updateErr).Msgf(
			"failed to update gitspace instance during exec %s", config.GitspaceInstance.Identifier)
	}
	errChannel := make(chan *types.GitspaceError)
	// Detach from the caller's cancellation; bound the work by the infra timeout instead.
	submitCtx := context.WithoutCancel(ctx)
	gitspaceTimedOutInMins := time.Duration(c.config.Gitspace.InfraTimeoutInMins) * time.Minute
	ttlExecuteContext, cancel := context.WithTimeout(submitCtx, gitspaceTimedOutInMins)
	go c.triggerOrchestrator(ttlExecuteContext, config, action, errChannel)
	var err *types.GitspaceError
	go func() {
		// Wait for either the timeout or the orchestrator result. errChannel
		// is closed by triggerOrchestrator; a plain close yields err == nil
		// (success).
		select {
		case <-ttlExecuteContext.Done():
			if ttlExecuteContext.Err() != nil {
				err = &types.GitspaceError{
					Error: ttlExecuteContext.Err(),
				}
			}
		case err = <-errChannel:
		}
		if err != nil {
			log.Err(err.Error).Msgf("error during async execution for %s", config.GitspaceInstance.Identifier)
			// Persist the failure on the instance ...
			config.GitspaceInstance.State = enum.GitspaceInstanceStateError
			config.GitspaceInstance.ErrorMessage = err.ErrorMessage
			updateErr := c.UpdateInstance(submitCtx, config.GitspaceInstance)
			if updateErr != nil {
				log.Err(updateErr).Msgf(
					"failed to update gitspace instance during exec %q", config.GitspaceInstance.Identifier)
			}
			// ... and emit the matching action-failed event.
			switch action {
			case enum.GitspaceActionTypeStart:
				c.EmitGitspaceConfigEvent(submitCtx, config, enum.GitspaceEventTypeGitspaceActionStartFailed)
			case enum.GitspaceActionTypeStop:
				c.EmitGitspaceConfigEvent(submitCtx, config, enum.GitspaceEventTypeGitspaceActionStopFailed)
			case enum.GitspaceActionTypeReset:
				c.EmitGitspaceConfigEvent(submitCtx, config, enum.GitspaceEventTypeGitspaceActionResetFailed)
			}
		}
		// Release the timeout context once the outcome is handled.
		cancel()
	}()
}
// triggerOrchestrator dispatches the requested action to the orchestrator and
// forwards any failure on errChannel. The channel is always closed on return,
// so the listener in submitAsyncOps never blocks forever.
func (c *Service) triggerOrchestrator(
	ctxWithTimedOut context.Context,
	config types.GitspaceConfig,
	action enum.GitspaceActionType,
	errChannel chan *types.GitspaceError,
) {
	defer close(errChannel)
	var orchestrateErr *types.GitspaceError
	switch action {
	case enum.GitspaceActionTypeStart:
		orchestrateErr = c.orchestrator.TriggerStartGitspace(ctxWithTimedOut, config)
	case enum.GitspaceActionTypeStop:
		orchestrateErr = c.orchestrator.TriggerStopGitspace(ctxWithTimedOut, config)
	case enum.GitspaceActionTypeReset:
		// Reset is implemented as a (non-destructive) delete trigger.
		orchestrateErr = c.orchestrator.TriggerDeleteGitspace(ctxWithTimedOut, config, false)
	}
	if orchestrateErr != nil {
		orchestrateErr.Error =
			fmt.Errorf("failed to start/stop/reset gitspace: %s %w", config.Identifier, orchestrateErr.Error)
		errChannel <- orchestrateErr
	}
}
// buildGitspaceInstance creates a new (not yet persisted) gitspace instance
// for the given config, with a randomly suffixed identifier and access
// settings derived from the config's SSH token identifier.
func (c *Service) buildGitspaceInstance(config types.GitspaceConfig) (*types.GitspaceInstance, error) {
	now := time.Now().UnixMilli()
	suffix, err := gonanoid.Generate(AllowedUIDAlphabet, 6)
	if err != nil {
		return nil, fmt.Errorf("could not generate UID for gitspace config : %q %w", config.Identifier, err)
	}
	instance := &types.GitspaceInstance{
		GitSpaceConfigID: config.ID,
		Identifier:       strings.ToLower(config.Identifier + "-" + suffix),
		State:            enum.GitspaceInstanceStateStarting,
		UserID:           config.GitspaceUser.Identifier,
		SpaceID:          config.SpaceID,
		SpacePath:        config.SpacePath,
		Created:          now,
		Updated:          now,
		TotalTimeUsed:    0,
	}
	// VS Code based IDEs (web and desktop) run under the default machine user.
	if config.IDE == enum.IDETypeVSCodeWeb || config.IDE == enum.IDETypeVSCode {
		machineUser := defaultMachineUser
		instance.MachineUser = &machineUser
	}
	// Prefer SSH-key access when a token identifier is configured; otherwise
	// fall back to user credentials with the default password ref.
	if len(config.SSHTokenIdentifier) > 0 {
		instance.AccessType = enum.GitspaceAccessTypeSSHKey
		instance.AccessKeyRef = &config.SSHTokenIdentifier
	} else {
		ref := strings.Clone(defaultPasswordRef)
		instance.AccessKeyRef = &ref
		instance.AccessType = enum.GitspaceAccessTypeUserCredentials
	}
	return instance, nil
}
// EmitGitspaceConfigEvent reports a gitspace event of the given type for the
// supplied gitspace config, keyed by the config identifier.
func (c *Service) EmitGitspaceConfigEvent(
	ctx context.Context,
	config types.GitspaceConfig,
	eventType enum.GitspaceEventType,
) {
	payload := &events.GitspaceEventPayload{
		QueryKey:   config.Identifier,
		EntityID:   config.ID,
		EntityType: enum.GitspaceEntityTypeGitspaceConfig,
		EventType:  eventType,
		Timestamp:  time.Now().UnixNano(),
	}
	c.gitspaceEventReporter.EmitGitspaceEvent(ctx, events.GitspaceEvent, payload)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/gitspace/wire.go | app/services/gitspace/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gitspace
import (
gitspaceevents "github.com/harness/gitness/app/events/gitspace"
gitspacedeleteevents "github.com/harness/gitness/app/events/gitspacedelete"
"github.com/harness/gitness/app/gitspace/orchestrator"
"github.com/harness/gitness/app/gitspace/orchestrator/ide"
"github.com/harness/gitness/app/gitspace/scm"
"github.com/harness/gitness/app/services/infraprovider"
"github.com/harness/gitness/app/services/refcache"
"github.com/harness/gitness/app/services/tokengenerator"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/google/wire"
)
// WireSet provides the gitspace Service to the wire dependency graph.
var WireSet = wire.NewSet(
	ProvideGitspace,
)
// ProvideGitspace is the wire provider for the gitspace Service; it forwards
// all injected dependencies to NewService unchanged.
func ProvideGitspace(
	tx dbtx.Transactor,
	gitspaceStore store.GitspaceConfigStore,
	gitspaceInstanceStore store.GitspaceInstanceStore,
	eventReporter *gitspaceevents.Reporter,
	gitspaceEventStore store.GitspaceEventStore,
	spaceFinder refcache.SpaceFinder,
	infraProviderSvc *infraprovider.Service,
	orchestrator orchestrator.Orchestrator,
	scm *scm.SCM,
	config *types.Config,
	gitspaceDeleteEventReporter *gitspacedeleteevents.Reporter,
	ideFactory ide.Factory,
	spaceStore store.SpaceStore,
	tokenGenerator tokengenerator.TokenGenerator,
) *Service {
	return NewService(tx, gitspaceStore, gitspaceInstanceStore, eventReporter,
		gitspaceEventStore, spaceFinder, infraProviderSvc, orchestrator, scm, config,
		gitspaceDeleteEventReporter, ideFactory, spaceStore, tokenGenerator,
	)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/gitspace/update_instance.go | app/services/gitspace/update_instance.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gitspace
import (
"context"
"fmt"
"net/url"
"time"
"github.com/harness/gitness/types"
)
// UpdateInstance persists the given gitspace instance after bumping its
// Updated timestamp. Any IDE URL on the instance is normalized first: the
// "token" query parameter is stripped before storing (see formatURL).
func (c *Service) UpdateInstance(
	ctx context.Context,
	gitspaceInstance *types.GitspaceInstance,
) error {
	gitspaceInstance.Updated = time.Now().UnixMilli()

	if rawURL := gitspaceInstance.URL; rawURL != nil {
		sanitized, err := formatURL(*rawURL)
		if err != nil {
			return fmt.Errorf("cannot parse ide url: %w", err)
		}
		gitspaceInstance.URL = &sanitized
	}

	if err := c.gitspaceInstanceStore.Update(ctx, gitspaceInstance); err != nil {
		return fmt.Errorf("failed to update gitspace instance: %w", err)
	}
	return nil
}
// formatURL strips the "token" query parameter from rawURL and returns the
// resulting URL string. It returns an error when rawURL cannot be parsed.
func formatURL(rawURL string) (string, error) {
	parsed, err := url.Parse(rawURL)
	if err != nil {
		return "", err
	}
	// Drop the token parameter and re-encode the remaining query.
	query := parsed.Query()
	query.Del("token")
	parsed.RawQuery = query.Encode()
	return parsed.String(), nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/gitspace/action_cleanup.go | app/services/gitspace/action_cleanup.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gitspace
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/rs/zerolog/log"
)
// CleanupGitspace transitions the latest instance of the given gitspace
// config into the cleaning state and triggers the orchestrator to clean up
// its resources. The call is rejected when a cleanup is already pending,
// i.e. the instance is in cleaning state and was updated within the cleanup
// timeout window.
func (c *Service) CleanupGitspace(ctx context.Context, config types.GitspaceConfig) error {
	// Reject when a cleanup was triggered recently and has not yet timed out.
	if config.GitspaceInstance.State == enum.GitSpaceInstanceStateCleaning &&
		time.Since(time.UnixMilli(config.GitspaceInstance.Updated)).Milliseconds() <=
			(gitspaceInstanceCleaningTimedOutMins*60*1000) {
		log.Ctx(ctx).Warn().Msgf("gitspace cleaning is already pending for : %q",
			config.GitspaceInstance.Identifier)
		return fmt.Errorf("gitspace is already pending for : %q", config.GitspaceInstance.Identifier)
	}

	// Persist the cleaning state first so concurrent callers observe the
	// pending cleanup (see the guard above).
	config.GitspaceInstance.State = enum.GitSpaceInstanceStateCleaning
	err := c.UpdateInstance(ctx, config.GitspaceInstance)
	if err != nil {
		log.Ctx(ctx).Err(err).Msgf("failed to update instance %s before triggering cleanup",
			config.GitspaceInstance.Identifier)
		return fmt.Errorf("failed to update instance %s before triggering cleanup: %w",
			config.GitspaceInstance.Identifier,
			err,
		)
	}

	err = c.orchestrator.TriggerCleanupInstanceResources(ctx, config)
	if err != nil {
		log.Ctx(ctx).Err(err).Msgf("error during triggering cleanup for gitspace instance %s",
			config.GitspaceInstance.Identifier)
		// Record the failure on the instance.
		config.GitspaceInstance.State = enum.GitspaceInstanceStateError
		if updateErr := c.UpdateInstance(ctx, config.GitspaceInstance); updateErr != nil {
			log.Ctx(ctx).Err(updateErr).Msgf("failed to update instance %s after error in triggering delete",
				config.GitspaceInstance.Identifier)
		}
		return fmt.Errorf("failed to trigger cleanup for gitspace instance %s: %w",
			config.GitspaceInstance.Identifier,
			err,
		)
	}

	log.Ctx(ctx).Debug().Msgf("successfully triggered cleanup for gitspace instance %s",
		config.GitspaceInstance.Identifier)
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/gitspace/find.go | app/services/gitspace/find.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gitspace
import (
	"context"
	"errors"
	"fmt"
	"strings"

	"github.com/harness/gitness/app/paths"
	"github.com/harness/gitness/store"
	"github.com/harness/gitness/store/database/dbtx"
	"github.com/harness/gitness/types"
	"github.com/harness/gitness/types/enum"

	"github.com/rs/zerolog/log"
)
// FindWithLatestInstanceWithSpacePath resolves the space referenced by
// spacePath and returns the gitspace config with the given identifier,
// including its latest instance and derived state.
func (c *Service) FindWithLatestInstanceWithSpacePath(
	ctx context.Context,
	spacePath string,
	identifier string,
) (*types.GitspaceConfig, error) {
	space, err := c.spaceFinder.FindByRef(ctx, spacePath)
	if err != nil {
		return nil, fmt.Errorf("failed to find space: %w", err)
	}
	return c.FindWithLatestInstance(ctx, space.ID, identifier)
}
// FindWithLatestInstance loads the gitspace config identified by spaceID and
// identifier inside a read-only transaction, attaches its latest instance
// (nil when none exists) and the derived config state, then resolves the
// branch URL via the SCM service outside the transaction.
func (c *Service) FindWithLatestInstance(
	ctx context.Context,
	spaceID int64,
	identifier string,
) (*types.GitspaceConfig, error) {
	var gitspaceConfigResult *types.GitspaceConfig
	txErr := c.tx.WithTx(ctx, func(ctx context.Context) error {
		gitspaceConfig, err := c.gitspaceConfigStore.FindByIdentifier(ctx, spaceID, identifier)
		if err != nil {
			return fmt.Errorf("failed to find gitspace config: %w", err)
		}
		latestInstance, err := c.findLatestInstance(ctx, gitspaceConfig)
		if err != nil {
			return err
		}
		configState, err := getGitspaceConfigState(latestInstance)
		if err != nil {
			return err
		}
		// update gitspace config parameters based on latest instance
		gitspaceConfig.GitspaceInstance = latestInstance
		gitspaceConfig.State = configState
		// store result in return variable
		gitspaceConfigResult = gitspaceConfig
		return nil
	}, dbtx.TxDefaultReadOnly)
	if txErr != nil {
		return nil, txErr
	}
	gitspaceConfigResult.BranchURL = c.GetBranchURL(ctx, gitspaceConfigResult)
	return gitspaceConfigResult, nil
}
// findLatestInstance returns the most recent gitspace instance for the given
// gitspace config, enriched with derived parameters. It returns nil (and no
// error) when the config has no instance yet.
func (c *Service) findLatestInstance(
	ctx context.Context,
	gitspaceConfig *types.GitspaceConfig,
) (*types.GitspaceInstance, error) {
	instance, err := c.gitspaceInstanceStore.FindLatestByGitspaceConfigID(ctx, gitspaceConfig.ID)
	switch {
	case errors.Is(err, store.ErrResourceNotFound):
		// No instance exists yet for this config.
		// nolint:nilnil // return value is based on no resource
		return nil, nil
	case err != nil:
		return nil, err
	}
	// add or update various parameters of gitspace instance.
	return c.addOrUpdateInstanceParameters(ctx, instance, gitspaceConfig)
}
// getToken issues a token for accessing a VS Code Web gitspace. For any
// other IDE type it returns an empty token and no error. The token is
// generated for the gitspace user against the infra provider config of the
// gitspace's resource space.
func (c *Service) getToken(
	ctx context.Context,
	gitspaceConfig *types.GitspaceConfig,
) (string, error) {
	// Only VS Code Web URLs carry a token.
	if gitspaceConfig.IDE != enum.IDETypeVSCodeWeb {
		return "", nil
	}
	resourceSpace, err := c.spaceStore.FindByRef(ctx, gitspaceConfig.InfraProviderResource.SpacePath)
	if err != nil || resourceSpace == nil {
		// NOTE(review): when resourceSpace is nil but err is nil, the wrapped
		// error below carries no cause — consider a dedicated message.
		return "", fmt.Errorf("failed to find space ref: %w", err)
	}
	infraProviderConfigIdentifier := gitspaceConfig.InfraProviderResource.InfraProviderConfigIdentifier
	infraProviderConfig, err := c.infraProviderSvc.Find(ctx, resourceSpace.Core(), infraProviderConfigIdentifier)
	if err != nil {
		log.Warn().Msgf(
			"Cannot get infraProviderConfig for resource : %s/%s",
			resourceSpace.Path, infraProviderConfigIdentifier)
		return "", err
	}
	return c.tokenGenerator.GenerateToken(
		ctx,
		gitspaceConfig,
		gitspaceConfig.GitspaceUser.Identifier,
		enum.PrincipalTypeUser,
		infraProviderConfig,
	)
}
// getProjectName extracts the leaf segment of the given space path, which
// serves as the project name. It returns an empty string when the path
// cannot be split.
func getProjectName(spacePath string) string {
	if _, leaf, err := paths.DisectLeaf(spacePath); err == nil {
		return leaf
	}
	return ""
}
// getGitspaceConfigState derives the config-level state from the latest
// instance; a missing instance maps to the uninitialized state.
func getGitspaceConfigState(instance *types.GitspaceInstance) (enum.GitspaceStateType, error) {
	if instance != nil {
		return instance.GetGitspaceState()
	}
	return enum.GitspaceStateUninitialized, nil
}
// addOrUpdateInstanceParameters enriches a stored gitspace instance with
// derived values: the IDE plugin URL and, for VS Code Web, an IDE URL
// carrying a freshly issued token. It returns nil (and no error) when either
// argument is nil.
func (c *Service) addOrUpdateInstanceParameters(
	ctx context.Context,
	instance *types.GitspaceInstance,
	gitspaceConfig *types.GitspaceConfig,
) (*types.GitspaceInstance, error) {
	if instance == nil || gitspaceConfig == nil {
		// nolint:nilnil // return value is based on nil pointers
		return nil, nil
	}

	ideSvc, err := c.ideFactory.GetIDE(gitspaceConfig.IDE)
	if err != nil {
		return nil, err
	}

	projectName := getProjectName(gitspaceConfig.SpacePath)
	pluginURL := ideSvc.GeneratePluginURL(projectName, instance.Identifier)
	if pluginURL != "" {
		instance.PluginURL = &pluginURL
	}

	if instance.URL != nil && gitspaceConfig.IDE == enum.IDETypeVSCodeWeb {
		// token is jwt token issue by cde-manager which is validated in cde-gateway when accessing vscode web.
		gitspaceConfig.GitspaceInstance = instance
		token, err := c.getToken(ctx, gitspaceConfig)
		if err != nil {
			return nil, fmt.Errorf("unable to generate JWT token for vscode web: %w", err)
		}
		if token != "" {
			// The stored URL had its token parameter stripped (see formatURL),
			// so it may or may not still have a query string. Choose the
			// correct separator instead of always appending with '&', which
			// produced an invalid URL when the query string was empty.
			separator := "?"
			if strings.Contains(*instance.URL, "?") {
				separator = "&"
			}
			urlWithToken := fmt.Sprintf("%s%stoken=%s", *instance.URL, separator, token)
			instance.URL = &urlWithToken
		}
	}
	return instance, nil
}
// FindWithLatestInstanceByID loads the gitspace config with the given ID
// (optionally including deleted configs) inside a read-only transaction and
// attaches its latest instance and derived state. Unlike
// FindWithLatestInstance, the branch URL is not resolved.
func (c *Service) FindWithLatestInstanceByID(
	ctx context.Context,
	id int64,
	includeDeleted bool,
) (*types.GitspaceConfig, error) {
	var gitspaceConfigResult *types.GitspaceConfig
	txErr := c.tx.WithTx(ctx, func(ctx context.Context) error {
		gitspaceConfig, err := c.gitspaceConfigStore.Find(ctx, id, includeDeleted)
		if err != nil {
			return fmt.Errorf("failed to find gitspace config: %w", err)
		}
		latestInstance, err := c.findLatestInstance(ctx, gitspaceConfig)
		if err != nil {
			return err
		}
		configState, err := getGitspaceConfigState(latestInstance)
		if err != nil {
			return err
		}
		// update gitspace config parameters based on latest instance
		gitspaceConfig.GitspaceInstance = latestInstance
		gitspaceConfig.State = configState
		// store result in return variable
		gitspaceConfigResult = gitspaceConfig
		return nil
	}, dbtx.TxDefaultReadOnly)
	if txErr != nil {
		return nil, txErr
	}
	return gitspaceConfigResult, nil
}
// FindAll returns the gitspace configs with the given IDs, loaded inside a
// read-only transaction. No instance data is attached.
func (c *Service) FindAll(
	ctx context.Context,
	ids []int64,
) ([]*types.GitspaceConfig, error) {
	var gitspaceConfigResult []*types.GitspaceConfig
	txErr := c.tx.WithTx(ctx, func(ctx context.Context) error {
		gitspaceConfigs, err := c.gitspaceConfigStore.FindAll(ctx, ids)
		if err != nil {
			return fmt.Errorf("failed to find gitspace config: %w", err)
		}
		gitspaceConfigResult = append(gitspaceConfigResult, gitspaceConfigs...)
		return nil
	}, dbtx.TxDefaultReadOnly)
	if txErr != nil {
		return nil, txErr
	}
	return gitspaceConfigResult, nil
}
// FindAllByIdentifier returns the gitspace configs with the given
// identifiers inside the given space, loaded in a read-only transaction.
// No instance data is attached.
func (c *Service) FindAllByIdentifier(
	ctx context.Context,
	spaceID int64,
	identifiers []string,
) ([]types.GitspaceConfig, error) {
	var gitspaceConfigResult []types.GitspaceConfig
	txErr := c.tx.WithTx(ctx, func(ctx context.Context) error {
		gitspaceConfigs, err := c.gitspaceConfigStore.FindAllByIdentifier(ctx, spaceID, identifiers)
		if err != nil {
			return fmt.Errorf("failed to find gitspace config: %w", err)
		}
		gitspaceConfigResult = gitspaceConfigs
		return nil
	}, dbtx.TxDefaultReadOnly)
	if txErr != nil {
		return nil, txErr
	}
	return gitspaceConfigResult, nil
}
// FindInstanceByIdentifier returns the gitspace instance with the given
// identifier, enriched with derived parameters from its owning config. The
// config lookup passes includeDeleted=true, so instances of deleted configs
// are still resolvable.
func (c *Service) FindInstanceByIdentifier(
	ctx context.Context,
	identifier string,
) (*types.GitspaceInstance, error) {
	var gitspaceInstanceResult *types.GitspaceInstance
	txErr := c.tx.WithTx(ctx, func(ctx context.Context) error {
		gitspaceInstance, err := c.gitspaceInstanceStore.FindByIdentifier(ctx, identifier)
		if err != nil {
			return fmt.Errorf("failed to find gitspace instance: %w", err)
		}
		gitspaceInstanceResult = gitspaceInstance
		return nil
	}, dbtx.TxDefaultReadOnly)
	if txErr != nil {
		return nil, txErr
	}
	gitspaceConfig, err := c.gitspaceConfigStore.Find(ctx, gitspaceInstanceResult.GitSpaceConfigID, true)
	if err != nil {
		return nil, fmt.Errorf("could not find gitspace config: %w", err)
	}
	return c.addOrUpdateInstanceParameters(ctx, gitspaceInstanceResult, gitspaceConfig)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/gitspace/action_reset.go | app/services/gitspace/action_reset.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gitspace
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
// ResetGitspaceAction marks the gitspace config for reset and submits the
// asynchronous reset operation. For a running instance the active-time
// bookkeeping is closed out first.
func (c *Service) ResetGitspaceAction(
	ctx context.Context,
	gitspaceConfig types.GitspaceConfig,
) error {
	if gitspaceConfig.GitspaceInstance.State == enum.GitspaceInstanceStateRunning {
		activeTimeEnded := time.Now().UnixMilli()
		gitspaceConfig.GitspaceInstance.ActiveTimeEnded = &activeTimeEnded
		// NOTE(review): assumes ActiveTimeStarted is non-nil for a running
		// instance — confirm, otherwise this dereference panics.
		gitspaceConfig.GitspaceInstance.TotalTimeUsed =
			*(gitspaceConfig.GitspaceInstance.ActiveTimeEnded) - *(gitspaceConfig.GitspaceInstance.ActiveTimeStarted)
	}
	gitspaceConfig.IsMarkedForReset = true
	if err := c.UpdateConfig(ctx, &gitspaceConfig); err != nil {
		return fmt.Errorf("failed to update gitspace config for resetting: %w", err)
	}
	c.submitAsyncOps(ctx, gitspaceConfig, enum.GitspaceActionTypeReset)
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/gitspace/action_stop.go | app/services/gitspace/action_stop.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gitspace
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
// StopGitspaceAction transitions the latest instance of the given config to
// the stopping state and submits the asynchronous stop operation. It fails
// when the instance is already in a final state or the gitspace is busy.
func (c *Service) StopGitspaceAction(
	ctx context.Context,
	config types.GitspaceConfig,
	now time.Time,
) error {
	savedGitspaceInstance, err := c.gitspaceInstanceStore.FindLatestByGitspaceConfigID(ctx, config.ID)
	if err != nil {
		return fmt.Errorf("failed to find gitspace with config ID : %s %w", config.Identifier, err)
	}
	// An instance already in a final state cannot be stopped.
	if savedGitspaceInstance.State.IsFinalStatus() {
		return fmt.Errorf("gitspace instance cannot be stopped with ID %s", savedGitspaceInstance.Identifier)
	}
	config.GitspaceInstance = savedGitspaceInstance
	err = c.gitspaceBusyOperation(ctx, config)
	if err != nil {
		return err
	}
	// Close out active-time bookkeeping using the caller-supplied stop time.
	activeTimeEnded := now.UnixMilli()
	config.GitspaceInstance.ActiveTimeEnded = &activeTimeEnded
	// NOTE(review): assumes ActiveTimeStarted is non-nil here — confirm,
	// otherwise this dereference panics.
	config.GitspaceInstance.TotalTimeUsed =
		*(config.GitspaceInstance.ActiveTimeEnded) - *(config.GitspaceInstance.ActiveTimeStarted)
	config.GitspaceInstance.State = enum.GitspaceInstanceStateStopping
	if err = c.UpdateInstance(ctx, config.GitspaceInstance); err != nil {
		return fmt.Errorf("failed to update gitspace config for stopping %s %w", config.Identifier, err)
	}
	c.submitAsyncOps(ctx, config, enum.GitspaceActionTypeStop)
	return nil
}
// GitspaceAutostopAction emits an auto-stop event for the gitspace config
// and then performs the regular stop action.
func (c *Service) GitspaceAutostopAction(
	ctx context.Context,
	config types.GitspaceConfig,
	now time.Time,
) error {
	c.EmitGitspaceConfigEvent(ctx, config, enum.GitspaceEventTypeGitspaceAutoStop)
	return c.StopGitspaceAction(ctx, config, now)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/gitspace/update_config.go | app/services/gitspace/update_config.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gitspace
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/types"
)
// UpdateConfig persists the given gitspace config after refreshing its
// Updated timestamp.
func (c *Service) UpdateConfig(
	ctx context.Context,
	gitspaceConfig *types.GitspaceConfig,
) error {
	gitspaceConfig.Updated = time.Now().UnixMilli()
	if err := c.gitspaceConfigStore.Update(ctx, gitspaceConfig); err != nil {
		return fmt.Errorf("failed to update gitspace config: %w", err)
	}
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/gitspace/action_delete.go | app/services/gitspace/action_delete.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gitspace
import (
"context"
"fmt"
"time"
events "github.com/harness/gitness/app/events/gitspacedelete"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/rs/zerolog/log"
)
// DeleteGitspaceByIdentifier looks up the gitspace config (with its latest
// instance) by space path and identifier and marks it for deletion.
func (c *Service) DeleteGitspaceByIdentifier(ctx context.Context, spaceRef string, identifier string) error {
	gitspaceConfig, err := c.FindWithLatestInstanceWithSpacePath(ctx, spaceRef, identifier)
	if err != nil {
		log.Err(err).Msgf("Failed to find latest gitspace config : %s", identifier)
		return err
	}
	return c.deleteGitspace(ctx, gitspaceConfig)
}
// deleteGitspace marks the gitspace config for deletion. Configs without an
// initialized instance are marked deleted directly; otherwise a delete event
// is emitted so resources are torn down asynchronously.
func (c *Service) deleteGitspace(ctx context.Context, gitspaceConfig *types.GitspaceConfig) error {
	// Without an initialized instance there are no resources to tear down,
	// so the config can be marked deleted immediately.
	if gitspaceConfig.GitspaceInstance == nil ||
		gitspaceConfig.GitspaceInstance.State == enum.GitspaceInstanceStateUninitialized {
		gitspaceConfig.IsMarkedForDeletion = true
		gitspaceConfig.IsDeleted = true
		if err := c.UpdateConfig(ctx, gitspaceConfig); err != nil {
			return fmt.Errorf("failed to mark gitspace config as deleted: %w", err)
		}
		return nil
	}

	// mark can_delete for gitconfig as true so that if delete operation fails, cron job can clean up resources.
	gitspaceConfig.IsMarkedForDeletion = true
	if err := c.UpdateConfig(ctx, gitspaceConfig); err != nil {
		return fmt.Errorf("failed to mark gitspace config is_marked_for_deletion column: %w", err)
	}

	c.gitspaceDeleteEventReporter.EmitGitspaceDeleteEvent(ctx, events.GitspaceDeleteEvent,
		&events.GitspaceDeleteEventPayload{GitspaceConfigIdentifier: gitspaceConfig.Identifier,
			SpaceID: gitspaceConfig.SpaceID})
	return nil
}
// RemoveGitspace moves the latest instance of the given config into the
// stopping (when running) or cleaning state and triggers the orchestrator to
// delete the gitspace, optionally including user data. The call is rejected
// when a cleanup is already pending within the timeout window.
func (c *Service) RemoveGitspace(ctx context.Context, config types.GitspaceConfig, canDeleteUserData bool) error {
	// Reject when a cleanup was triggered recently and has not yet timed out.
	if config.GitspaceInstance.State == enum.GitSpaceInstanceStateCleaning &&
		time.Since(time.UnixMilli(config.GitspaceInstance.Updated)).Milliseconds() <=
			(gitspaceInstanceCleaningTimedOutMins*60*1000) {
		log.Ctx(ctx).Warn().Msgf("gitspace cleaning is already pending for : %q",
			config.GitspaceInstance.Identifier)
		return fmt.Errorf("gitspace is already pending for : %q", config.GitspaceInstance.Identifier)
	}

	if config.GitspaceInstance.State == enum.GitspaceInstanceStateRunning {
		// Close out active-time bookkeeping before stopping.
		activeTimeEnded := time.Now().UnixMilli()
		config.GitspaceInstance.ActiveTimeEnded = &activeTimeEnded
		// NOTE(review): assumes ActiveTimeStarted is non-nil for a running
		// instance — confirm, otherwise this dereference panics.
		config.GitspaceInstance.TotalTimeUsed =
			*(config.GitspaceInstance.ActiveTimeEnded) - *(config.GitspaceInstance.ActiveTimeStarted)
		config.GitspaceInstance.State = enum.GitspaceInstanceStateStopping
	} else {
		config.GitspaceInstance.State = enum.GitSpaceInstanceStateCleaning
	}

	// Persist the new state before handing off to the orchestrator.
	err := c.UpdateInstance(ctx, config.GitspaceInstance)
	if err != nil {
		log.Ctx(ctx).Err(err).Msgf("failed to update instance %s before triggering delete",
			config.GitspaceInstance.Identifier)
		return fmt.Errorf("failed to update instance %s before triggering delete: %w",
			config.GitspaceInstance.Identifier,
			err,
		)
	}

	if err := c.orchestrator.TriggerDeleteGitspace(ctx, config, canDeleteUserData); err != nil {
		log.Ctx(ctx).Err(err.Error).Msgf("error during triggering delete for gitspace instance %s",
			config.GitspaceInstance.Identifier)
		// Record the failure on the instance.
		config.GitspaceInstance.State = enum.GitspaceInstanceStateError
		if updateErr := c.UpdateInstance(ctx, config.GitspaceInstance); updateErr != nil {
			log.Ctx(ctx).Err(updateErr).Msgf("failed to update instance %s after error in triggering delete",
				config.GitspaceInstance.Identifier)
		}
		return fmt.Errorf("failed to trigger delete for gitspace instance %s: %w",
			config.GitspaceInstance.Identifier,
			err.Error,
		)
	}

	log.Ctx(ctx).Debug().Msgf("successfully triggered delete for gitspace instance %s",
		config.GitspaceInstance.Identifier)
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/gitspace/gitspace.go | app/services/gitspace/gitspace.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gitspace
import (
"context"
"fmt"
gitspaceevents "github.com/harness/gitness/app/events/gitspace"
gitspacedeleteevents "github.com/harness/gitness/app/events/gitspacedelete"
"github.com/harness/gitness/app/gitspace/orchestrator"
"github.com/harness/gitness/app/gitspace/orchestrator/ide"
"github.com/harness/gitness/app/gitspace/scm"
"github.com/harness/gitness/app/services/infraprovider"
"github.com/harness/gitness/app/services/refcache"
"github.com/harness/gitness/app/services/tokengenerator"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/rs/zerolog/log"
)
// NewService constructs a gitspace Service wired with its stores, event
// reporters and collaborating services.
func NewService(
	tx dbtx.Transactor,
	gitspaceStore store.GitspaceConfigStore,
	gitspaceInstanceStore store.GitspaceInstanceStore,
	eventReporter *gitspaceevents.Reporter,
	gitspaceEventStore store.GitspaceEventStore,
	spaceFinder refcache.SpaceFinder,
	infraProviderSvc *infraprovider.Service,
	orchestrator orchestrator.Orchestrator,
	scm *scm.SCM,
	config *types.Config,
	gitspaceDeleteEventReporter *gitspacedeleteevents.Reporter,
	ideFactory ide.Factory,
	spaceStore store.SpaceStore,
	tokenGenerator tokengenerator.TokenGenerator,
) *Service {
	return &Service{
		tx:                          tx,
		gitspaceConfigStore:         gitspaceStore,
		gitspaceInstanceStore:       gitspaceInstanceStore,
		gitspaceEventReporter:       eventReporter,
		gitspaceEventStore:          gitspaceEventStore,
		spaceFinder:                 spaceFinder,
		infraProviderSvc:            infraProviderSvc,
		orchestrator:                orchestrator,
		scm:                         scm,
		config:                      config,
		gitspaceDeleteEventReporter: gitspaceDeleteEventReporter,
		ideFactory:                  ideFactory,
		spaceStore:                  spaceStore,
		tokenGenerator:              tokenGenerator,
	}
}
// Service implements gitspace management: lookup and listing of gitspace
// configs and instances, and the start/stop/reset/cleanup/delete actions
// that drive the orchestrator.
type Service struct {
	gitspaceConfigStore         store.GitspaceConfigStore
	gitspaceInstanceStore       store.GitspaceInstanceStore
	gitspaceEventReporter       *gitspaceevents.Reporter
	gitspaceDeleteEventReporter *gitspacedeleteevents.Reporter
	gitspaceEventStore          store.GitspaceEventStore
	spaceFinder                 refcache.SpaceFinder
	tx                          dbtx.Transactor
	infraProviderSvc            *infraprovider.Service
	orchestrator                orchestrator.Orchestrator
	scm                         *scm.SCM
	config                      *types.Config
	ideFactory                  ide.Factory
	spaceStore                  store.SpaceStore
	tokenGenerator              tokengenerator.TokenGenerator
}
// ListGitspacesWithInstance lists gitspace configs matching the filter along
// with their latest instances. It returns the matching configs, the count
// matching the full filter, and the total count of gitspaces visible to the
// caller (space/user scope only). When useTransaction is true all reads run
// inside one read-only transaction.
func (c *Service) ListGitspacesWithInstance(
	ctx context.Context,
	filter types.GitspaceFilter,
	useTransaction bool,
) ([]*types.GitspaceConfig, int64, int64, error) {
	var gitspaceConfigs []*types.GitspaceConfig
	var filterCount, allGitspacesCount int64
	var err error
	findFunc := func(ctx context.Context) (err error) {
		gitspaceConfigs, err = c.gitspaceConfigStore.ListWithLatestInstance(ctx, &filter)
		if err != nil {
			return fmt.Errorf("failed to list gitspace configs: %w", err)
		}
		filterCount, err = c.gitspaceConfigStore.Count(ctx, &filter)
		if err != nil {
			return fmt.Errorf("failed to filterCount gitspaces in space: %w", err)
		}
		// Only filter from RBAC and Space is applied for this count, the user filter will be empty for admin users.
		instanceFilter := types.GitspaceInstanceFilter{
			UserIdentifier: filter.UserIdentifier,
			SpaceIDs:       filter.SpaceIDs,
		}
		allGitspacesCount, err = c.gitspaceConfigStore.Count(ctx, &types.GitspaceFilter{
			Deleted:                filter.Deleted,
			MarkedForDeletion:      filter.MarkedForDeletion,
			GitspaceInstanceFilter: instanceFilter,
		})
		if err != nil {
			return fmt.Errorf("failed to count all gitspace configs in space: %w", err)
		}
		return nil
	}
	if useTransaction {
		err = c.tx.WithTx(ctx, findFunc, dbtx.TxDefaultReadOnly)
	} else {
		err = findFunc(ctx)
	}
	if err != nil {
		return nil, 0, 0, err
	}
	// Enrich each config's instance with derived parameters and resolve the
	// branch URL outside of any transaction.
	for _, gitspaceConfig := range gitspaceConfigs {
		updatedInstance, err := c.addOrUpdateInstanceParameters(ctx, gitspaceConfig.GitspaceInstance, gitspaceConfig)
		if err != nil {
			return nil, 0, 0, err
		}
		gitspaceConfig.GitspaceInstance = updatedInstance
		gitspaceConfig.BranchURL = c.GetBranchURL(ctx, gitspaceConfig)
	}
	return gitspaceConfigs, filterCount, allGitspacesCount, nil
}
// GetBranchURL returns the URL of the configured code-repo branch, resolved
// via the SCM service; on failure it logs a warning and falls back to the
// plain repository URL.
func (c *Service) GetBranchURL(ctx context.Context, config *types.GitspaceConfig) string {
	branchURL, err := c.scm.GetBranchURL(config.SpacePath, config.CodeRepo.Type, config.CodeRepo.URL,
		config.CodeRepo.Branch)
	if err != nil {
		log.Warn().Ctx(ctx).Err(err).Msgf("failed to get branch URL for gitspace config %s, returning repo url",
			config.Identifier)
		branchURL = config.CodeRepo.URL
	}
	return branchURL
}
// Create persists a new gitspace config.
func (c *Service) Create(ctx context.Context, config *types.GitspaceConfig) error {
	return c.gitspaceConfigStore.Create(ctx, config)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/gitspace/action_start.go | app/services/gitspace/action_start.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gitspace
import (
"context"
"errors"
"fmt"
"github.com/harness/gitness/store"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/rs/zerolog/log"
)
// StartGitspaceAction provisions (or reuses) a gitspace instance for the
// given config and submits the asynchronous start operation.
//
// Flow:
//  1. Load the latest instance for the config (absence is not an error).
//  2. If the config is marked for infra reset and the latest instance is not
//     in a final state, mark it pending-cleanup and continue as if no
//     instance existed (the old one is cleaned up by a background job).
//  3. Run the busy-operation guard for the config.
//  4. Create a fresh instance when none exists or the previous one reached a
//     final state, carrying over the HasGitChanges marker.
//  5. Re-read the latest instance and submit the async start op.
func (c *Service) StartGitspaceAction(
	ctx context.Context,
	config types.GitspaceConfig,
) error {
	savedGitspaceInstance, err := c.gitspaceInstanceStore.FindLatestByGitspaceConfigID(ctx, config.ID)
	if err != nil && !errors.Is(err, store.ErrResourceNotFound) {
		return err
	}
	if config.IsMarkedForInfraReset && savedGitspaceInstance != nil && !savedGitspaceInstance.State.IsFinalStatus() {
		savedGitspaceInstance.State = enum.GitspaceInstanceStatePendingCleanup
		err = c.UpdateInstance(ctx, savedGitspaceInstance)
		if err != nil {
			// Best effort: failing to mark the old instance is logged but
			// does not block the reset flow.
			log.Ctx(ctx).Warn().Err(err).Msgf(
				"failed to mark old gitspace instance as pending cleanup for config ID: %s",
				config.Identifier,
			)
		}
		// Don't return here - continue to create a new instance
		// The old instance will be cleaned up by the background job
		savedGitspaceInstance = nil // Treat as if no instance exists so a new one is created
	}
	config.GitspaceInstance = savedGitspaceInstance
	err = c.gitspaceBusyOperation(ctx, config)
	if err != nil {
		return err
	}
	if savedGitspaceInstance == nil || savedGitspaceInstance.State.IsFinalStatus() {
		gitspaceInstance, err := c.buildGitspaceInstance(config)
		if err != nil {
			return err
		}
		if savedGitspaceInstance != nil {
			// Preserve the git-changes marker across instance recreation.
			gitspaceInstance.HasGitChanges = savedGitspaceInstance.HasGitChanges
		}
		if err = c.gitspaceInstanceStore.Create(ctx, gitspaceInstance); err != nil {
			return fmt.Errorf("failed to create gitspace instance for %s %w", config.Identifier, err)
		}
	}
	// Re-read to pick up the instance just created (or the still-active one).
	newGitspaceInstance, err := c.gitspaceInstanceStore.FindLatestByGitspaceConfigID(ctx, config.ID)
	if err != nil {
		return fmt.Errorf("failed to find gitspace with config ID : %s %w", config.Identifier, err)
	}
	config.GitspaceInstance = newGitspaceInstance
	c.submitAsyncOps(ctx, config, enum.GitspaceActionTypeStart)
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/gitspace/delete_all_for_spaces.go | app/services/gitspace/delete_all_for_spaces.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gitspace
import (
"context"
"fmt"
"github.com/harness/gitness/types"
"github.com/gotidy/ptr"
"github.com/rs/zerolog/log"
)
// DeleteAllForSpaces deletes every live (not deleted, not marked for
// deletion) gitspace belonging to any of the given spaces, failing fast on
// the first error.
func (c *Service) DeleteAllForSpaces(ctx context.Context, spaces []*types.Space) error {
	ids := make([]int64, len(spaces))
	for i, space := range spaces {
		ids[i] = space.ID
	}
	log.Debug().Msgf("Deleting all gitspaces for spaces %+v", ids)

	filter := types.GitspaceFilter{}
	filter.SpaceIDs = ids
	filter.Deleted = ptr.Bool(false)
	filter.MarkedForDeletion = ptr.Bool(false)

	gitspaces, _, _, err := c.ListGitspacesWithInstance(ctx, filter, false)
	if err != nil {
		return fmt.Errorf("error while listing gitspaces with instance before deleting all for spaces: %w", err)
	}

	for _, gs := range gitspaces {
		log.Debug().Msgf("Deleting gitspace %s for space %d", gs.Identifier, gs.SpaceID)
		if err = c.deleteGitspace(ctx, gs); err != nil {
			return fmt.Errorf("error while deleting gitspace %s while deleting all for spaces: %w",
				gs.Identifier, err)
		}
		log.Debug().Msgf("Deleted gitspace %s for space %d", gs.Identifier, gs.SpaceID)
	}
	log.Debug().Msgf("Deleted all gitspaces for spaces %+v", ids)
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/keyfetcher/wire.go | app/services/keyfetcher/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package keyfetcher
import "github.com/harness/gitness/app/store"
// ProvideService is the wire provider for the key-fetcher Service.
func ProvideService(
	publicKeyStore store.PublicKeyStore,
) Service {
	svc := NewService(publicKeyStore)
	return svc
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/keyfetcher/service_fetcher.go | app/services/keyfetcher/service_fetcher.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package keyfetcher
import (
"context"
"fmt"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
// Service looks up stored public keys of a principal, filtered by key usage
// and signature scheme. Implementations are backed by a PublicKeyStore.
type Service interface {
	// FetchByFingerprint returns the principal's public keys matching the
	// given key fingerprint, restricted to the provided usages and schemes.
	FetchByFingerprint(
		ctx context.Context,
		keyFingerprint string,
		principalID int64,
		usages []enum.PublicKeyUsage,
		schemes []enum.PublicKeyScheme,
	) ([]types.PublicKey, error)

	// FetchBySubKeyID returns the principal's public keys matching the given
	// subkey ID, restricted to the provided usages and schemes.
	FetchBySubKeyID(
		ctx context.Context,
		subKeyID string,
		principalID int64,
		usages []enum.PublicKeyUsage,
		schemes []enum.PublicKeyScheme,
	) ([]types.PublicKey, error)
}
// NewService constructs the default Service backed by the given public key store.
func NewService(
	publicKeyStore store.PublicKeyStore,
) Service {
	s := service{publicKeyStore: publicKeyStore}
	return s
}
// service is the default Service implementation; it delegates every lookup
// to the underlying PublicKeyStore.
type service struct {
	publicKeyStore store.PublicKeyStore
}
// FetchByFingerprint returns the principal's public keys that match the given
// fingerprint, restricted to the provided usages and schemes.
func (s service) FetchByFingerprint(
	ctx context.Context,
	keyFingerprint string,
	principalID int64,
	usages []enum.PublicKeyUsage,
	schemes []enum.PublicKeyScheme,
) ([]types.PublicKey, error) {
	pID := principalID
	keys, err := s.publicKeyStore.ListByFingerprint(ctx, keyFingerprint, &pID, usages, schemes)
	if err != nil {
		return nil, fmt.Errorf("failed to list public keys by fingerprint: %w", err)
	}
	return keys, nil
}
// FetchBySubKeyID returns the principal's public keys that match the given
// subkey ID, restricted to the provided usages and schemes.
func (s service) FetchBySubKeyID(
	ctx context.Context,
	subKeyID string,
	principalID int64,
	usages []enum.PublicKeyUsage,
	schemes []enum.PublicKeyScheme,
) ([]types.PublicKey, error) {
	pID := principalID
	keys, err := s.publicKeyStore.ListBySubKeyID(ctx, subKeyID, &pID, usages, schemes)
	if err != nil {
		return nil, fmt.Errorf("failed to list public keys by subkey ID: %w", err)
	}
	return keys, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/pullreq/wire.go | app/services/pullreq/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pullreq
import (
"context"
"github.com/harness/gitness/app/auth/authz"
gitevents "github.com/harness/gitness/app/events/git"
pullreqevents "github.com/harness/gitness/app/events/pullreq"
"github.com/harness/gitness/app/services/codecomments"
"github.com/harness/gitness/app/services/label"
"github.com/harness/gitness/app/services/protection"
"github.com/harness/gitness/app/services/refcache"
"github.com/harness/gitness/app/sse"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/app/url"
"github.com/harness/gitness/events"
"github.com/harness/gitness/git"
"github.com/harness/gitness/pubsub"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/google/wire"
)
// WireSet provides this package's dependency-injection providers.
var WireSet = wire.NewSet(
	ProvideService,
	ProvideListService,
)
// ProvideService is the wire provider for the pull request event-processing
// Service. It forwards all dependencies to New; note that the argument order
// of New differs from this provider's parameter order (principalInfoCache is
// passed after fileViewStore).
func ProvideService(ctx context.Context,
	config *types.Config,
	gitReaderFactory *events.ReaderFactory[*gitevents.Reader],
	pullReqEvFactory *events.ReaderFactory[*pullreqevents.Reader],
	pullReqEvReporter *pullreqevents.Reporter,
	git git.Interface,
	repoFinder refcache.RepoFinder,
	repoStore store.RepoStore,
	pullreqStore store.PullReqStore,
	activityStore store.PullReqActivityStore,
	principalInfoCache store.PrincipalInfoCache,
	codeCommentView store.CodeCommentView,
	codeCommentMigrator *codecomments.Migrator,
	fileViewStore store.PullReqFileViewStore,
	pubsub pubsub.PubSub,
	urlProvider url.Provider,
	sseStreamer sse.Streamer,
) (*Service, error) {
	return New(ctx,
		config,
		gitReaderFactory,
		pullReqEvFactory,
		pullReqEvReporter,
		git,
		repoFinder,
		repoStore,
		pullreqStore,
		activityStore,
		codeCommentView,
		codeCommentMigrator,
		fileViewStore,
		principalInfoCache,
		pubsub,
		urlProvider,
		sseStreamer,
	)
}
// ProvideListService is the wire provider for the pull request ListService.
func ProvideListService(
	tx dbtx.Transactor,
	git git.Interface,
	authorizer authz.Authorizer,
	spaceStore store.SpaceStore,
	pullreqStore store.PullReqStore,
	checkStore store.CheckStore,
	repoFinder refcache.RepoFinder,
	labelSvc *label.Service,
	protectionManager *protection.Manager,
) *ListService {
	return NewListService(
		tx,
		git,
		authorizer,
		spaceStore,
		pullreqStore,
		checkStore,
		repoFinder,
		labelSvc,
		protectionManager,
	)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/pullreq/service.go | app/services/pullreq/service.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pullreq
import (
"context"
"fmt"
"sync"
"time"
"github.com/harness/gitness/app/bootstrap"
gitevents "github.com/harness/gitness/app/events/git"
pullreqevents "github.com/harness/gitness/app/events/pullreq"
"github.com/harness/gitness/app/githook"
"github.com/harness/gitness/app/services/codecomments"
"github.com/harness/gitness/app/services/refcache"
"github.com/harness/gitness/app/sse"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/app/url"
"github.com/harness/gitness/events"
"github.com/harness/gitness/git"
"github.com/harness/gitness/pubsub"
"github.com/harness/gitness/stream"
"github.com/harness/gitness/types"
)
// Service consumes git and pull request events to keep pull request state
// up to date: merge data, PR counters, code comment positions, and
// file-viewed markers. Instances are created via New, which also launches
// the event readers.
type Service struct {
	pullreqEvReporter   *pullreqevents.Reporter
	git                 git.Interface
	repoFinder          refcache.RepoFinder
	repoStore           store.RepoStore
	pullreqStore        store.PullReqStore
	activityStore       store.PullReqActivityStore
	codeCommentView     store.CodeCommentView
	principalInfoCache  store.PrincipalInfoCache
	codeCommentMigrator *codecomments.Migrator
	fileViewStore       store.PullReqFileViewStore
	sseStreamer         sse.Streamer
	urlProvider         url.Provider
	// cancelMutex guards cancelMergeability.
	cancelMutex sync.Mutex
	// cancelMergeability maps a source SHA to the cancel func of the
	// mergeability check currently running for that SHA on this instance.
	cancelMergeability map[string]context.CancelFunc
	pubsub             pubsub.PubSub
}
// New creates the pull request event-processing Service and launches its
// event reader groups on the given context:
//
//   - git branch events      -> update/close PRs on branch update/delete
//   - PR file-viewed         -> invalidate "viewed" markers on branch update
//   - PR counters            -> keep PR counters in sync with state changes
//   - PR mergeability        -> recompute merge data on create/update/reopen/retarget
//   - PR code comments       -> migrate code comment positions on new commits
//
// It also subscribes to the pubsub channel used to cancel in-flight
// mergeability checks (the payload is the old source SHA).
//
//nolint:funlen // needs refactoring
func New(ctx context.Context,
	config *types.Config,
	gitReaderFactory *events.ReaderFactory[*gitevents.Reader],
	pullreqEvReaderFactory *events.ReaderFactory[*pullreqevents.Reader],
	pullreqEvReporter *pullreqevents.Reporter,
	git git.Interface,
	repoFinder refcache.RepoFinder,
	repoStore store.RepoStore,
	pullreqStore store.PullReqStore,
	activityStore store.PullReqActivityStore,
	codeCommentView store.CodeCommentView,
	codeCommentMigrator *codecomments.Migrator,
	fileViewStore store.PullReqFileViewStore,
	principalInfoCache store.PrincipalInfoCache,
	bus pubsub.PubSub,
	urlProvider url.Provider,
	sseStreamer sse.Streamer,
) (*Service, error) {
	service := &Service{
		pullreqEvReporter:   pullreqEvReporter,
		git:                 git,
		repoFinder:          repoFinder,
		repoStore:           repoStore,
		pullreqStore:        pullreqStore,
		activityStore:       activityStore,
		principalInfoCache:  principalInfoCache,
		codeCommentView:     codeCommentView,
		urlProvider:         urlProvider,
		codeCommentMigrator: codeCommentMigrator,
		fileViewStore:       fileViewStore,
		cancelMergeability:  make(map[string]context.CancelFunc),
		pubsub:              bus,
		sseStreamer:         sseStreamer,
	}
	var err error
	// handle git branch events to trigger specific pull request events
	const groupGit = "gitness:pullreq:git"
	_, err = gitReaderFactory.Launch(ctx, groupGit, config.InstanceID,
		func(r *gitevents.Reader) error {
			const idleTimeout = 15 * time.Second
			// concurrency 1: branch events for a PR must be processed in order
			r.Configure(
				stream.WithConcurrency(1),
				stream.WithHandlerOptions(
					stream.WithIdleTimeout(idleTimeout),
					stream.WithMaxRetries(3),
				))
			_ = r.RegisterBranchUpdated(service.updatePullReqOnBranchUpdate)
			_ = r.RegisterBranchDeleted(service.closePullReqOnBranchDelete)
			return nil
		})
	if err != nil {
		return nil, err
	}
	// pull request file viewed maintenance
	const groupPullReqFileViewed = "gitness:pullreq:fileviewed"
	_, err = pullreqEvReaderFactory.Launch(ctx, groupPullReqFileViewed, config.InstanceID,
		func(r *pullreqevents.Reader) error {
			const idleTimeout = 30 * time.Second
			r.Configure(
				stream.WithConcurrency(3),
				stream.WithHandlerOptions(
					stream.WithIdleTimeout(idleTimeout),
					stream.WithMaxRetries(1),
				))
			_ = r.RegisterBranchUpdated(service.handleFileViewedOnBranchUpdate)
			return nil
		})
	if err != nil {
		return nil, err
	}
	// pull request counter maintenance
	const groupPullReqCounters = "gitness:pullreq:counters"
	_, err = pullreqEvReaderFactory.Launch(ctx, groupPullReqCounters, config.InstanceID,
		func(r *pullreqevents.Reader) error {
			const idleTimeout = 10 * time.Second
			r.Configure(
				stream.WithConcurrency(1),
				stream.WithHandlerOptions(
					stream.WithIdleTimeout(idleTimeout),
					stream.WithMaxRetries(2),
				))
			_ = r.RegisterCreated(service.updatePRCountersOnCreated)
			_ = r.RegisterReopened(service.updatePRCountersOnReopened)
			_ = r.RegisterClosed(service.updatePRCountersOnClosed)
			_ = r.RegisterMerged(service.updatePRCountersOnMerged)
			return nil
		})
	if err != nil {
		return nil, err
	}
	// mergeability check
	const groupPullReqMergeable = "gitness:pullreq:mergeable"
	_, err = pullreqEvReaderFactory.Launch(ctx, groupPullReqMergeable, config.InstanceID,
		func(r *pullreqevents.Reader) error {
			const idleTimeout = 30 * time.Second
			r.Configure(
				stream.WithConcurrency(3),
				stream.WithHandlerOptions(
					stream.WithIdleTimeout(idleTimeout),
					stream.WithMaxRetries(2),
				))
			_ = r.RegisterCreated(service.mergeCheckOnCreated)
			_ = r.RegisterBranchUpdated(service.mergeCheckOnBranchUpdate)
			_ = r.RegisterReopened(service.mergeCheckOnReopen)
			_ = r.RegisterTargetBranchChanged(service.mergeCheckOnTargetBranchChange)
			return nil
		})
	if err != nil {
		return nil, err
	}
	// cancel any previous pr mergeability check
	// payload is oldsha.
	_ = bus.Subscribe(ctx, cancelMergeCheckKey, func(payload []byte) error {
		oldSHA := string(payload)
		if oldSHA == "" {
			return nil
		}
		service.cancelMutex.Lock()
		defer service.cancelMutex.Unlock()
		cancel := service.cancelMergeability[oldSHA]
		if cancel != nil {
			cancel()
		}
		delete(service.cancelMergeability, oldSHA)
		return nil
	}, pubsub.WithChannelNamespace("pullreq"))
	// code comment position maintenance
	const groupPullReqCodeComments = "gitness:pullreq:codecomments"
	_, err = pullreqEvReaderFactory.Launch(ctx, groupPullReqCodeComments, config.InstanceID,
		func(r *pullreqevents.Reader) error {
			const idleTimeout = 10 * time.Second
			r.Configure(
				stream.WithConcurrency(3),
				stream.WithHandlerOptions(
					stream.WithIdleTimeout(idleTimeout),
					stream.WithMaxRetries(2),
				))
			_ = r.RegisterBranchUpdated(service.updateCodeCommentsOnBranchUpdate)
			_ = r.RegisterReopened(service.updateCodeCommentsOnReopen)
			return nil
		})
	if err != nil {
		return nil, err
	}
	return service, nil
}
// createRPCSystemReferencesWriteParams creates base write parameters for
// write operations performed as the internal system service principal
// (used for system-maintained references such as PR merge refs).
func createRPCSystemReferencesWriteParams(
	ctx context.Context,
	urlProvider url.Provider,
	repoID int64,
	repoGITUID string,
) (git.WriteParams, error) {
	principal := bootstrap.NewSystemServiceSession().Principal
	// generate envars - skip githook execution since it's system references only
	// NOTE(review): the two trailing booleans are positional flags to
	// GenerateEnvironmentVariables - confirm their meaning against its signature.
	envVars, err := githook.GenerateEnvironmentVariables(
		ctx,
		urlProvider.GetInternalAPIURL(ctx),
		repoID,
		principal.ID,
		true,
		true,
	)
	if err != nil {
		return git.WriteParams{}, fmt.Errorf("failed to generate git hook environment variables: %w", err)
	}
	return git.WriteParams{
		Actor: git.Identity{
			Name:  principal.DisplayName,
			Email: principal.Email,
		},
		RepoUID: repoGITUID,
		EnvVars: envVars,
	}, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/pullreq/handlers_mergeable.go | app/services/pullreq/handlers_mergeable.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pullreq
import (
"context"
"fmt"
"strconv"
"time"
pullreqevents "github.com/harness/gitness/app/events/pullreq"
"github.com/harness/gitness/errors"
"github.com/harness/gitness/events"
"github.com/harness/gitness/git"
gitenum "github.com/harness/gitness/git/enum"
"github.com/harness/gitness/git/sha"
"github.com/harness/gitness/pubsub"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/gotidy/ptr"
)
const (
	// cancelMergeCheckKey is the pubsub channel key (namespace "pullreq")
	// used to cancel an in-flight mergeability check; the message payload
	// is the old source SHA whose check should be aborted.
	cancelMergeCheckKey = "cancel_merge_check_for_sha"
)
// mergeCheckOnCreated handles pull request Created events.
// It creates the PR head git ref.
func (s *Service) mergeCheckOnCreated(ctx context.Context,
	event *events.Event[*pullreqevents.CreatedPayload],
) error {
	// A freshly created PR has no previous source SHA, so sha.Nil is used
	// as the "old" SHA for the cancellation broadcast.
	return s.updateMergeData(
		ctx,
		event.Payload.TargetRepoID,
		event.Payload.Number,
		sha.Nil.String(),
		event.Payload.SourceSHA,
	)
}
// mergeCheckOnBranchUpdate handles pull request Branch Updated events by
// recomputing merge data for the new source SHA, cancelling any check that
// is still running for the old SHA.
func (s *Service) mergeCheckOnBranchUpdate(ctx context.Context,
	event *events.Event[*pullreqevents.BranchUpdatedPayload],
) error {
	payload := event.Payload
	return s.updateMergeData(ctx, payload.TargetRepoID, payload.Number, payload.OldSHA, payload.NewSHA)
}
// mergeCheckOnTargetBranchChange handles pull request target-branch-changed
// events by recomputing merge data against the new target; there is no
// previous source SHA to cancel, so sha.None is broadcast.
func (s *Service) mergeCheckOnTargetBranchChange(
	ctx context.Context,
	event *events.Event[*pullreqevents.TargetBranchChangedPayload],
) error {
	payload := event.Payload
	return s.updateMergeData(ctx, payload.TargetRepoID, payload.Number, sha.None.String(), payload.SourceSHA)
}
// mergeCheckOnReopen handles pull request Reopened events by recomputing
// merge data for the source branch's current SHA; there is no previous SHA
// to cancel, so sha.None is broadcast.
func (s *Service) mergeCheckOnReopen(ctx context.Context,
	event *events.Event[*pullreqevents.ReopenedPayload],
) error {
	payload := event.Payload
	return s.updateMergeData(ctx, payload.TargetRepoID, payload.Number, sha.None.String(), payload.SourceSHA)
}
// updateMergeData performs a test merge of the PR source commit (newSHA)
// into the target branch, writes the result to the PR merge ref, and stores
// the outcome (merge base, target SHA, merge SHA, conflicts, diff stats) on
// the PR record. oldSHA is broadcast first so that any mergeability check
// still running for the previous source commit is cancelled cluster-wide.
//
//nolint:funlen // refactor if required.
func (s *Service) updateMergeData(
	ctx context.Context,
	repoID int64,
	prNum int64,
	oldSHA string,
	newSHA string,
) error {
	pr, err := s.pullreqStore.FindByNumber(ctx, repoID, prNum)
	if err != nil {
		return fmt.Errorf("failed to get pull request number %d: %w", prNum, err)
	}
	// TODO: Merge check should not update the merge base.
	// TODO: Instead it should accept it as an argument and fail if it doesn't match.
	// Then is would not longer be necessary to cancel already active mergeability checks.
	if pr.State != enum.PullReqStateOpen {
		return fmt.Errorf("cannot do mergability check on closed PR %d", pr.Number)
	}
	// cancel all previous mergability work for this PR based on oldSHA
	if err := s.pubsub.Publish(ctx, cancelMergeCheckKey, []byte(oldSHA),
		pubsub.WithPublishNamespace("pullreq")); err != nil {
		return err
	}
	// Register this check's cancel func under newSHA so a later update can
	// abort it; the deferred cleanup removes the registration on exit.
	var cancel context.CancelFunc
	ctx, cancel = context.WithCancel(ctx)
	s.cancelMutex.Lock()
	// NOTE: Temporary workaround to avoid overwriting existing cancel method on same machine.
	// This doesn't avoid same SHA running on multiple machines
	if _, ok := s.cancelMergeability[newSHA]; ok {
		s.cancelMutex.Unlock()
		cancel()
		return nil
	}
	s.cancelMergeability[newSHA] = cancel
	s.cancelMutex.Unlock()
	defer func() {
		cancel()
		s.cancelMutex.Lock()
		delete(s.cancelMergeability, newSHA)
		s.cancelMutex.Unlock()
	}()
	// load repository objects
	targetRepo, err := s.repoFinder.FindByID(ctx, pr.TargetRepoID)
	if err != nil {
		return err
	}
	writeParams, err := createRPCSystemReferencesWriteParams(ctx, s.urlProvider, targetRepo.ID, targetRepo.GitUID)
	if err != nil {
		return fmt.Errorf("failed to generate rpc write params: %w", err)
	}
	refName, err := git.GetRefPath(strconv.Itoa(int(pr.Number)), gitenum.RefTypePullReqMerge)
	if err != nil {
		return fmt.Errorf("failed to generate pull request merge ref name: %w", err)
	}
	refs := []git.RefUpdate{
		{
			Name: refName,
			Old:  sha.SHA{}, // no matter what the value of the reference is
			New:  sha.SHA{}, // update it to point to result of the merge
		},
	}
	// call merge and store output in pr merge reference.
	now := time.Now()
	mergeOutput, err := s.git.Merge(ctx, &git.MergeParams{
		WriteParams: writeParams,
		BaseBranch:  pr.TargetBranch,
		HeadSHA:     sha.Must(newSHA),
		Refs:        refs,
		Force:       true,
		// set committer date to ensure repeatability of merge commit across replicas
		CommitterDate: &now,
	})
	if errors.AsStatus(err) == errors.StatusPreconditionFailed {
		// The branch moved on while we were working - discard, a newer event
		// will trigger a fresh check.
		return events.NewDiscardEventErrorf("Source branch %q is not on SHA %q anymore.",
			pr.SourceBranch, newSHA)
	}
	if err != nil {
		return fmt.Errorf("failed to run git merge with base %q and head %q: %w", pr.TargetBranch, pr.SourceBranch, err)
	}
	// Update DB in both cases (failure or success)
	_, err = s.pullreqStore.UpdateMergeCheckMetadataOptLock(ctx, pr, func(pr *types.PullReq) error {
		// to avoid racing conditions with merge
		if pr.State != enum.PullReqStateOpen {
			return errPRNotOpen
		}
		if pr.SourceSHA != newSHA {
			return events.NewDiscardEventErrorf("PR SHA %s is newer than %s", pr.SourceSHA, newSHA)
		}
		pr.MergeBaseSHA = mergeOutput.MergeBaseSHA.String()
		pr.MergeTargetSHA = ptr.String(mergeOutput.BaseSHA.String())
		if mergeOutput.MergeSHA.IsEmpty() {
			pr.MergeSHA = nil
		} else {
			pr.MergeSHA = ptr.String(mergeOutput.MergeSHA.String())
		}
		pr.UpdateMergeOutcome(enum.MergeMethodMerge, mergeOutput.ConflictFiles)
		pr.Stats.DiffStats = types.NewDiffStats(
			mergeOutput.CommitCount,
			mergeOutput.ChangedFileCount,
			mergeOutput.Additions,
			mergeOutput.Deletions,
		)
		return nil
	})
	if err != nil {
		return fmt.Errorf("failed to update PR merge ref in db with error: %w", err)
	}
	// Notify UI listeners that the PR's merge data changed.
	s.sseStreamer.Publish(ctx, targetRepo.ParentID, enum.SSETypePullReqUpdated, pr)
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/pullreq/handlers_file_viewed.go | app/services/pullreq/handlers_file_viewed.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pullreq
import (
"context"
"errors"
"fmt"
"io"
pullreqevents "github.com/harness/gitness/app/events/pullreq"
"github.com/harness/gitness/events"
"github.com/harness/gitness/git"
"github.com/harness/gitness/git/enum"
)
// handleFileViewedOnBranchUpdate handles pull request Branch Updated events
// by marking previously "viewed" files as obsolete whenever the direct diff
// between the old and new SHA touches them.
//
// Marking ADDED/DELETED/RENAMED paths (not only MODIFIED) catches changes
// that leave file content identical - e.g. a file deleted and re-added -
// which yields behavior very similar to GitHub's.
func (s *Service) handleFileViewedOnBranchUpdate(ctx context.Context,
	event *events.Event[*pullreqevents.BranchUpdatedPayload],
) error {
	repoGit, err := s.repoFinder.FindByID(ctx, event.Payload.TargetRepoID)
	if err != nil {
		return fmt.Errorf("failed to get repo git info: %w", err)
	}

	reader := git.NewStreamReader(s.git.Diff(ctx, &git.DiffParams{
		ReadParams: git.ReadParams{
			RepoUID: repoGit.GitUID,
		},
		BaseRef:      event.Payload.OldSHA,
		HeadRef:      event.Payload.NewSHA,
		MergeBase:    false, // direct changes only
		IncludePatch: false, // paths and statuses suffice
	}))

	obsoletePaths := []string{}
	for {
		fileDiff, err := reader.Next()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			return fmt.Errorf("failed to read next file diff: %w", err)
		}
		switch fileDiff.Status {
		case enum.FileDiffStatusAdded, enum.FileDiffStatusModified:
			obsoletePaths = append(obsoletePaths, fileDiff.Path)
		case enum.FileDiffStatusDeleted:
			obsoletePaths = append(obsoletePaths, fileDiff.OldPath)
		case enum.FileDiffStatusRenamed:
			// a rename is equivalent to deleting the old file and creating a new one
			obsoletePaths = append(obsoletePaths, fileDiff.OldPath, fileDiff.Path)
		case enum.FileDiffStatusCopied, enum.FileDiffStatusUndefined:
			// not relevant for file-view invalidation
		}
	}

	if len(obsoletePaths) == 0 {
		return nil
	}

	if err := s.fileViewStore.MarkObsolete(ctx, event.Payload.PullReqID, obsoletePaths); err != nil {
		return fmt.Errorf(
			"failed to mark files obsolete for repo %d and pr %d: %w",
			repoGit.ID,
			event.Payload.PullReqID,
			err)
	}
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/pullreq/service_list.go | app/services/pullreq/service_list.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pullreq
import (
"context"
"fmt"
apiauth "github.com/harness/gitness/app/api/auth"
"github.com/harness/gitness/app/auth"
"github.com/harness/gitness/app/auth/authz"
"github.com/harness/gitness/app/services/label"
"github.com/harness/gitness/app/services/protection"
"github.com/harness/gitness/app/services/refcache"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/errors"
"github.com/harness/gitness/git"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/gotidy/ptr"
"github.com/rs/zerolog/log"
)
// ListService lists and counts pull requests across spaces, applying
// per-repository permission checks and backfilling labels, checks, and
// rule metadata.
type ListService struct {
	tx                dbtx.Transactor
	git               git.Interface
	authorizer        authz.Authorizer
	spaceStore        store.SpaceStore
	pullreqStore      store.PullReqStore
	checkStore        store.CheckStore
	repoFinder        refcache.RepoFinder
	labelSvc          *label.Service
	protectionManager *protection.Manager
}
// NewListService constructs a ListService wired with its dependencies.
func NewListService(
	tx dbtx.Transactor,
	git git.Interface,
	authorizer authz.Authorizer,
	spaceStore store.SpaceStore,
	pullreqStore store.PullReqStore,
	checkStore store.CheckStore,
	repoFinder refcache.RepoFinder,
	labelSvc *label.Service,
	protectionManager *protection.Manager,
) *ListService {
	svc := &ListService{
		tx:                tx,
		git:               git,
		authorizer:        authorizer,
		spaceStore:        spaceStore,
		pullreqStore:      pullreqStore,
		checkStore:        checkStore,
		repoFinder:        repoFinder,
		labelSvc:          labelSvc,
		protectionManager: protectionManager,
	}
	return svc
}
// CountForSpace returns number of pull requests in a specific space (and optionally in subspaces).
// The API doesn't do a permission check for repositories.
func (c *ListService) CountForSpace(
	ctx context.Context,
	space *types.SpaceCore,
	includeSubspaces bool,
	filter *types.PullReqFilter,
) (int64, error) {
	spaceIDs := []int64{space.ID}
	if includeSubspaces {
		descendants, err := c.spaceStore.GetDescendantsData(ctx, space.ID)
		if err != nil {
			return 0, fmt.Errorf("failed to get space descendant data: %w", err)
		}
		spaceIDs = make([]int64, len(descendants))
		for i := range descendants {
			spaceIDs[i] = descendants[i].ID
		}
	}
	filter.SpaceIDs = spaceIDs

	count, err := c.pullreqStore.Count(ctx, filter)
	if err != nil {
		return 0, fmt.Errorf("failed to count pull requests: %w", err)
	}
	return count, nil
}
// ListForSpace returns a list of pull requests and their respective repositories for a specific space.
//
// PRs are streamed in updated-descending order; repositories are
// permission-checked lazily the first time they appear, and repos the
// session cannot view (or that no longer exist) are blacklisted for the
// rest of the scan. Pagination is cursor-based via filter.UpdatedLt.
//
//nolint:gocognit
func (c *ListService) ListForSpace(
	ctx context.Context,
	session *auth.Session,
	space *types.SpaceCore,
	includeSubspaces bool,
	filter *types.PullReqFilter,
) ([]types.PullReqRepo, error) {
	// list of unsupported filter options
	filter.Sort = enum.PullReqSortUpdated // the only supported option, hardcoded in the SQL query
	filter.Order = enum.OrderDesc         // the only supported option, hardcoded in the SQL query
	filter.Page = 0                       // unsupported, pagination should be done with the UpdatedLt parameter
	filter.UpdatedGt = 0                  // unsupported
	if includeSubspaces {
		subspaces, err := c.spaceStore.GetDescendantsData(ctx, space.ID)
		if err != nil {
			return nil, fmt.Errorf("failed to get space descendant data: %w", err)
		}
		filter.SpaceIDs = make([]int64, 0, len(subspaces))
		for i := range subspaces {
			filter.SpaceIDs = append(filter.SpaceIDs, subspaces[i].ID)
		}
	} else {
		filter.SpaceIDs = []int64{space.ID}
	}
	// repoWhitelist: repos already confirmed viewable by this session.
	repoWhitelist := make(map[int64]struct{})
	list := make([]*types.PullReq, 0, 16)
	repoMap := make(map[int64]*types.RepositoryCore)
	for loadMore := true; loadMore; {
		const prLimit = 100
		const repoLimit = 10
		pullReqs, repoUnchecked, err := c.streamPullReqs(ctx, filter, prLimit, repoLimit, repoWhitelist)
		if err != nil {
			return nil, fmt.Errorf("failed to load pull requests: %w", err)
		}
		// Hitting either limit means there may be more data; advance the
		// cursor to the oldest PR seen in this batch.
		loadMore = len(pullReqs) == prLimit || len(repoUnchecked) == repoLimit
		if loadMore && len(pullReqs) > 0 {
			filter.UpdatedLt = pullReqs[len(pullReqs)-1].Updated
		}
		// Permission-check each newly encountered repository once.
		for repoID := range repoUnchecked {
			repo, err := c.repoFinder.FindByID(ctx, repoID)
			if errors.Is(err, gitness_store.ErrResourceNotFound) {
				filter.RepoIDBlacklist = append(filter.RepoIDBlacklist, repoID)
				continue
			} else if err != nil {
				return nil, fmt.Errorf("failed to find repo: %w", err)
			}
			err = apiauth.CheckRepo(ctx, c.authorizer, session, repo, enum.PermissionRepoView)
			switch {
			case err == nil:
				repoWhitelist[repoID] = struct{}{}
				repoMap[repoID] = repo
			case errors.Is(err, apiauth.ErrForbidden):
				filter.RepoIDBlacklist = append(filter.RepoIDBlacklist, repoID)
			default:
				return nil, fmt.Errorf("failed to check access check: %w", err)
			}
		}
		// Keep only PRs whose target repo passed the permission check.
		for _, pullReq := range pullReqs {
			if _, ok := repoWhitelist[pullReq.TargetRepoID]; ok {
				list = append(list, pullReq)
			}
		}
		if len(list) >= filter.Size {
			list = list[:filter.Size]
			loadMore = false
		}
	}
	if err := c.labelSvc.BackfillMany(ctx, list); err != nil {
		return nil, fmt.Errorf("failed to backfill labels assigned to pull requests: %w", err)
	}
	response := make([]types.PullReqRepo, len(list))
	for i := range list {
		response[i] = types.PullReqRepo{
			PullRequest: list[i],
			Repository:  repoMap[list[i].TargetRepoID],
		}
	}
	if err := c.BackfillMetadata(ctx, response, filter.PullReqMetadataOptions); err != nil {
		return nil, fmt.Errorf("failed to backfill metadata: %w", err)
	}
	return response, nil
}
// streamPullReqs loads pull requests until it gets either pullReqLimit pull requests
// or newRepoLimit distinct repositories.
//
// Repositories already in repoWhitelist don't count towards newRepoLimit. The returned
// map holds the IDs of repositories the caller still needs to permission-check.
func (c *ListService) streamPullReqs(
	ctx context.Context,
	opts *types.PullReqFilter,
	pullReqLimit, newRepoLimit int,
	repoWhitelist map[int64]struct{},
) ([]*types.PullReq, map[int64]struct{}, error) {
	// A child context is used so the store's streaming can be stopped once a limit is hit.
	ctx, cancelFn := context.WithCancel(ctx)
	defer cancelFn()
	repoUnchecked := map[int64]struct{}{}
	pullReqs := make([]*types.PullReq, 0, opts.Size)
	ch, chErr := c.pullreqStore.Stream(ctx, opts)
	for pr := range ch {
		if len(pullReqs) >= pullReqLimit || len(repoUnchecked) >= newRepoLimit {
			cancelFn() // the loop must be exited by canceling the context
			continue   // keep draining the channel until the producer closes it
		}
		// Repos not yet whitelisted still need an access check by the caller.
		if _, ok := repoWhitelist[pr.TargetRepoID]; !ok {
			repoUnchecked[pr.TargetRepoID] = struct{}{}
		}
		pullReqs = append(pullReqs, pr)
	}
	// context.Canceled is the expected outcome when a limit stopped the stream - not an error.
	if err := <-chErr; err != nil && !errors.Is(err, context.Canceled) {
		return nil, nil, fmt.Errorf("failed to stream pull requests: %w", err)
	}
	return pullReqs, repoUnchecked, nil
}
// clearStats wipes the diff statistics from every pull request in the list.
func clearStats(list []types.PullReqRepo) {
	for i := range list {
		list[i].PullRequest.Stats.DiffStats = types.DiffStats{}
	}
}
// backfillStats computes git diff statistics for every pull request in the list
// that doesn't already carry a complete set of stats.
//
// It returns the first error encountered while resolving a repository or
// calculating the diff; pull requests with complete stats are skipped.
func (c *ListService) backfillStats(
	ctx context.Context,
	list []types.PullReqRepo,
) error {
	for _, entry := range list {
		pr := entry.PullRequest
		s := pr.Stats.DiffStats
		if s.Commits != nil && s.FilesChanged != nil && s.Additions != nil && s.Deletions != nil {
			// Stats already complete for this PR - skip it, but keep processing
			// the rest of the list. (Previously this was `return nil`, which
			// aborted the backfill for all remaining entries.)
			continue
		}
		repo, err := c.repoFinder.FindByID(ctx, pr.TargetRepoID)
		if err != nil {
			return fmt.Errorf("failed get repo git info to fetch diff stats: %w", err)
		}
		// Diff is calculated between the PR's merge base and its source head.
		output, err := c.git.DiffStats(ctx, &git.DiffParams{
			ReadParams: git.CreateReadParams(repo),
			BaseRef:    pr.MergeBaseSHA,
			HeadRef:    pr.SourceSHA,
		})
		if err != nil {
			return fmt.Errorf("failed get diff stats: %w", err)
		}
		pr.Stats.DiffStats = types.NewDiffStats(output.Commits, output.FilesChanged, output.Additions, output.Deletions)
	}
	return nil
}
// backfillChecks collects the check metadata for the provided list of pull requests.
func (c *ListService) backfillChecks(
	ctx context.Context,
	list []types.PullReqRepo,
) error {
	// group the source commit SHAs by repository
	shasByRepo := make(map[int64][]string)
	for _, entry := range list {
		id := entry.Repository.ID
		shasByRepo[id] = append(shasByRepo[id], entry.PullRequest.SourceSHA)
	}

	// key identifying one commit inside one repository
	type repoSHA struct {
		repoID int64
		sha    string
	}

	// fetch the check summary for every repository's commits
	summaries := make(map[repoSHA]types.CheckCountSummary)
	for repoID, shas := range shasByRepo {
		byCommit, err := c.checkStore.ResultSummary(ctx, repoID, shas)
		if err != nil {
			return fmt.Errorf("fail to fetch check summary for commits: %w", err)
		}
		for commitSHA, summary := range byCommit {
			summaries[repoSHA{repoID: repoID, sha: commitSHA.String()}] = summary
		}
	}

	// attach the summaries to the pull requests
	for _, entry := range list {
		key := repoSHA{repoID: entry.Repository.ID, sha: entry.PullRequest.SourceSHA}
		entry.PullRequest.CheckSummary = ptr.Of(summaries[key])
	}

	return nil
}
// backfillRules collects the rule metadata for the provided list of pull requests.
// For every (repository, target branch) pair it resolves the active branch-protection
// rules and attaches them to the matching pull requests.
func (c *ListService) backfillRules(
	ctx context.Context,
	list []types.PullReqRepo,
) error {
	// prepare list of branch names per repository
	repoBranchNames := make(map[int64][]string)
	repoDefaultBranch := make(map[int64]string)
	repoIdentifier := make(map[int64]string)
	for _, entry := range list {
		repoID := entry.Repository.ID
		branchNames := repoBranchNames[repoID]
		repoBranchNames[repoID] = append(branchNames, entry.PullRequest.TargetBranch)
		repoDefaultBranch[repoID] = entry.Repository.DefaultBranch
		repoIdentifier[repoID] = entry.Repository.Identifier
	}
	// fetch protection rules for every repository and resolve them per branch
	type repoBranchName struct {
		repoID     int64
		branchName string
	}
	repoBranchNameMap := make(map[repoBranchName][]types.RuleInfo)
	for repoID, branchNames := range repoBranchNames {
		repoProtection, err := c.protectionManager.ListRepoBranchRules(ctx, repoID)
		if err != nil {
			return fmt.Errorf("fail to fetch protection rules for repository: %w", err)
		}
		for _, branchName := range branchNames {
			// only active rules of type branch are relevant here
			branchRuleInfos, err := protection.GetBranchRuleInfos(
				repoID,
				repoIdentifier[repoID],
				repoProtection,
				repoDefaultBranch[repoID],
				branchName,
				protection.RuleInfoFilterStatusActive,
				protection.RuleInfoFilterTypeBranch)
			if err != nil {
				return fmt.Errorf("fail to get rule infos for branch %s: %w", branchName, err)
			}
			repoBranchNameMap[repoBranchName{repoID: repoID, branchName: branchName}] = branchRuleInfos
		}
	}
	// backfill the list with the resolved rule infos
	for _, entry := range list {
		key := repoBranchName{repoID: entry.Repository.ID, branchName: entry.PullRequest.TargetBranch}
		entry.PullRequest.Rules = repoBranchNameMap[key]
	}
	return nil
}
// BackfillMetadata populates the pull requests in the list with additional metadata
// (source repository, check summaries, protection rules and git diff stats) according
// to the provided options.
func (c *ListService) BackfillMetadata(
	ctx context.Context,
	list []types.PullReqRepo,
	options types.PullReqMetadataOptions,
) error {
	// Resolve the source repository of cross-repo pull requests; a missing
	// source repo (or a nil source repo ID) is represented by a placeholder.
	for _, entry := range list {
		if entry.PullRequest.SourceRepoID == nil {
			entry.PullRequest.SourceRepo = deletedSourceRepo
		} else if *entry.PullRequest.SourceRepoID != entry.PullRequest.TargetRepoID {
			sourceRepo, err := c.repoFinder.FindByID(ctx, *entry.PullRequest.SourceRepoID)
			if errors.Is(err, gitness_store.ErrResourceNotFound) {
				sourceRepo = deletedSourceRepo
			} else if err != nil {
				return fmt.Errorf("failed to fetch source repository: %w", err)
			}
			entry.PullRequest.SourceRepo = sourceRepo
		}
	}

	if options.IncludeChecks {
		if err := c.backfillChecks(ctx, list); err != nil {
			// wrap the cause (previously dropped, which hid the actual failure)
			return fmt.Errorf("failed to backfill checks: %w", err)
		}
	}

	if options.IncludeRules {
		if err := c.backfillRules(ctx, list); err != nil {
			// wrap the cause (previously dropped, which hid the actual failure)
			return fmt.Errorf("failed to backfill rules: %w", err)
		}
	}

	if options.IncludeGitStats {
		// non-critical: listing still works without fresh git stats
		if err := c.backfillStats(ctx, list); err != nil {
			log.Ctx(ctx).Warn().Err(err).Msg("failed to backfill PR stats")
		}
	} else {
		clearStats(list)
	}

	return nil
}
// BackfillMetadataForRepo populates metadata for pull requests that all belong
// to the same repository.
func (c *ListService) BackfillMetadataForRepo(
	ctx context.Context,
	repo *types.RepositoryCore,
	pullReqs []*types.PullReq,
	options types.PullReqMetadataOptions,
) error {
	list := make([]types.PullReqRepo, 0, len(pullReqs))
	for _, pullReq := range pullReqs {
		list = append(list, types.PullReqRepo{
			PullRequest: pullReq,
			Repository:  repo,
		})
	}
	return c.BackfillMetadata(ctx, list, options)
}
// BackfillMetadataForPullReq populates metadata for a single pull request.
func (c *ListService) BackfillMetadataForPullReq(
	ctx context.Context,
	repo *types.RepositoryCore,
	pr *types.PullReq,
	options types.PullReqMetadataOptions,
) error {
	single := types.PullReqRepo{
		PullRequest: pr,
		Repository:  repo,
	}
	return c.BackfillMetadata(ctx, []types.PullReqRepo{single}, options)
}
// deletedSourceRepo is a placeholder used as the source repository of a
// cross-repo pull request whose source repository no longer exists.
var deletedSourceRepo = &types.RepositoryCore{
	Identifier: "<deleted-repo>",
	Path:       "<deleted-repo>",
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/pullreq/handlers_branch.go | app/services/pullreq/handlers_branch.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pullreq
import (
"context"
"fmt"
"strconv"
"strings"
gitevents "github.com/harness/gitness/app/events/git"
pullreqevents "github.com/harness/gitness/app/events/pullreq"
"github.com/harness/gitness/errors"
"github.com/harness/gitness/events"
"github.com/harness/gitness/git"
gitapi "github.com/harness/gitness/git/api"
gitenum "github.com/harness/gitness/git/enum"
"github.com/harness/gitness/git/sha"
gitness_store "github.com/harness/gitness/store"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/gotidy/ptr"
"github.com/rs/zerolog/log"
)
var (
	// errPRNotOpen signals that an optimistic-lock update was skipped because
	// the pull request is no longer open (e.g. it raced with a merge or close).
	errPRNotOpen = errors.New("PR is not open")
)
// updatePullReqOnBranchUpdate handles branch update events. For every open pull request
// with the updated branch as its source it updates the PR head reference, recomputes the
// merge base, writes an activity entry, and triggers the pull request BranchUpdated event.
//
//nolint:gocognit // refactor if needed
func (s *Service) updatePullReqOnBranchUpdate(ctx context.Context,
	event *events.Event[*gitevents.BranchUpdatedPayload],
) error {
	// we should always update PR mergeable status check when target branch is updated.
	// - main
	//   |- develop
	//      |- feature1
	//      |- feature2
	// when feature2 merges changes into the develop branch then feature1 is not consistent anymore
	// and needs to re-run the mergeable check even though nothing changed on feature1; the same
	// applies to main: a new commit on main means develop's merge status must be unchecked.
	if branch, err := getBranchFromRef(event.Payload.Ref); err == nil {
		err = s.pullreqStore.ResetMergeCheckStatus(ctx, event.Payload.RepoID, branch)
		if err != nil {
			return err
		}
	}
	// Best-effort: resolve the title of the new head commit for the activity entry.
	var commitTitle string
	err := func() error {
		repo, err := s.repoFinder.FindByID(ctx, event.Payload.RepoID)
		if err != nil {
			return fmt.Errorf("failed to get repo git info: %w", err)
		}
		commit, err := s.git.GetCommit(ctx, &git.GetCommitParams{
			ReadParams: git.ReadParams{RepoUID: repo.GitUID},
			Revision:   event.Payload.NewSHA,
		})
		if err != nil {
			return fmt.Errorf("failed to get commit info: %w", err)
		}
		commitTitle = commit.Commit.Title
		return nil
	}()
	if err != nil {
		// non critical error
		log.Ctx(ctx).Warn().Err(err).Msgf("failed to get commit info from git")
	}
	s.forEveryOpenPR(ctx, event.Payload.RepoID, event.Payload.Ref, func(pr *types.PullReq) error {
		targetRepo, err := s.repoFinder.FindByID(ctx, pr.TargetRepoID)
		if err != nil {
			return fmt.Errorf("failed to get target repo git info: %w", err)
		}
		readParams := git.CreateReadParams(targetRepo)
		writeParams, err := createRPCSystemReferencesWriteParams(ctx, s.urlProvider, targetRepo.ID, targetRepo.GitUID)
		if err != nil {
			return fmt.Errorf("failed to generate target repo write params: %w", err)
		}
		// Malformed SHAs can never succeed on retry - discard the event.
		oldSHA, err := sha.New(event.Payload.OldSHA)
		if err != nil {
			return fmt.Errorf("failed to convert old commit SHA %q: %w",
				event.Payload.OldSHA,
				events.NewDiscardEventError(err),
			)
		}
		newSHA, err := sha.New(event.Payload.NewSHA)
		if err != nil {
			return fmt.Errorf("failed to convert new commit SHA %s: %w",
				event.Payload.NewSHA,
				events.NewDiscardEventError(err),
			)
		}
		// Pull git objects from the source repo into the target repo if this is a cross repo pull request.
		if pr.SourceRepoID == nil {
			return events.NewDiscardEventError(fmt.Errorf("pull request ID=%d has no source repo ID", pr.ID))
		}
		if *pr.SourceRepoID != pr.TargetRepoID {
			sourceRepo, err := s.repoFinder.FindByID(ctx, *pr.SourceRepoID)
			if errors.Is(err, gitness_store.ErrResourceNotFound) {
				return events.NewDiscardEventError(fmt.Errorf("pull request ID=%d source repo not found ID", pr.ID))
			} else if err != nil {
				return fmt.Errorf("failed to get source repo git info: %w", err)
			}
			_, err = s.git.FetchObjects(ctx, &git.FetchObjectsParams{
				WriteParams: writeParams,
				Source:      sourceRepo.GitUID,
				ObjectSHAs:  []sha.SHA{newSHA},
			})
			if err != nil {
				return fmt.Errorf("failed to fetch git objects from the source repository: %w", err)
			}
		}
		// Update pull request's head reference.
		err = s.git.UpdateRef(ctx, git.UpdateRefParams{
			WriteParams: writeParams,
			Name:        strconv.Itoa(int(pr.Number)),
			Type:        gitenum.RefTypePullReqHead,
			NewValue:    newSHA,
			OldValue:    oldSHA,
		})
		if err != nil {
			return fmt.Errorf("failed to update PR head ref after new commit: %w", err)
		}
		// Check if the merge base has changed
		targetRef, err := s.git.GetRef(ctx, git.GetRefParams{
			ReadParams: readParams,
			Name:       pr.TargetBranch,
			Type:       gitenum.RefTypeBranch,
		})
		if err != nil {
			return fmt.Errorf("failed to resolve target branch reference: %w", err)
		}
		targetSHA := targetRef.SHA
		mergeBaseInfo, err := s.git.MergeBase(ctx, git.MergeBaseParams{
			ReadParams: git.ReadParams{RepoUID: targetRepo.GitUID},
			Ref1:       event.Payload.NewSHA,
			Ref2:       targetSHA.String(),
		})
		// A non-unique (or non-existent) merge base means the PR can't be merged - close it.
		if errors.IsInvalidArgument(err) || gitapi.IsUnrelatedHistoriesError(err) {
			in := NonUniqueMergeBaseInput{
				PullReqStore:      s.pullreqStore,
				ActivityStore:     s.activityStore,
				PullReqEvReporter: s.pullreqEvReporter,
				SSEStreamer:       s.sseStreamer,
			}
			err = CloseBecauseNonUniqueMergeBase(ctx, in, targetSHA, newSHA, pr)
			if err != nil {
				return fmt.Errorf("failed to close pull request after non-unique merge base: %w", err)
			}
			return nil
		}
		if err != nil {
			return fmt.Errorf("failed to get merge base after branch update to=%s for PR=%d: %w",
				event.Payload.NewSHA, pr.Number, err)
		}
		oldMergeBase := pr.MergeBaseSHA
		newMergeBase := mergeBaseInfo.MergeBaseSHA
		// Update the database with the latest source commit SHA and the merge base SHA.
		pr, err = s.pullreqStore.UpdateOptLock(ctx, pr, func(pr *types.PullReq) error {
			// to avoid racing conditions with merge
			if pr.State != enum.PullReqStateOpen {
				return errPRNotOpen
			}
			pr.ActivitySeq++
			// The event's OldSHA must match the stored source SHA, otherwise events arrived out of order.
			if pr.SourceSHA != event.Payload.OldSHA {
				return fmt.Errorf(
					"failed to set SourceSHA for PR %d to value '%s', expected SHA '%s' but current pr has '%s'",
					pr.Number, event.Payload.NewSHA, event.Payload.OldSHA, pr.SourceSHA)
			}
			pr.SourceSHA = event.Payload.NewSHA
			pr.MergeTargetSHA = ptr.String(targetSHA.String())
			pr.MergeBaseSHA = newMergeBase.String()
			// reset merge-check fields for new run
			pr.MergeSHA = nil
			pr.Stats.DiffStats.Commits = nil
			pr.Stats.DiffStats.FilesChanged = nil
			pr.MarkAsMergeUnchecked()
			return nil
		})
		if errors.Is(err, errPRNotOpen) {
			return nil
		}
		if err != nil {
			return err
		}
		payload := &types.PullRequestActivityPayloadBranchUpdate{
			Old:         event.Payload.OldSHA,
			New:         event.Payload.NewSHA,
			Forced:      event.Payload.Forced,
			CommitTitle: commitTitle,
		}
		_, err = s.activityStore.CreateWithPayload(ctx, pr, event.Payload.PrincipalID, payload, nil)
		if err != nil {
			// non-critical error
			log.Ctx(ctx).Err(err).Msgf("failed to write pull request activity after branch update")
		}
		s.pullreqEvReporter.BranchUpdated(ctx, &pullreqevents.BranchUpdatedPayload{
			Base: pullreqevents.Base{
				PullReqID:    pr.ID,
				SourceRepoID: pr.SourceRepoID,
				TargetRepoID: pr.TargetRepoID,
				PrincipalID:  event.Payload.PrincipalID,
				Number:       pr.Number,
			},
			OldSHA:          event.Payload.OldSHA,
			NewSHA:          event.Payload.NewSHA,
			OldMergeBaseSHA: oldMergeBase,
			NewMergeBaseSHA: newMergeBase.String(),
			Forced:          event.Payload.Forced,
		})
		s.sseStreamer.Publish(ctx, targetRepo.ParentID, enum.SSETypePullReqUpdated, pr)
		return nil
	})
	return nil
}
// closePullReqOnBranchDelete handles branch delete events.
// It closes every open pull request for the branch and triggers the pull request BranchDeleted event.
func (s *Service) closePullReqOnBranchDelete(ctx context.Context,
	event *events.Event[*gitevents.BranchDeletedPayload],
) error {
	s.forEveryOpenPR(ctx, event.Payload.RepoID, event.Payload.Ref, func(pr *types.PullReq) error {
		targetRepo, err := s.repoFinder.FindByID(ctx, pr.TargetRepoID)
		if err != nil {
			return fmt.Errorf("failed to get repo info: %w", err)
		}
		var activitySeqBranchDeleted, activitySeqPRClosed int64
		pr, err = s.pullreqStore.UpdateOptLock(ctx, pr, func(pr *types.PullReq) error {
			// to avoid racing conditions with merge
			if pr.State != enum.PullReqStateOpen {
				return errPRNotOpen
			}
			// get sequence numbers for both activities (branch deletion should be first)
			pr.ActivitySeq += 2
			activitySeqBranchDeleted = pr.ActivitySeq - 1
			activitySeqPRClosed = pr.ActivitySeq
			pr.State = enum.PullReqStateClosed
			pr.MergeSHA = nil
			pr.MarkAsMergeUnchecked()
			return nil
		})
		if errors.Is(err, errPRNotOpen) {
			// PR already closed/merged by someone else - nothing to do.
			return nil
		}
		if err != nil {
			return fmt.Errorf("failed to close pull request after branch delete: %w", err)
		}
		// NOTE: We use the latest PR source sha for the branch deleted activity.
		// There is a chance the PR is behind, but we can't guarantee any missing commit exists after branch deletion.
		// Whatever is the source sha of the PR is most likely to be pointed at by the PR head ref.
		pr.ActivitySeq = activitySeqBranchDeleted
		_, err = s.activityStore.CreateWithPayload(ctx, pr, event.Payload.PrincipalID,
			&types.PullRequestActivityPayloadBranchDelete{SHA: pr.SourceSHA}, nil)
		if err != nil {
			// non-critical error
			log.Ctx(ctx).Err(err).Msg("failed to write pull request activity for branch deletion")
		}
		// Second activity: the state change from open to closed.
		pr.ActivitySeq = activitySeqPRClosed
		payload := &types.PullRequestActivityPayloadStateChange{
			Old:      enum.PullReqStateOpen,
			New:      enum.PullReqStateClosed,
			OldDraft: pr.IsDraft,
			NewDraft: pr.IsDraft,
		}
		if _, err := s.activityStore.CreateWithPayload(ctx, pr, event.Payload.PrincipalID, payload, nil); err != nil {
			// non-critical error
			log.Ctx(ctx).Err(err).Msg(
				"failed to write pull request activity for pullrequest closure after branch deletion",
			)
		}
		s.pullreqEvReporter.Closed(ctx, &pullreqevents.ClosedPayload{
			Base: pullreqevents.Base{
				PullReqID:    pr.ID,
				SourceRepoID: pr.SourceRepoID,
				TargetRepoID: pr.TargetRepoID,
				PrincipalID:  event.Payload.PrincipalID,
				Number:       pr.Number,
			},
			SourceSHA:    pr.SourceSHA,
			SourceBranch: pr.SourceBranch,
		})
		s.sseStreamer.Publish(ctx, targetRepo.ParentID, enum.SSETypePullReqUpdated, pr)
		return nil
	})
	return nil
}
// forEveryOpenPR is a utility function that executes the provided function
// for every open pull request created with the source branch given as a git ref.
// Errors from fn are logged and don't abort the iteration.
func (s *Service) forEveryOpenPR(ctx context.Context,
	repoID int64, ref string,
	fn func(pr *types.PullReq) error,
) {
	const largeLimit = 1000000 // effectively "no limit" - fetch all matching PRs

	branch, err := getBranchFromRef(ref)
	if err != nil {
		// check the error directly instead of inferring failure from an empty branch name
		log.Ctx(ctx).Err(err).Send()
		return
	}

	pullreqList, err := s.pullreqStore.List(ctx, &types.PullReqFilter{
		Page:         0,
		Size:         largeLimit,
		SourceRepoID: repoID,
		SourceBranch: branch,
		States:       []enum.PullReqState{enum.PullReqStateOpen},
		Sort:         enum.PullReqSortNumber,
		Order:        enum.OrderAsc,
	})
	if err != nil {
		log.Ctx(ctx).Err(err).Msg("failed to get list of open pull requests")
		return
	}

	for _, pr := range pullreqList {
		if err = fn(pr); err != nil {
			log.Ctx(ctx).Err(err).Msg("failed to process pull req")
		}
	}
}
// getBranchFromRef extracts the branch name from a git branch reference.
// It fails when the ref is not under refs/heads/ or when the name is empty.
func getBranchFromRef(ref string) (string, error) {
	const refPrefix = "refs/heads/"

	branch := strings.TrimPrefix(ref, refPrefix)
	if branch == ref { // prefix wasn't there - not a branch ref
		return "", fmt.Errorf("failed to get branch name from branch ref %s", ref)
	}
	if branch == "" {
		return "", fmt.Errorf("got an empty branch name from branch ref %s", ref)
	}

	return branch, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/pullreq/handlers_code_comments.go | app/services/pullreq/handlers_code_comments.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pullreq
import (
"context"
"fmt"
pullreqevents "github.com/harness/gitness/app/events/pullreq"
"github.com/harness/gitness/events"
"github.com/harness/gitness/types"
)
// updateCodeCommentsOnBranchUpdate re-anchors code comments after the pull
// request's source branch has been updated.
func (s *Service) updateCodeCommentsOnBranchUpdate(ctx context.Context,
	event *events.Event[*pullreqevents.BranchUpdatedPayload],
) error {
	payload := event.Payload
	return s.updateCodeComments(ctx,
		payload.TargetRepoID, payload.PullReqID,
		payload.NewSHA, payload.NewMergeBaseSHA)
}
// updateCodeCommentsOnReopen re-anchors code comments after the pull request
// has been reopened.
func (s *Service) updateCodeCommentsOnReopen(ctx context.Context,
	event *events.Event[*pullreqevents.ReopenedPayload],
) error {
	payload := event.Payload
	return s.updateCodeComments(ctx,
		payload.TargetRepoID, payload.PullReqID,
		payload.SourceSHA, payload.MergeBaseSHA)
}
// updateCodeComments migrates the positions of the pull request's code comments
// after the merge base and/or the source branch head have moved. Comments are
// migrated in two passes: first against the new merge base (the "old" side of
// the diff), then against the new source SHA (the "new" side).
func (s *Service) updateCodeComments(ctx context.Context,
	targetRepoID, pullreqID int64,
	newSourceSHA, newMergeBaseSHA string,
) error {
	repoGit, err := s.repoFinder.FindByID(ctx, targetRepoID)
	if err != nil {
		return fmt.Errorf("failed to get repo git info: %w", err)
	}
	var codeComments []*types.CodeComment
	// Pass 1: comments whose stored merge base differs from the new one.
	codeComments, err = s.codeCommentView.ListNotAtMergeBaseSHA(ctx, pullreqID, newMergeBaseSHA)
	if err != nil {
		return fmt.Errorf("failed to get list of code comments for update after merge base update: %w", err)
	}
	s.codeCommentMigrator.MigrateOld(ctx, repoGit.GitUID, newMergeBaseSHA, codeComments)
	err = s.codeCommentView.UpdateAll(ctx, codeComments)
	if err != nil {
		return fmt.Errorf("failed to update code comments after merge base update: %w", err)
	}
	// Pass 2: comments whose stored source SHA differs from the new one.
	codeComments, err = s.codeCommentView.ListNotAtSourceSHA(ctx, pullreqID, newSourceSHA)
	if err != nil {
		return fmt.Errorf("failed to get list of code comments for update after source branch update: %w", err)
	}
	s.codeCommentMigrator.MigrateNew(ctx, repoGit.GitUID, newSourceSHA, codeComments)
	err = s.codeCommentView.UpdateAll(ctx, codeComments)
	if err != nil {
		return fmt.Errorf("failed to update code comments after source branch update: %w", err)
	}
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/pullreq/handlers_counters.go | app/services/pullreq/handlers_counters.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pullreq
import (
"context"
"fmt"
pullreqevents "github.com/harness/gitness/app/events/pullreq"
"github.com/harness/gitness/events"
"github.com/harness/gitness/types"
)
// updatePRCountersOnCreated increments number of PRs and open PRs.
func (s *Service) updatePRCountersOnCreated(ctx context.Context,
	event *events.Event[*pullreqevents.CreatedPayload],
) error {
	if err := s.updatePRNumbers(ctx, event.Payload.TargetRepoID, 1, 1, 0, 0); err != nil {
		return fmt.Errorf("failed to update repository pull request numbers after PR creation: %w", err)
	}
	return nil
}
// updatePRCountersOnReopened increments number of open PRs and decrements number of closed.
func (s *Service) updatePRCountersOnReopened(ctx context.Context,
	event *events.Event[*pullreqevents.ReopenedPayload],
) error {
	if err := s.updatePRNumbers(ctx, event.Payload.TargetRepoID, 0, 1, -1, 0); err != nil {
		return fmt.Errorf("failed to update repository pull request numbers after PR reopen: %w", err)
	}
	return nil
}
// updatePRCountersOnClosed increments number of closed PRs and decrements number of open.
func (s *Service) updatePRCountersOnClosed(ctx context.Context,
	event *events.Event[*pullreqevents.ClosedPayload],
) error {
	if err := s.updatePRNumbers(ctx, event.Payload.TargetRepoID, 0, -1, 1, 0); err != nil {
		return fmt.Errorf("failed to update repository pull request numbers after PR close: %w", err)
	}
	return nil
}
// updatePRCountersOnMerged increments number of merged PRs and decrements number of open.
func (s *Service) updatePRCountersOnMerged(ctx context.Context,
	event *events.Event[*pullreqevents.MergedPayload],
) error {
	if err := s.updatePRNumbers(ctx, event.Payload.TargetRepoID, 0, -1, 0, 1); err != nil {
		return fmt.Errorf("failed to update repository pull request numbers after PR merge: %w", err)
	}
	return nil
}
// updatePRNumbers applies the given deltas to the repository's pull request
// counters using an optimistic-lock update.
func (s *Service) updatePRNumbers(ctx context.Context, repoID int64,
	deltaNew, deltaOpen, deltaClosed, deltaMerged int,
) error {
	repo, err := s.repoStore.Find(ctx, repoID)
	if err != nil {
		return fmt.Errorf("failed to get repository to update PR numbers: %w", err)
	}

	_, err = s.repoStore.UpdateOptLock(ctx, repo, func(r *types.Repository) error {
		r.NumPulls += deltaNew
		r.NumOpenPulls += deltaOpen
		r.NumClosedPulls += deltaClosed
		r.NumMergedPulls += deltaMerged
		return nil
	})

	return err
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/pullreq/close.go | app/services/pullreq/close.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pullreq
import (
"context"
"fmt"
"github.com/harness/gitness/app/bootstrap"
pullreqevents "github.com/harness/gitness/app/events/pullreq"
"github.com/harness/gitness/app/sse"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/errors"
"github.com/harness/gitness/git/sha"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/gotidy/ptr"
"github.com/rs/zerolog/log"
)
// NonUniqueMergeBaseInput bundles the dependencies needed by
// CloseBecauseNonUniqueMergeBase to close a pull request.
type NonUniqueMergeBaseInput struct {
	PullReqStore      store.PullReqStore
	ActivityStore     store.PullReqActivityStore
	PullReqEvReporter *pullreqevents.Reporter
	SSEStreamer       sse.Streamer
}
// CloseBecauseNonUniqueMergeBase closes an open pull request whose source and
// target branches no longer have a unique merge base. It records two activity
// entries (the non-unique-merge-base note, then the state change), reports the
// Closed event, and publishes an SSE update. Acting principal is the system
// service account.
func CloseBecauseNonUniqueMergeBase(
	ctx context.Context,
	in NonUniqueMergeBaseInput,
	targetSHA sha.SHA,
	sourceSHA sha.SHA,
	pr *types.PullReq,
) error {
	systemPrincipal := bootstrap.NewSystemServiceSession().Principal
	systemPrincipalID := systemPrincipal.ID
	var activitySeqMergeBase, activitySeqPRClosed int64
	pr, err := in.PullReqStore.UpdateOptLock(ctx, pr, func(pr *types.PullReq) error {
		// to avoid racing conditions with merge
		if pr.State != enum.PullReqStateOpen {
			return errPRNotOpen
		}
		// reserve sequence numbers for both activities (merge-base note first)
		pr.ActivitySeq += 2
		activitySeqMergeBase = pr.ActivitySeq - 1
		activitySeqPRClosed = pr.ActivitySeq
		pr.SourceSHA = sourceSHA.String()
		pr.MergeTargetSHA = ptr.String(targetSHA.String())
		pr.State = enum.PullReqStateClosed
		pr.MergeSHA = nil
		pr.MarkAsMergeUnchecked()
		return nil
	})
	if errors.Is(err, errPRNotOpen) {
		// already closed/merged elsewhere - nothing to do
		return nil
	}
	if err != nil {
		return fmt.Errorf("failed to close pull request after non-unique merge base: %w", err)
	}
	pr.ActivitySeq = activitySeqMergeBase
	payloadNonUniqueMergeBase := &types.PullRequestActivityPayloadNonUniqueMergeBase{
		TargetSHA: targetSHA,
		SourceSHA: sourceSHA,
	}
	_, err = in.ActivityStore.CreateWithPayload(ctx, pr, systemPrincipalID, payloadNonUniqueMergeBase, nil)
	if err != nil {
		// non-critical error
		log.Ctx(ctx).Err(err).Msg("failed to write pull request activity for non-unique merge-base")
	}
	pr.ActivitySeq = activitySeqPRClosed
	payloadStateChange := &types.PullRequestActivityPayloadStateChange{
		Old:      enum.PullReqStateOpen,
		New:      enum.PullReqStateClosed,
		OldDraft: pr.IsDraft,
		NewDraft: pr.IsDraft,
	}
	if _, err := in.ActivityStore.CreateWithPayload(ctx, pr, systemPrincipalID, payloadStateChange, nil); err != nil {
		// non-critical error
		log.Ctx(ctx).Err(err).Msg(
			"failed to write pull request activity for pull request closure after non-unique merge-base",
		)
	}
	in.PullReqEvReporter.Closed(ctx, &pullreqevents.ClosedPayload{
		Base: pullreqevents.Base{
			PullReqID:    pr.ID,
			SourceRepoID: pr.SourceRepoID,
			TargetRepoID: pr.TargetRepoID,
			PrincipalID:  systemPrincipalID,
			Number:       pr.Number,
		},
		SourceSHA:    pr.SourceSHA,
		SourceBranch: pr.SourceBranch,
	})
	in.SSEStreamer.Publish(ctx, pr.TargetRepoID, enum.SSETypePullReqUpdated, pr)
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/gitspacedeleteevent/wire.go | app/services/gitspacedeleteevent/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gitspacedeleteevent
import (
"context"
gitspacedeleteevents "github.com/harness/gitness/app/events/gitspacedelete"
"github.com/harness/gitness/app/services/gitspace"
"github.com/harness/gitness/events"
"github.com/google/wire"
)
// WireSet provides a wire set for this package; it currently exposes only the
// Service constructor.
var WireSet = wire.NewSet(
	ProvideService,
)
// ProvideService constructs the gitspace-delete event Service for wire injection.
func ProvideService(ctx context.Context,
	config *Config,
	gitspaceDeleteEventReaderFactory *events.ReaderFactory[*gitspacedeleteevents.Reader],
	gitspaceSvc *gitspace.Service,
) (*Service, error) {
	svc, err := NewService(ctx, config, gitspaceDeleteEventReaderFactory, gitspaceSvc)
	if err != nil {
		return nil, err
	}
	return svc, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/gitspacedeleteevent/service.go | app/services/gitspacedeleteevent/service.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gitspacedeleteevent
import (
"context"
"errors"
"fmt"
"time"
gitspacedeleteevents "github.com/harness/gitness/app/events/gitspacedelete"
"github.com/harness/gitness/app/services/gitspace"
"github.com/harness/gitness/events"
"github.com/harness/gitness/stream"
)
const groupGitspaceDeleteEvents = "gitness:gitspace_delete"
// Config holds the tuning knobs for the gitspace delete event reader.
type Config struct {
	EventReaderName string // unique name identifying this reader instance
	Concurrency     int    // number of events processed in parallel
	MaxRetries      int    // maximum redelivery attempts per event
	TimeoutInMins   int    // idle timeout for the stream consumer, in minutes
}

// Sanitize validates the config and reports the first violation found.
func (c *Config) Sanitize() error {
	switch {
	case c.EventReaderName == "":
		return errors.New("config.EventReaderName is required")
	case c.Concurrency < 1:
		return errors.New("config.Concurrency has to be a positive number")
	case c.MaxRetries < 0:
		return errors.New("config.MaxRetries can't be negative")
	default:
		// NOTE(review): TimeoutInMins is not validated here — confirm a
		// zero/negative timeout is acceptable upstream.
		return nil
	}
}
// Service consumes gitspace delete events and removes the corresponding
// gitspaces via the gitspace service.
type Service struct {
	config      *Config
	gitspaceSvc *gitspace.Service
}
// NewService validates the config, builds the Service, and launches the
// gitspace delete event reader that feeds events into it. The reader runs
// for the lifetime of ctx.
func NewService(
	ctx context.Context,
	config *Config,
	gitspaceDeleteEventReaderFactory *events.ReaderFactory[*gitspacedeleteevents.Reader],
	gitspaceSvc *gitspace.Service,
) (*Service, error) {
	if err := config.Sanitize(); err != nil {
		return nil, fmt.Errorf("provided gitspace event service config is invalid: %w", err)
	}
	service := &Service{
		config:      config,
		gitspaceSvc: gitspaceSvc,
	}
	// Launch a stream reader in the shared consumer group and register the
	// delete handler with the configured concurrency/retry/timeout options.
	_, err := gitspaceDeleteEventReaderFactory.Launch(ctx, groupGitspaceDeleteEvents, config.EventReaderName,
		func(r *gitspacedeleteevents.Reader) error {
			var idleTimeout = time.Duration(config.TimeoutInMins) * time.Minute
			r.Configure(
				stream.WithConcurrency(config.Concurrency),
				stream.WithHandlerOptions(
					stream.WithIdleTimeout(idleTimeout),
					stream.WithMaxRetries(config.MaxRetries),
				))
			// Registration error intentionally discarded.
			_ = r.RegisterGitspaceDeleteEvent(service.handleGitspaceDeleteEvent)
			return nil
		})
	if err != nil {
		return nil, fmt.Errorf("failed to launch gitspace delete event reader: %w", err)
	}
	return service, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/gitspacedeleteevent/handler.go | app/services/gitspacedeleteevent/handler.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gitspacedeleteevent
import (
"context"
"fmt"
gitspacedeleteevents "github.com/harness/gitness/app/events/gitspacedelete"
"github.com/harness/gitness/events"
"github.com/rs/zerolog/log"
)
// handleGitspaceDeleteEvent handles a single gitspace delete event: it looks
// up the gitspace config (with its latest instance) and attempts removal.
// A removal failure is logged but not returned, so the event is not retried.
func (s *Service) handleGitspaceDeleteEvent(
	ctx context.Context,
	event *events.Event[*gitspacedeleteevents.GitspaceDeleteEventPayload],
) error {
	log.Debug().Msgf("handling gitspace delete event with payload: %+v", event.Payload)
	gitspaceConfigIdentifier := event.Payload.GitspaceConfigIdentifier
	spaceID := event.Payload.SpaceID
	gitspaceConfig, err := s.gitspaceSvc.FindWithLatestInstance(ctx, spaceID, gitspaceConfigIdentifier)
	if err != nil {
		return fmt.Errorf("failed to find gitspace config %s for space %d while handling delete event: %w",
			gitspaceConfigIdentifier, spaceID, err)
	}
	err = s.gitspaceSvc.RemoveGitspace(ctx, *gitspaceConfig, true)
	if err != nil {
		// NOTE: No need to retry from the event handler. The background job will take care.
		log.Debug().Err(err).Msgf("unable to delete gitspace: %s", gitspaceConfigIdentifier)
	}
	log.Debug().Msgf("handled gitspace delete event with payload: %+v", event.Payload)
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/webhook/wire.go | app/services/webhook/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhook
import (
"context"
gitevents "github.com/harness/gitness/app/events/git"
pullreqevents "github.com/harness/gitness/app/events/pullreq"
"github.com/harness/gitness/app/sse"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/app/url"
"github.com/harness/gitness/audit"
"github.com/harness/gitness/encrypt"
"github.com/harness/gitness/events"
"github.com/harness/gitness/git"
"github.com/harness/gitness/secret"
"github.com/harness/gitness/store/database/dbtx"
"github.com/google/wire"
)
// WireSet provides a wire set for this package.
// It exposes the webhook Service and its URL provider for google/wire.
var WireSet = wire.NewSet(
	ProvideService,
	ProvideURLProvider,
)
// ProvideService constructs the webhook Service for wire, forwarding all
// injected dependencies to NewService in declaration order.
func ProvideService(
	ctx context.Context,
	config Config,
	tx dbtx.Transactor,
	gitReaderFactory *events.ReaderFactory[*gitevents.Reader],
	prReaderFactory *events.ReaderFactory[*pullreqevents.Reader],
	webhookStore store.WebhookStore,
	webhookExecutionStore store.WebhookExecutionStore,
	spaceStore store.SpaceStore,
	repoStore store.RepoStore,
	pullreqStore store.PullReqStore,
	activityStore store.PullReqActivityStore,
	urlProvider url.Provider,
	principalStore store.PrincipalStore,
	git git.Interface,
	encrypter encrypt.Encrypter,
	labelStore store.LabelStore,
	webhookURLProvider URLProvider,
	labelValueStore store.LabelValueStore,
	auditService audit.Service,
	sseStreamer sse.Streamer,
	secretService secret.Service,
	spacePathStore store.SpacePathStore,
) (*Service, error) {
	return NewService(
		ctx,
		config,
		tx,
		gitReaderFactory,
		prReaderFactory,
		webhookStore,
		webhookExecutionStore,
		spaceStore, repoStore,
		pullreqStore,
		activityStore,
		urlProvider,
		principalStore,
		git,
		encrypter,
		labelStore,
		webhookURLProvider,
		labelValueStore,
		auditService,
		sseStreamer,
		secretService,
		spacePathStore,
	)
}
// ProvideURLProvider constructs the webhook URL provider for wire.
func ProvideURLProvider(ctx context.Context) URLProvider {
	provider := NewURLProvider(ctx)
	return provider
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/webhook/handler_branch.go | app/services/webhook/handler_branch.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhook
import (
"context"
"fmt"
gitevents "github.com/harness/gitness/app/events/git"
"github.com/harness/gitness/app/url"
"github.com/harness/gitness/errors"
"github.com/harness/gitness/events"
"github.com/harness/gitness/git"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
const MaxWebhookCommitFileStats = 20
// ReferencePayload describes the payload of Reference related webhook triggers.
// Note: Use same payload for all reference operations to make it easier for consumers.
type ReferencePayload struct {
	BaseSegment
	ReferenceSegment
	ReferenceDetailsSegment
	ReferenceUpdateSegment
}
// handleEventBranchCreated handles branch created events
// and triggers branch created webhooks for the source repo.
func (s *Service) handleEventBranchCreated(ctx context.Context,
	event *events.Event[*gitevents.BranchCreatedPayload]) error {
	return s.triggerForEventWithRepo(ctx, enum.WebhookTriggerBranchCreated,
		event.ID, event.Payload.PrincipalID, event.Payload.RepoID,
		func(principal *types.Principal, repo *types.Repository) (any, error) {
			commitInfo, err := s.fetchCommitInfoForEvent(ctx, repo.GitUID, repo.Path, event.Payload.SHA, s.urlProvider)
			if err != nil {
				return nil, err
			}
			repoInfo := repositoryInfoFrom(ctx, repo, s.urlProvider)
			return &ReferencePayload{
				BaseSegment: BaseSegment{
					Trigger:   enum.WebhookTriggerBranchCreated,
					Repo:      repoInfo,
					Principal: principalInfoFrom(principal.ToPrincipalInfo()),
				},
				ReferenceSegment: ReferenceSegment{
					Ref: ReferenceInfo{
						Name: event.Payload.Ref,
						Repo: repoInfo,
					},
				},
				ReferenceDetailsSegment: ReferenceDetailsSegment{
					SHA:        event.Payload.SHA,
					Commit:     &commitInfo,
					HeadCommit: &commitInfo,
				},
				// A newly created branch has no previous tip, hence the nil SHA.
				ReferenceUpdateSegment: ReferenceUpdateSegment{
					OldSHA: types.NilSHA,
					Forced: false,
				},
			}, nil
		})
}
// handleEventBranchUpdated handles branch updated events
// and triggers branch updated webhooks for the source repo.
func (s *Service) handleEventBranchUpdated(ctx context.Context,
	event *events.Event[*gitevents.BranchUpdatedPayload]) error {
	return s.triggerForEventWithRepo(ctx, enum.WebhookTriggerBranchUpdated,
		event.ID, event.Payload.PrincipalID, event.Payload.RepoID,
		func(principal *types.Principal, repo *types.Repository) (any, error) {
			commitsInfo, totalCommits, err := s.fetchCommitsInfoForEvent(ctx, repo.GitUID,
				repo.Path, event.Payload.OldSHA, event.Payload.NewSHA, s.urlProvider)
			if err != nil {
				return nil, err
			}
			// Safe: fetchCommitsInfoForEvent errors out when the list is empty.
			commitInfo := commitsInfo[0]
			repoInfo := repositoryInfoFrom(ctx, repo, s.urlProvider)
			return &ReferencePayload{
				BaseSegment: BaseSegment{
					Trigger:   enum.WebhookTriggerBranchUpdated,
					Repo:      repoInfo,
					Principal: principalInfoFrom(principal.ToPrincipalInfo()),
				},
				ReferenceSegment: ReferenceSegment{
					Ref: ReferenceInfo{
						Name: event.Payload.Ref,
						Repo: repoInfo,
					},
				},
				ReferenceDetailsSegment: ReferenceDetailsSegment{
					SHA:               event.Payload.NewSHA,
					Commit:            &commitInfo,
					HeadCommit:        &commitInfo,
					Commits:           &commitsInfo,
					TotalCommitsCount: totalCommits,
				},
				ReferenceUpdateSegment: ReferenceUpdateSegment{
					OldSHA: event.Payload.OldSHA,
					Forced: event.Payload.Forced,
				},
			}, nil
		})
}
// handleEventBranchDeleted handles branch deleted events
// and triggers branch deleted webhooks for the source repo.
func (s *Service) handleEventBranchDeleted(ctx context.Context,
	event *events.Event[*gitevents.BranchDeletedPayload]) error {
	return s.triggerForEventWithRepo(ctx, enum.WebhookTriggerBranchDeleted,
		event.ID, event.Payload.PrincipalID, event.Payload.RepoID,
		func(principal *types.Principal, repo *types.Repository) (any, error) {
			repoInfo := repositoryInfoFrom(ctx, repo, s.urlProvider)
			return &ReferencePayload{
				BaseSegment: BaseSegment{
					Trigger:   enum.WebhookTriggerBranchDeleted,
					Repo:      repoInfo,
					Principal: principalInfoFrom(principal.ToPrincipalInfo()),
				},
				ReferenceSegment: ReferenceSegment{
					Ref: ReferenceInfo{
						Name: event.Payload.Ref,
						Repo: repoInfo,
					},
				},
				// A deleted branch has no new tip, hence the nil SHA and no commit.
				ReferenceDetailsSegment: ReferenceDetailsSegment{
					SHA:    types.NilSHA,
					Commit: nil,
				},
				ReferenceUpdateSegment: ReferenceUpdateSegment{
					OldSHA: event.Payload.SHA,
					Forced: false,
				},
			}, nil
		})
}
// fetchCommitInfoForEvent fetches a single commit by SHA and converts it into
// the webhook CommitInfo representation. If the commit no longer exists, the
// event is discarded (not retried).
func (s *Service) fetchCommitInfoForEvent(
	ctx context.Context,
	repoUID string,
	repoPath string,
	commitSHA string,
	urlProvider url.Provider,
) (CommitInfo, error) {
	out, err := s.git.GetCommit(ctx, &git.GetCommitParams{
		ReadParams: git.ReadParams{
			RepoUID: repoUID,
		},
		Revision: commitSHA,
	})
	if errors.AsStatus(err) == errors.StatusNotFound {
		// this could happen if the commit has been deleted and garbage collected by now
		// or if the targetSha doesn't point to an event - either way discard the event.
		return CommitInfo{}, events.NewDiscardEventErrorf("commit with targetSha '%s' doesn't exist", commitSHA)
	}
	if err != nil {
		return CommitInfo{}, fmt.Errorf("failed to get commit with targetSha '%s': %w", commitSHA, err)
	}
	return commitInfoFrom(ctx, repoPath, out.Commit, urlProvider), nil
}
// fetchCommitsInfoForEvent lists up to MaxWebhookCommitFileStats commits
// (with file stats) in the range oldSHA..newSHA and returns them together
// with the total commit count. If the tip commit no longer exists, the event
// is discarded (not retried). Guarantees a non-empty result on success.
func (s *Service) fetchCommitsInfoForEvent(
	ctx context.Context,
	repoUID string,
	repoPath string,
	oldSHA string,
	newSHA string,
	urlProvider url.Provider,
) ([]CommitInfo, int, error) {
	listCommitsParams := git.ListCommitsParams{
		ReadParams:   git.ReadParams{RepoUID: repoUID},
		GitREF:       newSHA,
		After:        oldSHA,
		Page:         0,
		Limit:        MaxWebhookCommitFileStats,
		IncludeStats: true,
	}
	listCommitsOutput, err := s.git.ListCommits(ctx, &listCommitsParams)
	if errors.AsStatus(err) == errors.StatusNotFound {
		// this could happen if the commit has been deleted and garbage collected by now
		// or if the targetSha doesn't point to an event - either way discard the event.
		return []CommitInfo{}, 0, events.NewDiscardEventErrorf("commit with targetSha '%s' doesn't exist", newSHA)
	}
	if err != nil {
		return []CommitInfo{}, 0, fmt.Errorf("failed to get commit with targetSha '%s': %w", newSHA, err)
	}
	if len(listCommitsOutput.Commits) == 0 {
		return nil, 0, fmt.Errorf("no commit found between %s and %s", oldSHA, newSHA)
	}
	return commitsInfoFrom(ctx, repoPath, listCommitsOutput.Commits, urlProvider), listCommitsOutput.TotalCommits, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/webhook/handler_pullreq.go | app/services/webhook/handler_pullreq.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhook
import (
"context"
"fmt"
pullreqevents "github.com/harness/gitness/app/events/pullreq"
"github.com/harness/gitness/events"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
const (
	// gitReferenceNamePrefixBranch is the prefix of references of type branch.
	// Used to build fully-qualified ref names (e.g. "refs/heads/main").
	gitReferenceNamePrefixBranch = "refs/heads/"
)
// PullReqCreatedPayload describes the body of the pullreq created trigger.
// TODO: move in separate package for small import?
type PullReqCreatedPayload struct {
	BaseSegment
	PullReqSegment
	PullReqTargetReferenceSegment
	ReferenceSegment
	ReferenceDetailsSegment
}
// handleEventPullReqCreated handles created events for pull requests
// and triggers pullreq created webhooks for the source repo.
func (s *Service) handleEventPullReqCreated(
	ctx context.Context,
	event *events.Event[*pullreqevents.CreatedPayload],
) error {
	return s.triggerForEventWithPullReq(ctx, enum.WebhookTriggerPullReqCreated,
		event.ID, event.Payload.PrincipalID, event.Payload.PullReqID,
		func(principal *types.Principal, pr *types.PullReq, targetRepo, sourceRepo *types.Repository) (any, error) {
			// NOTE(review): sibling handlers (reopened/closed/merged) fetch the
			// source commit from sourceRepo; here targetRepo.GitUID is used —
			// confirm this is intentional (e.g. for fork pull requests).
			commitInfo, err := s.fetchCommitInfoForEvent(ctx, targetRepo.GitUID, targetRepo.Path,
				event.Payload.SourceSHA, s.urlProvider)
			if err != nil {
				return nil, err
			}
			targetRepoInfo := repositoryInfoFrom(ctx, targetRepo, s.urlProvider)
			sourceRepoInfo := repositoryInfoFrom(ctx, sourceRepo, s.urlProvider)
			return &PullReqCreatedPayload{
				BaseSegment: BaseSegment{
					Trigger:   enum.WebhookTriggerPullReqCreated,
					Repo:      targetRepoInfo,
					Principal: principalInfoFrom(principal.ToPrincipalInfo()),
				},
				PullReqSegment: PullReqSegment{
					PullReq: pullReqInfoFrom(ctx, pr, targetRepo, s.urlProvider),
				},
				PullReqTargetReferenceSegment: PullReqTargetReferenceSegment{
					TargetRef: ReferenceInfo{
						Name: gitReferenceNamePrefixBranch + pr.TargetBranch,
						Repo: targetRepoInfo,
					},
				},
				ReferenceSegment: ReferenceSegment{
					Ref: ReferenceInfo{
						Name: gitReferenceNamePrefixBranch + pr.SourceBranch,
						Repo: sourceRepoInfo,
					},
				},
				ReferenceDetailsSegment: ReferenceDetailsSegment{
					SHA:        event.Payload.SourceSHA,
					Commit:     &commitInfo,
					HeadCommit: &commitInfo,
				},
			}, nil
		})
}
// PullReqReopenedPayload describes the body of the pullreq reopened trigger.
// Note: same as payload for created.
type PullReqReopenedPayload PullReqCreatedPayload
// handleEventPullReqReopened handles reopened events for pull requests
// and triggers pullreq reopened webhooks for the source repo.
func (s *Service) handleEventPullReqReopened(
	ctx context.Context,
	event *events.Event[*pullreqevents.ReopenedPayload],
) error {
	return s.triggerForEventWithPullReq(ctx, enum.WebhookTriggerPullReqReopened,
		event.ID, event.Payload.PrincipalID, event.Payload.PullReqID,
		func(principal *types.Principal, pr *types.PullReq, targetRepo, sourceRepo *types.Repository) (any, error) {
			commitInfo, err := s.fetchCommitInfoForEvent(ctx, sourceRepo.GitUID, sourceRepo.Path,
				event.Payload.SourceSHA, s.urlProvider)
			if err != nil {
				return nil, err
			}
			targetRepoInfo := repositoryInfoFrom(ctx, targetRepo, s.urlProvider)
			sourceRepoInfo := repositoryInfoFrom(ctx, sourceRepo, s.urlProvider)
			return &PullReqReopenedPayload{
				BaseSegment: BaseSegment{
					Trigger:   enum.WebhookTriggerPullReqReopened,
					Repo:      targetRepoInfo,
					Principal: principalInfoFrom(principal.ToPrincipalInfo()),
				},
				PullReqSegment: PullReqSegment{
					PullReq: pullReqInfoFrom(ctx, pr, targetRepo, s.urlProvider),
				},
				PullReqTargetReferenceSegment: PullReqTargetReferenceSegment{
					TargetRef: ReferenceInfo{
						Name: gitReferenceNamePrefixBranch + pr.TargetBranch,
						Repo: targetRepoInfo,
					},
				},
				ReferenceSegment: ReferenceSegment{
					Ref: ReferenceInfo{
						Name: gitReferenceNamePrefixBranch + pr.SourceBranch,
						Repo: sourceRepoInfo,
					},
				},
				ReferenceDetailsSegment: ReferenceDetailsSegment{
					SHA:        event.Payload.SourceSHA,
					Commit:     &commitInfo,
					HeadCommit: &commitInfo,
				},
			}, nil
		})
}
// PullReqBranchUpdatedPayload describes the body of the pullreq branch updated trigger.
// TODO: move in separate package for small import?
type PullReqBranchUpdatedPayload struct {
	BaseSegment
	PullReqSegment
	PullReqTargetReferenceSegment
	ReferenceSegment
	ReferenceDetailsSegment
	ReferenceUpdateSegment
}
// handleEventPullReqBranchUpdated handles branch updated events for pull requests
// and triggers pullreq branch updated webhooks for the source repo.
func (s *Service) handleEventPullReqBranchUpdated(
	ctx context.Context,
	event *events.Event[*pullreqevents.BranchUpdatedPayload],
) error {
	return s.triggerForEventWithPullReq(ctx, enum.WebhookTriggerPullReqBranchUpdated,
		event.ID, event.Payload.PrincipalID, event.Payload.PullReqID,
		func(principal *types.Principal, pr *types.PullReq, targetRepo, sourceRepo *types.Repository) (any, error) {
			commitsInfo, totalCommits, err := s.fetchCommitsInfoForEvent(ctx, sourceRepo.GitUID, sourceRepo.Path,
				event.Payload.OldSHA, event.Payload.NewSHA, s.urlProvider)
			if err != nil {
				return nil, err
			}
			// Safe: fetchCommitsInfoForEvent errors out when the list is empty.
			commitInfo := commitsInfo[0]
			targetRepoInfo := repositoryInfoFrom(ctx, targetRepo, s.urlProvider)
			sourceRepoInfo := repositoryInfoFrom(ctx, sourceRepo, s.urlProvider)
			return &PullReqBranchUpdatedPayload{
				BaseSegment: BaseSegment{
					Trigger:   enum.WebhookTriggerPullReqBranchUpdated,
					Repo:      targetRepoInfo,
					Principal: principalInfoFrom(principal.ToPrincipalInfo()),
				},
				PullReqSegment: PullReqSegment{
					PullReq: pullReqInfoFrom(ctx, pr, targetRepo, s.urlProvider),
				},
				PullReqTargetReferenceSegment: PullReqTargetReferenceSegment{
					TargetRef: ReferenceInfo{
						Name: gitReferenceNamePrefixBranch + pr.TargetBranch,
						Repo: targetRepoInfo,
					},
				},
				ReferenceSegment: ReferenceSegment{
					Ref: ReferenceInfo{
						Name: gitReferenceNamePrefixBranch + pr.SourceBranch,
						Repo: sourceRepoInfo,
					},
				},
				ReferenceDetailsSegment: ReferenceDetailsSegment{
					SHA:               event.Payload.NewSHA,
					Commit:            &commitInfo,
					HeadCommit:        &commitInfo,
					Commits:           &commitsInfo,
					TotalCommitsCount: totalCommits,
				},
				ReferenceUpdateSegment: ReferenceUpdateSegment{
					OldSHA: event.Payload.OldSHA,
					Forced: event.Payload.Forced,
				},
			}, nil
		})
}
// PullReqClosedPayload describes the body of the pullreq closed trigger.
type PullReqClosedPayload struct {
	BaseSegment
	PullReqSegment
	PullReqTargetReferenceSegment
	ReferenceSegment
	ReferenceDetailsSegment
}
// handleEventPullReqClosed handles closed events for pull requests
// and triggers pullreq closed webhooks.
func (s *Service) handleEventPullReqClosed(
	ctx context.Context,
	event *events.Event[*pullreqevents.ClosedPayload],
) error {
	return s.triggerForEventWithPullReq(ctx, enum.WebhookTriggerPullReqClosed,
		event.ID, event.Payload.PrincipalID, event.Payload.PullReqID,
		func(principal *types.Principal, pr *types.PullReq, targetRepo, sourceRepo *types.Repository) (any, error) {
			commitInfo, err := s.fetchCommitInfoForEvent(ctx, sourceRepo.GitUID, sourceRepo.Path,
				event.Payload.SourceSHA, s.urlProvider)
			if err != nil {
				return nil, err
			}
			targetRepoInfo := repositoryInfoFrom(ctx, targetRepo, s.urlProvider)
			sourceRepoInfo := repositoryInfoFrom(ctx, sourceRepo, s.urlProvider)
			return &PullReqClosedPayload{
				BaseSegment: BaseSegment{
					Trigger:   enum.WebhookTriggerPullReqClosed,
					Repo:      targetRepoInfo,
					Principal: principalInfoFrom(principal.ToPrincipalInfo()),
				},
				PullReqSegment: PullReqSegment{
					PullReq: pullReqInfoFrom(ctx, pr, targetRepo, s.urlProvider),
				},
				PullReqTargetReferenceSegment: PullReqTargetReferenceSegment{
					TargetRef: ReferenceInfo{
						Name: gitReferenceNamePrefixBranch + pr.TargetBranch,
						Repo: targetRepoInfo,
					},
				},
				ReferenceSegment: ReferenceSegment{
					Ref: ReferenceInfo{
						Name: gitReferenceNamePrefixBranch + pr.SourceBranch,
						Repo: sourceRepoInfo,
					},
				},
				ReferenceDetailsSegment: ReferenceDetailsSegment{
					SHA:        event.Payload.SourceSHA,
					Commit:     &commitInfo,
					HeadCommit: &commitInfo,
				},
			}, nil
		})
}
// PullReqMergedPayload describes the body of the pullreq merged trigger.
// Structurally identical to PullReqClosedPayload.
type PullReqMergedPayload struct {
	BaseSegment
	PullReqSegment
	PullReqTargetReferenceSegment
	ReferenceSegment
	ReferenceDetailsSegment
}
// handleEventPullReqMerged handles merged events for pull requests
// and triggers pullreq merged webhooks.
func (s *Service) handleEventPullReqMerged(
	ctx context.Context,
	event *events.Event[*pullreqevents.MergedPayload],
) error {
	return s.triggerForEventWithPullReq(ctx, enum.WebhookTriggerPullReqMerged,
		event.ID, event.Payload.PrincipalID, event.Payload.PullReqID,
		func(principal *types.Principal, pr *types.PullReq, targetRepo, sourceRepo *types.Repository) (any, error) {
			commitInfo, err := s.fetchCommitInfoForEvent(ctx, sourceRepo.GitUID, sourceRepo.Path,
				event.Payload.SourceSHA, s.urlProvider)
			if err != nil {
				return nil, err
			}
			targetRepoInfo := repositoryInfoFrom(ctx, targetRepo, s.urlProvider)
			sourceRepoInfo := repositoryInfoFrom(ctx, sourceRepo, s.urlProvider)
			// Fix: previously returned *PullReqClosedPayload; use the dedicated
			// merged payload type (structurally identical, so the wire format
			// is unchanged).
			return &PullReqMergedPayload{
				BaseSegment: BaseSegment{
					Trigger:   enum.WebhookTriggerPullReqMerged,
					Repo:      targetRepoInfo,
					Principal: principalInfoFrom(principal.ToPrincipalInfo()),
				},
				PullReqSegment: PullReqSegment{
					PullReq: pullReqInfoFrom(ctx, pr, targetRepo, s.urlProvider),
				},
				PullReqTargetReferenceSegment: PullReqTargetReferenceSegment{
					TargetRef: ReferenceInfo{
						Name: gitReferenceNamePrefixBranch + pr.TargetBranch,
						Repo: targetRepoInfo,
					},
				},
				ReferenceSegment: ReferenceSegment{
					Ref: ReferenceInfo{
						Name: gitReferenceNamePrefixBranch + pr.SourceBranch,
						Repo: sourceRepoInfo,
					},
				},
				ReferenceDetailsSegment: ReferenceDetailsSegment{
					SHA:        event.Payload.SourceSHA,
					Commit:     &commitInfo,
					HeadCommit: &commitInfo,
				},
			}, nil
		})
}
// PullReqCommentPayload describes the body of the pullreq comment create trigger.
// Also reused for the comment updated trigger.
type PullReqCommentPayload struct {
	BaseSegment
	PullReqSegment
	PullReqTargetReferenceSegment
	ReferenceSegment
	ReferenceDetailsSegment
	PullReqCommentSegment
}
// handleEventPullReqComment handles comment created events for pull requests
// and triggers pullreq comment created webhooks. The comment text is loaded
// from the activity store using the activity ID carried by the event.
func (s *Service) handleEventPullReqComment(
	ctx context.Context,
	event *events.Event[*pullreqevents.CommentCreatedPayload],
) error {
	return s.triggerForEventWithPullReq(ctx, enum.WebhookTriggerPullReqCommentCreated,
		event.ID, event.Payload.PrincipalID, event.Payload.PullReqID,
		func(principal *types.Principal, pr *types.PullReq, targetRepo, sourceRepo *types.Repository) (any, error) {
			targetRepoInfo := repositoryInfoFrom(ctx, targetRepo, s.urlProvider)
			sourceRepoInfo := repositoryInfoFrom(ctx, sourceRepo, s.urlProvider)
			activity, err := s.activityStore.Find(ctx, event.Payload.ActivityID)
			if err != nil {
				// Fix: corrected "acitivity" typo in the error message.
				return nil, fmt.Errorf(
					"failed to get activity by id for activity id %d: %w",
					event.Payload.ActivityID,
					err,
				)
			}
			commitInfo, err := s.fetchCommitInfoForEvent(ctx, sourceRepo.GitUID, sourceRepo.Path,
				event.Payload.SourceSHA, s.urlProvider)
			if err != nil {
				return nil, err
			}
			return &PullReqCommentPayload{
				BaseSegment: BaseSegment{
					Trigger:   enum.WebhookTriggerPullReqCommentCreated,
					Repo:      targetRepoInfo,
					Principal: principalInfoFrom(principal.ToPrincipalInfo()),
				},
				PullReqSegment: PullReqSegment{
					PullReq: pullReqInfoFrom(ctx, pr, targetRepo, s.urlProvider),
				},
				PullReqTargetReferenceSegment: PullReqTargetReferenceSegment{
					TargetRef: ReferenceInfo{
						Name: gitReferenceNamePrefixBranch + pr.TargetBranch,
						Repo: targetRepoInfo,
					},
				},
				ReferenceSegment: ReferenceSegment{
					Ref: ReferenceInfo{
						Name: gitReferenceNamePrefixBranch + pr.SourceBranch,
						Repo: sourceRepoInfo,
					},
				},
				ReferenceDetailsSegment: ReferenceDetailsSegment{
					SHA:        event.Payload.SourceSHA,
					Commit:     &commitInfo,
					HeadCommit: &commitInfo,
				},
				PullReqCommentSegment: PullReqCommentSegment{
					CommentInfo: CommentInfo{
						Text:     activity.Text,
						ID:       activity.ID,
						ParentID: activity.ParentID,
						Kind:     activity.Kind,
						Created:  activity.Created,
						Updated:  activity.Updated,
					},
					// Only populated when the activity is a valid code comment.
					CodeCommentInfo: extractCodeCommentInfoIfAvailable(activity),
				},
			}, nil
		})
}
// handleEventPullReqCommentUpdated handles updated events for pull request
// comments and triggers pullreq comment updated webhooks. Unlike the created
// handler, no commit info is fetched (the ReferenceDetailsSegment is omitted).
func (s *Service) handleEventPullReqCommentUpdated(
	ctx context.Context,
	event *events.Event[*pullreqevents.CommentUpdatedPayload],
) error {
	return s.triggerForEventWithPullReq(
		ctx,
		enum.WebhookTriggerPullReqCommentUpdated,
		event.ID,
		event.Payload.PrincipalID,
		event.Payload.PullReqID,
		func(principal *types.Principal, pr *types.PullReq, targetRepo, sourceRepo *types.Repository) (any, error) {
			targetRepoInfo := repositoryInfoFrom(ctx, targetRepo, s.urlProvider)
			sourceRepoInfo := repositoryInfoFrom(ctx, sourceRepo, s.urlProvider)
			activity, err := s.activityStore.Find(ctx, event.Payload.ActivityID)
			if err != nil {
				// Fix: corrected "acitivity" typo in the error message.
				return nil, fmt.Errorf(
					"failed to get activity by id for activity id %d: %w",
					event.Payload.ActivityID,
					err,
				)
			}
			return &PullReqCommentPayload{
				BaseSegment: BaseSegment{
					Trigger:   enum.WebhookTriggerPullReqCommentUpdated,
					Repo:      targetRepoInfo,
					Principal: principalInfoFrom(principal.ToPrincipalInfo()),
				},
				PullReqSegment: PullReqSegment{
					PullReq: pullReqInfoFrom(ctx, pr, targetRepo, s.urlProvider),
				},
				PullReqTargetReferenceSegment: PullReqTargetReferenceSegment{
					TargetRef: ReferenceInfo{
						Name: gitReferenceNamePrefixBranch + pr.TargetBranch,
						Repo: targetRepoInfo,
					},
				},
				ReferenceSegment: ReferenceSegment{
					Ref: ReferenceInfo{
						Name: gitReferenceNamePrefixBranch + pr.SourceBranch,
						Repo: sourceRepoInfo,
					},
				},
				PullReqCommentSegment: PullReqCommentSegment{
					CommentInfo: CommentInfo{
						Text:     activity.Text,
						ID:       activity.ID,
						ParentID: activity.ParentID,
						Created:  activity.Created,
						Updated:  activity.Updated,
						Kind:     activity.Kind,
					},
					// Only populated when the activity is a valid code comment.
					CodeCommentInfo: extractCodeCommentInfoIfAvailable(activity),
				},
			}, nil
		})
}
// extractCodeCommentInfoIfAvailable returns the code-comment details of the
// given activity, or nil when the activity is not a valid code comment.
func extractCodeCommentInfoIfAvailable(
	activity *types.PullReqActivity,
) *CodeCommentInfo {
	if activity.IsValidCodeComment() {
		return (*CodeCommentInfo)(activity.CodeComment)
	}
	return nil
}
// PullReqLabelAssignedPayload describes the body of the pullreq label assignment trigger.
type PullReqLabelAssignedPayload struct {
	BaseSegment
	PullReqSegment
	PullReqLabelSegment
}
// handleEventPullReqLabelAssigned handles label assigned events for pull
// requests and triggers pullreq label assigned webhooks. The label (and its
// optional value) is resolved from the stores using IDs carried by the event.
func (s *Service) handleEventPullReqLabelAssigned(
	ctx context.Context,
	event *events.Event[*pullreqevents.LabelAssignedPayload],
) error {
	return s.triggerForEventWithPullReq(
		ctx,
		enum.WebhookTriggerPullReqLabelAssigned,
		event.ID, event.Payload.PrincipalID,
		event.Payload.PullReqID,
		func(
			principal *types.Principal,
			pr *types.PullReq,
			targetRepo,
			_ *types.Repository,
		) (any, error) {
			label, err := s.labelStore.FindByID(ctx, event.Payload.LabelID)
			if err != nil {
				return nil, fmt.Errorf("failed to find label by id: %w", err)
			}
			// The label value is optional; resolve it only when a ValueID is set.
			var labelValue *string
			if event.Payload.ValueID != nil {
				value, err := s.labelValueStore.FindByID(ctx, *event.Payload.ValueID)
				if err != nil {
					return nil, fmt.Errorf("failed to find label value by id: %d %w", *event.Payload.ValueID, err)
				}
				labelValue = &value.Value
			}
			targetRepoInfo := repositoryInfoFrom(ctx, targetRepo, s.urlProvider)
			return &PullReqLabelAssignedPayload{
				BaseSegment: BaseSegment{
					Trigger:   enum.WebhookTriggerPullReqLabelAssigned,
					Repo:      targetRepoInfo,
					Principal: principalInfoFrom(principal.ToPrincipalInfo()),
				},
				PullReqSegment: PullReqSegment{
					PullReq: pullReqInfoFrom(ctx, pr, targetRepo, s.urlProvider),
				},
				PullReqLabelSegment: PullReqLabelSegment{
					LabelInfo: LabelInfo{
						ID:      event.Payload.LabelID,
						Key:     label.Key,
						ValueID: event.Payload.ValueID,
						Value:   labelValue,
					},
				},
			}, nil
		})
}
// PullReqUpdatedPayload describes the body of the pullreq updated trigger.
type PullReqUpdatedPayload struct {
	BaseSegment
	PullReqSegment
	PullReqTargetReferenceSegment
	ReferenceSegment
	PullReqUpdateSegment
}
// handleEventPullReqUpdated handles updated events for pull requests
// and triggers pullreq updated webhooks for the target repo.
// The update segment carries both old and new title/description values.
func (s *Service) handleEventPullReqUpdated(
	ctx context.Context,
	event *events.Event[*pullreqevents.UpdatedPayload],
) error {
	return s.triggerForEventWithPullReq(ctx, enum.WebhookTriggerPullReqUpdated,
		event.ID, event.Payload.PrincipalID, event.Payload.PullReqID,
		func(principal *types.Principal, pr *types.PullReq, targetRepo, sourceRepo *types.Repository) (any, error) {
			targetRepoInfo := repositoryInfoFrom(ctx, targetRepo, s.urlProvider)
			sourceRepoInfo := repositoryInfoFrom(ctx, sourceRepo, s.urlProvider)
			return &PullReqUpdatedPayload{
				BaseSegment: BaseSegment{
					Trigger:   enum.WebhookTriggerPullReqUpdated,
					Repo:      targetRepoInfo,
					Principal: principalInfoFrom(principal.ToPrincipalInfo()),
				},
				PullReqSegment: PullReqSegment{
					PullReq: pullReqInfoFrom(ctx, pr, targetRepo, s.urlProvider),
				},
				PullReqTargetReferenceSegment: PullReqTargetReferenceSegment{
					TargetRef: ReferenceInfo{
						Name: gitReferenceNamePrefixBranch + pr.TargetBranch,
						Repo: targetRepoInfo,
					},
				},
				ReferenceSegment: ReferenceSegment{
					Ref: ReferenceInfo{
						Name: gitReferenceNamePrefixBranch + pr.SourceBranch,
						Repo: sourceRepoInfo,
					},
				},
				PullReqUpdateSegment: PullReqUpdateSegment{
					TitleChanged:       event.Payload.TitleChanged,
					TitleOld:           event.Payload.TitleOld,
					TitleNew:           event.Payload.TitleNew,
					DescriptionChanged: event.Payload.DescriptionChanged,
					DescriptionOld:     event.Payload.DescriptionOld,
					DescriptionNew:     event.Payload.DescriptionNew,
				},
			}, nil
		})
}
// PullReqActivityStatusUpdatedPayload describes the body of the pullreq
// comment status updated trigger (comment resolved/reactivated).
type PullReqActivityStatusUpdatedPayload struct {
	BaseSegment
	PullReqSegment
	PullReqTargetReferenceSegment
	ReferenceSegment
	PullReqCommentSegment
	PullReqCommentStatusUpdatedSegment
}
// handleEventPullReqCommentStatusUpdated handles status updated events for
// pull request comments and triggers the corresponding webhooks. The comment
// status is derived from the activity's Resolved field: non-nil means
// resolved, nil means active.
func (s *Service) handleEventPullReqCommentStatusUpdated(
	ctx context.Context,
	event *events.Event[*pullreqevents.CommentStatusUpdatedPayload],
) error {
	return s.triggerForEventWithPullReq(
		ctx,
		enum.WebhookTriggerPullReqCommentStatusUpdated,
		event.ID,
		event.Payload.PrincipalID,
		event.Payload.PullReqID,
		func(principal *types.Principal, pr *types.PullReq, targetRepo, sourceRepo *types.Repository) (any, error) {
			targetRepoInfo := repositoryInfoFrom(ctx, targetRepo, s.urlProvider)
			sourceRepoInfo := repositoryInfoFrom(ctx, sourceRepo, s.urlProvider)
			activity, err := s.activityStore.Find(ctx, event.Payload.ActivityID)
			if err != nil {
				// Fix: corrected "acitivity" typo in the error message.
				return nil, fmt.Errorf(
					"failed to get activity by id for activity id %d: %w",
					event.Payload.ActivityID,
					err,
				)
			}
			status := enum.PullReqCommentStatusActive
			if activity.Resolved != nil {
				status = enum.PullReqCommentStatusResolved
			}
			return &PullReqActivityStatusUpdatedPayload{
				BaseSegment: BaseSegment{
					Trigger:   enum.WebhookTriggerPullReqCommentStatusUpdated,
					Repo:      targetRepoInfo,
					Principal: principalInfoFrom(principal.ToPrincipalInfo()),
				},
				PullReqSegment: PullReqSegment{
					PullReq: pullReqInfoFrom(ctx, pr, targetRepo, s.urlProvider),
				},
				PullReqTargetReferenceSegment: PullReqTargetReferenceSegment{
					TargetRef: ReferenceInfo{
						Name: gitReferenceNamePrefixBranch + pr.TargetBranch,
						Repo: targetRepoInfo,
					},
				},
				ReferenceSegment: ReferenceSegment{
					Ref: ReferenceInfo{
						Name: gitReferenceNamePrefixBranch + pr.SourceBranch,
						Repo: sourceRepoInfo,
					},
				},
				PullReqCommentSegment: PullReqCommentSegment{
					CommentInfo: CommentInfo{
						ID:       activity.ID,
						Text:     activity.Text,
						Kind:     activity.Kind,
						ParentID: activity.ParentID,
						Created:  activity.Created,
						Updated:  activity.Updated,
					},
					// Only populated when the activity is a valid code comment.
					CodeCommentInfo: extractCodeCommentInfoIfAvailable(activity),
				},
				PullReqCommentStatusUpdatedSegment: PullReqCommentStatusUpdatedSegment{
					Status: status,
				},
			}, nil
		})
}
// PullReqReviewSubmittedPayload describes the body of the pullreq review submitted trigger.
// It is composed of the reusable webhook payload segments shared by the other pullreq triggers.
type PullReqReviewSubmittedPayload struct {
	BaseSegment
	PullReqSegment
	PullReqTargetReferenceSegment
	ReferenceSegment
	PullReqReviewSegment
}
// handleEventPullReqReviewSubmitted handles review events for pull requests
// and triggers pullreq review submitted webhooks for the target repo.
// The reviewer principal is resolved separately from the acting principal since the
// review may be surfaced by a different actor than the reviewer themselves.
func (s *Service) handleEventPullReqReviewSubmitted(
	ctx context.Context,
	event *events.Event[*pullreqevents.ReviewSubmittedPayload],
) error {
	return s.triggerForEventWithPullReq(
		ctx,
		enum.WebhookTriggerPullReqReviewSubmitted,
		event.ID, event.Payload.PrincipalID,
		event.Payload.PullReqID,
		func(
			principal *types.Principal,
			pr *types.PullReq,
			targetRepo,
			sourceRepo *types.Repository,
		) (any, error) {
			targetRepoInfo := repositoryInfoFrom(ctx, targetRepo, s.urlProvider)
			sourceRepoInfo := repositoryInfoFrom(ctx, sourceRepo, s.urlProvider)
			// resolve the reviewer principal for the payload.
			reviewer, err := s.WebhookExecutor.FindPrincipalForEvent(ctx, event.Payload.ReviewerID)
			if err != nil {
				return nil, fmt.Errorf("failed to get reviewer by id for reviewer id %d: %w", event.Payload.ReviewerID, err)
			}
			return &PullReqReviewSubmittedPayload{
				BaseSegment: BaseSegment{
					Trigger:   enum.WebhookTriggerPullReqReviewSubmitted,
					Repo:      targetRepoInfo,
					Principal: principalInfoFrom(principal.ToPrincipalInfo()),
				},
				PullReqSegment: PullReqSegment{
					PullReq: pullReqInfoFrom(ctx, pr, targetRepo, s.urlProvider),
				},
				PullReqTargetReferenceSegment: PullReqTargetReferenceSegment{
					TargetRef: ReferenceInfo{
						Name: gitReferenceNamePrefixBranch + pr.TargetBranch,
						Repo: targetRepoInfo,
					},
				},
				ReferenceSegment: ReferenceSegment{
					Ref: ReferenceInfo{
						Name: gitReferenceNamePrefixBranch + pr.SourceBranch,
						Repo: sourceRepoInfo,
					},
				},
				PullReqReviewSegment: PullReqReviewSegment{
					ReviewDecision: event.Payload.Decision,
					ReviewerInfo:   principalInfoFrom(reviewer.ToPrincipalInfo()),
				},
			}, nil
		})
}
// PullReqTargetBranchChangedPayload describes the body of the pullreq target branch changed trigger.
// NOTE(review): PullReqTargetBrancheChangedSegment carries a typo ("Branche") in its
// externally declared type name - renaming it requires a coordinated change at its declaration.
type PullReqTargetBranchChangedPayload struct {
	BaseSegment
	PullReqSegment
	PullReqTargetReferenceSegment
	ReferenceSegment
	ReferenceDetailsSegment
	PullReqTargetBrancheChangedSegment
}
// handleEventPullReqTargetBranchChanged handles pullreq target branch changed events
// and triggers pullreq target branch changed webhooks.
// The payload includes commit details for the PR's current source SHA, plus the old
// target branch and old merge-base SHA so consumers can see what changed.
func (s *Service) handleEventPullReqTargetBranchChanged(
	ctx context.Context,
	event *events.Event[*pullreqevents.TargetBranchChangedPayload],
) error {
	return s.triggerForEventWithPullReq(
		ctx,
		enum.WebhookTriggerPullReqTargetBranchChanged,
		event.ID,
		event.Payload.PrincipalID,
		event.Payload.PullReqID,
		func(
			principal *types.Principal,
			pr *types.PullReq,
			targetRepo, sourceRepo *types.Repository,
		) (any, error) {
			// fetch commit info first so a lookup failure aborts before payload assembly.
			commitInfo, err := s.fetchCommitInfoForEvent(ctx, sourceRepo.GitUID, sourceRepo.Path,
				event.Payload.SourceSHA, s.urlProvider)
			if err != nil {
				return nil, err
			}
			targetRepoInfo := repositoryInfoFrom(ctx, targetRepo, s.urlProvider)
			sourceRepoInfo := repositoryInfoFrom(ctx, sourceRepo, s.urlProvider)
			return &PullReqTargetBranchChangedPayload{
				BaseSegment: BaseSegment{
					Trigger:   enum.WebhookTriggerPullReqTargetBranchChanged,
					Repo:      targetRepoInfo,
					Principal: principalInfoFrom(principal.ToPrincipalInfo()),
				},
				PullReqSegment: PullReqSegment{
					PullReq: pullReqInfoFrom(ctx, pr, targetRepo, s.urlProvider),
				},
				PullReqTargetReferenceSegment: PullReqTargetReferenceSegment{
					TargetRef: ReferenceInfo{
						Name: gitReferenceNamePrefixBranch + pr.TargetBranch,
						Repo: targetRepoInfo,
					},
				},
				ReferenceSegment: ReferenceSegment{
					Ref: ReferenceInfo{
						Name: gitReferenceNamePrefixBranch + pr.SourceBranch,
						Repo: sourceRepoInfo,
					},
				},
				// both Commit and HeadCommit point at the PR's source head commit.
				ReferenceDetailsSegment: ReferenceDetailsSegment{
					SHA:        event.Payload.SourceSHA,
					Commit:     &commitInfo,
					HeadCommit: &commitInfo,
				},
				PullReqTargetBrancheChangedSegment: PullReqTargetBrancheChangedSegment{
					OldTargetBranch: event.Payload.OldTargetBranch,
					OldMergeBaseSHA: event.Payload.OldMergeBaseSHA,
				},
			}, nil
		})
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/webhook/http_client.go | app/services/webhook/http_client.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhook
import (
"context"
"errors"
"fmt"
"net"
"net/http"
"time"
"github.com/rs/zerolog/log"
)
var (
	// errLoopbackNotAllowed is returned when a webhook resolves to a loopback address
	// while loopback targets are disallowed by configuration.
	errLoopbackNotAllowed = errors.New("loopback not allowed")
	// errPrivateNetworkNotAllowed is returned when a webhook resolves to a private
	// network address while private network targets are disallowed by configuration.
	errPrivateNetworkNotAllowed = errors.New("private network not allowed")
)
// newHTTPClient returns an http.Client for webhook delivery that refuses to talk to
// loopback and/or private network addresses (unless explicitly allowed), optionally
// skips TLS verification, and never follows redirects (the first response is returned
// as-is via http.ErrUseLastResponse).
func newHTTPClient(allowLoopback bool, allowPrivateNetwork bool, disableSSLVerification bool) *http.Client {
	// Clone http.DefaultTransport (used by http.DefaultClient)
	tr := http.DefaultTransport.(*http.Transport).Clone() //nolint:errcheck
	tr.TLSClientConfig.InsecureSkipVerify = disableSSLVerification

	// create basic net.Dialer (Similar to what is used by http.DefaultTransport)
	dialer := &net.Dialer{
		Timeout:   30 * time.Second,
		KeepAlive: 30 * time.Second,
	}

	// overwrite DialContext method to block sending data to localhost
	// NOTE: this doesn't block establishing the connection, but closes it before data is sent.
	// WARNING: this allows scanning of IP addresses based on error types.
	tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
		// dial connection using
		con, err := dialer.DialContext(ctx, network, addr)
		if err != nil {
			return nil, err
		}

		// by default close connection unless explicitly marked to keep it
		keepConnection := false
		defer func() {
			// if we decided to keep the connection, nothing to do
			if keepConnection {
				return
			}

			// otherwise best effort close connection
			cErr := con.Close()
			if cErr != nil {
				// FIX: log the close error (cErr); the original logged the dial error,
				// which is always nil at this point.
				log.Ctx(ctx).Warn().Err(cErr).
					Msgf("failed to close potentially malicious connection to '%s' (resolved: '%s')",
						addr, con.RemoteAddr())
			}
		}()

		// ensure a tcp address got established and close if it's localhost or private
		tcpAddr, ok := con.RemoteAddr().(*net.TCPAddr)
		if !ok {
			// not expected to happen, but to be sure
			return nil, fmt.Errorf("address resolved to a non-TCP address (original: '%s', resolved: '%s')",
				addr, con.RemoteAddr())
		}
		if !allowLoopback && tcpAddr.IP.IsLoopback() {
			return nil, errLoopbackNotAllowed
		}
		if !allowPrivateNetwork && tcpAddr.IP.IsPrivate() {
			return nil, errPrivateNetworkNotAllowed
		}

		// otherwise keep connection
		keepConnection = true
		return con, nil
	}

	// httpClient is similar to http.DefaultClient, just with custom http.Transport
	return &http.Client{
		Transport: tr,
		CheckRedirect: func(*http.Request, []*http.Request) error {
			return http.ErrUseLastResponse
		},
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/webhook/create.go | app/services/webhook/create.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhook
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/app/store/database/migrate"
"github.com/harness/gitness/audit"
"github.com/harness/gitness/errors"
"github.com/harness/gitness/store"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/check"
"github.com/harness/gitness/types/enum"
"github.com/rs/zerolog/log"
)
// webhookScopeRepo is the scope value stored for repo-level webhooks; space-level
// webhooks use the space's tree level instead (see Create).
const webhookScopeRepo = int64(0)
// sanitizeCreateInput validates and normalizes the webhook creation input in place.
// During the UID/Identifier migration period it backfills Identifier from UID and
// DisplayName/Identifier from each other before running the individual field checks.
// The internal flag is forwarded to CheckURL, which validates the target URL against
// the service's loopback/private-network configuration.
func (s *Service) sanitizeCreateInput(in *types.WebhookCreateInput, internal bool) error {
	// TODO [CODE-1363]: remove after identifier migration.
	if in.Identifier == "" {
		in.Identifier = in.UID
	}

	// backfill required data - during migration period we have to accept both, displayname only and identifier only
	// TODO [CODE-1364]: Remove once UID/Identifier migration is completed
	if in.DisplayName == "" && in.Identifier != "" {
		in.DisplayName = in.Identifier
	}
	if in.Identifier == "" && in.DisplayName != "" {
		var err error
		in.Identifier, err = migrate.WebhookDisplayNameToIdentifier(in.DisplayName, false)
		if err != nil {
			return fmt.Errorf("failed to migrate webhook displayname %q to identifier: %w", in.DisplayName, err)
		}
	}

	// field-by-field validation; the first failure is returned as-is.
	if err := check.Identifier(in.Identifier); err != nil {
		return err
	}
	if err := check.DisplayName(in.DisplayName); err != nil {
		return err
	}
	if err := check.Description(in.Description); err != nil {
		return err
	}
	if err := CheckURL(in.URL, s.config.AllowLoopback, s.config.AllowPrivateNetwork, internal); err != nil {
		return err
	}
	if err := CheckSecret(in.Secret); err != nil {
		return err
	}
	if err := CheckTriggers(in.Triggers); err != nil { //nolint:revive
		return err
	}
	if err := CheckExtraHeaders(in.ExtraHeaders); err != nil {
		return err
	}

	return nil
}
// Create validates the input, persists a new webhook under the given parent resource,
// writes an audit log entry (for auditable webhook types, best effort), and emits an
// SSE "webhook created" event. The webhook secret is stored encrypted.
//
// On a duplicate-identifier error it additionally checks (best effort) whether the
// clash is with a hidden internal webhook and, if so, returns a dedicated conflict
// error instead of the generic duplicate error.
func (s *Service) Create(
	ctx context.Context,
	principal *types.Principal,
	typ enum.WebhookType,
	parentResource ParentResource,
	in *types.WebhookCreateInput,
) (*types.Webhook, error) {
	err := s.sanitizeCreateInput(in, typ == enum.WebhookTypeInternal)
	if err != nil {
		return nil, err
	}

	encryptedSecret, err := s.encrypter.Encrypt(in.Secret)
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt webhook secret: %w", err)
	}

	// repo webhooks use the fixed repo scope; space webhooks use the space's tree level.
	scope := webhookScopeRepo
	if parentResource.Type == enum.WebhookParentSpace {
		scope, err = s.spaceStore.GetTreeLevel(ctx, parentResource.ID)
		if err != nil {
			return nil, fmt.Errorf("failed to get parent tree level: %w", err)
		}
	}

	now := time.Now().UnixMilli()

	// create new webhook object
	hook := &types.Webhook{
		ID:         0, // the ID will be populated in the data layer
		Version:    0, // the Version will be populated in the data layer
		CreatedBy:  principal.ID,
		Created:    now,
		Updated:    now,
		ParentID:   parentResource.ID,
		ParentType: parentResource.Type,
		Type:       typ,
		Scope:      scope,

		// user input
		Identifier:            in.Identifier,
		DisplayName:           in.DisplayName,
		Description:           in.Description,
		URL:                   in.URL,
		Secret:                string(encryptedSecret),
		Enabled:               in.Enabled,
		Insecure:              in.Insecure,
		Triggers:              DeduplicateTriggers(in.Triggers),
		LatestExecutionResult: nil,
		ExtraHeaders:          in.ExtraHeaders,
	}

	err = s.webhookStore.Create(ctx, hook)

	// internal hooks are hidden from non-internal read requests - properly communicate their existence on duplicate.
	// This is the best effort, any error we just ignore and fallback to original duplicate error.
	if errors.Is(err, store.ErrDuplicate) && (typ != enum.WebhookTypeInternal) {
		existingHook, derr := s.webhookStore.FindByIdentifier(
			ctx, enum.WebhookParentRepo, parentResource.ID, hook.Identifier)
		if derr != nil {
			log.Ctx(ctx).Warn().Err(derr).Msgf(
				"failed to retrieve webhook for repo %d with identifier %q on duplicate error",
				parentResource.ID,
				hook.Identifier,
			)
		}
		if derr == nil && existingHook.Type == enum.WebhookTypeInternal {
			return nil, errors.Conflict("The provided identifier is reserved for internal purposes.")
		}
	}
	if err != nil {
		return nil, fmt.Errorf("failed to store webhook: %w", err)
	}

	// audit logging is best effort - a failure is logged but does not fail the request.
	if shouldAuditWebhook(typ) {
		resourceType, nameKey := getWebhookAuditInfo(parentResource.Type)
		err = s.auditService.Log(ctx,
			*principal,
			audit.NewResource(resourceType, hook.Identifier, nameKey, parentResource.Identifier),
			audit.ActionCreated,
			parentResource.Path,
			audit.WithNewObject(hook),
		)
		if err != nil {
			log.Ctx(ctx).Warn().Msgf("failed to insert audit log for create webhook operation: %s", err)
		}
	}

	s.sendSSE(ctx, parentResource, enum.SSETypeWebhookCreated, hook)

	return hook, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/webhook/handler_tag.go | app/services/webhook/handler_tag.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhook
import (
"context"
gitevents "github.com/harness/gitness/app/events/git"
"github.com/harness/gitness/events"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
// handleEventTagCreated handles tag created events
// and triggers tag created webhooks for the source repo.
func (s *Service) handleEventTagCreated(ctx context.Context,
	event *events.Event[*gitevents.TagCreatedPayload]) error {
	// buildPayload assembles the reference payload for the freshly created tag.
	buildPayload := func(principal *types.Principal, repo *types.Repository) (any, error) {
		commit, err := s.fetchCommitInfoForEvent(ctx, repo.GitUID, repo.Path, event.Payload.SHA, s.urlProvider)
		if err != nil {
			return nil, err
		}
		info := repositoryInfoFrom(ctx, repo, s.urlProvider)

		payload := ReferencePayload{
			BaseSegment: BaseSegment{
				Trigger:   enum.WebhookTriggerTagCreated,
				Repo:      info,
				Principal: principalInfoFrom(principal.ToPrincipalInfo()),
			},
			ReferenceSegment: ReferenceSegment{
				Ref: ReferenceInfo{
					Name: event.Payload.Ref,
					Repo: info,
				},
			},
			ReferenceDetailsSegment: ReferenceDetailsSegment{
				SHA:        event.Payload.SHA,
				Commit:     &commit,
				HeadCommit: &commit,
			},
			// a freshly created tag has no previous SHA and cannot be a forced update.
			ReferenceUpdateSegment: ReferenceUpdateSegment{
				OldSHA: types.NilSHA,
				Forced: false,
			},
		}
		return &payload, nil
	}

	return s.triggerForEventWithRepo(ctx, enum.WebhookTriggerTagCreated,
		event.ID, event.Payload.PrincipalID, event.Payload.RepoID, buildPayload)
}
// handleEventTagUpdated handles tag updated events
// and triggers tag updated webhooks for the source repo.
func (s *Service) handleEventTagUpdated(ctx context.Context,
	event *events.Event[*gitevents.TagUpdatedPayload]) error {
	return s.triggerForEventWithRepo(ctx, enum.WebhookTriggerTagUpdated,
		event.ID, event.Payload.PrincipalID, event.Payload.RepoID,
		func(principal *types.Principal, repo *types.Repository) (any, error) {
			// fetch the commits between the old and new SHA for the payload.
			commitsInfo, totalCommits, err := s.fetchCommitsInfoForEvent(ctx, repo.GitUID, repo.Path,
				event.Payload.OldSHA, event.Payload.NewSHA, s.urlProvider)
			if err != nil {
				return nil, err
			}

			// the first entry (if any) is used as the head commit; otherwise a zero value.
			commitInfo := CommitInfo{}
			if len(commitsInfo) > 0 {
				commitInfo = commitsInfo[0]
			}
			repoInfo := repositoryInfoFrom(ctx, repo, s.urlProvider)

			return &ReferencePayload{
				BaseSegment: BaseSegment{
					Trigger:   enum.WebhookTriggerTagUpdated,
					Repo:      repoInfo,
					Principal: principalInfoFrom(principal.ToPrincipalInfo()),
				},
				ReferenceSegment: ReferenceSegment{
					Ref: ReferenceInfo{
						Name: event.Payload.Ref,
						Repo: repoInfo,
					},
				},
				ReferenceDetailsSegment: ReferenceDetailsSegment{
					SHA:               event.Payload.NewSHA,
					Commit:            &commitInfo,
					HeadCommit:        &commitInfo,
					Commits:           &commitsInfo,
					TotalCommitsCount: totalCommits,
				},
				ReferenceUpdateSegment: ReferenceUpdateSegment{
					OldSHA: event.Payload.OldSHA,
					Forced: event.Payload.Forced,
				},
			}, nil
		})
}
// handleEventTagDeleted handles tag deleted events
// and triggers tag deleted webhooks for the source repo.
func (s *Service) handleEventTagDeleted(ctx context.Context,
	event *events.Event[*gitevents.TagDeletedPayload]) error {
	// buildPayload assembles the reference payload for the removed tag.
	buildPayload := func(principal *types.Principal, repo *types.Repository) (any, error) {
		info := repositoryInfoFrom(ctx, repo, s.urlProvider)

		payload := ReferencePayload{
			BaseSegment: BaseSegment{
				Trigger:   enum.WebhookTriggerTagDeleted,
				Repo:      info,
				Principal: principalInfoFrom(principal.ToPrincipalInfo()),
			},
			ReferenceSegment: ReferenceSegment{
				Ref: ReferenceInfo{
					Name: event.Payload.Ref,
					Repo: info,
				},
			},
			// the tag is gone - the new SHA is the nil SHA and there is no commit.
			ReferenceDetailsSegment: ReferenceDetailsSegment{
				SHA:    types.NilSHA,
				Commit: nil,
			},
			ReferenceUpdateSegment: ReferenceUpdateSegment{
				OldSHA: event.Payload.SHA,
				Forced: false,
			},
		}
		return &payload, nil
	}

	return s.triggerForEventWithRepo(ctx, enum.WebhookTriggerTagDeleted,
		event.ID, event.Payload.PrincipalID, event.Payload.RepoID, buildPayload)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/webhook/trigger.go | app/services/webhook/trigger.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhook
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net"
"net/http"
"slices"
"time"
gitnessstore "github.com/harness/gitness/app/store"
"github.com/harness/gitness/crypto"
"github.com/harness/gitness/secret"
"github.com/harness/gitness/store"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/harness/gitness/version"
"github.com/rs/zerolog/log"
)
const (
	// webhookTimeLimit defines the time limit of a single webhook execution.
	// This is similar to other SCM providers.
	webhookTimeLimit = 10 * time.Second

	// responseHeadersBytesLimit defines the maximum number of bytes processed from the webhook response headers.
	responseHeadersBytesLimit = 1024

	// responseBodyBytesLimit defines the maximum number of bytes processed from the webhook response body.
	responseBodyBytesLimit = 1024

	// maskedHeaderValue is the value used to mask sensitive header values in execution history.
	maskedHeaderValue = "******"
)

// Trigger header name suffixes. NOTE(review): presumably assigned to WebhookExecutor.source
// and turned into an "X-<identity>-<name>" header in prepareHTTPRequest - confirm at the
// WebhookExecutor construction site.
const (
	RepoTrigger             = "Trigger"
	ArtifactRegistryTrigger = "Artifact-Registry-Trigger"
)
var (
	// ErrWebhookNotRetriggerable is returned in case the webhook can't be retriggered due to an incomplete execution.
	// This should only occur if we failed to generate the request body (most likely out of memory).
	// Callers should compare against it with errors.Is.
	ErrWebhookNotRetriggerable = errors.New("webhook execution is incomplete and can't be retriggered")
)
// TriggerResult describes the outcome of triggering a single webhook for one event.
type TriggerResult struct {
	TriggerID   string
	TriggerType enum.WebhookTrigger
	Webhook     *types.WebhookCore
	// Execution is nil when the webhook was skipped (see Skipped).
	Execution *types.WebhookExecutionCore
	Err       error
}
// Skipped reports whether the webhook trigger was skipped (no execution was created),
// e.g. because the webhook is disabled, already completed for this trigger, or not
// registered for the trigger type.
func (r *TriggerResult) Skipped() bool {
	return r.Execution == nil
}
// triggerWebhooksFor loads all webhooks registered on any of the given parents and
// triggers them for the provided trigger id, trigger type, and payload body.
func (w *WebhookExecutor) triggerWebhooksFor(
	ctx context.Context,
	parents []types.WebhookParentInfo,
	triggerID string,
	triggerType enum.WebhookTrigger,
	body any,
) ([]TriggerResult, error) {
	webhooks, err := w.webhookExecutorStore.ListWebhooks(ctx, parents)
	if err != nil {
		// FIX: original message ended with a dangling "for:".
		return nil, fmt.Errorf("failed to list webhooks: %w", err)
	}

	return w.triggerWebhooks(ctx, webhooks, triggerID, triggerType, body)
}
// triggerWebhooks executes the given webhooks for a single trigger event.
// Webhooks that are disabled, already completed for this trigger id (success or fatal
// error), or not registered for the trigger type are skipped - their TriggerResult
// carries a nil Execution (see TriggerResult.Skipped).
//
//nolint:gocognit // refactor if needed
func (w *WebhookExecutor) triggerWebhooks(
	ctx context.Context, webhooks []*types.WebhookCore,
	triggerID string, triggerType enum.WebhookTrigger, body any,
) ([]TriggerResult, error) {
	// return immediately if webhooks are empty
	if len(webhooks) == 0 {
		return []TriggerResult{}, nil
	}

	// get all previous execution for the same trigger
	executions, err := w.webhookExecutorStore.ListForTrigger(ctx, triggerID)
	if err != nil && !errors.Is(err, store.ErrResourceNotFound) {
		// FIX: wrap the underlying error - it was silently dropped before.
		return nil, fmt.Errorf("failed to get executions for trigger '%s': %w", triggerID, err)
	}

	// precalculate whether a webhook should be executed
	skipExecution := make(map[int64]bool, len(executions))
	for _, execution := range executions {
		// skip execution in case of success or unrecoverable error
		if execution.Result == enum.WebhookExecutionResultSuccess ||
			execution.Result == enum.WebhookExecutionResultFatalError {
			skipExecution[execution.WebhookID] = true
		}
	}

	results := make([]TriggerResult, len(webhooks))
	for i, webhook := range webhooks {
		results[i] = TriggerResult{
			TriggerID:   triggerID,
			TriggerType: triggerType,
			Webhook:     webhook,
		}

		// check if webhook is disabled
		if !webhook.Enabled {
			continue
		}

		// check if webhook already got executed (success or fatal error)
		if skipExecution[webhook.ID] {
			continue
		}

		// check if webhook is registered for trigger (empty list => all triggers are registered)
		triggerRegistered := len(webhook.Triggers) == 0 || slices.Contains(webhook.Triggers, triggerType)
		if !triggerRegistered {
			continue
		}

		// execute trigger and store output in result
		results[i].Execution, results[i].Err = w.executeWebhook(ctx, webhook, triggerID, triggerType, body, nil)
	}

	return results, nil
}
// RetriggerWebhookExecution re-executes a previously recorded webhook execution,
// reusing the original trigger id, trigger type, and stored request body.
// It returns ErrWebhookNotRetriggerable when the stored execution has no reusable body.
func (w *WebhookExecutor) RetriggerWebhookExecution(
	ctx context.Context,
	webhookExecutionID int64,
) (*TriggerResult, error) {
	// find execution
	webhookExecution, err := w.webhookExecutorStore.Find(ctx, webhookExecutionID)
	if err != nil {
		return nil, fmt.Errorf("failed to find webhook execution with id %d: %w", webhookExecutionID, err)
	}

	// ensure webhook can be retriggered
	if !webhookExecution.Retriggerable {
		return nil, ErrWebhookNotRetriggerable
	}

	// find webhook
	webhook, err := w.webhookExecutorStore.FindWebhook(ctx, webhookExecution.WebhookID)
	if err != nil {
		return nil, fmt.Errorf("failed to find webhook with id %d: %w", webhookExecution.WebhookID, err)
	}

	// reuse same trigger id as original execution
	triggerID := webhookExecution.TriggerID
	triggerType := webhookExecution.TriggerType

	// pass body explicitly - the stored request body is replayed as-is.
	body := &bytes.Buffer{}
	// NOTE: bBuff.Write(v) will always return (len(v), nil) - no need to error handle
	body.WriteString(webhookExecution.Request.Body)

	// the new execution is linked to the original via its ID.
	newExecution, err := w.executeWebhook(ctx, webhook, triggerID, triggerType, body, &webhookExecution.ID)
	return &TriggerResult{
		TriggerID:   triggerID,
		TriggerType: triggerType,
		Webhook:     webhook,
		Execution:   newExecution,
		Err:         err,
	}, nil
}
// executeWebhook sends a single webhook HTTP request for the given trigger and records
// the attempt as a WebhookExecutionCore. The execution entry is persisted no matter how
// the attempt ends (via the deferred store call), and the webhook's latest execution
// result is updated best-effort when it changed. rerunOfID links a retriggered execution
// to its original; the whole attempt is bounded by webhookTimeLimit.
//
//nolint:gocognit // refactor into smaller chunks if necessary.
func (w *WebhookExecutor) executeWebhook(
	ctx context.Context, webhook *types.WebhookCore, triggerID string,
	triggerType enum.WebhookTrigger, body any, rerunOfID *int64,
) (*types.WebhookExecutionCore, error) {
	// build execution entry on the fly (save no matter what)
	execution := types.WebhookExecutionCore{
		RetriggerOf: rerunOfID,
		WebhookID:   webhook.ID,
		TriggerID:   triggerID,
		TriggerType: triggerType,
		// for unexpected errors we don't retry - protect the system. User can retrigger manually (if body was set)
		Result: enum.WebhookExecutionResultFatalError,
		Error:  "An unknown error occurred",
	}
	// NOTE: the defer captures the ORIGINAL ctx (oCtx) so the store call still works
	// after the time-limited child context below has been cancelled.
	defer func(oCtx context.Context, start time.Time) {
		// set total execution time
		execution.Duration = int64(time.Since(start))
		execution.Created = time.Now().UnixMilli()

		// TODO: what if saving execution failed? For now we will rerun it in case of error or not show it in history
		err := w.webhookExecutorStore.CreateWebhookExecution(oCtx, &execution)
		if err != nil {
			log.Ctx(ctx).Warn().Err(err).Msgf(
				"failed to store webhook execution that ended with Result: %s, Response.Status: '%s', Error: '%s'",
				execution.Result, execution.Response.Status, execution.Error)
		}

		// update latest execution result of webhook IFF it's different from before (best effort)
		if webhook.LatestExecutionResult == nil || *webhook.LatestExecutionResult != execution.Result {
			_, err = w.webhookExecutorStore.UpdateOptLock(oCtx, webhook, &execution)
			if err != nil {
				log.Ctx(ctx).Warn().Err(err).Msgf(
					"failed to update latest execution result to %s for webhook %d",
					execution.Result, webhook.ID)
			}
		}
	}(ctx, time.Now())

	// derive context with time limit
	ctx, cancel := context.WithTimeout(ctx, webhookTimeLimit)
	defer cancel()

	// create request from webhook and body
	req, err := w.prepareHTTPRequest(ctx, &execution, triggerType, webhook, body)
	if err != nil {
		return &execution, err
	}

	// Execute HTTP Request (insecure if requested)
	var resp *http.Response
	switch {
	case webhook.Type == enum.WebhookTypeInternal && webhook.Insecure:
		resp, err = w.insecureHTTPClientInternal.Do(req)
	case webhook.Type == enum.WebhookTypeInternal:
		resp, err = w.secureHTTPClientInternal.Do(req)
	case webhook.Insecure:
		resp, err = w.insecureHTTPClient.Do(req)
	default:
		resp, err = w.secureHTTPClient.Do(req)
	}

	// always close the body!
	if resp != nil && resp.Body != nil {
		defer func() {
			err = resp.Body.Close()
			if err != nil {
				log.Ctx(ctx).Warn().Err(err).Msgf("failed to close body after webhook execution %d", execution.ID)
			}
		}()
	}

	// handle certain errors explicitly to give more to-the-point error messages
	var dnsError *net.DNSError
	switch {
	case errors.Is(err, context.DeadlineExceeded):
		// we assume timeout without any response is not worth retrying - protect the system
		tErr := fmt.Errorf("request exceeded time limit of %s", webhookTimeLimit)
		execution.Error = tErr.Error()
		execution.Result = enum.WebhookExecutionResultFatalError
		return &execution, tErr

	case errors.As(err, &dnsError) && dnsError.IsNotFound:
		// this error is assumed unrecoverable - mark status accordingly and fail execution
		execution.Error = fmt.Sprintf("host '%s' was not found", dnsError.Name)
		execution.Result = enum.WebhookExecutionResultFatalError
		return &execution, fmt.Errorf("failed to resolve host name '%s': %w", dnsError.Name, err)

	case err != nil:
		// for all other errors we don't retry - protect the system. User can retrigger manually (if body was set)
		tErr := fmt.Errorf("an error occurred while sending the request: %w", err)
		execution.Error = tErr.Error()
		execution.Result = enum.WebhookExecutionResultFatalError
		return &execution, tErr
	}

	// handle response
	err = handleWebhookResponse(&execution, resp)

	return &execution, err
}
// prepareHTTPRequest prepares a new http.Request object for the webhook using the provided body as request body.
// All execution.Request.XXX values are set accordingly.
// NOTE: if the body is an io.Reader, the value is used as response body as is, otherwise it'll be JSON serialized.
//
// The request body is serialized first so the execution stays retriggerable even when
// the URL is bad; an HMAC-SHA256 signature header is added when a secret is configured,
// and extra headers flagged as masked are redacted in the stored execution history.
func (w *WebhookExecutor) prepareHTTPRequest(
	ctx context.Context, execution *types.WebhookExecutionCore,
	triggerType enum.WebhookTrigger, webhook *types.WebhookCore, body any,
) (*http.Request, error) {
	url, err := w.webhookURLProvider.GetWebhookURL(ctx, webhook)
	if err != nil {
		return nil, fmt.Errorf("webhook url is not resolvable: %w", err)
	}
	execution.Request.URL = url

	// Serialize body before anything else.
	// This allows the user to retrigger the execution even in case of bad URL.
	bBuff := &bytes.Buffer{}
	switch v := body.(type) {
	case io.Reader:
		// if it's already an io.Reader - use value as is and don't serialize (allows to provide custom body)
		// NOTE: reader can be read only once - read and store it in buffer to allow storing it in execution object
		// and generate hmac.
		bBytes, err := io.ReadAll(v)
		if err != nil {
			// ASSUMPTION: there was an issue with the static user input, not retriable
			tErr := fmt.Errorf("failed to generate request body: %w", err)
			execution.Error = tErr.Error()
			execution.Result = enum.WebhookExecutionResultFatalError
			return nil, tErr
		}
		// NOTE: bBuff.Write(v) will always return (len(v), nil) - no need to error handle
		bBuff.Write(bBytes)
	default:
		// all other types we json serialize
		err := json.NewEncoder(bBuff).Encode(body)
		if err != nil {
			// this is an internal issue, nothing the user can do - don't expose error details
			execution.Error = "an error occurred preparing the request body"
			execution.Result = enum.WebhookExecutionResultFatalError
			return nil, fmt.Errorf("failed to serialize body to json: %w", err)
		}
	}

	// set execution body and mark it as retriggerable
	execution.Request.Body = bBuff.String()
	execution.Retriggerable = true

	// create request (url + body)
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bBuff)
	if err != nil {
		// ASSUMPTION: there was an issue with the static user input, not retriable
		tErr := fmt.Errorf("failed to create request: %w", err)
		execution.Error = tErr.Error()
		execution.Result = enum.WebhookExecutionResultFatalError
		return nil, tErr
	}

	// Always add Extra headers first so that system headers are not overwritten
	for _, h := range webhook.ExtraHeaders {
		req.Header.Add(h.Key, h.Value)
	}

	// setup headers
	req.Header.Add("User-Agent", fmt.Sprintf("%s/%s", w.config.UserAgentIdentity, version.Version))
	req.Header.Add("Content-Type", "application/json")
	req.Header.Add(w.toXHeader("Webhook-Parent-Type"), string(webhook.ParentType))
	req.Header.Add(w.toXHeader("Webhook-Parent-Id"), fmt.Sprint(webhook.ParentID))
	// TODO [CODE-1363]: remove after identifier migration.
	req.Header.Add(w.toXHeader("Webhook-Uid"), fmt.Sprint(webhook.Identifier))
	req.Header.Add(w.toXHeader("Webhook-Identifier"), fmt.Sprint(webhook.Identifier))
	req.Header.Add(w.toXHeader(w.source), string(triggerType))

	// resolve the signing secret: internal webhooks use the configured internal secret,
	// otherwise the stored (encrypted) secret or a referenced secret resource.
	var secretValue string
	//nolint:gocritic
	if webhook.Type == enum.WebhookTypeInternal {
		secretValue = w.config.InternalSecret
	} else if webhook.Secret != "" {
		decryptedSecret, err := w.encrypter.Decrypt([]byte(webhook.Secret))
		if err != nil {
			return nil, fmt.Errorf("failed to decrypt webhook secret: %w", err)
		}
		secretValue = decryptedSecret
	} else if webhook.SecretIdentifier != "" {
		decryptedSecret, err := getSecretValue(ctx, w.spacePathStore, w.secretService,
			webhook.SecretSpaceID, webhook.SecretIdentifier)
		if err != nil {
			// FIX: corrected garbled error message ("failed get secret secret value").
			return nil, fmt.Errorf("failed to get secret value: %w", err)
		}
		secretValue = decryptedSecret
	}

	// add HMAC only if a secret was provided
	if secretValue != "" {
		var hmac string
		hmac, err = crypto.GenerateHMACSHA256(bBuff.Bytes(), []byte(secretValue))
		if err != nil {
			return nil, fmt.Errorf("failed to generate SHA256 based HMAC: %w", err)
		}
		req.Header.Add(w.toXHeader("Signature"), hmac)
	}

	// Create a copy of headers for execution history with masked values
	headersForExecution := req.Header.Clone()
	if webhook.ExtraHeaders != nil {
		for _, h := range webhook.ExtraHeaders {
			if h.Masked {
				headersForExecution.Set(h.Key, maskedHeaderValue)
			}
		}
	}

	hBuffer := &bytes.Buffer{}
	err = headersForExecution.Write(hBuffer)
	if err != nil {
		tErr := fmt.Errorf("failed to write request headers: %w", err)
		execution.Error = tErr.Error()
		execution.Result = enum.WebhookExecutionResultRetriableError
		return nil, tErr
	}
	execution.Request.Headers = hBuffer.String()

	return req, nil
}
// toXHeader builds a custom webhook header name of the form
// "X-<HeaderIdentity>-<name>" (e.g. "X-Gitness-Signature").
func (w *WebhookExecutor) toXHeader(name string) string {
	return "X-" + w.config.HeaderIdentity + "-" + name
}
// handleWebhookResponse records the webhook call's HTTP response (status,
// headers, truncated body) on the execution and maps the status code to an
// execution result: success, retriable error, or fatal error.
// Only 2xx counts as success; 408, 429 and most 5xx codes are retriable,
// everything else is fatal.
//
//nolint:funlen // refactor if needed
func handleWebhookResponse(execution *types.WebhookExecutionCore, resp *http.Response) error {
	// store status (handle status later - want to first read body)
	execution.Response.StatusCode = resp.StatusCode
	execution.Response.Status = resp.Status
	// store response headers
	hBuff := &bytes.Buffer{}
	err := resp.Header.Write(hBuff)
	if err != nil {
		tErr := fmt.Errorf("failed to read response headers: %w", err)
		execution.Error = tErr.Error()
		execution.Result = enum.WebhookExecutionResultRetriableError
		return tErr
	}
	// limit the total number of bytes we store in headers
	headerLength := min(hBuff.Len(), responseHeadersBytesLimit)
	execution.Response.Headers = string(hBuff.Bytes()[0:headerLength])
	// handle body (if exists)
	if resp.Body != nil {
		// read and store response body, capped at responseBodyBytesLimit bytes
		var bodyRaw []byte
		bodyRaw, err = io.ReadAll(io.LimitReader(resp.Body, responseBodyBytesLimit))
		if err != nil {
			tErr := fmt.Errorf("an error occurred while reading the response body: %w", err)
			execution.Error = tErr.Error()
			execution.Result = enum.WebhookExecutionResultRetriableError
			return tErr
		}
		execution.Response.Body = string(bodyRaw)
	}
	// Analyze status code
	// IMPORTANT: cases are EVALUATED IN ORDER
	switch code := resp.StatusCode; {
	case code < 200:
		// 1XX - server is continuing the processing (call was successful, but not completed yet)
		execution.Error = "1xx response codes are not supported"
		execution.Result = enum.WebhookExecutionResultFatalError
		return fmt.Errorf("received response with unsupported status code %d", code)
	case code < 300:
		// 2XX - call was successful
		execution.Error = ""
		execution.Result = enum.WebhookExecutionResultSuccess
		return nil
	case code < 400:
		// 3XX - Redirection (further action is required by the client)
		// NOTE: technically we could follow the redirect, but not supported as of now
		execution.Error = "3xx response codes are not supported"
		execution.Result = enum.WebhookExecutionResultFatalError
		return fmt.Errorf("received response with unsupported status code %d", code)
	case code == 408:
		// 408 - Request Timeout (retriable: the remote might respond in time on retry)
		tErr := errors.New("request timed out")
		execution.Error = tErr.Error()
		execution.Result = enum.WebhookExecutionResultRetriableError
		return tErr
	case code == 429:
		// 429 - Too Many Requests (retriable: throttling is expected to be temporary)
		tErr := errors.New("request got throttled")
		execution.Error = tErr.Error()
		execution.Result = enum.WebhookExecutionResultRetriableError
		return tErr
	case code < 500:
		// 4xx - Issue with request (bad request, url too large, ...)
		execution.Error = "4xx response codes are not supported (apart from 408 and 429)"
		execution.Result = enum.WebhookExecutionResultFatalError
		return fmt.Errorf("received response with unrecoverable status code %d", code)
	case code == 501:
		// 501 - Not Implemented (fatal: retrying won't make the remote implement it)
		execution.Error = "remote server does not implement requested action"
		execution.Result = enum.WebhookExecutionResultFatalError
		return fmt.Errorf("received response with unrecoverable status code %d", code)
	case code < 600:
		// 5xx - Server Errors (retriable; note 501 was already handled above)
		execution.Error = "remote server encountered an error"
		execution.Result = enum.WebhookExecutionResultRetriableError
		return fmt.Errorf("remote server encountered an error: %d", code)
	default:
		// >= 600 - No commonly used response status code
		execution.Error = "response code not supported"
		execution.Result = enum.WebhookExecutionResultFatalError
		return fmt.Errorf("received response with unsupported status code %d", code)
	}
}
// getSecretValue resolves and decrypts the secret identified by
// secretIdentifier within the space identified by secretSpaceID.
// The space ID is translated to its primary path first, since the secret
// service addresses secrets by (space path, identifier).
// NOTE: errors are logged here (context-aware) and returned wrapped.
func getSecretValue(
	ctx context.Context, spacePathStore gitnessstore.SpacePathStore, secretService secret.Service,
	secretSpaceID int64, secretIdentifier string,
) (string, error) {
	spacePath, err := spacePathStore.FindPrimaryBySpaceID(ctx, secretSpaceID)
	if err != nil {
		err = fmt.Errorf("failed to find space path: %w", err)
		// use the request-scoped logger for consistency with the rest of the package.
		log.Ctx(ctx).Error().Msg(err.Error())
		return "", err
	}

	decryptedSecret, err := secretService.DecryptSecret(ctx, spacePath.Value, secretIdentifier)
	if err != nil {
		err = fmt.Errorf("failed to decrypt secret: %w", err)
		log.Ctx(ctx).Error().Msg(err.Error())
		return "", err
	}

	return decryptedSecret, nil
}
// CoreWebhookExecutionToGitnessWebhookExecution maps the storage-agnostic
// execution core onto the gitness execution type, field by field.
func CoreWebhookExecutionToGitnessWebhookExecution(execution *types.WebhookExecutionCore) *types.WebhookExecution {
	out := new(types.WebhookExecution)
	out.ID = execution.ID
	out.WebhookID = execution.WebhookID
	out.TriggerID = execution.TriggerID
	out.TriggerType = execution.TriggerType
	out.Result = execution.Result
	out.Error = execution.Error
	out.Request = execution.Request
	out.Response = execution.Response
	out.RetriggerOf = execution.RetriggerOf
	out.Retriggerable = execution.Retriggerable
	out.Duration = execution.Duration
	out.Created = execution.Created
	return out
}
// GitnessWebhookExecutionToWebhookExecutionCore maps the gitness execution
// type back onto the storage-agnostic execution core, field by field.
func GitnessWebhookExecutionToWebhookExecutionCore(execution *types.WebhookExecution) *types.WebhookExecutionCore {
	out := new(types.WebhookExecutionCore)
	out.ID = execution.ID
	out.WebhookID = execution.WebhookID
	out.TriggerID = execution.TriggerID
	out.TriggerType = execution.TriggerType
	out.Result = execution.Result
	out.Error = execution.Error
	out.Request = execution.Request
	out.Response = execution.Response
	out.RetriggerOf = execution.RetriggerOf
	out.Retriggerable = execution.Retriggerable
	out.Duration = execution.Duration
	out.Created = execution.Created
	return out
}
// GitnessWebhookToWebhookCore maps a gitness webhook onto the
// storage-agnostic webhook core, field by field.
func GitnessWebhookToWebhookCore(webhook *types.Webhook) *types.WebhookCore {
	var core types.WebhookCore
	core.ID = webhook.ID
	core.Version = webhook.Version
	core.ParentID = webhook.ParentID
	core.ParentType = webhook.ParentType
	core.CreatedBy = webhook.CreatedBy
	core.Created = webhook.Created
	core.Updated = webhook.Updated
	core.Type = webhook.Type
	core.Scope = webhook.Scope
	core.Identifier = webhook.Identifier
	core.DisplayName = webhook.DisplayName
	core.Description = webhook.Description
	core.URL = webhook.URL
	core.Secret = webhook.Secret
	core.Enabled = webhook.Enabled
	core.Insecure = webhook.Insecure
	core.Triggers = webhook.Triggers
	core.LatestExecutionResult = webhook.LatestExecutionResult
	core.ExtraHeaders = webhook.ExtraHeaders
	return &core
}
// CoreWebhookToGitnessWebhook maps a storage-agnostic webhook core back
// onto the gitness webhook type, field by field.
func CoreWebhookToGitnessWebhook(webhook *types.WebhookCore) *types.Webhook {
	var out types.Webhook
	out.ID = webhook.ID
	out.Version = webhook.Version
	out.ParentID = webhook.ParentID
	out.ParentType = webhook.ParentType
	out.CreatedBy = webhook.CreatedBy
	out.Created = webhook.Created
	out.Updated = webhook.Updated
	out.Type = webhook.Type
	out.Scope = webhook.Scope
	out.Identifier = webhook.Identifier
	out.DisplayName = webhook.DisplayName
	out.Description = webhook.Description
	out.URL = webhook.URL
	out.Secret = webhook.Secret
	out.Enabled = webhook.Enabled
	out.Insecure = webhook.Insecure
	out.Triggers = webhook.Triggers
	out.LatestExecutionResult = webhook.LatestExecutionResult
	out.ExtraHeaders = webhook.ExtraHeaders
	return &out
}
// GitnessWebhooksToWebhooksCore maps a slice of gitness webhooks to webhook
// cores. The result is never nil (an empty slice is returned for empty or
// nil input), preserving the original behavior.
func GitnessWebhooksToWebhooksCore(webhooks []*types.Webhook) []*types.WebhookCore {
	// preallocate with the final length to avoid repeated growth copies.
	webhooksCore := make([]*types.WebhookCore, 0, len(webhooks))
	for _, webhook := range webhooks {
		webhooksCore = append(webhooksCore, GitnessWebhookToWebhookCore(webhook))
	}
	return webhooksCore
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/webhook/ownership.go | app/services/webhook/ownership.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhook
import (
"context"
"fmt"
"strconv"
"strings"
"github.com/harness/gitness/errors"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
// Find looks up a webhook by identifier under the given parent and returns
// it. Any failure of the lookup/ownership check surfaces as a not-found
// error to the caller.
func (s *Service) Find(
	ctx context.Context,
	parentID int64,
	parentType enum.WebhookParent,
	webhookIdentifier string,
) (*types.Webhook, error) {
	webhook, err := s.GetWebhookVerifyOwnership(ctx, parentID, parentType, webhookIdentifier)
	if err != nil {
		return nil, errors.NotFoundf("failed to find webhook %s: %q", webhookIdentifier, err)
	}
	return webhook, nil
}
// GetWebhookVerifyOwnership gets the webhook and
// ensures it belongs to the scope with the specified id and type.
// A purely numeric identifier is treated as a webhook ID; anything else is
// resolved as a webhook identifier under the given parent.
func (s *Service) GetWebhookVerifyOwnership(
	ctx context.Context,
	parentID int64,
	parentType enum.WebhookParent,
	webhookIdentifier string,
) (*types.Webhook, error) {
	if len(strings.TrimSpace(webhookIdentifier)) == 0 {
		return nil, errors.InvalidArgument("A valid webhook identifier must be provided.")
	}

	// TODO: Remove once webhook identifier migration completed
	webhookID, parseErr := strconv.ParseInt(webhookIdentifier, 10, 64)
	if parseErr == nil && webhookID <= 0 {
		return nil, errors.InvalidArgument("A valid webhook identifier must be provided.")
	}

	var webhook *types.Webhook
	var err error
	if parseErr == nil {
		// numeric identifier - look up directly by ID.
		webhook, err = s.webhookStore.Find(ctx, webhookID)
	} else {
		webhook, err = s.webhookStore.FindByIdentifier(
			ctx, parentType, parentID, webhookIdentifier)
	}
	if err != nil {
		return nil, fmt.Errorf("failed to find webhook with identifier %q: %w", webhookIdentifier, err)
	}

	// ensure the webhook actually belongs to the repo
	if webhook.ParentType != parentType || webhook.ParentID != parentID {
		return nil, errors.NotFoundf("webhook doesn't belong to requested %s.", parentType)
	}

	return webhook, nil
}
// GetWebhookExecutionVerifyOwnership gets the webhook execution and
// ensures it belongs to the webhook with the specified id.
func (s *Service) GetWebhookExecutionVerifyOwnership(
	ctx context.Context,
	webhookID int64,
	webhookExecutionID int64,
) (*types.WebhookExecution, error) {
	if webhookExecutionID <= 0 {
		return nil, errors.InvalidArgument("A valid webhook execution ID must be provided.")
	}

	execution, err := s.webhookExecutionStore.Find(ctx, webhookExecutionID)
	if err != nil {
		return nil, fmt.Errorf("failed to find webhook execution with id %d: %w", webhookExecutionID, err)
	}

	// reject executions owned by a different webhook.
	if execution.WebhookID != webhookID {
		return nil, errors.NotFound("webhook execution doesn't belong to requested webhook")
	}

	return execution, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/webhook/types.go | app/services/webhook/types.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhook
import (
"context"
"encoding/json"
"time"
"github.com/harness/gitness/app/url"
"github.com/harness/gitness/git"
gitenum "github.com/harness/gitness/git/enum"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/rs/zerolog/log"
)
/*
 * The idea of segments is to expose similar fields using the same structure.
 * This makes consumption on webhook payloads easier as we ensure related webhooks have similar payload formats.
 * Segments are meant to be embedded, while Infos are meant to be used as fields.
 */

// BaseSegment contains base info of all payloads for webhooks.
type BaseSegment struct {
	Trigger   enum.WebhookTrigger `json:"trigger"`
	Repo      RepositoryInfo      `json:"repo"`
	Principal PrincipalInfo       `json:"principal"`
}

// ReferenceSegment contains the reference info for webhooks.
type ReferenceSegment struct {
	Ref ReferenceInfo `json:"ref"`
}

// ReferenceDetailsSegment contains extra details for reference related payloads for webhooks.
type ReferenceDetailsSegment struct {
	SHA               string        `json:"sha"`
	HeadCommit        *CommitInfo   `json:"head_commit,omitempty"`
	Commits           *[]CommitInfo `json:"commits,omitempty"`
	TotalCommitsCount int           `json:"total_commits_count,omitempty"`
	// Deprecated: kept only for backward compatibility of the payload format.
	Commit *CommitInfo `json:"commit,omitempty"`
}

// ReferenceUpdateSegment contains extra details for reference update related payloads for webhooks.
type ReferenceUpdateSegment struct {
	OldSHA string `json:"old_sha"`
	Forced bool   `json:"forced"`
}

// PullReqTargetReferenceSegment contains details for the pull req target reference for webhooks.
type PullReqTargetReferenceSegment struct {
	TargetRef ReferenceInfo `json:"target_ref"`
}

// PullReqSegment contains details for all pull req related payloads for webhooks.
type PullReqSegment struct {
	PullReq PullReqInfo `json:"pull_req"`
}

// PullReqCommentSegment contains details for all pull req comment related payloads for webhooks.
// CodeCommentInfo is embedded as a pointer so its fields are inlined into the
// payload only when present.
type PullReqCommentSegment struct {
	CommentInfo CommentInfo `json:"comment"`
	*CodeCommentInfo
}

// PullReqCommentStatusUpdatedSegment contains the updated status of a pull req comment.
type PullReqCommentStatusUpdatedSegment struct {
	Status enum.PullReqCommentStatus `json:"status"`
}

// PullReqLabelSegment contains details for all pull req label related payloads for webhooks.
type PullReqLabelSegment struct {
	LabelInfo LabelInfo `json:"label"`
}

// PullReqUpdateSegment contains details what has been updated in the pull request.
type PullReqUpdateSegment struct {
	TitleChanged       bool   `json:"title_changed"`
	TitleOld           string `json:"title_old"`
	TitleNew           string `json:"title_new"`
	DescriptionChanged bool   `json:"description_changed"`
	DescriptionOld     string `json:"description_old"`
	DescriptionNew     string `json:"description_new"`
}

// PullReqReviewSegment contains the review decision and the reviewer for
// review related payloads.
type PullReqReviewSegment struct {
	ReviewDecision enum.PullReqReviewDecision `json:"review_decision"`
	ReviewerInfo   PrincipalInfo              `json:"reviewer"`
}

// PullReqTargetBrancheChangedSegment contains the previous target branch
// details for target-branch-change payloads.
// NOTE: the "Branche" typo is part of the exported name and can't be renamed
// without breaking callers.
type PullReqTargetBrancheChangedSegment struct {
	OldTargetBranch string `json:"old_target_branch"`
	OldMergeBaseSHA string `json:"old_merge_base_sha"`
}
// RepositoryInfo describes the repo related info for a webhook payload.
// NOTE: don't use types package as we want webhook payload to be independent from API calls.
type RepositoryInfo struct {
	ID            int64  `json:"id"`
	Path          string `json:"path"`
	Identifier    string `json:"identifier"`
	Description   string `json:"description"`
	DefaultBranch string `json:"default_branch"`
	URL           string `json:"url"`         // UI URL of the repo
	GitURL        string `json:"git_url"`     // HTTP(S) git clone URL
	GitSSHURL     string `json:"git_ssh_url"` // SSH git clone URL
}
// MarshalJSON emits the repository info with the identifier duplicated into
// a legacy "uid" field.
// TODO [CODE-1363]: remove after identifier migration.
func (r RepositoryInfo) MarshalJSON() ([]byte, error) {
	// alias has the same fields but none of the methods, which prevents an
	// infinite recursion into this MarshalJSON during marshaling.
	type alias RepositoryInfo
	out := struct {
		alias
		UID string `json:"uid"`
	}{
		alias: alias(r),
		UID:   r.Identifier,
	}
	return json.Marshal(&out)
}
// repositoryInfoFrom gets the RepositoryInfo from a types.Repository.
// A nil repository yields the zero-value RepositoryInfo.
func repositoryInfoFrom(ctx context.Context, repo *types.Repository, urlProvider url.Provider) RepositoryInfo {
	if repo == nil {
		return RepositoryInfo{}
	}

	info := RepositoryInfo{
		ID:            repo.ID,
		Path:          repo.Path,
		Identifier:    repo.Identifier,
		Description:   repo.Description,
		DefaultBranch: repo.DefaultBranch,
	}
	info.URL = urlProvider.GenerateUIRepoURL(ctx, repo.Path)
	info.GitURL = urlProvider.GenerateGITCloneURL(ctx, repo.Path)
	info.GitSSHURL = urlProvider.GenerateGITCloneSSHURL(ctx, repo.Path)
	return info
}
// PullReqInfo describes the pullreq related info for a webhook payload.
// NOTE: don't use types package as we want pullreq payload to be independent from API calls.
type PullReqInfo struct {
	Number       int64             `json:"number"`
	State        enum.PullReqState `json:"state"`
	IsDraft      bool              `json:"is_draft"`
	Title        string            `json:"title"`
	Description  string            `json:"description"`
	SourceRepoID *int64            `json:"source_repo_id"`
	SourceBranch string            `json:"source_branch"`
	TargetRepoID int64             `json:"target_repo_id"`
	TargetBranch string            `json:"target_branch"`
	MergeBaseSHA string            `json:"merge_base_sha"`
	// MergeStrategy is only set when a merge method applies (omitted otherwise).
	MergeStrategy *enum.MergeMethod `json:"merge_strategy,omitempty"`
	Author        PrincipalInfo     `json:"author"`
	PrURL         string            `json:"pr_url"` // UI URL of the pull request
}
// pullReqInfoFrom gets the PullReqInfo from a types.PullReq.
func pullReqInfoFrom(
	ctx context.Context,
	pr *types.PullReq,
	repo *types.Repository,
	urlProvider url.Provider,
) PullReqInfo {
	authorInfo := principalInfoFrom(&pr.Author)
	prURL := urlProvider.GenerateUIPRURL(ctx, repo.Path, pr.Number)

	return PullReqInfo{
		Number:        pr.Number,
		State:         pr.State,
		IsDraft:       pr.IsDraft,
		Title:         pr.Title,
		Description:   pr.Description,
		SourceRepoID:  pr.SourceRepoID,
		SourceBranch:  pr.SourceBranch,
		TargetRepoID:  pr.TargetRepoID,
		TargetBranch:  pr.TargetBranch,
		MergeBaseSHA:  pr.MergeBaseSHA,
		MergeStrategy: pr.MergeMethod,
		Author:        authorInfo,
		PrURL:         prURL,
	}
}
// PrincipalInfo describes the principal related info for a webhook payload.
// NOTE: don't use types package as we want webhook payload to be independent from API calls.
type PrincipalInfo struct {
	ID          int64              `json:"id"`
	UID         string             `json:"uid"`
	DisplayName string             `json:"display_name"`
	Email       string             `json:"email"`
	Type        enum.PrincipalType `json:"type"`
	Created     int64              `json:"created"`
	Updated     int64              `json:"updated"`
}
// principalInfoFrom gets the PrincipalInfo from a types.PrincipalInfo.
func principalInfoFrom(principal *types.PrincipalInfo) PrincipalInfo {
	var info PrincipalInfo
	info.ID = principal.ID
	info.UID = principal.UID
	info.DisplayName = principal.DisplayName
	info.Email = principal.Email
	info.Type = principal.Type
	info.Created = principal.Created
	info.Updated = principal.Updated
	return info
}
// CommitInfo describes the commit related info for a webhook payload.
// NOTE: don't use types package as we want webhook payload to be independent from API calls.
type CommitInfo struct {
	SHA       string        `json:"sha"`
	Message   string        `json:"message"`
	Author    SignatureInfo `json:"author"`
	Committer SignatureInfo `json:"committer"`
	URL       string        `json:"url"` // UI URL of the commit
	// Added/Removed/Modified list the file paths touched by the commit
	// (see commitInfoFrom for how renames/copies are categorized).
	Added    []string `json:"added"`
	Removed  []string `json:"removed"`
	Modified []string `json:"modified"`
}
// commitInfoFrom gets the CommitInfo from a git.Commit.
// File stats are translated into lists of added/removed/modified paths;
// a rename contributes its new path to added and its old path to removed,
// a copy contributes its path to added.
func commitInfoFrom(
	ctx context.Context,
	repoPath string,
	commit git.Commit,
	urlProvider url.Provider,
) CommitInfo {
	// initialize as empty (non-nil) so the payload serializes as [] and not null.
	addedPaths := make([]string, 0)
	removedPaths := make([]string, 0)
	modifiedPaths := make([]string, 0)
	for _, stat := range commit.FileStats {
		switch stat.Status {
		case gitenum.FileDiffStatusModified:
			modifiedPaths = append(modifiedPaths, stat.Path)
		case gitenum.FileDiffStatusRenamed:
			addedPaths = append(addedPaths, stat.Path)
			removedPaths = append(removedPaths, stat.OldPath)
		case gitenum.FileDiffStatusDeleted:
			removedPaths = append(removedPaths, stat.Path)
		case gitenum.FileDiffStatusAdded, gitenum.FileDiffStatusCopied:
			addedPaths = append(addedPaths, stat.Path)
		case gitenum.FileDiffStatusUndefined:
			// nothing to record for undefined statuses.
		default:
			log.Warn().Msgf("unknown status %q for path %q", stat.Status, stat.Path)
		}
	}

	sha := commit.SHA.String()

	return CommitInfo{
		SHA:       sha,
		Message:   commit.Message,
		Author:    signatureInfoFrom(commit.Author),
		Committer: signatureInfoFrom(commit.Committer),
		URL:       urlProvider.GenerateUIRefURL(ctx, repoPath, sha),
		Added:     addedPaths,
		Removed:   removedPaths,
		Modified:  modifiedPaths,
	}
}
// commitsInfoFrom gets the CommitInfo for every commit in the given slice.
func commitsInfoFrom(
	ctx context.Context,
	repoPath string,
	commits []git.Commit,
	urlProvider url.Provider,
) []CommitInfo {
	out := make([]CommitInfo, 0, len(commits))
	for _, commit := range commits {
		out = append(out, commitInfoFrom(ctx, repoPath, commit, urlProvider))
	}
	return out
}
// SignatureInfo describes the commit signature related info for a webhook payload.
// NOTE: don't use types package as we want webhook payload to be independent from API calls.
type SignatureInfo struct {
	Identity IdentityInfo `json:"identity"`
	When     time.Time    `json:"when"` // timestamp of the signature
}
// signatureInfoFrom gets the SignatureInfo from a git.Signature.
func signatureInfoFrom(signature git.Signature) SignatureInfo {
	var info SignatureInfo
	info.Identity = identityInfoFrom(signature.Identity)
	info.When = signature.When
	return info
}
// IdentityInfo describes the signature identity related info for a webhook payload.
// NOTE: don't use types package as we want webhook payload to be independent from API calls.
type IdentityInfo struct {
	Name  string `json:"name"`
	Email string `json:"email"`
}
// identityInfoFrom gets the IdentityInfo from a git.Identity.
func identityInfoFrom(identity git.Identity) IdentityInfo {
	var info IdentityInfo
	info.Name = identity.Name
	info.Email = identity.Email
	return info
}
// ReferenceInfo describes a unique reference in Harness.
// It contains both the reference name as well as the repo the reference belongs to.
type ReferenceInfo struct {
	Name string         `json:"name"`
	Repo RepositoryInfo `json:"repo"`
}

// CommentInfo describes a pull request comment for a webhook payload.
type CommentInfo struct {
	ID       int64                    `json:"id"`
	ParentID *int64                   `json:"parent_id,omitempty"`
	Text     string                   `json:"text"`
	Created  int64                    `json:"created"`
	Updated  int64                    `json:"updated"`
	Kind     enum.PullReqActivityKind `json:"kind"`
}

// LabelInfo describes a label (and optionally its value) for a webhook payload.
type LabelInfo struct {
	ID      int64   `json:"id"`
	Key     string  `json:"key"`
	ValueID *int64  `json:"value_id,omitempty"`
	Value   *string `json:"value,omitempty"`
}

// CodeCommentInfo describes the code location a code comment is attached to.
type CodeCommentInfo struct {
	Outdated     bool   `json:"outdated"`
	MergeBaseSHA string `json:"merge_base_sha"`
	SourceSHA    string `json:"source_sha"`
	Path         string `json:"path"`
	LineNew      int    `json:"line_new"`
	SpanNew      int    `json:"span_new"`
	LineOld      int    `json:"line_old"`
	SpanOld      int    `json:"span_old"`
}

// ParentResource identifies the parent entity (see enum.WebhookParent) a
// webhook is attached to.
type ParentResource struct {
	ID         int64
	Identifier string
	Type       enum.WebhookParent
	Path       string
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/webhook/service.go | app/services/webhook/service.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhook
import (
"context"
"errors"
"fmt"
"net/http"
"time"
gitevents "github.com/harness/gitness/app/events/git"
pullreqevents "github.com/harness/gitness/app/events/pullreq"
"github.com/harness/gitness/app/sse"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/app/url"
"github.com/harness/gitness/audit"
"github.com/harness/gitness/encrypt"
"github.com/harness/gitness/events"
"github.com/harness/gitness/git"
"github.com/harness/gitness/secret"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/stream"
"github.com/harness/gitness/types"
)
const (
	// eventsReaderGroupName is the stream consumer group used by the webhook
	// service when reading git and pull request events.
	eventsReaderGroupName = "gitness:webhook"
)

// Config holds the configuration of the webhook service and its executor.
type Config struct {
	// UserAgentIdentity specifies the identity used for the user agent header
	// IMPORTANT: do not include version.
	UserAgentIdentity string
	// HeaderIdentity specifies the identity used for headers in webhook calls (e.g. X-Gitness-Trigger, ...).
	// NOTE: If no value is provided, the UserAgentIdentity will be used.
	HeaderIdentity string
	// EventReaderName is the name under which the event stream readers are launched.
	EventReaderName string
	// Concurrency is the number of events handled in parallel (must be > 0, see Prepare).
	Concurrency int
	// MaxRetries is the maximum number of retries for event handling (must be >= 0, see Prepare).
	MaxRetries int
	// AllowPrivateNetwork controls whether external webhook calls may target private networks.
	AllowPrivateNetwork bool
	// AllowLoopback controls whether webhook calls may target loopback addresses.
	AllowLoopback bool
	// InternalSecret is the secret used for internal webhooks.
	InternalSecret string
}
// Prepare validates the config and backfills optional values.
// It returns an error for a nil receiver, missing required fields, or
// out-of-range numeric settings.
func (c *Config) Prepare() error {
	// cases are evaluated in order, so the nil check guards the field accesses.
	switch {
	case c == nil:
		return errors.New("config is required")
	case c.EventReaderName == "":
		return errors.New("Config.EventReaderName is required")
	case c.UserAgentIdentity == "":
		return errors.New("Config.UserAgentIdentity is required")
	case c.Concurrency < 1:
		return errors.New("Config.Concurrency has to be a positive number")
	case c.MaxRetries < 0:
		return errors.New("Config.MaxRetries can't be negative")
	}

	// Backfill data
	if c.HeaderIdentity == "" {
		c.HeaderIdentity = c.UserAgentIdentity
	}

	return nil
}
// WebhookExecutorStore abstracts the persistence operations the webhook
// executor needs for webhooks and their executions.
//nolint:revive
type WebhookExecutorStore interface {
	// Find returns the webhook execution with the given id.
	Find(ctx context.Context, id int64) (*types.WebhookExecutionCore, error)
	// ListWebhooks returns the webhooks registered for the given parents.
	ListWebhooks(
		ctx context.Context,
		parents []types.WebhookParentInfo,
	) ([]*types.WebhookCore, error)
	// UpdateOptLock updates the webhook; the name suggests optimistic
	// locking semantics — confirm with the implementation.
	UpdateOptLock(
		ctx context.Context, hook *types.WebhookCore,
		execution *types.WebhookExecutionCore,
	) (*types.WebhookCore, error)
	// FindWebhook returns the webhook with the given id.
	FindWebhook(
		ctx context.Context,
		id int64,
	) (*types.WebhookCore, error)
	// ListForTrigger returns the executions recorded for the given trigger id.
	ListForTrigger(
		ctx context.Context,
		triggerID string,
	) ([]*types.WebhookExecutionCore, error)
	// CreateWebhookExecution persists a new webhook execution.
	CreateWebhookExecution(ctx context.Context, hook *types.WebhookExecutionCore) error
}
// WebhookExecutor bundles everything required to perform webhook calls:
// HTTP clients for the secure/insecure x external/internal combinations,
// the config, crypto, and the stores used to resolve secrets and persist
// execution results.
//nolint:revive
type WebhookExecutor struct {
	secureHTTPClient           *http.Client // external targets, TLS verified
	insecureHTTPClient         *http.Client // external targets, TLS verification skipped
	secureHTTPClientInternal   *http.Client // internal targets (private networks allowed), TLS verified
	insecureHTTPClientInternal *http.Client // internal targets, TLS verification skipped
	config                     Config
	webhookURLProvider         URLProvider
	encrypter                  encrypt.Encrypter
	spacePathStore             store.SpacePathStore
	secretService              secret.Service
	principalStore             store.PrincipalStore
	webhookExecutorStore       WebhookExecutorStore
	source                     string // trigger source, used for the X-...-<source> header
}
// NewWebhookExecutor returns a new WebhookExecutor wired up with HTTP
// clients for all combinations of (in)secure TLS and internal/external
// targets.
func NewWebhookExecutor(
	config Config,
	webhookURLProvider URLProvider,
	encrypter encrypt.Encrypter,
	spacePathStore store.SpacePathStore,
	secretService secret.Service,
	principalStore store.PrincipalStore,
	webhookExecutorStore WebhookExecutorStore,
	source string,
) *WebhookExecutor {
	// external clients respect the configured network restrictions,
	// internal clients always allow private networks.
	secureClient := newHTTPClient(config.AllowLoopback, config.AllowPrivateNetwork, false)
	insecureClient := newHTTPClient(config.AllowLoopback, config.AllowPrivateNetwork, true)
	secureInternalClient := newHTTPClient(config.AllowLoopback, true, false)
	insecureInternalClient := newHTTPClient(config.AllowLoopback, true, true)

	return &WebhookExecutor{
		webhookExecutorStore:       webhookExecutorStore,
		secureHTTPClient:           secureClient,
		insecureHTTPClient:         insecureClient,
		secureHTTPClientInternal:   secureInternalClient,
		insecureHTTPClientInternal: insecureInternalClient,
		config:                     config,
		webhookURLProvider:         webhookURLProvider,
		encrypter:                  encrypter,
		spacePathStore:             spacePathStore,
		secretService:              secretService,
		principalStore:             principalStore,
		source:                     source,
	}
}
// Service is responsible for processing webhook events.
// It is constructed via NewService, which also launches the git and pull
// request event stream readers that feed the handlers.
type Service struct {
	// WebhookExecutor performs the actual webhook HTTP calls.
	WebhookExecutor *WebhookExecutor

	tx                    dbtx.Transactor
	webhookStore          store.WebhookStore
	webhookExecutionStore store.WebhookExecutionStore
	urlProvider           url.Provider
	spaceStore            store.SpaceStore
	repoStore             store.RepoStore
	pullreqStore          store.PullReqStore
	principalStore        store.PrincipalStore
	git                   git.Interface
	activityStore         store.PullReqActivityStore
	labelStore            store.LabelStore
	labelValueStore       store.LabelValueStore
	encrypter             encrypt.Encrypter
	config                Config
	auditService          audit.Service
	sseStreamer           sse.Streamer
}
// NewService wires up and returns the webhook Service.
// Besides constructing the service it validates the config and launches the
// git and pull request event stream readers, registering all
// webhook-relevant event handlers on them.
func NewService(
	ctx context.Context,
	config Config,
	tx dbtx.Transactor,
	gitReaderFactory *events.ReaderFactory[*gitevents.Reader],
	prReaderFactory *events.ReaderFactory[*pullreqevents.Reader],
	webhookStore store.WebhookStore,
	webhookExecutionStore store.WebhookExecutionStore,
	spaceStore store.SpaceStore,
	repoStore store.RepoStore,
	pullreqStore store.PullReqStore,
	activityStore store.PullReqActivityStore,
	urlProvider url.Provider,
	principalStore store.PrincipalStore,
	git git.Interface,
	encrypter encrypt.Encrypter,
	labelStore store.LabelStore,
	webhookURLProvider URLProvider,
	labelValueStore store.LabelValueStore,
	auditService audit.Service,
	sseStreamer sse.Streamer,
	secretService secret.Service,
	spacePathStore store.SpacePathStore,
) (*Service, error) {
	if err := config.Prepare(); err != nil {
		return nil, fmt.Errorf("provided webhook service Config is invalid: %w", err)
	}

	// the executor reads and writes webhooks/executions through the gitness stores.
	webhookExecutorStore := &GitnessWebhookExecutorStore{
		webhookStore:          webhookStore,
		webhookExecutionStore: webhookExecutionStore,
	}
	executor := NewWebhookExecutor(config, webhookURLProvider, encrypter, spacePathStore,
		secretService, principalStore, webhookExecutorStore, RepoTrigger)

	service := &Service{
		WebhookExecutor:       executor,
		tx:                    tx,
		webhookStore:          webhookStore,
		webhookExecutionStore: webhookExecutionStore,
		spaceStore:            spaceStore,
		repoStore:             repoStore,
		pullreqStore:          pullreqStore,
		activityStore:         activityStore,
		urlProvider:           urlProvider,
		principalStore:        principalStore,
		git:                   git,
		encrypter:             encrypter,
		config:                config,
		labelStore:            labelStore,
		labelValueStore:       labelValueStore,
		auditService:          auditService,
		sseStreamer:           sseStreamer,
	}

	// launch the git event reader and register branch/tag handlers.
	_, err := gitReaderFactory.Launch(ctx, eventsReaderGroupName, config.EventReaderName,
		func(r *gitevents.Reader) error {
			const idleTimeout = 1 * time.Minute
			r.Configure(
				stream.WithConcurrency(config.Concurrency),
				stream.WithHandlerOptions(
					stream.WithIdleTimeout(idleTimeout),
					stream.WithMaxRetries(config.MaxRetries),
				))

			// register events
			_ = r.RegisterBranchCreated(service.handleEventBranchCreated)
			_ = r.RegisterBranchUpdated(service.handleEventBranchUpdated)
			_ = r.RegisterBranchDeleted(service.handleEventBranchDeleted)
			_ = r.RegisterTagCreated(service.handleEventTagCreated)
			_ = r.RegisterTagUpdated(service.handleEventTagUpdated)
			_ = r.RegisterTagDeleted(service.handleEventTagDeleted)

			return nil
		})
	if err != nil {
		return nil, fmt.Errorf("failed to launch git event reader for webhooks: %w", err)
	}

	// launch the pull request event reader and register pull request handlers.
	_, err = prReaderFactory.Launch(ctx, eventsReaderGroupName, config.EventReaderName,
		func(r *pullreqevents.Reader) error {
			const idleTimeout = 1 * time.Minute
			r.Configure(
				stream.WithConcurrency(config.Concurrency),
				stream.WithHandlerOptions(
					stream.WithIdleTimeout(idleTimeout),
					stream.WithMaxRetries(config.MaxRetries),
				))

			// register events
			_ = r.RegisterCreated(service.handleEventPullReqCreated)
			_ = r.RegisterReopened(service.handleEventPullReqReopened)
			_ = r.RegisterBranchUpdated(service.handleEventPullReqBranchUpdated)
			_ = r.RegisterClosed(service.handleEventPullReqClosed)
			_ = r.RegisterCommentCreated(service.handleEventPullReqComment)
			_ = r.RegisterCommentUpdated(service.handleEventPullReqCommentUpdated)
			_ = r.RegisterMerged(service.handleEventPullReqMerged)
			_ = r.RegisterUpdated(service.handleEventPullReqUpdated)
			_ = r.RegisterLabelAssigned(service.handleEventPullReqLabelAssigned)
			_ = r.RegisterReviewSubmitted(service.handleEventPullReqReviewSubmitted)
			_ = r.RegisterCommentStatusUpdated(service.handleEventPullReqCommentStatusUpdated)
			_ = r.RegisterTargetBranchChanged(service.handleEventPullReqTargetBranchChanged)

			return nil
		})
	if err != nil {
		return nil, fmt.Errorf("failed to launch pr event reader for webhooks: %w", err)
	}

	return service, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/webhook/delete.go | app/services/webhook/delete.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhook
import (
"context"
"github.com/harness/gitness/audit"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/rs/zerolog/log"
)
// Delete deletes an existing webhook.
// Internal webhooks are protected from deletion unless allowDeletingInternal
// is set. A successful deletion is audited (best effort) and announced via SSE.
func (s *Service) Delete(
	ctx context.Context,
	principal *types.Principal,
	webhookIdentifier string,
	parentResource ParentResource,
	allowDeletingInternal bool,
) error {
	webhook, err := s.GetWebhookVerifyOwnership(ctx, parentResource.ID, parentResource.Type, webhookIdentifier)
	if err != nil {
		return err
	}

	if webhook.Type == enum.WebhookTypeInternal && !allowDeletingInternal {
		return ErrInternalWebhookOperationNotAllowed
	}

	if err := s.webhookStore.Delete(ctx, webhook.ID); err != nil {
		return err
	}

	// write the audit entry - a failure here doesn't fail the deletion.
	if shouldAuditWebhook(webhook.Type) {
		resourceType, nameKey := getWebhookAuditInfo(parentResource.Type)
		auditErr := s.auditService.Log(ctx,
			*principal,
			audit.NewResource(resourceType, webhook.Identifier, nameKey, parentResource.Identifier),
			audit.ActionDeleted,
			parentResource.Path,
			audit.WithOldObject(webhook),
		)
		if auditErr != nil {
			log.Ctx(ctx).Warn().Msgf("failed to insert audit log for delete webhook operation: %s", auditErr)
		}
	}

	s.sendSSE(ctx, parentResource, enum.SSETypeWebhookDeleted, webhook)

	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/webhook/execution.go | app/services/webhook/execution.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhook
import (
"context"
"fmt"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/rs/zerolog/log"
)
// FindExecution finds a webhook execution.
func (s *Service) FindExecution(
	ctx context.Context,
	parentID int64,
	parentType enum.WebhookParent,
	webhookIdentifier string,
	webhookExecutionID int64,
) (*types.WebhookExecution, error) {
	// Resolve the webhook first - this also verifies it belongs to the given parent.
	hook, err := s.GetWebhookVerifyOwnership(ctx, parentID, parentType, webhookIdentifier)
	if err != nil {
		return nil, err
	}

	// The execution lookup likewise verifies it belongs to the resolved webhook.
	execution, err := s.GetWebhookExecutionVerifyOwnership(ctx, hook.ID, webhookExecutionID)
	if err != nil {
		return nil, err
	}

	return execution, nil
}
// ListExecutions returns the executions of the webhook together with their total count.
func (s *Service) ListExecutions(
	ctx context.Context,
	parentID int64,
	parentType enum.WebhookParent,
	webhookIdentifier string,
	filter *types.WebhookExecutionFilter,
) ([]*types.WebhookExecution, int64, error) {
	webhook, err := s.GetWebhookVerifyOwnership(ctx, parentID, parentType, webhookIdentifier)
	if err != nil {
		// NOTE: webhook is nil when err != nil - reference the identifier instead of
		// webhook.ID, which would panic with a nil pointer dereference.
		return nil, 0, fmt.Errorf("failed to verify ownership for webhook %q: %w", webhookIdentifier, err)
	}

	total, err := s.webhookExecutionStore.CountForWebhook(ctx, webhook.ID)
	if err != nil {
		return nil, 0, fmt.Errorf("failed to count webhook executions for webhook %d: %w", webhook.ID, err)
	}

	webhookExecutions, err := s.webhookExecutionStore.ListForWebhook(ctx, webhook.ID, filter)
	if err != nil {
		return nil, 0, fmt.Errorf("failed to list webhook executions for webhook %d: %w", webhook.ID, err)
	}

	return webhookExecutions, total, nil
}
// RetriggerExecution retriggers an existing webhook execution and returns the
// newly created execution. A delivery error of the retriggered execution is
// logged but does not fail the call.
func (s *Service) RetriggerExecution(
	ctx context.Context,
	parentID int64,
	parentType enum.WebhookParent,
	webhookIdentifier string,
	webhookExecutionID int64,
) (*types.WebhookExecutionCore, error) {
	webhook, err := s.GetWebhookVerifyOwnership(
		ctx, parentID, parentType, webhookIdentifier)
	if err != nil {
		return nil, err
	}

	webhookExecution, err := s.GetWebhookExecutionVerifyOwnership(
		ctx, webhook.ID, webhookExecutionID)
	if err != nil {
		return nil, err
	}

	executionResult, err := s.WebhookExecutor.RetriggerWebhookExecution(ctx, webhookExecution.ID)
	if err != nil {
		return nil, fmt.Errorf("failed to retrigger webhook execution: %w", err)
	}

	if executionResult.Err != nil {
		// The retrigger itself succeeded but the new execution reported an error -
		// surface it in the logs only. (Fixed typo: "webhhook" -> "webhook".)
		log.Ctx(ctx).Warn().Err(executionResult.Err).Msgf(
			"retrigger of webhook %d execution %d (new id: %d) had an error",
			webhook.ID, webhookExecution.ID, executionResult.Execution.ID)
	}

	return executionResult.Execution, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/webhook/list.go | app/services/webhook/list.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhook
import (
"context"
"fmt"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
// List returns the webhooks from the provided scope.
func (s *Service) List(
	ctx context.Context,
	parentID int64,
	parentType enum.WebhookParent,
	inherited bool,
	filter *types.WebhookFilter,
) ([]*types.Webhook, int64, error) {
	var (
		parents []types.WebhookParentInfo
		err     error
	)

	switch parentType {
	case enum.WebhookParentRepo:
		if parents, err = s.getParentInfoRepo(ctx, parentID, inherited); err != nil {
			return nil, 0, err
		}
	case enum.WebhookParentSpace:
		if parents, err = s.getParentInfoSpace(ctx, parentID, inherited); err != nil {
			return nil, 0, err
		}
	case enum.WebhookParentRegistry:
		// registry webhooks are queried with no parent scope info (parents stays nil).
	default:
		return nil, 0, fmt.Errorf("webhook type %s is not supported", parentType)
	}

	count, err := s.webhookStore.Count(ctx, parents, filter)
	if err != nil {
		return nil, 0, fmt.Errorf("failed to count webhooks for scope with id %d: %w", parentID, err)
	}

	webhooks, err := s.webhookStore.List(ctx, parents, filter)
	if err != nil {
		return nil, 0, fmt.Errorf("failed to list webhooks for scope with id %d: %w", parentID, err)
	}

	return webhooks, count, nil
}
// getParentInfoRepo builds the parent scope list for a repo: the repo itself,
// plus - when inherited is true - all ancestor spaces of the repo.
func (s *Service) getParentInfoRepo(
	ctx context.Context,
	repoID int64,
	inherited bool,
) ([]types.WebhookParentInfo, error) {
	parents := []types.WebhookParentInfo{{
		ID:   repoID,
		Type: enum.WebhookParentRepo,
	}}

	if !inherited {
		return parents, nil
	}

	repo, err := s.repoStore.Find(ctx, repoID)
	if err != nil {
		return nil, fmt.Errorf("failed to get repo: %w", err)
	}

	spaceIDs, err := s.spaceStore.GetAncestorIDs(ctx, repo.ParentID)
	if err != nil {
		return nil, fmt.Errorf("failed to get parent space ids: %w", err)
	}

	for _, spaceID := range spaceIDs {
		parents = append(parents, types.WebhookParentInfo{
			Type: enum.WebhookParentSpace,
			ID:   spaceID,
		})
	}

	return parents, nil
}
// getParentInfoSpace builds the parent scope list for a space: either the space
// itself, or - when inherited is true - the ancestor IDs reported by the space store.
func (s *Service) getParentInfoSpace(
	ctx context.Context,
	spaceID int64,
	inherited bool,
) ([]types.WebhookParentInfo, error) {
	if !inherited {
		return []types.WebhookParentInfo{{
			Type: enum.WebhookParentSpace,
			ID:   spaceID,
		}}, nil
	}

	ids, err := s.spaceStore.GetAncestorIDs(ctx, spaceID)
	if err != nil {
		return nil, fmt.Errorf("failed to get parent space ids: %w", err)
	}

	parents := make([]types.WebhookParentInfo, 0, len(ids))
	for _, id := range ids {
		parents = append(parents, types.WebhookParentInfo{
			Type: enum.WebhookParentSpace,
			ID:   id,
		})
	}

	return parents, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/webhook/store.go | app/services/webhook/store.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhook
import (
"context"
gitnessstore "github.com/harness/gitness/app/store"
"github.com/harness/gitness/types"
)
// GitnessWebhookExecutorStore wraps the gitness webhook and webhook-execution
// stores, converting between the gitness types and their *Core counterparts
// in its methods.
type GitnessWebhookExecutorStore struct {
	webhookStore          gitnessstore.WebhookStore
	webhookExecutionStore gitnessstore.WebhookExecutionStore
}
// Find looks up a webhook execution by ID and returns its core representation.
func (s *GitnessWebhookExecutorStore) Find(ctx context.Context, id int64) (*types.WebhookExecutionCore, error) {
	execution, err := s.webhookExecutionStore.Find(ctx, id)
	if err != nil {
		return nil, err
	}

	return GitnessWebhookExecutionToWebhookExecutionCore(execution), nil
}
// ListWebhooks lists all webhooks for the given parents (with an empty filter)
// and returns them in their core representation.
func (s *GitnessWebhookExecutorStore) ListWebhooks(
	ctx context.Context,
	parents []types.WebhookParentInfo,
) ([]*types.WebhookCore, error) {
	webhooks, err := s.webhookStore.List(ctx, parents, &types.WebhookFilter{})
	if err != nil {
		return nil, err
	}

	return GitnessWebhooksToWebhooksCore(webhooks), nil
}
// ListForTrigger returns all webhook executions recorded for the given trigger
// in their core representation.
func (s *GitnessWebhookExecutorStore) ListForTrigger(
	ctx context.Context,
	triggerID string,
) ([]*types.WebhookExecutionCore, error) {
	executions, err := s.webhookExecutionStore.ListForTrigger(ctx, triggerID)
	if err != nil {
		return nil, err
	}

	// Pre-size the result - the final length is known, avoiding repeated growth copies.
	webhookExecutionsCore := make([]*types.WebhookExecutionCore, 0, len(executions))
	for _, e := range executions {
		webhookExecutionsCore = append(webhookExecutionsCore, GitnessWebhookExecutionToWebhookExecutionCore(e))
	}

	return webhookExecutionsCore, nil
}
// CreateWebhookExecution persists the given core execution and copies the
// store-assigned ID back into the caller's object.
func (s *GitnessWebhookExecutorStore) CreateWebhookExecution(
	ctx context.Context,
	executionCore *types.WebhookExecutionCore,
) error {
	execution := CoreWebhookExecutionToGitnessWebhookExecution(executionCore)

	err := s.webhookExecutionStore.Create(ctx, execution)
	if err != nil {
		return err
	}

	// propagate the ID generated by the store back to the core object.
	executionCore.ID = execution.ID

	return nil
}
// UpdateOptLock updates the webhook's latest execution result using the
// store's optimistic-locking update and returns the updated webhook in its
// core representation.
func (s *GitnessWebhookExecutorStore) UpdateOptLock(
	ctx context.Context, hook *types.WebhookCore,
	execution *types.WebhookExecutionCore,
) (*types.WebhookCore, error) {
	webhook := CoreWebhookToGitnessWebhook(hook)

	// mutateFn is applied by the store on each optimistic-lock retry.
	mutateFn := func(hook *types.Webhook) error {
		hook.LatestExecutionResult = &execution.Result
		return nil
	}

	gitnessWebhook, err := s.webhookStore.UpdateOptLock(ctx, webhook, mutateFn)
	if err != nil {
		return nil, err
	}

	// err is guaranteed nil here - return an explicit nil instead of the stale variable.
	return GitnessWebhookToWebhookCore(gitnessWebhook), nil
}
// FindWebhook looks up a webhook by ID and returns its core representation.
func (s *GitnessWebhookExecutorStore) FindWebhook(
	ctx context.Context,
	id int64,
) (*types.WebhookCore, error) {
	webhook, err := s.webhookStore.Find(ctx, id)
	if err != nil {
		return nil, err
	}

	return GitnessWebhookToWebhookCore(webhook), nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/app/services/webhook/url_provider_interface.go | app/services/webhook/url_provider_interface.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhook
import (
"context"
"github.com/harness/gitness/types"
)
// URLProvider resolves the target URL for a webhook.
type URLProvider interface {
	// GetWebhookURL returns the URL for the given webhook.
	GetWebhookURL(ctx context.Context, webhook *types.WebhookCore) (string, error)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.