repo stringlengths 6 47 | file_url stringlengths 77 269 | file_path stringlengths 5 186 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-07 08:35:43 2026-01-07 08:55:24 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/git/parser/raw_header.go | git/parser/raw_header.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import (
"fmt"
"regexp"
"strconv"
"time"
)
var (
	// reGitHeaderPerson matches a git identity line of the form
	// "<name> <<email>> <unix-timestamp> <tz-offset>", e.g.
	// "John Doe <john@example.com> 1700000000 +0530".
	// Groups: 1=name, 2=email (optional), 3=unix time,
	// 4=full tz offset, 5=signed hour part, 6=minute part.
	//
	// NOTE: the sign class was `[-|+]`, which also accepted a literal '|'
	// as the timezone sign; fixed to `[-+]`.
	reGitHeaderPerson = regexp.MustCompile(`^(.+) <(.+)?> (\d+) (([-+]\d\d)(\d\d))$`)
)
// ObjectHeaderIdentity parses a git identity header value (author, committer,
// tagger) of the form "Name <email> <unix-timestamp> <tz-offset>".
//
// If the line doesn't match the expected pattern, all zero values and a nil
// error are returned. On success the timestamp carries a fixed time zone
// constructed from the offset (the zone is named after the raw offset
// string, e.g. "+0530").
func ObjectHeaderIdentity(line string) (name string, email string, timestamp time.Time, err error) {
	data := reGitHeaderPerson.FindStringSubmatch(line)
	if data == nil {
		// Not an identity line: report "no result" rather than an error.
		return "", "", time.Time{}, nil
	}

	tzName := data[4] // the full offset string, e.g. "+0530"; reused as the zone name

	unixTime, err := strconv.ParseInt(data[3], 10, 64)
	if err != nil {
		return "", "", time.Time{},
			fmt.Errorf("failed to parse unix time in %q: %w", line, err)
	}

	hour, err := strconv.Atoi(data[5]) // signed hour part, e.g. "+05" or "-02"
	if err != nil {
		return "", "", time.Time{},
			fmt.Errorf("unrecognized hour tz offset in %q: %w", line, err)
	}

	minutes, err := strconv.Atoi(data[6]) // minute part, e.g. "30"
	if err != nil {
		return "", "", time.Time{},
			fmt.Errorf("unrecognized minute tz offset in %q: %w", line, err)
	}

	// Fold the minute part into the signed hour offset: the minutes always
	// push the offset further away from UTC in the hour's direction.
	offset := hour * 60
	if hour < 0 {
		offset -= minutes
	} else {
		offset += minutes
	}
	offset *= 60 // minutes -> seconds, as required by time.FixedZone

	name = data[1]
	email = data[2]
	timestamp = time.Unix(unixTime, 0).In(time.FixedZone(tzName, offset))

	return name, email, timestamp, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/git/parser/diff_headers.go | git/parser/diff_headers.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import (
"bufio"
"io"
"regexp"
)
// DiffFileHunkHeaders groups the file header of a single file in a git diff
// with the headers of all hunks found in that file.
type DiffFileHunkHeaders struct {
	FileHeader   DiffFileHeader
	HunksHeaders []HunkHeader
}
var regExpDiffFileHeader = regexp.MustCompile(`^diff --git a/(.+) b/(.+)$`)
// ParseDiffFileHeader parses a `diff --git a/... b/...` line that starts a
// file section of git diff output. It returns the parsed header and true,
// or a zero-value header and false when the line doesn't match.
func ParseDiffFileHeader(line string) (DiffFileHeader, bool) {
	match := regExpDiffFileHeader.FindStringSubmatch(line)
	if match == nil {
		return DiffFileHeader{}, false
	}

	header := DiffFileHeader{
		OldFileName: match[1],
		NewFileName: match[2],
		Extensions:  make(map[string]string),
	}
	return header, true
}
// GetHunkHeaders parses git diff output and returns all diff headers for all files.
// See for documentation: https://git-scm.com/docs/git-diff#generate_patch_text_with_p
func GetHunkHeaders(r io.Reader) ([]*DiffFileHunkHeaders, error) {
	bufrd := bufio.NewReader(r)

	var currentFile *DiffFileHunkHeaders
	var result []*DiffFileHunkHeaders

	for {
		// Consume the line but get only the first 4K of it...
		// We're interested only in the hunk headers anyway, and they are never longer than this.
		line, err := readLinePrefix(bufrd, 4096)
		if err != nil && err != io.EOF { //nolint:errorlint
			return nil, err
		}
		if len(line) == 0 {
			// readLinePrefix returned nothing: end of input.
			break
		}

		// A "diff --git" line starts a new file section; flush the previous one.
		if h, ok := ParseDiffFileHeader(line); ok {
			if currentFile != nil {
				result = append(result, currentFile)
			}
			currentFile = &DiffFileHunkHeaders{
				FileHeader:   h,
				HunksHeaders: nil,
			}
			continue
		}

		if currentFile == nil {
			// should not happen: we reached the hunk header without first finding the file header.
			return nil, ErrHunkNotFound
		}

		if h, ok := ParseDiffHunkHeader(line); ok {
			currentFile.HunksHeaders = append(currentFile.HunksHeaders, h)
			continue
		}

		// Any extended header ("index", "new file mode", ...) is attached to
		// the current file; all other lines (hunk content) are ignored.
		if headerKey, headerValue := ParseDiffFileExtendedHeader(line); headerKey != "" {
			currentFile.FileHeader.Extensions[headerKey] = headerValue
			continue
		}
	}

	// Flush the file section still being collected when the input ended.
	if currentFile != nil {
		result = append(result, currentFile)
	}

	return result, nil
}
// readLinePrefix will consume the entire line from the reader,
// but will return only the first maxLen bytes from it - the rest is discarded.
// Returns io.EOF when the end of the input has been reached.
func readLinePrefix(br *bufio.Reader, maxLen int) (line string, err error) {
for {
var raw []byte
var isPrefix bool
raw, isPrefix, err = br.ReadLine()
if err != nil && err != io.EOF { //nolint:errorlint
return "", err
}
if needMore := maxLen - len(line); needMore > 0 {
if len(raw) > needMore {
line += string(raw[:needMore])
} else {
line += string(raw)
}
}
if !isPrefix || len(raw) == 0 {
break
}
}
return line, err
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/git/parser/commit_message_test.go | git/parser/commit_message_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import "testing"
// TestCleanUpWhitespace checks that CleanUpWhitespace trims trailing
// whitespace from every line, collapses runs of empty lines, strips empty
// lines from the top and bottom of the message, and ensures a final EOL.
func TestCleanUpWhitespace(t *testing.T) {
	tests := []struct {
		name  string
		input string
		exp   string
	}{
		{
			name: "remove_trailing_spaces_in_lines",
			input: "" +
				"ABC \n" +
				"\t\t\n" +
				"DEF\t\n",
			exp: "" +
				"ABC\n" +
				"\n" +
				"DEF\n",
		},
		{
			name: "add_eof_to_the_last_line",
			input: "" +
				"ABC\n" +
				"DEF",
			exp: "" +
				"ABC\n" +
				"DEF\n",
		},
		{
			name: "remove_consecutive_empty_lines",
			input: "" +
				"ABC\n" +
				"\n" +
				"\t\t\n" +
				"\n" +
				"DEF\n",
			exp: "" +
				"ABC\n" +
				"\n" +
				"DEF\n",
		},
		{
			name: "remove_empty_lines_from_the_message_bottom",
			input: "" +
				"ABC\n" +
				"\n" +
				"DEF\n" +
				"\n" +
				"\n" +
				"\n",
			exp: "" +
				"ABC\n" +
				"\n" +
				"DEF\n",
		},
		{
			name: "remove_empty_lines_from_the_message_top",
			input: "" +
				"\n" +
				"\n" +
				"ABC\n" +
				"\n" +
				"DEF\n" +
				"\n",
			exp: "" +
				"ABC\n" +
				"\n" +
				"DEF\n",
		},
		{
			name: "multi_line_body",
			input: "" +
				"ABC\n" +
				"DEF\n" +
				"\n" +
				"GHI\n" +
				"JKL\n" +
				"\n" +
				"NMO\n",
			exp: "" +
				"ABC\n" +
				"DEF\n" +
				"\n" +
				"GHI\n" +
				"JKL\n" +
				"\n" +
				"NMO\n",
		},
		{
			// Mixes all of the above; note that leading spaces inside
			// non-empty lines must be preserved.
			name: "complex",
			input: "" +
				"\n" +
				"subj one\n" +
				" subj two\n" +
				"\t\t\n" +
				" \n" +
				" body one\n" +
				"body two\n" +
				" \t \n" +
				" body three\n" +
				" \n" +
				" ",
			exp: "" +
				"subj one\n" +
				" subj two\n" +
				"\n" +
				" body one\n" +
				"body two\n" +
				"\n" +
				" body three\n",
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			cleaned := CleanUpWhitespace(test.input)
			if want, got := test.exp, cleaned; want != got {
				t.Errorf("want=%q, got=%q", want, got)
			}
		})
	}
}
// TestExtractSubject checks that ExtractSubject joins the leading non-empty
// lines into a single-line subject and stops at the first empty line.
func TestExtractSubject(t *testing.T) {
	tests := []struct {
		name  string
		input string
		exp   string
	}{
		{
			name: "join_lines",
			input: "" +
				"ABC\n" +
				"DEF\n",
			exp: "ABC DEF",
		},
		{
			name: "stop_after_empty",
			input: "" +
				"ABC\n" +
				"DEF\n" +
				"\n" +
				"GHI\n",
			exp: "ABC DEF",
		},
		{
			name: "ignore_extra_whitespace",
			input: "" +
				"\t\n" +
				" ABC \n" +
				"\tDEF \n" +
				"\t\t\n" +
				"GHI",
			exp: "ABC DEF",
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			subject := ExtractSubject(test.input)
			if want, got := test.exp, subject; want != got {
				t.Errorf("want=%q, got=%q", want, got)
			}
		})
	}
}
// TestSplitMessage checks that SplitMessage cleans up the message whitespace
// and splits it into a one-line subject and the remaining body.
func TestSplitMessage(t *testing.T) {
	tests := []struct {
		name       string
		input      string
		expSubject string
		expBody    string
	}{
		{
			name: "remove_trailing_spaces_in_lines",
			input: "" +
				"ABC \n" +
				"\t\t\n" +
				"DEF\n",
			expSubject: "ABC",
			expBody:    "DEF\n",
		},
		{
			name: "add_eof_to_the_last_line",
			input: "" +
				"ABC\n" +
				"DEF",
			expSubject: "ABC DEF",
			expBody:    "",
		},
		{
			name: "add_eof_to_the_last_line_of_body",
			input: "" +
				"ABC\n" +
				"DEF\n" +
				"\n" +
				"GHI",
			expSubject: "ABC DEF",
			expBody:    "GHI\n",
		},
		{
			name: "remove_consecutive_empty_lines",
			input: "" +
				"ABC\n" +
				"\n" +
				"\t\t\n" +
				"\n" +
				"DEF\n",
			expSubject: "ABC",
			expBody:    "DEF\n",
		},
		{
			name: "multi_line_body",
			input: "" +
				"ABC\n" +
				"\n" +
				"DEF\n" +
				"GHI\n",
			expSubject: "ABC",
			expBody:    "DEF\nGHI\n",
		},
		{
			name: "remove_empty_lines_from_the_message_bottom",
			input: "" +
				"ABC\n" +
				"\n" +
				"DEF\n" +
				"\n" +
				"\n" +
				"\n",
			expSubject: "ABC",
			expBody:    "DEF\n",
		},
		{
			name: "remove_empty_lines_from_the_message_top",
			input: "" +
				"\n" +
				"\n" +
				"ABC\n" +
				"\n" +
				"DEF\n" +
				"\n",
			expSubject: "ABC",
			expBody:    "DEF\n",
		},
		{
			name: "complex",
			input: "" +
				"\n" +
				"subj one\n" +
				" subj two\n" +
				"\t\t\n" +
				" \n" +
				" body one\n" +
				"body two\n" +
				" \t \n" +
				" body three\n" +
				" \n" +
				" ",
			expSubject: "subj one subj two",
			expBody: "" +
				" body one\n" +
				"body two\n" +
				"\n" +
				" body three\n",
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			subject, body := SplitMessage(test.input)
			if want, got := test.expSubject, subject; want != got {
				t.Errorf("subject: want=%q, got=%q", want, got)
			}
			if want, got := test.expBody, body; want != got {
				t.Errorf("body: want=%q, got=%q", want, got)
			}
		})
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/git/parser/errors.go | git/parser/errors.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import "github.com/harness/gitness/errors"
// Sentinel errors shared by the parsers in this package.
var (
	// ErrSHADoesNotMatch indicates a SHA comparison mismatch.
	ErrSHADoesNotMatch = errors.InvalidArgument("sha does not match")
	// ErrHunkNotFound indicates hunk-header data appeared without a preceding file header.
	ErrHunkNotFound = errors.NotFound("hunk not found")
	// ErrBinaryFile indicates content that can't be processed as text.
	ErrBinaryFile = errors.InvalidArgument("can't handle a binary file")
	// ErrPeekedMoreThanOnce is returned by ScannerWithPeek on consecutive Peek calls.
	ErrPeekedMoreThanOnce = errors.PreconditionFailed("peeking more than once in a row is not supported")
)
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/git/parser/reference_list.go | git/parser/reference_list.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import (
"bufio"
"fmt"
"io"
"regexp"
"github.com/harness/gitness/git/sha"
)
var reReference = regexp.MustCompile(`^([0-9a-f]+)[ |\t](.+)$`)
// ReferenceList parses reference-listing output (lines of
// "<sha><tab><ref-name>") into a map of reference name to SHA.
// Any line that doesn't match the expected format aborts with an error.
func ReferenceList(r io.Reader) (map[string]sha.SHA, error) {
	result := make(map[string]sha.SHA)

	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		line := scanner.Text()

		parts := reReference.FindStringSubmatch(line)
		if parts == nil {
			return nil, fmt.Errorf("unexpected output of reference list: %s", line)
		}

		refSHAStr := parts[1]
		refName := parts[2]

		// Validate and normalize the SHA before storing it.
		refSHA, err := sha.New(refSHAStr)
		if err != nil {
			return nil, fmt.Errorf("failed to parse reference sha (%s): %w", refSHAStr, err)
		}

		result[refName] = refSHA
	}
	if err := scanner.Err(); err != nil {
		return nil, fmt.Errorf("failed to read references: %w", err)
	}

	return result, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/git/parser/diff_headers_test.go | git/parser/diff_headers_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import (
"bufio"
"io"
"strings"
"testing"
"github.com/harness/gitness/git/enum"
"github.com/google/go-cmp/cmp"
)
// TestGetHunkHeaders feeds a three-file diff (an added file, a renamed+changed
// file with two hunks, and a deleted file) through GetHunkHeaders and checks
// the file headers, extended headers and hunk coordinates.
// NOTE(review): the export this was recovered from stripped leading
// whitespace; context lines inside the raw diff below carry the single
// leading space git produces — confirm against the upstream file.
func TestGetHunkHeaders(t *testing.T) {
	input := `diff --git a/new_file.txt b/new_file.txt
new file mode 100644
index 0000000..fb0c863
--- /dev/null
+++ b/new_file.txt
@@ -0,0 +1,3 @@
+This is a new file
+created for this
+unit test.
diff --git a/old_file_name.txt b/changed_file.txt
index f043b93..e9449b5 100644
--- a/changed_file.txt
+++ b/changed_file.txt
@@ -7,3 +7,4 @@
 Unchanged line
-Removed line 1
+Added line 1
+Added line 2
 Unchanged line
@@ -27,2 +28,3 @@
 Unchanged line
+Added line
 Unchanged line
diff --git a/deleted_file.txt b/deleted_file.txt
deleted file mode 100644
index f043b93..0000000
--- a/deleted_file.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-This is content of
-a deleted file
-in git diff output.
`

	got, err := GetHunkHeaders(strings.NewReader(input))
	if err != nil {
		t.Errorf("got error: %v", err)
		return
	}

	want := []*DiffFileHunkHeaders{
		{
			FileHeader: DiffFileHeader{
				OldFileName: "new_file.txt",
				NewFileName: "new_file.txt",
				Extensions: map[string]string{
					enum.DiffExtHeaderNewFileMode: "100644",
					enum.DiffExtHeaderIndex:       "0000000..fb0c863",
				},
			},
			HunksHeaders: []HunkHeader{{OldLine: 0, OldSpan: 0, NewLine: 1, NewSpan: 3}},
		},
		{
			FileHeader: DiffFileHeader{
				OldFileName: "old_file_name.txt",
				NewFileName: "changed_file.txt",
				Extensions: map[string]string{
					enum.DiffExtHeaderIndex: "f043b93..e9449b5 100644",
				},
			},
			HunksHeaders: []HunkHeader{
				{OldLine: 7, OldSpan: 3, NewLine: 7, NewSpan: 4},
				{OldLine: 27, OldSpan: 2, NewLine: 28, NewSpan: 3},
			},
		},
		{
			FileHeader: DiffFileHeader{
				OldFileName: "deleted_file.txt",
				NewFileName: "deleted_file.txt",
				Extensions: map[string]string{
					enum.DiffExtHeaderDeletedFileMode: "100644",
					enum.DiffExtHeaderIndex:           "f043b93..0000000",
				},
			},
			HunksHeaders: []HunkHeader{{OldLine: 1, OldSpan: 3, NewLine: 0, NewSpan: 0}},
		},
	}

	if diff := cmp.Diff(got, want); diff != "" {
		t.Error(diff)
	}
}
// TestReadLinePrefix checks that readLinePrefix consumes whole lines of any
// length (LF or CRLF terminated) while returning at most maxLen bytes of
// each, including lines longer than bufio's internal buffer.
func TestReadLinePrefix(t *testing.T) {
	const maxLen = 256
	tests := []struct {
		name    string
		wf      func(w io.Writer) // writes the test input to the pipe
		expLens []int             // expected returned length per line
	}{
		{
			name:    "empty",
			wf:      func(io.Writer) {},
			expLens: nil,
		},
		{
			name: "single",
			wf: func(w io.Writer) {
				_, _ = w.Write([]byte("aaa"))
			},
			expLens: []int{3},
		},
		{
			name: "single-eol",
			wf: func(w io.Writer) {
				_, _ = w.Write([]byte("aaa\n"))
			},
			expLens: []int{3},
		},
		{
			name: "two-lines",
			wf: func(w io.Writer) {
				_, _ = w.Write([]byte("aa\nbb"))
			},
			expLens: []int{2, 2},
		},
		{
			name: "two-lines-crlf",
			wf: func(w io.Writer) {
				_, _ = w.Write([]byte("aa\r\nbb\r\n"))
			},
			expLens: []int{2, 2},
		},
		{
			name: "empty-line",
			wf: func(w io.Writer) {
				_, _ = w.Write([]byte("aa\n\ncc"))
			},
			expLens: []int{2, 0, 2},
		},
		{
			// Lines longer than maxLen must be truncated to maxLen.
			name: "too-long",
			wf: func(w io.Writer) {
				for range maxLen {
					_, _ = w.Write([]byte("a"))
				}
				_, _ = w.Write([]byte("\n"))
				for range maxLen * 2 {
					_, _ = w.Write([]byte("b"))
				}
				_, _ = w.Write([]byte("\n"))
				for range maxLen / 2 {
					_, _ = w.Write([]byte("c"))
				}
				_, _ = w.Write([]byte("\n"))
			},
			expLens: []int{maxLen, maxLen, maxLen / 2},
		},
		{
			// Lines longer than bufio's internal buffer must still be fully consumed.
			name: "overflow-buffer",
			wf: func(w io.Writer) {
				for range bufio.MaxScanTokenSize + 1 {
					_, _ = w.Write([]byte("a"))
				}
				_, _ = w.Write([]byte("\n"))
				for range bufio.MaxScanTokenSize * 2 {
					_, _ = w.Write([]byte("b"))
				}
				_, _ = w.Write([]byte("\n"))
				for range bufio.MaxScanTokenSize {
					_, _ = w.Write([]byte("c"))
				}
			},
			expLens: []int{maxLen, maxLen, maxLen},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			// Stream the input through a pipe so the reader sees it incrementally.
			pr, pw := io.Pipe()
			defer pr.Close()
			go func() {
				test.wf(pw)
				_ = pw.Close()
			}()
			br := bufio.NewReader(pr)
			for i, expLen := range test.expLens {
				expLine := strings.Repeat(string(rune('a'+i)), expLen)
				line, err := readLinePrefix(br, maxLen)
				if err != nil && err != io.EOF { //nolint:errorlint
					t.Errorf("got error: %s", err.Error())
					return
				}
				if want, got := expLine, line; want != got {
					t.Errorf("line %d mismatch want=%s got=%s", i, want, got)
					return
				}
			}
			line, err := readLinePrefix(br, maxLen)
			if line != "" || err != io.EOF { //nolint:errorlint
				t.Errorf("expected empty line and EOF but got: line=%s err=%v", line, err)
			}
		})
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/git/parser/reference_list_test.go | git/parser/reference_list_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import (
"strings"
"testing"
"github.com/harness/gitness/git/sha"
"golang.org/x/exp/maps"
)
// TestReferenceList parses two tab-separated reference lines and compares the
// resulting name-to-SHA map against the expected content.
func TestReferenceList(t *testing.T) {
	test := "" + "" +
		"3dffbe6490139d56d57a3bbb5f3a9a9e8cc316bb\trefs/heads/main\n" +
		"20e8b3475740f528f0b6f62d29ce5098ad491bfd\trefs/heads/master\n"

	m, err := ReferenceList(strings.NewReader(test))
	if err != nil {
		t.Errorf("failed with error: %s", err.Error())
		return
	}

	expected := map[string]sha.SHA{
		"refs/heads/main":   sha.Must("3dffbe6490139d56d57a3bbb5f3a9a9e8cc316bb"),
		"refs/heads/master": sha.Must("20e8b3475740f528f0b6f62d29ce5098ad491bfd"),
	}

	if !maps.Equal(m, expected) {
		t.Errorf("expected %v, got %v", expected, m)
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/git/parser/raw_object.go | git/parser/raw_object.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import (
"bytes"
"fmt"
"regexp"
)
var (
	// reSigBegin and reSigEnd match the armor lines delimiting a signature,
	// e.g. "-----BEGIN PGP SIGNATURE-----"; group 1 captures the label
	// between BEGIN/END and the dashes (the signature type).
	reSigBegin = regexp.MustCompile(`^-----BEGIN (.+)-----\n`)
	reSigEnd   = regexp.MustCompile(`^-----END (.+)-----\n`)
)
// ObjectRaw is the parsed representation of a raw git object.
type ObjectRaw struct {
	Headers       []ObjectHeader
	Message       string
	SignedContent []byte // content the signature covers; nil when unsigned
	Signature     []byte // raw armored signature; nil when unsigned
	SignatureType string // armor label, e.g. "PGP SIGNATURE"; empty when unsigned
}
// ObjectHeader is a single "<type> <value>" header of a git object.
// Value keeps its trailing EOL character(s).
type ObjectHeader struct {
	Type  string
	Value string
}
// Object parses a raw git object into its headers, message, signature and
// the content covered by the signature.
//
// A signature can appear either as a "gpgsig" header or embedded in the body
// between "-----BEGIN ...-----" / "-----END ...-----" armor lines. In both
// cases SignedContent is populated only when a signature was found.
func Object(raw []byte) (ObjectRaw, error) {
	var (
		headers       []ObjectHeader
		message       bytes.Buffer
		signedContent bytes.Buffer
		sigBuffer     bytes.Buffer
		sig           []byte
		sigType       string
	)

	headers = make([]ObjectHeader, 0, 4)

	// Parser states: reading headers, reading the message body, or reading
	// an armored signature embedded in the body.
	const (
		stateHead = iota
		stateBody
		stateBodySig
	)

	state := stateHead

	for len(raw) != 0 {
		if state == stateHead {
			headerType, headerContent, advance, err := getHeader(raw)
			if err != nil {
				return ObjectRaw{}, err
			}

			switch headerType {
			case "":
				// Empty line: headers are done, the body follows.
				signedContent.Write(raw[:advance])
				state = stateBody
			case "gpgsig":
				// The signature header itself is not written to signedContent.
				sig = headerContent.Bytes()
				data := reSigBegin.FindSubmatch(sig)
				if len(data) == 0 {
					return ObjectRaw{}, fmt.Errorf("invalid signature header: %s", data)
				}
				sigType = string(data[1])
			default:
				signedContent.Write(raw[:advance])
				// Headers added with the trailing EOL character. This is important for some headers (mergetag).
				headerValue := headerContent.String()
				headers = append(headers, ObjectHeader{
					Type:  headerType,
					Value: headerValue,
				})
			}

			raw = raw[advance:]

			continue
		}

		// Cut the next line off raw, keeping its EOL character if present.
		var line []byte
		idxEOL := bytes.IndexByte(raw, '\n')
		if idxEOL == -1 {
			line = raw
			raw = nil
		} else {
			line = raw[:idxEOL+1] // line includes EOL
			raw = raw[idxEOL+1:]
		}

		if state == stateBodySig {
			// Accumulate armored signature lines until the END marker.
			sigBuffer.Write(line)
			if reSigEnd.Match(line) {
				sig = sigBuffer.Bytes()
				state = stateBody
			}
			continue
		}

		data := reSigBegin.FindSubmatch(line)
		if len(data) > 0 {
			// Start of an armored signature inside the body.
			sigBuffer.Write(line)
			sigType = string(data[1])
			state = stateBodySig
		} else {
			signedContent.Write(line)
			message.Write(line)
		}
	}

	// Expose the signed content only when a signature was actually present.
	var signedContentBytes []byte
	if sigType != "" {
		signedContentBytes = signedContent.Bytes()
	}

	return ObjectRaw{
		Headers:       headers,
		Message:       message.String(),
		SignedContent: signedContentBytes,
		Signature:     sig,
		SignatureType: sigType,
	}, nil
}
// getHeader parses one header ("<header-type><space><header-value>") from the
// start of raw. Multiline header values continue on subsequent lines prefixed
// with a single space; continuation lines are appended without that prefix
// but with their EOL kept.
//
// It returns the header type and value plus the number of bytes consumed.
// An empty line yields an empty headerType (and nil content), marking the end
// of the header section.
func getHeader(raw []byte) (headerType string, headerContent *bytes.Buffer, advance int, err error) {
	headerContent = bytes.NewBuffer(nil)
	for {
		idxEOL := bytes.IndexByte(raw, '\n')
		if idxEOL < 0 {
			return "", nil, 0, fmt.Errorf("header line must end with EOL character: %s", raw)
		}

		lineLen := idxEOL + 1

		line := raw[:lineLen] // line includes EOL
		raw = raw[lineLen:]

		if advance == 0 {
			// First line of the header.
			if len(line) == 1 {
				// empty line means no header
				return "", nil, 1, nil
			}

			idxSpace := bytes.IndexByte(line, ' ') // expected header form is "<header-type><space><header_value>"
			if idxSpace <= 0 {
				return "", nil, 0, fmt.Errorf("malformed header: %s", line[:len(line)-1])
			}

			headerType = string(line[:idxSpace])
			headerContent.Write(line[idxSpace+1:])
		} else {
			headerContent.Write(line[1:]) // add the line without the space prefix
		}

		advance += lineLen

		// peek at next line to find if it's a multiline header (i.e. a signature) - if the next line starts with space
		hasMoreLines := len(raw) > 0 && raw[0] == ' '
		if !hasMoreLines {
			return headerType, headerContent, advance, nil
		}
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/git/parser/diff_headers_extended.go | git/parser/diff_headers_extended.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import (
"regexp"
"github.com/harness/gitness/git/enum"
)
// regExpDiffExtHeader matches any of git's extended diff header lines
// ("old mode", "new file mode", "rename from", "index", ...);
// group 1 is the header name, group 2 its value.
var regExpDiffExtHeader = regexp.MustCompile(
	"^(" +
		enum.DiffExtHeaderOldMode + "|" +
		enum.DiffExtHeaderNewMode + "|" +
		enum.DiffExtHeaderDeletedFileMode + "|" +
		enum.DiffExtHeaderNewFileMode + "|" +
		enum.DiffExtHeaderCopyFrom + "|" +
		enum.DiffExtHeaderCopyTo + "|" +
		enum.DiffExtHeaderRenameFrom + "|" +
		enum.DiffExtHeaderRenameTo + "|" +
		enum.DiffExtHeaderSimilarity + "|" +
		enum.DiffExtHeaderDissimilarity + "|" +
		enum.DiffExtHeaderIndex +
		") (.+)$")
// ParseDiffFileExtendedHeader parses a generic extended header line.
// It returns the header name and its value, or two empty strings when the
// line isn't a recognized extended header.
func ParseDiffFileExtendedHeader(line string) (string, string) {
	match := regExpDiffExtHeader.FindStringSubmatch(line)
	if match == nil {
		return "", ""
	}
	key, value := match[1], match[2]
	return key, value
}
// regExpDiffFileIndexHeader parses the `index` extended header line with a format like:
//
//	index f994c2cf569523ba736473bbfbac3700fa1db28d..0000000000000000000000000000000000000000
//	index 68233d6cd204b0df84e91a1ce8c8b75e13529973..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
//
// NOTE: it's NEW_SHA..OLD_SHA.
// NOTE(review): git itself prints the pair as OLD..NEW — confirm the reversed
// naming above matches how the diffs handled by this parser are produced.
var regExpDiffFileIndexHeader = regexp.MustCompile(`^index ([0-9a-f]{4,64})\.\.([0-9a-f]{4,64})( [0-9]+)?$`)
// DiffExtHeaderParseIndex parses the `index` extended diff header line and
// returns the two SHAs (see regExpDiffFileIndexHeader for their order),
// reporting ok=false when the line doesn't match.
func DiffExtHeaderParseIndex(line string) (newSHA string, oldSHA string, ok bool) {
	groups := regExpDiffFileIndexHeader.FindStringSubmatch(line)
	if groups == nil {
		return "", "", false
	}
	return groups[1], groups[2], true
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/git/parser/lfs_pointers.go | git/parser/lfs_pointers.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import (
"bytes"
"context"
"errors"
"regexp"
"strconv"
"github.com/rs/zerolog/log"
)
// LfsPointerMaxSize is the maximum size for an LFS pointer file.
// This is used to identify blobs that are too large to be valid LFS pointers.
// lfs-pointer specification ref: https://github.com/git-lfs/git-lfs/blob/master/docs/spec.md#the-pointer
const LfsPointerMaxSize = 200

// lfsPointerVersionPrefix is the mandatory prefix of the first line of every LFS pointer file.
const lfsPointerVersionPrefix = "version https://git-lfs.github.com/spec"
// LFSPointer holds the data parsed from a git-lfs pointer file:
// the sha256 object ID and the declared size of the actual content.
type LFSPointer struct {
	OID  string
	Size int64
}
var (
regexLFSOID = regexp.MustCompile(`(?m)^oid sha256:([a-f0-9]{64})$`)
regexLFSSize = regexp.MustCompile(`(?m)^size (\d+)+$`)
ErrInvalidLFSPointer = errors.New("invalid lfs pointer")
)
// GetLFSObjectID extracts the sha256 object ID from the content of an LFS
// pointer file. It returns ErrInvalidLFSPointer when the content lacks the
// version prefix, a valid oid line, or a valid size line.
func GetLFSObjectID(content []byte) (string, error) {
	if !bytes.HasPrefix(content, []byte(lfsPointerVersionPrefix)) {
		return "", ErrInvalidLFSPointer
	}

	match := regexLFSOID.FindSubmatch(content)
	switch {
	case match == nil:
		return "", ErrInvalidLFSPointer
	case !regexLFSSize.Match(content):
		return "", ErrInvalidLFSPointer
	}

	return string(match[1]), nil
}
// IsLFSPointer reports whether content is a valid git-lfs pointer file and,
// if so, returns the parsed pointer (object ID and declared content size).
//
// Unlike GetLFSObjectID it never returns an error: malformed or oversized
// input simply yields (nil, false). The size parameter is the blob size,
// used to cheaply reject blobs too large to be pointers.
func IsLFSPointer(
	ctx context.Context,
	content []byte,
	size int64,
) (*LFSPointer, bool) {
	if size > LfsPointerMaxSize {
		return nil, false
	}

	if !bytes.HasPrefix(content, []byte(lfsPointerVersionPrefix)) {
		return nil, false
	}

	oidMatch := regexLFSOID.FindSubmatch(content)
	if oidMatch == nil {
		return nil, false
	}

	sizeMatch := regexLFSSize.FindSubmatch(content)
	if sizeMatch == nil {
		return nil, false
	}

	contentSize, err := strconv.ParseInt(string(sizeMatch[1]), 10, 64)
	if err != nil {
		// The regex guarantees digits, so this should only fire on overflow;
		// log and treat the blob as a non-pointer.
		log.Ctx(ctx).Warn().Err(err).Msgf("failed to parse lfs pointer size for object ID %s", oidMatch[1])
		return nil, false
	}

	return &LFSPointer{OID: string(oidMatch[1]), Size: contentSize}, true
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/git/parser/scanner.go | git/parser/scanner.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import (
"bufio"
"bytes"
"io"
)
// Scanner is the subset of bufio.Scanner's API used by the parsers in this
// package; it is satisfied by both *bufio.Scanner and *ScannerWithPeek.
type Scanner interface {
	Scan() bool
	Err() error
	Bytes() []byte
	Text() string
}
// ScanZeroSeparated is a bufio.SplitFunc that splits the input at NUL (zero)
// bytes. A trailing chunk without a terminating zero is emitted at EOF.
func ScanZeroSeparated(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil // nothing left to emit
	}
	idx := bytes.IndexByte(data, 0)
	if idx >= 0 {
		// Emit everything before the separator and skip past it.
		return idx + 1, data[:idx], nil
	}
	if atEOF {
		// Final, unterminated token.
		return len(data), data, nil
	}
	// Request more data.
	return 0, nil, nil
}
// ScanLinesWithEOF is a variation of bufio's ScanLines that keeps the line
// endings in the returned tokens.
// https://cs.opensource.google/go/go/+/master:src/bufio/scan.go;l=355;drc=bc2124dab14fa292e18df2937037d782f7868635
func ScanLinesWithEOF(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil // nothing left to emit
	}
	idx := bytes.IndexByte(data, '\n')
	if idx < 0 {
		if atEOF {
			// Final line without a terminating newline: emit it as-is.
			return len(data), data, nil
		}
		// Request more data.
		return 0, nil, nil
	}
	// Emit the full line including its '\n'.
	return idx + 1, data[:idx+1], nil
}
// NewScannerWithPeek returns a ScannerWithPeek that scans r using the
// provided split function.
func NewScannerWithPeek(r io.Reader, split bufio.SplitFunc) *ScannerWithPeek {
	scanner := bufio.NewScanner(r)
	scanner.Split(split)
	return &ScannerWithPeek{
		scanner: scanner,
	}
}
// ScannerWithPeek wraps bufio.Scanner and additionally allows peeking at the
// next token without consuming it. Only a single peek ahead is supported.
type ScannerWithPeek struct {
	peeked        bool   // a peeked token is buffered and not yet consumed
	peekedScanOut bool   // Scan result captured by the pending Peek
	nextLine      []byte // current token bytes
	nextErr       error  // current error
	scanner       *bufio.Scanner
}
// scan advances the underlying scanner and caches its token and error.
func (s *ScannerWithPeek) scan() bool {
	scanOut := s.scanner.Scan()
	s.nextErr = s.scanner.Err()
	s.nextLine = s.scanner.Bytes()
	return scanOut
}
// Peek loads the next token without consuming it: the following Scan call
// yields the same token. Calling Peek twice in a row fails with
// ErrPeekedMoreThanOnce.
func (s *ScannerWithPeek) Peek() bool {
	if s.peeked {
		s.nextLine = nil
		s.nextErr = ErrPeekedMoreThanOnce
		return false
	}

	// load next line
	scanOut := s.scan()

	// set peeked data
	s.peeked = true
	s.peekedScanOut = scanOut

	return scanOut
}
// Scan advances to the next token, or consumes the token buffered by a
// preceding Peek call.
func (s *ScannerWithPeek) Scan() bool {
	if s.peeked {
		s.peeked = false
		return s.peekedScanOut
	}
	return s.scan()
}
// Err returns the error associated with the current (peeked or scanned) token.
func (s *ScannerWithPeek) Err() error {
	return s.nextErr
}
// Bytes returns the current (peeked or scanned) token as a byte slice.
func (s *ScannerWithPeek) Bytes() []byte {
	return s.nextLine
}
// Text returns the current (peeked or scanned) token as a string.
func (s *ScannerWithPeek) Text() string {
	return string(s.nextLine)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/git/parser/text_test.go | git/parser/text_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import (
"bytes"
"testing"
"github.com/gotidy/ptr"
"github.com/stretchr/testify/assert"
)
// Test_readTextFileEmpty checks that empty input yields the default line
// ending ("\n") and a scanner that terminates immediately without an error.
func Test_readTextFileEmpty(t *testing.T) {
	scanner, le, err := ReadTextFile(bytes.NewReader(nil), nil)
	assert.NoError(t, err)
	assert.Equal(t, "\n", le)
	ok := scanner.Scan()
	assert.False(t, ok)
	assert.NoError(t, scanner.Err())
}
// Test_readTextFileFirstLineNotUTF8 checks that non-UTF-8 content does not
// make ReadTextFile itself fail, but causes scanning to stop with ErrBinaryFile.
func Test_readTextFileFirstLineNotUTF8(t *testing.T) {
	scanner, _, err := ReadTextFile(bytes.NewReader([]byte{0xFF, 0xFF}), nil)
	// method itself doesn't return an error, only the scanning fails for utf8.
	assert.NotNil(t, scanner)
	assert.NoError(t, err)
	ok := scanner.Scan()
	assert.False(t, ok)
	assert.ErrorIs(t, scanner.Err(), ErrBinaryFile)
}
// Test_readTextFileNoLineEnding checks that a file without a trailing line
// ending still yields its single line and reports the default ending ("\n").
func Test_readTextFileNoLineEnding(t *testing.T) {
	scanner, le, err := ReadTextFile(bytes.NewReader([]byte("abc")), nil)
	assert.NoError(t, err)
	assert.Equal(t, "\n", le)
	ok := scanner.Scan()
	assert.True(t, ok)
	assert.NoError(t, scanner.Err())
	assert.Equal(t, "abc", scanner.Text())
	ok = scanner.Scan()
	assert.False(t, ok)
	assert.NoError(t, scanner.Err())
	assert.Nil(t, scanner.Bytes())
}
// Test_readTextFileLineEndingLF checks that LF line endings are detected and
// that the ending is kept as part of the scanned line.
func Test_readTextFileLineEndingLF(t *testing.T) {
	scanner, le, err := ReadTextFile(bytes.NewReader([]byte("abc\n")), nil)
	assert.NoError(t, err)
	assert.Equal(t, "\n", le)
	ok := scanner.Scan()
	assert.True(t, ok)
	assert.NoError(t, scanner.Err())
	assert.Equal(t, "abc\n", scanner.Text())
	ok = scanner.Scan()
	assert.False(t, ok)
	assert.NoError(t, scanner.Err())
	assert.Nil(t, scanner.Bytes())
}
// Test_readTextFileLineEndingCRLF checks that CRLF line endings are detected
// and kept as part of the scanned line.
func Test_readTextFileLineEndingCRLF(t *testing.T) {
	scanner, le, err := ReadTextFile(bytes.NewReader([]byte("abc\r\n")), nil)
	assert.NoError(t, err)
	assert.Equal(t, "\r\n", le)
	ok := scanner.Scan()
	assert.True(t, ok)
	assert.NoError(t, scanner.Err())
	assert.Equal(t, "abc\r\n", scanner.Text())
	ok = scanner.Scan()
	assert.False(t, ok)
	assert.NoError(t, scanner.Err())
	assert.Nil(t, scanner.Bytes())
}
// Test_readTextFileLineEndingMultiple checks that with mixed endings the
// reported line ending is taken from the first line (here CRLF).
func Test_readTextFileLineEndingMultiple(t *testing.T) {
	scanner, le, err := ReadTextFile(bytes.NewReader([]byte("abc\r\nd\n")), nil)
	assert.NoError(t, err)
	assert.Equal(t, "\r\n", le)
	ok := scanner.Scan()
	assert.True(t, ok)
	assert.NoError(t, scanner.Err())
	assert.Equal(t, "abc\r\n", scanner.Text())
	ok = scanner.Scan()
	assert.True(t, ok)
	assert.NoError(t, scanner.Err())
	assert.Equal(t, "d\n", scanner.Text())
	ok = scanner.Scan()
	assert.False(t, ok)
	assert.NoError(t, scanner.Err())
	assert.Nil(t, scanner.Bytes())
}
// Test_readTextFileLineEndingReplacementEmpty checks that passing an empty
// replacement strips the original line ending from the scanned lines.
func Test_readTextFileLineEndingReplacementEmpty(t *testing.T) {
	scanner, le, err := ReadTextFile(bytes.NewReader([]byte("abc\r\n")), ptr.Of(""))
	assert.NoError(t, err)
	assert.Equal(t, "\r\n", le)
	ok := scanner.Scan()
	assert.True(t, ok)
	assert.NoError(t, scanner.Err())
	assert.Equal(t, "abc", scanner.Text())
}
// Test_readTextFileLineEndingReplacement checks that the original CRLF ending
// is replaced with the provided "\n" and that a final line without an ending
// is passed through unchanged.
func Test_readTextFileLineEndingReplacement(t *testing.T) {
	scanner, le, err := ReadTextFile(bytes.NewReader([]byte("abc\r\nd")), ptr.Of("\n"))
	assert.NoError(t, err)
	assert.Equal(t, "\r\n", le)
	ok := scanner.Scan()
	assert.True(t, ok)
	assert.NoError(t, scanner.Err())
	assert.Equal(t, "abc\n", scanner.Text())
	ok = scanner.Scan()
	assert.True(t, ok)
	assert.NoError(t, scanner.Err())
	assert.Equal(t, "d", scanner.Text())
	ok = scanner.Scan()
	assert.False(t, ok)
	assert.NoError(t, scanner.Err())
	assert.Nil(t, scanner.Bytes())
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/git/parser/diff_cut_test.go | git/parser/diff_cut_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import (
"reflect"
"strings"
"testing"
)
//nolint:gocognit // it's a unit test!!!
func TestDiffCut(t *testing.T) {
	// Diff of a file where line "0" was added at the top, lines "6"-"8" were
	// collapsed into a single line "6,7,8", and lines "13"-"15" were removed.
	const input = `diff --git a/test.txt b/test.txt
--- a/test.txt
+++ b/test.txt
@@ -1,15 +1,11 @@
+0
 1
 2
 3
 4
 5
-6
-7
-8
+6,7,8
 9
 10
 11
 12
-13
-14
-15
`
	tests := []struct {
		name         string
		params       DiffCutParams
		expCutHeader string
		expCut       []string
		expError     error
	}{
		{
			name: "at-'+6,7,8':new",
			params: DiffCutParams{
				LineStart: 7, LineStartNew: true,
				LineEnd: 7, LineEndNew: true,
				BeforeLines: 0, AfterLines: 0,
				LineLimit: 1000,
			},
			expCutHeader: "@@ -6,3 +7 @@",
			expCut:       []string{"-6", "-7", "-8", "+6,7,8"},
			expError:     nil,
		},
		{
			name: "at-'+6,7,8':new-with-lines-around",
			params: DiffCutParams{
				LineStart: 7, LineStartNew: true,
				LineEnd: 7, LineEndNew: true,
				BeforeLines: 1, AfterLines: 2,
				LineLimit: 1000,
			},
			expCutHeader: "@@ -5,6 +6,4 @@",
			expCut:       []string{" 5", "-6", "-7", "-8", "+6,7,8", " 9", " 10"},
			expError:     nil,
		},
		{
			name: "at-'+0':new-with-lines-around",
			params: DiffCutParams{
				LineStart: 1, LineStartNew: true,
				LineEnd: 1, LineEndNew: true,
				BeforeLines: 3, AfterLines: 3,
				LineLimit: 1000,
			},
			expCutHeader: "@@ -1,3 +1,4 @@",
			expCut:       []string{"+0", " 1", " 2", " 3"},
			expError:     nil,
		},
		{
			name: "at-'-13':one-with-lines-around",
			params: DiffCutParams{
				LineStart: 13, LineStartNew: false,
				LineEnd: 13, LineEndNew: false,
				BeforeLines: 1, AfterLines: 1,
				LineLimit: 1000,
			},
			expCutHeader: "@@ -12,3 +11 @@",
			expCut:       []string{" 12", "-13", "-14"},
			expError:     nil,
		},
		{
			name: "at-'-13':mixed",
			params: DiffCutParams{
				LineStart: 7, LineStartNew: false,
				LineEnd: 7, LineEndNew: true,
				BeforeLines: 0, AfterLines: 0,
				LineLimit: 0,
			},
			expCutHeader: "@@ -7,2 +7 @@",
			expCut:       []string{"-7", "-8", "+6,7,8"},
			expError:     nil,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			hunkHeader, linesHunk, err := DiffCut(
				strings.NewReader(input),
				test.params,
			)
			//nolint:errorlint // this error will not be wrapped
			if want, got := test.expError, err; want != got {
				t.Errorf("error mismatch: want=%v got=%v", want, got)
				return
			}
			if err != nil {
				return
			}
			// The returned hunk header must start at the requested line on
			// whichever side of the diff LineStartNew selects.
			if test.params.LineStartNew && test.params.LineStart != hunkHeader.NewLine {
				t.Errorf("hunk line start mismatch: want=%d got=%d", test.params.LineStart, hunkHeader.NewLine)
			}
			if !test.params.LineStartNew && test.params.LineStart != hunkHeader.OldLine {
				t.Errorf("hunk line start mismatch: want=%d got=%d", test.params.LineStart, hunkHeader.OldLine)
			}
			if want, got := test.expCutHeader, linesHunk.String(); want != got {
				t.Errorf("header mismatch: want=%s got=%s", want, got)
			}
			if want, got := test.expCut, linesHunk.Lines; !reflect.DeepEqual(want, got) {
				t.Errorf("lines mismatch: want=%s got=%s", want, got)
			}
		})
	}
}
// TestDiffCutNoEOLInOld cuts a diff in which the old version of the file has
// no newline at its end (the "\ No newline at end of file" marker follows the
// removed line); the marker must not appear in the cut.
func TestDiffCutNoEOLInOld(t *testing.T) {
	const input = `diff --git a/test.txt b/test.txt
index 541cb64f..047d7ee2 100644
--- a/test.txt
+++ b/test.txt
@@ -1 +1,4 @@
-test
\ No newline at end of file
+123
+456
+789
`
	hh, h, err := DiffCut(
		strings.NewReader(input),
		DiffCutParams{
			LineStart:    3,
			LineStartNew: true,
			LineEnd:      3,
			LineEndNew:   true,
			BeforeLines:  1,
			AfterLines:   1,
			LineLimit:    100,
		},
	)
	if err != nil {
		t.Errorf("got error: %v", err)
		return
	}
	expectedHH := HunkHeader{OldLine: 2, OldSpan: 0, NewLine: 3, NewSpan: 1}
	if expectedHH != hh {
		t.Errorf("expected hunk header: %+v, but got: %+v", expectedHH, hh)
	}
	expectedHunkLines := Hunk{
		HunkHeader: HunkHeader{OldLine: 2, OldSpan: 0, NewLine: 2, NewSpan: 2},
		Lines:      []string{"+456", "+789"},
	}
	if !reflect.DeepEqual(expectedHunkLines, h) {
		t.Errorf("expected hunk header: %+v, but got: %+v", expectedHunkLines, h)
	}
}
// TestDiffCutNoEOLInNew cuts a diff in which the new version of the file has
// no newline at its end; the trailing "\ No newline at end of file" marker
// must be skipped while all removed and added lines are returned.
func TestDiffCutNoEOLInNew(t *testing.T) {
	const input = `diff --git a/test.txt b/test.txt
index af7864ba..541cb64f 100644
--- a/test.txt
+++ b/test.txt
@@ -1,3 +1 @@
-123
-456
-789
+test
\ No newline at end of file
`
	hh, h, err := DiffCut(
		strings.NewReader(input),
		DiffCutParams{
			LineStart:    1,
			LineStartNew: true,
			LineEnd:      1,
			LineEndNew:   true,
			BeforeLines:  0,
			AfterLines:   0,
			LineLimit:    100,
		},
	)
	if err != nil {
		t.Errorf("got error: %v", err)
		return
	}
	expectedHH := HunkHeader{OldLine: 1, OldSpan: 3, NewLine: 1, NewSpan: 1}
	if expectedHH != hh {
		t.Errorf("expected hunk header: %+v, but got: %+v", expectedHH, hh)
	}
	expectedHunkLines := Hunk{
		HunkHeader: HunkHeader{OldLine: 1, OldSpan: 3, NewLine: 1, NewSpan: 1},
		Lines:      []string{"-123", "-456", "-789", "+test"},
	}
	if !reflect.DeepEqual(expectedHunkLines, h) {
		t.Errorf("expected hunk header: %+v, but got: %+v", expectedHunkLines, h)
	}
}
// TestBlobCut extracts line ranges directly from raw (non-diff) file content,
// covering context expansion, line limits, and out-of-range requests.
func TestBlobCut(t *testing.T) {
	// six lines, the last one without a trailing newline
	const input = `1
2
3
4
5
6`
	tests := []struct {
		name         string
		params       DiffCutParams
		expCutHeader CutHeader
		expCut       Cut
		expError     error
	}{
		{
			name:         "first 3 lines",
			params:       DiffCutParams{LineStart: 1, LineEnd: 3, LineLimit: 40},
			expCutHeader: CutHeader{Line: 1, Span: 3},
			expCut:       Cut{CutHeader: CutHeader{Line: 1, Span: 3}, Lines: []string{"1", "2", "3"}},
		},
		{
			name:         "last 2 lines",
			params:       DiffCutParams{LineStart: 5, LineEnd: 6, LineLimit: 40},
			expCutHeader: CutHeader{Line: 5, Span: 2},
			expCut:       Cut{CutHeader: CutHeader{Line: 5, Span: 2}, Lines: []string{"5", "6"}},
		},
		{
			name:         "first 3 lines with 1 more",
			params:       DiffCutParams{LineStart: 1, LineEnd: 3, BeforeLines: 1, AfterLines: 1, LineLimit: 40},
			expCutHeader: CutHeader{Line: 1, Span: 3},
			expCut:       Cut{CutHeader: CutHeader{Line: 1, Span: 4}, Lines: []string{"1", "2", "3", "4"}},
		},
		{
			name:         "last 2 lines with 2 more",
			params:       DiffCutParams{LineStart: 5, LineEnd: 6, BeforeLines: 2, AfterLines: 2, LineLimit: 40},
			expCutHeader: CutHeader{Line: 5, Span: 2},
			expCut:       Cut{CutHeader: CutHeader{Line: 3, Span: 4}, Lines: []string{"3", "4", "5", "6"}},
		},
		{
			name:         "mid range",
			params:       DiffCutParams{LineStart: 2, LineEnd: 4, BeforeLines: 100, AfterLines: 100, LineLimit: 40},
			expCutHeader: CutHeader{Line: 2, Span: 3},
			expCut:       Cut{CutHeader: CutHeader{Line: 1, Span: 6}, Lines: []string{"1", "2", "3", "4", "5", "6"}},
		},
		{
			name:     "out of range 1",
			params:   DiffCutParams{LineStart: 15, LineEnd: 17, LineLimit: 40},
			expError: ErrHunkNotFound,
		},
		{
			name:     "out of range 2",
			params:   DiffCutParams{LineStart: 5, LineEnd: 7, BeforeLines: 1, AfterLines: 1, LineLimit: 40},
			expError: ErrHunkNotFound,
		},
		{
			name:         "limited",
			params:       DiffCutParams{LineStart: 3, LineEnd: 4, BeforeLines: 1, AfterLines: 1, LineLimit: 3},
			expCutHeader: CutHeader{Line: 3, Span: 2},
			expCut:       Cut{CutHeader: CutHeader{Line: 2, Span: 3}, Lines: []string{"2", "3", "4"}},
		},
		{
			name:         "unlimited",
			params:       DiffCutParams{LineStart: 1, LineEnd: 6, BeforeLines: 1, AfterLines: 1, LineLimit: 0},
			expCutHeader: CutHeader{Line: 1, Span: 6},
			expCut:       Cut{CutHeader: CutHeader{Line: 1, Span: 6}, Lines: []string{"1", "2", "3", "4", "5", "6"}},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			ch, c, err := BlobCut(strings.NewReader(input), test.params)
			if err != test.expError { //nolint:errorlint // it's a unit test and no errors are wrapped
				t.Errorf("test failed with error: %s", err.Error())
				return
			}
			if want, got := test.expCutHeader, ch; want != got {
				t.Errorf("cut header: want=%+v got: %+v", want, got)
			}
			if want, got := test.expCut, c; !reflect.DeepEqual(want, got) {
				t.Errorf("cut: want=%+v got: %+v", want, got)
			}
		})
	}
}
// TestStrCircBuf checks the circular string buffer used by DiffCut to retain
// the most recent "before" context lines, including zero-capacity and
// wrap-around behavior.
func TestStrCircBuf(t *testing.T) {
	tests := []struct {
		name string
		cap  int
		feed []string
		exp  []string
	}{
		{name: "empty", cap: 10, feed: nil, exp: []string{}},
		{name: "zero-cap", cap: 0, feed: []string{"A", "B"}, exp: []string{}},
		{name: "one", cap: 5, feed: []string{"A"}, exp: []string{"A"}},
		{name: "two", cap: 3, feed: []string{"A", "B"}, exp: []string{"A", "B"}},
		{name: "cap", cap: 3, feed: []string{"A", "B", "C"}, exp: []string{"A", "B", "C"}},
		{name: "cap+1", cap: 3, feed: []string{"A", "B", "C", "D"}, exp: []string{"B", "C", "D"}},
		{name: "cap+2", cap: 3, feed: []string{"A", "B", "C", "D", "E"}, exp: []string{"C", "D", "E"}},
		{name: "cap*2+1", cap: 2, feed: []string{"A", "B", "C", "D", "E"}, exp: []string{"D", "E"}},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			b := newStrCircBuf(test.cap)
			for _, s := range test.feed {
				b.push(s)
			}
			if want, got := test.exp, b.lines(); !reflect.DeepEqual(want, got) {
				t.Errorf("want=%v, got=%v", want, got)
			}
		})
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/git/parser/diff_cut.go | git/parser/diff_cut.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import (
"bufio"
"errors"
"fmt"
"io"
"unicode/utf8"
)
// DiffFileHeader describes the file header of a single file in git diff
// output (the "diff --git a/... b/..." line and its extended header lines).
type DiffFileHeader struct {
	OldFileName string
	NewFileName string
	// Extensions holds the extended header lines keyed by extension name
	// (populated by ParseDiffFileHeader — verify exact keys against it).
	Extensions map[string]string
}
// DiffCutParams specifies which line range DiffCut or BlobCut should extract.
type DiffCutParams struct {
	// LineStart is the first line of the requested range.
	LineStart int
	// LineStartNew selects whether LineStart refers to the new (+) or old (-) side.
	LineStartNew bool
	// LineEnd is the last line (inclusive) of the requested range.
	LineEnd int
	// LineEndNew selects whether LineEnd refers to the new or the old side.
	LineEndNew bool
	// BeforeLines is the number of extra context lines to include before the range.
	BeforeLines int
	// AfterLines is the number of extra context lines to include after the range.
	AfterLines int
	// LineLimit caps the number of returned lines; zero means no limit.
	LineLimit int
}
// DiffCut parses git diff output that should consist of a single hunk
// (usually generated with large value passed to the "--unified" parameter)
// and returns lines specified with the parameters.
//
//nolint:funlen,gocognit,nestif,gocognit,gocyclo,cyclop // it's actually very readable
func DiffCut(r io.Reader, params DiffCutParams) (HunkHeader, Hunk, error) {
	scanner := bufio.NewScanner(r)
	var err error
	var hunkHeader HunkHeader
	// Skip everything until the file header, then the hunk header, is found.
	if _, err = scanFileHeader(scanner); err != nil {
		return HunkHeader{}, Hunk{}, err
	}
	if hunkHeader, err = scanHunkHeader(scanner); err != nil {
		return HunkHeader{}, Hunk{}, err
	}
	// Line counters for the old and the new side of the diff.
	currentOldLine := hunkHeader.OldLine
	currentNewLine := hunkHeader.NewLine
	var inCut bool
	var diffCutHeader HunkHeader
	var diffCut []string
	// Circular buffer retaining the last BeforeLines lines seen before the cut.
	linesBeforeBuf := newStrCircBuf(params.BeforeLines)
	// Walk the hunk until the requested end line is passed on whichever side
	// of the diff LineEndNew selects.
	for (!params.LineEndNew || currentNewLine <= params.LineEnd) &&
		(params.LineEndNew || currentOldLine <= params.LineEnd) {
		var line string
		var action diffAction
		line, action, err = scanHunkLine(scanner)
		if err != nil {
			return HunkHeader{}, Hunk{}, err
		}
		// An empty line from scanHunkLine signals the end of the hunk.
		if line == "" {
			err = io.EOF
			break
		}
		if params.LineStartNew && currentNewLine < params.LineStart ||
			!params.LineStartNew && currentOldLine < params.LineStart {
			// not yet in the requested line range
			linesBeforeBuf.push(line)
		} else {
			if !inCut {
				// remember where the cut starts
				diffCutHeader.NewLine = currentNewLine
				diffCutHeader.OldLine = currentOldLine
			}
			inCut = true
			// Removed lines exist only on the old side, added lines only on
			// the new side; context lines count on both.
			if action != actionRemoved {
				diffCutHeader.NewSpan++
			}
			if action != actionAdded {
				diffCutHeader.OldSpan++
			}
			diffCut = append(diffCut, line)
			if params.LineLimit > 0 && len(diffCut) >= params.LineLimit {
				break // safety break
			}
		}
		// increment the line numbers
		if action != actionRemoved {
			currentNewLine++
		}
		if action != actionAdded {
			currentOldLine++
		}
	}
	if !inCut {
		return HunkHeader{}, Hunk{}, ErrHunkNotFound
	}
	var (
		linesBefore []string
		linesAfter  []string
	)
	linesBefore = linesBeforeBuf.lines()
	// Collect the AfterLines context lines, unless the hunk has already ended.
	if !errors.Is(err, io.EOF) {
		for i := 0; i < params.AfterLines; i++ {
			line, _, err := scanHunkLine(scanner)
			if err != nil {
				return HunkHeader{}, Hunk{}, err
			}
			if line == "" {
				break
			}
			linesAfter = append(linesAfter, line)
		}
	}
	// Derive the header for the full returned hunk (cut plus context) by
	// extending the cut header with the before/after lines.
	diffCutHeaderLines := diffCutHeader
	for _, s := range linesBefore {
		action := diffAction(s[0])
		if action != actionRemoved {
			diffCutHeaderLines.NewLine--
			diffCutHeaderLines.NewSpan++
		}
		if action != actionAdded {
			diffCutHeaderLines.OldLine--
			diffCutHeaderLines.OldSpan++
		}
	}
	for _, s := range linesAfter {
		action := diffAction(s[0])
		if action != actionRemoved {
			diffCutHeaderLines.NewSpan++
		}
		if action != actionAdded {
			diffCutHeaderLines.OldSpan++
		}
	}
	return diffCutHeader, Hunk{
		HunkHeader: diffCutHeaderLines,
		Lines:      concat(linesBefore, diffCut, linesAfter),
	}, nil
}
// scanFileHeader consumes input lines until it finds a file header line
// ("diff --git ...") and returns its parsed form. It returns ErrHunkNotFound
// if the input ends before a file header is encountered.
func scanFileHeader(scan *bufio.Scanner) (DiffFileHeader, error) {
	for scan.Scan() {
		if header, ok := ParseDiffFileHeader(scan.Text()); ok {
			return header, nil
		}
	}
	if err := scan.Err(); err != nil {
		return DiffFileHeader{}, err
	}
	return DiffFileHeader{}, ErrHunkNotFound
}
// scanHunkHeader consumes input lines until it finds a hunk header line
// ("@@ -a,b +c,d @@") and returns its parsed form. It returns ErrHunkNotFound
// if the input ends before a hunk header is encountered.
func scanHunkHeader(scan *bufio.Scanner) (HunkHeader, error) {
	for scan.Scan() {
		if header, ok := ParseDiffHunkHeader(scan.Text()); ok {
			return header, nil
		}
	}
	if err := scan.Err(); err != nil {
		return HunkHeader{}, err
	}
	return HunkHeader{}, ErrHunkNotFound
}
// diffAction is the leading character of a hunk line; it determines how the
// line contributes to the old and the new version of the file.
type diffAction byte

const (
	actionUnchanged diffAction = ' '
	actionRemoved   diffAction = '-'
	actionAdded     diffAction = '+'
)
// scanHunkLine reads the next content line of a hunk.
//
// Lines of the form "\ No newline at end of file" are skipped transparently.
// A clean end of the hunk (end of input, or a line that is not part of the
// hunk) is reported as an empty line with a nil error. An empty input line is
// malformed diff output and is reported as ErrHunkNotFound.
func scanHunkLine(scan *bufio.Scanner) (string, diffAction, error) {
	for {
		if !scan.Scan() {
			// Returning explicit values (rather than named results, as the
			// previous goto-based implementation did) guarantees the returned
			// line is empty even when end of input is reached immediately
			// after a skipped "\" marker line — the old code leaked the stale
			// marker text to the caller in that case.
			return "", actionUnchanged, scan.Err()
		}

		line := scan.Text()
		if line == "" {
			// should not happen: empty line in diff output
			return "", actionUnchanged, ErrHunkNotFound
		}

		action := diffAction(line[0])
		if action == '\\' { // skip the "\ No newline at end of file" line
			continue
		}

		if action != actionRemoved && action != actionAdded && action != actionUnchanged {
			// treat any other prefix as the end of the hunk
			return "", actionUnchanged, nil
		}

		return line, action, nil
	}
}
// BlobCut parses raw file and returns lines specified with the parameter.
// It returns ErrBinaryFile for non-UTF-8 content or overlong lines, and
// ErrHunkNotFound if the requested line range is not fully present.
func BlobCut(r io.Reader, params DiffCutParams) (CutHeader, Cut, error) {
	scanner := bufio.NewScanner(r)
	var (
		err               error
		lineNumber        int
		inCut             bool
		cutStart, cutSpan int
		cutLines          []string
	)
	// The extended range additionally includes the surrounding context lines.
	extStart := params.LineStart - params.BeforeLines
	extEnd := params.LineEnd + params.AfterLines
	// Number of requested lines still missing; used to detect an
	// out-of-range request after scanning finishes.
	linesNeeded := params.LineEnd - params.LineStart + 1
	for {
		if !scanner.Scan() {
			err = scanner.Err()
			break
		}
		lineNumber++
		line := scanner.Text()
		// A line that is not valid UTF-8 marks the whole file as binary.
		if !utf8.ValidString(line) {
			return CutHeader{}, Cut{}, ErrBinaryFile
		}
		if lineNumber > extEnd {
			break // exceeded the requested line range
		}
		if lineNumber < extStart {
			// not yet in the requested line range
			continue
		}
		if !inCut {
			cutStart = lineNumber
			inCut = true
		}
		cutLines = append(cutLines, line)
		cutSpan++
		if lineNumber >= params.LineStart && lineNumber <= params.LineEnd {
			linesNeeded--
		}
		if params.LineLimit > 0 && len(cutLines) >= params.LineLimit {
			break
		}
	}
	if errors.Is(err, bufio.ErrTooLong) {
		// By default, the max token size is 65536 (bufio.MaxScanTokenSize).
		// If the file contains a line that is longer than this we treat it as a binary file.
		return CutHeader{}, Cut{}, ErrBinaryFile
	}
	if err != nil && !errors.Is(err, io.EOF) {
		return CutHeader{}, Cut{}, fmt.Errorf("failed to parse blob cut: %w", err)
	}
	if !inCut || linesNeeded > 0 {
		return CutHeader{}, Cut{}, ErrHunkNotFound
	}
	// the cut header is hunk-like header (with Line and Span) that describes the requested lines exactly
	ch := CutHeader{Line: params.LineStart, Span: params.LineEnd - params.LineStart + 1}
	// the cut includes the requested lines and few more lines specified with the BeforeLines and AfterLines.
	c := Cut{CutHeader: CutHeader{Line: cutStart, Span: cutSpan}, Lines: cutLines}
	return ch, c, nil
}
// LimitLineLen trims, in place, every line that is longer than maxLen runes
// so that it contains at most maxLen runes followed by a single "…" rune
// marking the truncation. Shorter lines are left untouched.
func LimitLineLen(lines *[]string, maxLen int) {
	for i, line := range *lines {
		runeCount := 0
		for byteIdx := range line {
			runeCount++
			if runeCount > maxLen {
				// keep the first maxLen runes and mark the line as trimmed
				(*lines)[i] = line[:byteIdx] + "…"
				break
			}
		}
	}
}
// strCircBuf is a fixed-capacity circular buffer of strings: once full,
// pushing a new entry overwrites the oldest one.
type strCircBuf struct {
	// head is the index of the most recently pushed entry (-1 while empty).
	head    int
	entries []string
}

// newStrCircBuf creates a circular buffer that retains the last size entries.
func newStrCircBuf(size int) strCircBuf {
	return strCircBuf{
		head:    -1,
		entries: make([]string, 0, size),
	}
}

// push adds s to the buffer, evicting the oldest entry when the buffer is full.
func (b *strCircBuf) push(s string) {
	capacity := cap(b.entries)
	switch {
	case capacity == 0:
		// a zero-capacity buffer stores nothing
	case len(b.entries) < capacity:
		// still growing: append until the capacity is reached
		b.head++
		b.entries = append(b.entries, s)
	default:
		// full: advance the head with wrap-around and overwrite the oldest entry
		b.head = (b.head + 1) % capacity
		b.entries[b.head] = s
	}
}

// lines returns the buffered entries ordered from oldest to newest.
func (b *strCircBuf) lines() []string {
	n := cap(b.entries)
	if len(b.entries) < n {
		// not full yet: entries are already in insertion order
		return b.entries
	}
	res := make([]string, 0, n)
	for i := 1; i <= n; i++ {
		res = append(res, b.entries[(b.head+i)%n])
	}
	return res
}
// concat merges any number of slices into a single freshly allocated slice,
// preserving element order.
func concat[T any](a ...[]T) []T {
	var total int
	for _, part := range a {
		total += len(part)
	}
	res := make([]T, 0, total)
	for _, part := range a {
		res = append(res, part...)
	}
	return res
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/git/parser/raw_header_test.go | git/parser/raw_header_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import (
"testing"
"time"
"github.com/harness/gitness/types"
)
// TestObjectHeaderIdentity parses "author"/"committer"-style identity header
// lines and checks the extracted name, email, and timezone-aware timestamp.
func TestObjectHeaderIdentity(t *testing.T) {
	tzBelgrade, _ := time.LoadLocation("Europe/Belgrade")
	tzCalifornia, _ := time.LoadLocation("America/Los_Angeles")
	tests := []struct {
		name     string
		input    string
		expected types.Signature
	}{
		{
			name:  "test1",
			input: "Vincent Willem van Gogh <van.gogh@harness.io> 1748779200 +0200",
			expected: types.Signature{
				Identity: types.Identity{Name: "Vincent Willem van Gogh", Email: "van.gogh@harness.io"},
				When:     time.Date(2025, time.June, 1, 14, 0, 0, 0, tzBelgrade),
			},
		},
		{
			name:  "test2",
			input: "徳川家康 <tokugawa@harness.io> 1748779200 -0700",
			expected: types.Signature{
				Identity: types.Identity{Name: "徳川家康", Email: "tokugawa@harness.io"},
				When:     time.Date(2025, time.June, 1, 5, 0, 0, 0, tzCalifornia),
			},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			name, email, when, err := ObjectHeaderIdentity(test.input)
			if err != nil {
				t.Error(err)
				return
			}
			if test.expected.Identity.Name != name {
				t.Errorf("name mismatch - expected: %v, got: %v", test.expected.Identity.Name, name)
			}
			if test.expected.Identity.Email != email {
				t.Errorf("email mismatch - expected: %v, got: %v", test.expected.Identity.Email, email)
			}
			// compare instants, not locations: Equal ignores the time zone
			if !test.expected.When.Equal(when) {
				t.Errorf("timestamp mismatch - expected: %s, got: %s",
					test.expected.When.Format(time.RFC3339), when.Format(time.RFC3339))
			}
		})
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/git/parser/raw_object_test.go | git/parser/raw_object_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import (
"context"
"strings"
"testing"
"time"
"github.com/harness/gitness/app/services/publickey/keyssh"
"github.com/harness/gitness/git/sha"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/google/go-cmp/cmp"
"golang.org/x/crypto/ssh"
)
func TestObject(t *testing.T) {
tests := []struct {
name string
data string
want ObjectRaw
}{
{
name: "empty",
data: "",
want: ObjectRaw{
Headers: []ObjectHeader{},
Message: "",
},
},
{
name: "no_header",
data: "\nline1\nline2\n",
want: ObjectRaw{
Headers: []ObjectHeader{},
Message: "line1\nline2\n",
},
},
{
name: "no_body",
data: "header 1\nheader 2\n",
want: ObjectRaw{
Headers: []ObjectHeader{
{Type: "header", Value: "1\n"},
{Type: "header", Value: "2\n"},
},
Message: "",
},
},
{
name: "dummy_content",
data: "header 1\nheader 2\n\nblah blah\nblah",
want: ObjectRaw{
Headers: []ObjectHeader{
{Type: "header", Value: "1\n"},
{Type: "header", Value: "2\n"},
},
Message: "blah blah\nblah",
},
},
{
name: "dummy_content_multiline_header",
data: "header-simple 1\nheader-multiline line1\n line2\nheader-three blah\n\nblah blah\nblah",
want: ObjectRaw{
Headers: []ObjectHeader{
{Type: "header-simple", Value: "1\n"},
{Type: "header-multiline", Value: "line1\nline2\n"},
{Type: "header-three", Value: "blah\n"},
},
Message: "blah blah\nblah",
},
},
{
name: "simple_commit",
data: `tree a32348a67ba786383cedddccd79944992e1656b9
parent 286d9081dfddd0b95e43f98f32984b782678fc43
author Marko Gaćeša <marko.gacesa@harness.io> 1748009627 +0200
committer Marko Gaćeša <marko.gacesa@harness.io> 1748012917 +0200
Test commit
`,
want: ObjectRaw{
Headers: []ObjectHeader{
{Type: "tree", Value: "a32348a67ba786383cedddccd79944992e1656b9\n"},
{Type: "parent", Value: "286d9081dfddd0b95e43f98f32984b782678fc43\n"},
{Type: "author", Value: "Marko Gaćeša <marko.gacesa@harness.io> 1748009627 +0200\n"},
{Type: "committer", Value: "Marko Gaćeša <marko.gacesa@harness.io> 1748012917 +0200\n"},
},
Message: "Test commit\n",
SignedContent: nil,
Signature: nil,
SignatureType: "",
},
},
{
name: "signed_commit",
data: `tree 1e6502c1add2beb75875d261ca28abdf6e3d9091
parent a74b6a06bcf7f0d7b902af492826c20f9835a932
author Marko Gaćeša <marko.gacesa@harness.io> 1749221807 +0200
committer Marko Gaćeša <marko.gacesa@harness.io> 1749221807 +0200
gpgsig -----BEGIN SSH SIGNATURE-----
U1NIU0lHAAAAAQAAADMAAAALc3NoLWVkMjU1MTkAAAAgEM1i8vha2gQ/ZXHinPejh0hS4C
x8VV1M2uwW6tglOswAAAADZ2l0AAAAAAAAAAZzaGE1MTIAAABTAAAAC3NzaC1lZDI1NTE5
AAAAQDJwTh2XHcewg3MXY8hxnH1WuSAjuQPzcjaoX0Q1x923k4y2Y2hXd/cN6l+PdGo71B
8+HfQ6jFa7/UU4cZu4QAc=
-----END SSH SIGNATURE-----
this is a commit message
`,
want: ObjectRaw{
Headers: []ObjectHeader{
{Type: "tree", Value: "1e6502c1add2beb75875d261ca28abdf6e3d9091\n"},
{Type: "parent", Value: "a74b6a06bcf7f0d7b902af492826c20f9835a932\n"},
{Type: "author", Value: "Marko Gaćeša <marko.gacesa@harness.io> 1749221807 +0200\n"},
{Type: "committer", Value: "Marko Gaćeša <marko.gacesa@harness.io> 1749221807 +0200\n"},
},
Message: "this is a commit message\n",
SignedContent: []byte(`tree 1e6502c1add2beb75875d261ca28abdf6e3d9091
parent a74b6a06bcf7f0d7b902af492826c20f9835a932
author Marko Gaćeša <marko.gacesa@harness.io> 1749221807 +0200
committer Marko Gaćeša <marko.gacesa@harness.io> 1749221807 +0200
this is a commit message
`),
Signature: []byte(`-----BEGIN SSH SIGNATURE-----
U1NIU0lHAAAAAQAAADMAAAALc3NoLWVkMjU1MTkAAAAgEM1i8vha2gQ/ZXHinPejh0hS4C
x8VV1M2uwW6tglOswAAAADZ2l0AAAAAAAAAAZzaGE1MTIAAABTAAAAC3NzaC1lZDI1NTE5
AAAAQDJwTh2XHcewg3MXY8hxnH1WuSAjuQPzcjaoX0Q1x923k4y2Y2hXd/cN6l+PdGo71B
8+HfQ6jFa7/UU4cZu4QAc=
-----END SSH SIGNATURE-----
`),
SignatureType: "SSH SIGNATURE",
},
},
{
name: "signed_tag",
data: `object 7a56ee7136c7d4882f88db68c0e629b81a47bfc9
type commit
tag test
tagger Marko Gaćeša <marko.gacesa@harness.io> 1749035203 +0200
This is a test tag
-----BEGIN SSH SIGNATURE-----
U1NIU0lHAAAAAQAAADMAAAALc3NoLWVkMjU1MTkAAAAgEM1i8vha2gQ/ZXHinPejh0hS4C
x8VV1M2uwW6tglOswAAAADZ2l0AAAAAAAAAAZzaGE1MTIAAABTAAAAC3NzaC1lZDI1NTE5
AAAAQDzfgUo2uoz/VCuv74QnweB16XS6FGmaDkefMcVpYJdz88WRG99yhmYC0ca6QYiaj4
ttNpubwUBQRPTo8z5Aows=
-----END SSH SIGNATURE-----
`,
want: ObjectRaw{
Headers: []ObjectHeader{
{Type: "object", Value: "7a56ee7136c7d4882f88db68c0e629b81a47bfc9\n"},
{Type: "type", Value: "commit\n"},
{Type: "tag", Value: "test\n"},
{Type: "tagger", Value: "Marko Gaćeša <marko.gacesa@harness.io> 1749035203 +0200\n"},
},
Message: "This is a test tag\n",
SignedContent: []byte(`object 7a56ee7136c7d4882f88db68c0e629b81a47bfc9
type commit
tag test
tagger Marko Gaćeša <marko.gacesa@harness.io> 1749035203 +0200
This is a test tag
`),
Signature: []byte(`-----BEGIN SSH SIGNATURE-----
U1NIU0lHAAAAAQAAADMAAAALc3NoLWVkMjU1MTkAAAAgEM1i8vha2gQ/ZXHinPejh0hS4C
x8VV1M2uwW6tglOswAAAADZ2l0AAAAAAAAAAZzaGE1MTIAAABTAAAAC3NzaC1lZDI1NTE5
AAAAQDzfgUo2uoz/VCuv74QnweB16XS6FGmaDkefMcVpYJdz88WRG99yhmYC0ca6QYiaj4
ttNpubwUBQRPTo8z5Aows=
-----END SSH SIGNATURE-----
`),
SignatureType: "SSH SIGNATURE",
},
},
{
name: "merge_commit",
data: `tree bf7ecca7c6741453e16a0a92be5d9ccd779abcfa
parent 04617da8f3215c84ae4af39b8d734c3df2247347
parent 7077c29016c1be5465678c9ba25983937040dcb2
author Marko Gaćeša <marko.gacesa@harness.io> 1749219134 +0200
committer Marko Gaćeša <marko.gacesa@harness.io> 1749219134 +0200
mergetag object 7077c29016c1be5465678c9ba25983937040dcb2
type commit
tag v1.0.0
tagger Marko Gaćeša <marko.gacesa@harness.io> 1749218976 +0200
version 1
-----BEGIN SSH SIGNATURE-----
U1NIU0lHAAAAAQAAADMAAAALc3NoLWVkMjU1MTkAAAAgEM1i8vha2gQ/ZXHinPejh0hS4C
x8VV1M2uwW6tglOswAAAADZ2l0AAAAAAAAAAZzaGE1MTIAAABTAAAAC3NzaC1lZDI1NTE5
AAAAQG0+9xHX8+7AnbkV//QH7ZvrDoUcm6GrqWkTwHmgSqBsMa7X8aXOtcwPwNJvXpOl8E
prGrumXZoEXzcZMrCG5A0=
-----END SSH SIGNATURE-----
gpgsig -----BEGIN SSH SIGNATURE-----
U1NIU0lHAAAAAQAAADMAAAALc3NoLWVkMjU1MTkAAAAgEM1i8vha2gQ/ZXHinPejh0hS4C
x8VV1M2uwW6tglOswAAAADZ2l0AAAAAAAAAAZzaGE1MTIAAABTAAAAC3NzaC1lZDI1NTE5
AAAAQJpnz9Dsv6VleulZSzd3/PRGTJoPsUem0Waq4EbSRB7FDewjf11LRkkqNENiivT1pT
Rv18ZouJpO2LRIXdZpxAE=
-----END SSH SIGNATURE-----
Merge tag 'v1.0.0' into marko
version 1
`,
want: ObjectRaw{
Headers: []ObjectHeader{
{Type: "tree", Value: "bf7ecca7c6741453e16a0a92be5d9ccd779abcfa\n"},
{Type: "parent", Value: "04617da8f3215c84ae4af39b8d734c3df2247347\n"},
{Type: "parent", Value: "7077c29016c1be5465678c9ba25983937040dcb2\n"},
{Type: "author", Value: "Marko Gaćeša <marko.gacesa@harness.io> 1749219134 +0200\n"},
{Type: "committer", Value: "Marko Gaćeša <marko.gacesa@harness.io> 1749219134 +0200\n"},
{Type: "mergetag", Value: `object 7077c29016c1be5465678c9ba25983937040dcb2
type commit
tag v1.0.0
tagger Marko Gaćeša <marko.gacesa@harness.io> 1749218976 +0200
version 1
-----BEGIN SSH SIGNATURE-----
U1NIU0lHAAAAAQAAADMAAAALc3NoLWVkMjU1MTkAAAAgEM1i8vha2gQ/ZXHinPejh0hS4C
x8VV1M2uwW6tglOswAAAADZ2l0AAAAAAAAAAZzaGE1MTIAAABTAAAAC3NzaC1lZDI1NTE5
AAAAQG0+9xHX8+7AnbkV//QH7ZvrDoUcm6GrqWkTwHmgSqBsMa7X8aXOtcwPwNJvXpOl8E
prGrumXZoEXzcZMrCG5A0=
-----END SSH SIGNATURE-----
`},
},
Message: "Merge tag 'v1.0.0' into marko\n\nversion 1\n",
SignedContent: []byte(`tree bf7ecca7c6741453e16a0a92be5d9ccd779abcfa
parent 04617da8f3215c84ae4af39b8d734c3df2247347
parent 7077c29016c1be5465678c9ba25983937040dcb2
author Marko Gaćeša <marko.gacesa@harness.io> 1749219134 +0200
committer Marko Gaćeša <marko.gacesa@harness.io> 1749219134 +0200
mergetag object 7077c29016c1be5465678c9ba25983937040dcb2
type commit
tag v1.0.0
tagger Marko Gaćeša <marko.gacesa@harness.io> 1749218976 +0200
version 1
-----BEGIN SSH SIGNATURE-----
U1NIU0lHAAAAAQAAADMAAAALc3NoLWVkMjU1MTkAAAAgEM1i8vha2gQ/ZXHinPejh0hS4C
x8VV1M2uwW6tglOswAAAADZ2l0AAAAAAAAAAZzaGE1MTIAAABTAAAAC3NzaC1lZDI1NTE5
AAAAQG0+9xHX8+7AnbkV//QH7ZvrDoUcm6GrqWkTwHmgSqBsMa7X8aXOtcwPwNJvXpOl8E
prGrumXZoEXzcZMrCG5A0=
-----END SSH SIGNATURE-----
Merge tag 'v1.0.0' into marko
version 1
`),
Signature: []byte(`-----BEGIN SSH SIGNATURE-----
U1NIU0lHAAAAAQAAADMAAAALc3NoLWVkMjU1MTkAAAAgEM1i8vha2gQ/ZXHinPejh0hS4C
x8VV1M2uwW6tglOswAAAADZ2l0AAAAAAAAAAZzaGE1MTIAAABTAAAAC3NzaC1lZDI1NTE5
AAAAQJpnz9Dsv6VleulZSzd3/PRGTJoPsUem0Waq4EbSRB7FDewjf11LRkkqNENiivT1pT
Rv18ZouJpO2LRIXdZpxAE=
-----END SSH SIGNATURE-----
`),
SignatureType: "SSH SIGNATURE",
},
},
}
objectSHA := sha.Must("123456789")
person := types.Signature{
Identity: types.Identity{Name: "Michelangelo", Email: "michelangelo@harness.io"},
When: time.Now(),
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
object, err := Object([]byte(test.data))
if err != nil {
t.Errorf("failed: %s", err.Error())
return
}
if diff := cmp.Diff(object, test.want); diff != "" {
t.Errorf("failed:\n%s\n", diff)
}
if len(object.Signature) == 0 {
// skip testing signed content because the data doesn't contain a signature
return
}
ctx := context.Background()
var verify keyssh.Verify
signature := object.Signature
content := object.SignedContent
if status := verify.Parse(ctx, signature, objectSHA); status != "" {
t.Errorf("failed to extract key from the signature: %s", status)
}
// we use the public key directly from the signature
publicKey, _ := ssh.ParsePublicKey(verify.SignaturePublicKey())
pk := ssh.MarshalAuthorizedKey(publicKey)
if status := verify.Verify(ctx, pk, content, objectSHA, person); status != enum.GitSignatureGood {
t.Errorf("failed to verify the signature: %s", status)
}
})
}
}
func TestObjectNegative(t *testing.T) {
tests := []struct {
name string
data string
errStr string
}{
{
name: "header_without_EOL",
data: "header 1\nheader 2",
errStr: "header line must end with EOL character",
},
{
name: "header_without_value",
data: "header\n\nbody",
errStr: "malformed header",
},
{
name: "header_without_type",
data: " 1\n",
errStr: "malformed header",
},
{
name: "header_invalid_sig",
data: "gpgsig this is\n not a sig\n\nbody",
errStr: "invalid signature header",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
_, err := Object([]byte(test.data))
if err == nil {
t.Error("expected error but got none")
return
}
if want, got := test.errStr, err.Error(); !strings.HasPrefix(got, want) {
t.Errorf("want error message to start with %s, got %s", want, got)
}
})
}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/git/parser/commit_message.go | git/parser/commit_message.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import (
"bufio"
"strings"
"unicode"
)
// CleanUpWhitespace removes extra whitespace for the multiline string passed as parameter.
// The intended usage is to clean up commit messages.
func CleanUpWhitespace(message string) string {
var messageStarted bool
var isLastLineEmpty bool
const eol = '\n'
builder := strings.Builder{}
scan := bufio.NewScanner(strings.NewReader(message))
for scan.Scan() {
line := strings.TrimRightFunc(scan.Text(), unicode.IsSpace)
if len(line) == 0 {
if messageStarted {
isLastLineEmpty = true
}
continue
}
if isLastLineEmpty {
builder.WriteByte(eol)
}
builder.WriteString(line)
builder.WriteByte(eol)
isLastLineEmpty = false
messageStarted = true
}
return builder.String()
}
// ExtractSubject extracts subject from a commit message. The result should be like output of
// the one line commit summary, like "git log --oneline" or "git log --format=%s".
func ExtractSubject(message string) string {
var messageStarted bool
builder := strings.Builder{}
scan := bufio.NewScanner(strings.NewReader(message))
for scan.Scan() {
line := strings.TrimSpace(scan.Text())
// process empty lines
if len(line) == 0 {
if messageStarted {
return builder.String()
}
continue
}
if messageStarted {
builder.WriteByte(' ')
}
builder.WriteString(line)
messageStarted = true
}
return builder.String()
}
// SplitMessage splits a commit message. Returns two strings:
// * subject (the one line commit summary, like "git log --oneline" or "git log --format=%s),
// * body only (like "git log --format=%b").
func SplitMessage(message string) (string, string) {
var state int
var lastLineEmpty bool
const (
stateInit = iota
stateSubject
stateSeparator
stateBody
)
const eol = '\n'
subjectBuilder := strings.Builder{}
bodyBuilder := strings.Builder{}
scan := bufio.NewScanner(strings.NewReader(message))
for scan.Scan() {
line := strings.TrimRightFunc(scan.Text(), unicode.IsSpace)
// process empty lines
if len(line) == 0 {
switch state {
case stateInit, stateSeparator:
// ignore all empty lines before the first line of the subject
case stateSubject:
state = stateSeparator
case stateBody:
lastLineEmpty = true
}
continue
}
switch state {
case stateInit:
state = stateSubject
subjectBuilder.WriteString(strings.TrimLeftFunc(line, unicode.IsSpace))
case stateSubject:
subjectBuilder.WriteByte(' ')
subjectBuilder.WriteString(strings.TrimLeftFunc(line, unicode.IsSpace))
case stateSeparator:
state = stateBody
bodyBuilder.WriteString(line)
bodyBuilder.WriteByte(eol)
lastLineEmpty = false
case stateBody:
if lastLineEmpty {
bodyBuilder.WriteByte(eol)
}
bodyBuilder.WriteString(line)
bodyBuilder.WriteByte(eol)
lastLineEmpty = false
}
}
return subjectBuilder.String(), bodyBuilder.String()
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/git/parser/hunk.go | git/parser/hunk.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import (
"regexp"
"strconv"
"strings"
)
type Hunk struct {
HunkHeader
Lines []string
}
type HunkHeader struct {
OldLine int
OldSpan int
NewLine int
NewSpan int
Text string
}
type Cut struct {
CutHeader
Lines []string
}
type CutHeader struct {
Line int
Span int
}
var regExpHunkHeader = regexp.MustCompile(`^@@ -([0-9]+)(,([0-9]+))? \+([0-9]+)(,([0-9]+))? @@( (.+))?$`)
func (h *HunkHeader) IsZero() bool {
return h.OldLine == 0 && h.OldSpan == 0 && h.NewLine == 0 && h.NewSpan == 0
}
func (h *HunkHeader) IsValid() bool {
oldOk := h.OldLine == 0 && h.OldSpan == 0 || h.OldLine > 0 && h.OldSpan > 0
newOk := h.NewLine == 0 && h.NewSpan == 0 || h.NewLine > 0 && h.NewSpan > 0
return !h.IsZero() && oldOk && newOk
}
func (h *HunkHeader) String() string {
sb := strings.Builder{}
sb.WriteString("@@ -")
sb.WriteString(strconv.Itoa(h.OldLine))
if h.OldSpan != 1 {
sb.WriteByte(',')
sb.WriteString(strconv.Itoa(h.OldSpan))
}
sb.WriteString(" +")
sb.WriteString(strconv.Itoa(h.NewLine))
if h.NewSpan != 1 {
sb.WriteByte(',')
sb.WriteString(strconv.Itoa(h.NewSpan))
}
sb.WriteString(" @@")
if h.Text != "" {
sb.WriteByte(' ')
sb.WriteString(h.Text)
}
return sb.String()
}
func ParseDiffHunkHeader(line string) (HunkHeader, bool) {
groups := regExpHunkHeader.FindStringSubmatch(line)
if groups == nil {
return HunkHeader{}, false
}
oldLine, _ := strconv.Atoi(groups[1])
oldSpan := 1
if groups[3] != "" {
oldSpan, _ = strconv.Atoi(groups[3])
}
newLine, _ := strconv.Atoi(groups[4])
newSpan := 1
if groups[6] != "" {
newSpan, _ = strconv.Atoi(groups[6])
}
return HunkHeader{
OldLine: oldLine,
OldSpan: oldSpan,
NewLine: newLine,
NewSpan: newSpan,
Text: groups[8],
}, true
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/infraprovider/wire.go | infraprovider/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package infraprovider
import (
events "github.com/harness/gitness/app/events/gitspaceinfra"
"github.com/google/wire"
)
// WireSet provides a wire set for this package.
var WireSet = wire.NewSet(
ProvideDockerProvider,
ProvideFactory,
ProvideDockerClientFactory,
)
func ProvideDockerProvider(
config *DockerConfig,
dockerClientFactory *DockerClientFactory,
eventReporter *events.Reporter,
) *DockerProvider {
return NewDockerProvider(config, dockerClientFactory, eventReporter)
}
func ProvideFactory(dockerProvider *DockerProvider) Factory {
return NewFactory(dockerProvider)
}
func ProvideDockerClientFactory(config *DockerConfig) *DockerClientFactory {
return NewDockerClientFactory(config)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/infraprovider/infra_provider.go | infraprovider/infra_provider.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package infraprovider
import (
"context"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
type InfraProvider interface {
// Provision provisions infrastructure against a gitspace with the provided parameters.
Provision(
ctx context.Context,
gitspaceConfig types.GitspaceConfig,
agentPort int,
requiredGitspacePorts []types.GitspacePort,
inputParameters []types.InfraProviderParameter,
configMetadata map[string]any,
existingInfrastructure types.Infrastructure,
) error
// Find finds infrastructure provisioned against a gitspace.
Find(
ctx context.Context,
spaceID int64,
spacePath string,
gitspaceConfigIdentifier string,
inputParameters []types.InfraProviderParameter,
) (*types.Infrastructure, error)
FindInfraStatus(
ctx context.Context,
gitspaceConfigIdentifier string,
gitspaceInstanceIdentifier string,
inputParameters []types.InfraProviderParameter,
) (*enum.InfraStatus, error)
// Stop frees up the resources allocated against a gitspace, which can be freed.
Stop(
ctx context.Context,
infra types.Infrastructure,
gitspaceConfig types.GitspaceConfig,
configMetadata map[string]any,
) error
// CleanupInstanceResources cleans up resources exclusively allocated to a gitspace instance.
CleanupInstanceResources(ctx context.Context, infra types.Infrastructure) error
// Deprovision removes infrastructure provisioned against a gitspace.
// canDeleteUserData = false -> remove all resources except storage where user has stored it's data.
// canDeleteUserData = true -> remove all resources including storage.
Deprovision(
ctx context.Context,
infra types.Infrastructure,
gitspaceConfig types.GitspaceConfig,
canDeleteUserData bool,
configMetadata map[string]any,
params []types.InfraProviderParameter,
) error
// AvailableParams provides a schema to define the infrastructure.
AvailableParams() []types.InfraProviderParameterSchema
// UpdateParams updates input Parameters to add or modify given inputParameters.
UpdateParams(inputParameters []types.InfraProviderParameter,
configMetaData map[string]any) ([]types.InfraProviderParameter, error)
// ValidateParams validates the supplied params before defining the infrastructure resource .
ValidateParams(inputParameters []types.InfraProviderParameter) error
// TemplateParams provides a list of params which are of type template.
TemplateParams() []types.InfraProviderParameterSchema
// ProvisioningType specifies whether the provider will provision new infra resources or it will reuse existing.
ProvisioningType() enum.InfraProvisioningType
// UpdateConfig update infraProvider config to add or modify config.
UpdateConfig(infraProviderConfig *types.InfraProviderConfig) (*types.InfraProviderConfig, error)
// ValidateConfig checks if the provided infra config is as per the provider.
ValidateConfig(infraProviderConfig *types.InfraProviderConfig) error
// GenerateSetupYAML generates the setup file required for the infra provider in yaml format.
GenerateSetupYAML(infraProviderConfig *types.InfraProviderConfig) (string, error)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/infraprovider/infra_provider_factory.go | infraprovider/infra_provider_factory.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package infraprovider
import (
"fmt"
"github.com/harness/gitness/types/enum"
)
type Factory interface {
GetInfraProvider(providerType enum.InfraProviderType) (InfraProvider, error)
}
type factory struct {
providers map[enum.InfraProviderType]InfraProvider
}
func NewFactory(dockerProvider *DockerProvider) Factory {
providers := make(map[enum.InfraProviderType]InfraProvider)
providers[enum.InfraProviderTypeDocker] = dockerProvider
return &factory{providers: providers}
}
func (f *factory) GetInfraProvider(providerType enum.InfraProviderType) (InfraProvider, error) {
val := f.providers[providerType]
if val == nil {
return nil, fmt.Errorf("unknown infra provider type: %s", providerType)
}
return val, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/infraprovider/docker_provider.go | infraprovider/docker_provider.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package infraprovider
import (
"context"
"fmt"
"strings"
events "github.com/harness/gitness/app/events/gitspaceinfra"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/docker/docker/api/types/volume"
"github.com/docker/docker/client"
"github.com/rs/zerolog/log"
)
var _ InfraProvider = (*DockerProvider)(nil)
type DockerProvider struct {
config *DockerConfig
dockerClientFactory *DockerClientFactory
eventReporter *events.Reporter
}
func NewDockerProvider(
config *DockerConfig,
dockerClientFactory *DockerClientFactory,
eventReporter *events.Reporter,
) *DockerProvider {
return &DockerProvider{
config: config,
dockerClientFactory: dockerClientFactory,
eventReporter: eventReporter,
}
}
// Provision assumes a docker engine is already running on the Harness host machine and re-uses that as infra.
// It does not start docker engine. It creates a docker volume using the given gitspace config identifier.
func (d DockerProvider) Provision(
ctx context.Context,
gitspaceConfig types.GitspaceConfig,
_ int,
requiredGitspacePorts []types.GitspacePort,
inputParameters []types.InfraProviderParameter,
_ map[string]any,
_ types.Infrastructure,
) error {
dockerClient, err := d.dockerClientFactory.NewDockerClient(ctx, types.Infrastructure{
ProviderType: enum.InfraProviderTypeDocker,
InputParameters: inputParameters,
})
if err != nil {
return fmt.Errorf("error getting docker client from docker client factory: %w", err)
}
defer func() {
closingErr := dockerClient.Close()
if closingErr != nil {
log.Ctx(ctx).Warn().Err(closingErr).Msg("failed to close docker client")
}
}()
infrastructure, err := d.dockerHostInfo(ctx, dockerClient)
if err != nil {
return err
}
infrastructure.SpaceID = gitspaceConfig.SpaceID
infrastructure.SpacePath = gitspaceConfig.SpacePath
infrastructure.GitspaceConfigIdentifier = gitspaceConfig.Identifier
infrastructure.GitspaceInstanceIdentifier = gitspaceConfig.GitspaceInstance.Identifier
storageName, err := d.createNamedVolume(ctx, gitspaceConfig.SpacePath, gitspaceConfig.Identifier, dockerClient)
if err != nil {
return err
}
infrastructure.Storage = storageName
var portMappings = make(map[int]*types.PortMapping, len(requiredGitspacePorts))
for _, requiredPort := range requiredGitspacePorts {
portMapping := &types.PortMapping{
PublishedPort: 0,
ForwardedPort: 0,
}
portMappings[requiredPort.Port] = portMapping
}
infrastructure.GitspacePortMappings = portMappings
event := &events.GitspaceInfraEventPayload{
Infra: *infrastructure,
Type: enum.InfraEventProvision,
}
err = d.eventReporter.EmitGitspaceInfraEvent(ctx, events.GitspaceInfraEvent, event)
if err != nil {
return fmt.Errorf("error emitting gitspace infra event for provisioning: %w", err)
}
return nil
}
// Find fetches the infrastructure with the current state, the method has no side effects on the infra.
func (d DockerProvider) Find(
ctx context.Context,
spaceID int64,
spacePath string,
gitspaceConfigIdentifier string,
inputParameters []types.InfraProviderParameter,
) (*types.Infrastructure, error) {
dockerClient, err := d.dockerClientFactory.NewDockerClient(ctx, types.Infrastructure{
ProviderType: enum.InfraProviderTypeDocker,
InputParameters: inputParameters,
})
if err != nil {
return nil, fmt.Errorf("error getting docker client from docker client factory: %w", err)
}
defer func() {
closingErr := dockerClient.Close()
if closingErr != nil {
log.Ctx(ctx).Warn().Err(closingErr).Msg("failed to close docker client")
}
}()
infrastructure, err := d.dockerHostInfo(ctx, dockerClient)
if err != nil {
return nil, err
}
infrastructure.SpaceID = spaceID
infrastructure.SpacePath = spacePath
infrastructure.GitspaceConfigIdentifier = gitspaceConfigIdentifier
infrastructure.Storage = volumeName(spacePath, gitspaceConfigIdentifier)
return infrastructure, nil
}
func (d DockerProvider) FindInfraStatus(_ context.Context,
_ string,
_ string,
_ []types.InfraProviderParameter) (*enum.InfraStatus, error) {
return nil, nil //nolint:nilnil
}
// Stop is NOOP as this provider uses already running docker engine. It does not stop the docker engine.
func (d DockerProvider) Stop(
ctx context.Context,
infra types.Infrastructure,
_ types.GitspaceConfig,
_ map[string]any,
) error {
infra.Status = enum.InfraStatusDestroyed
event := &events.GitspaceInfraEventPayload{
Infra: infra,
Type: enum.InfraEventStop,
}
err := d.eventReporter.EmitGitspaceInfraEvent(ctx, events.GitspaceInfraEvent, event)
if err != nil {
return fmt.Errorf("error emitting gitspace infra event for stopping: %w", err)
}
return nil
}
// CleanupInstanceResources is NOOP as this provider does not utilise infra exclusively associated to a gitspace
// instance.
func (d DockerProvider) CleanupInstanceResources(ctx context.Context, infra types.Infrastructure) error {
infra.Status = enum.InfraStatusStopped
event := &events.GitspaceInfraEventPayload{
Infra: infra,
Type: enum.InfraEventCleanup,
}
err := d.eventReporter.EmitGitspaceInfraEvent(ctx, events.GitspaceInfraEvent, event)
if err != nil {
return fmt.Errorf("error emitting gitspace infra event for cleanup: %w", err)
}
return nil
}
// Deprovision is NOOP if canDeleteUserData = false
// Deprovision deletes the volume created by Provision method if canDeleteUserData = false.
// Deprovision does not stop the docker engine in any case.
func (d DockerProvider) Deprovision(
ctx context.Context,
infra types.Infrastructure,
_ types.GitspaceConfig,
canDeleteUserData bool,
_ map[string]any,
_ []types.InfraProviderParameter,
) error {
if canDeleteUserData {
err := d.deleteVolume(ctx, infra)
if err != nil {
return fmt.Errorf("couldn't delete volume for %s : %w", infra.Storage, err)
}
}
infra.Status = enum.InfraStatusDestroyed
event := &events.GitspaceInfraEventPayload{
Infra: infra,
Type: enum.InfraEventDeprovision,
}
err := d.eventReporter.EmitGitspaceInfraEvent(ctx, events.GitspaceInfraEvent, event)
if err != nil {
return fmt.Errorf("error emitting gitspace infra event for deprovisioning: %w", err)
}
return nil
}
func (d DockerProvider) deleteVolume(ctx context.Context, infra types.Infrastructure) error {
dockerClient, err := d.dockerClientFactory.NewDockerClient(ctx, types.Infrastructure{
ProviderType: enum.InfraProviderTypeDocker,
InputParameters: infra.InputParameters,
})
if err != nil {
return fmt.Errorf("error getting docker client from docker client factory: %w", err)
}
defer func() {
closingErr := dockerClient.Close()
if closingErr != nil {
log.Ctx(ctx).Warn().Err(closingErr).Msg("failed to close docker client")
}
}()
// check if volume is available
volumeList, err := dockerClient.VolumeList(ctx, volume.ListOptions{})
if err != nil {
return fmt.Errorf("couldn't list the volume: %w", err)
}
if !findVolume(infra.Storage, volumeList.Volumes) {
// given volume does not exist, return nil
return nil
}
err = dockerClient.VolumeRemove(ctx, infra.Storage, true)
if err != nil {
return fmt.Errorf("couldn't delete volume for %s : %w", infra.Storage, err)
}
return nil
}
func findVolume(target string, volumes []*volume.Volume) bool {
for _, vol := range volumes {
if vol == nil {
continue
}
if vol.Name == target {
return true
}
}
return false
}
// AvailableParams returns empty slice as no params are defined.
func (d DockerProvider) AvailableParams() []types.InfraProviderParameterSchema {
return []types.InfraProviderParameterSchema{}
}
// ValidateParams returns nil as no params are defined.
func (d DockerProvider) ValidateParams(_ []types.InfraProviderParameter) error {
return nil
}
func (d DockerProvider) UpdateParams(ip []types.InfraProviderParameter,
_ map[string]any) ([]types.InfraProviderParameter, error) {
return ip, nil
}
// TemplateParams returns nil as no template params are used.
func (d DockerProvider) TemplateParams() []types.InfraProviderParameterSchema {
return nil
}
// ProvisioningType returns existing as docker provider doesn't create new resources.
func (d DockerProvider) ProvisioningType() enum.InfraProvisioningType {
return enum.InfraProvisioningTypeExisting
}
func (d DockerProvider) UpdateConfig(
config *types.InfraProviderConfig,
) (*types.InfraProviderConfig, error) {
return config, nil
}
func (d DockerProvider) dockerHostInfo(
ctx context.Context,
dockerClient *client.Client,
) (*types.Infrastructure, error) {
info, err := dockerClient.Info(ctx)
if err != nil {
return nil, fmt.Errorf("unable to connect to docker engine: %w", err)
}
return &types.Infrastructure{
Identifier: info.ID,
ProviderType: enum.InfraProviderTypeDocker,
Status: enum.InfraStatusProvisioned,
GitspaceHost: d.config.DockerMachineHostName,
GitspaceScheme: "http",
}, nil
}
func (d DockerProvider) createNamedVolume(
ctx context.Context,
spacePath string,
resourceKey string,
dockerClient *client.Client,
) (string, error) {
name := volumeName(spacePath, resourceKey)
dockerVolume, err := dockerClient.VolumeCreate(ctx, volume.CreateOptions{
Name: name,
Driver: "local",
Labels: nil,
DriverOpts: nil})
if err != nil {
return "", fmt.Errorf(
"could not create name volume %s: %w", name, err)
}
log.Info().Msgf("created volume %s", dockerVolume.Name)
return dockerVolume.Name, nil
}
func volumeName(spacePath string, resourceKey string) string {
name := "gitspace-" + strings.ReplaceAll(spacePath, "/", "-") + "-" + resourceKey
return name
}
func (d DockerProvider) ValidateConfig(_ *types.InfraProviderConfig) error {
return nil
}
func (d DockerProvider) GenerateSetupYAML(_ *types.InfraProviderConfig) (string, error) {
return "", nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/infraprovider/docker_client_factory.go | infraprovider/docker_client_factory.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package infraprovider
import (
"context"
"fmt"
"net/http"
"path/filepath"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
"github.com/docker/docker/client"
"github.com/docker/go-connections/tlsconfig"
)
type DockerClientFactory struct {
config *DockerConfig
}
func NewDockerClientFactory(config *DockerConfig) *DockerClientFactory {
return &DockerClientFactory{config: config}
}
// NewDockerClient returns a new docker client created using the docker config and infra.
func (d *DockerClientFactory) NewDockerClient(
_ context.Context,
infra types.Infrastructure,
) (*client.Client, error) {
if infra.ProviderType != enum.InfraProviderTypeDocker {
return nil, fmt.Errorf("infra provider type %s not supported", infra.ProviderType)
}
dockerClient, err := d.getClient(infra.InputParameters)
if err != nil {
return nil, fmt.Errorf("error creating docker client using infra %+v: %w", infra, err)
}
return dockerClient, nil
}
func (d *DockerClientFactory) getClient(_ []types.InfraProviderParameter) (*client.Client, error) {
overrides, err := d.dockerOpts(d.config)
if err != nil {
return nil, fmt.Errorf("unable to create docker opts overrides: %w", err)
}
opts := append([]client.Opt{client.FromEnv}, overrides...)
dockerClient, err := client.NewClientWithOpts(opts...)
if err != nil {
return nil, fmt.Errorf("unable to create docker client: %w", err)
}
return dockerClient, nil
}
func (d *DockerClientFactory) getHTTPSClient() (*http.Client, error) {
options := tlsconfig.Options{
CAFile: filepath.Join(d.config.DockerCertPath, "ca.pem"),
CertFile: filepath.Join(d.config.DockerCertPath, "cert.pem"),
KeyFile: filepath.Join(d.config.DockerCertPath, "key.pem"),
InsecureSkipVerify: d.config.DockerTLSVerify == "",
}
tlsc, err := tlsconfig.Client(options)
if err != nil {
return nil, err
}
return &http.Client{
Transport: &http.Transport{TLSClientConfig: tlsc},
CheckRedirect: client.CheckRedirect,
}, nil
}
// dockerOpts returns back the options to be overridden from docker options set
// in the environment. If values are specified in gitness, they get preference.
func (d *DockerClientFactory) dockerOpts(config *DockerConfig) ([]client.Opt, error) {
var overrides []client.Opt
if config.DockerHost != "" {
overrides = append(overrides, client.WithHost(config.DockerHost))
}
if config.DockerAPIVersion != "" {
overrides = append(overrides, client.WithVersion(config.DockerAPIVersion))
}
if config.DockerCertPath != "" {
httpsClient, err := d.getHTTPSClient()
if err != nil {
return nil, fmt.Errorf("unable to create https client for docker client: %w", err)
}
overrides = append(overrides, client.WithHTTPClient(httpsClient))
}
return overrides, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/infraprovider/docker_config.go | infraprovider/docker_config.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package infraprovider
type DockerConfig struct {
DockerHost string
DockerAPIVersion string
DockerCertPath string
DockerTLSVerify string
DockerMachineHostName string
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/livelog/memory_test.go | livelog/memory_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package livelog
import (
"context"
"errors"
"testing"
"time"
)
func TestNewMemory(t *testing.T) {
stream := NewMemory()
if stream == nil {
t.Fatal("expected non-nil stream")
}
// Verify it implements LogStream interface
var _ = stream
}
func TestStreamer_Create(t *testing.T) {
stream := NewMemory()
ctx := context.Background()
tests := []struct {
name string
id int64
}{
{
name: "positive id",
id: 1,
},
{
name: "zero id",
id: 0,
},
{
name: "negative id",
id: -1,
},
{
name: "large id",
id: 9223372036854775807,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
err := stream.Create(ctx, test.id)
if err != nil {
t.Errorf("unexpected error creating stream: %v", err)
}
})
}
}
func TestStreamer_Create_Multiple(t *testing.T) {
stream := NewMemory()
ctx := context.Background()
// Create multiple streams
ids := []int64{1, 2, 3, 100, -5}
for _, id := range ids {
err := stream.Create(ctx, id)
if err != nil {
t.Errorf("unexpected error creating stream %d: %v", id, err)
}
}
// Verify all streams exist by checking info
info := stream.Info(ctx)
if len(info.Streams) != len(ids) {
t.Errorf("expected %d streams, got %d", len(ids), len(info.Streams))
}
for _, id := range ids {
if _, exists := info.Streams[id]; !exists {
t.Errorf("stream %d not found in info", id)
}
}
}
func TestStreamer_Delete(t *testing.T) {
stream := NewMemory()
ctx := context.Background()
// Create a stream first
id := int64(1)
err := stream.Create(ctx, id)
if err != nil {
t.Fatalf("failed to create stream: %v", err)
}
// Delete the stream
err = stream.Delete(ctx, id)
if err != nil {
t.Errorf("unexpected error deleting stream: %v", err)
}
// Verify stream is deleted
info := stream.Info(ctx)
if _, exists := info.Streams[id]; exists {
t.Error("stream should have been deleted")
}
}
func TestStreamer_Delete_NotFound(t *testing.T) {
stream := NewMemory()
ctx := context.Background()
// Try to delete non-existent stream
err := stream.Delete(ctx, 999)
if !errors.Is(err, ErrStreamNotFound) {
t.Errorf("expected ErrStreamNotFound, got %v", err)
}
}
func TestStreamer_Write(t *testing.T) {
stream := NewMemory()
ctx := context.Background()
// Create a stream
id := int64(1)
err := stream.Create(ctx, id)
if err != nil {
t.Fatalf("failed to create stream: %v", err)
}
// Write to the stream
line := &Line{
Number: 1,
Message: "test message",
Timestamp: time.Now().Unix(),
}
err = stream.Write(ctx, id, line)
if err != nil {
t.Errorf("unexpected error writing to stream: %v", err)
}
}
func TestStreamer_Write_NotFound(t *testing.T) {
stream := NewMemory()
ctx := context.Background()
// Try to write to non-existent stream
line := &Line{
Number: 1,
Message: "test message",
Timestamp: time.Now().Unix(),
}
err := stream.Write(ctx, 999, line)
if !errors.Is(err, ErrStreamNotFound) {
t.Errorf("expected ErrStreamNotFound, got %v", err)
}
}
func TestStreamer_Tail(t *testing.T) {
stream := NewMemory()
ctx := context.Background()
// Create a stream
id := int64(1)
err := stream.Create(ctx, id)
if err != nil {
t.Fatalf("failed to create stream: %v", err)
}
// Start tailing
lines, errs := stream.Tail(ctx, id)
if lines == nil || errs == nil {
t.Fatal("expected non-nil channels")
}
// Write a line
line := &Line{
Number: 1,
Message: "test message",
Timestamp: time.Now().Unix(),
}
err = stream.Write(ctx, id, line)
if err != nil {
t.Fatalf("failed to write to stream: %v", err)
}
// Read the line
select {
case receivedLine := <-lines:
if receivedLine.Number != line.Number {
t.Errorf("expected number %d, got %d", line.Number, receivedLine.Number)
}
if receivedLine.Message != line.Message {
t.Errorf("expected message %q, got %q", line.Message, receivedLine.Message)
}
if receivedLine.Timestamp != line.Timestamp {
t.Errorf("expected timestamp %d, got %d", line.Timestamp, receivedLine.Timestamp)
}
case <-time.After(time.Second):
t.Error("timeout waiting for line")
}
}
func TestStreamer_Tail_NotFound(t *testing.T) {
stream := NewMemory()
ctx := context.Background()
// Try to tail non-existent stream
lines, errs := stream.Tail(ctx, 999)
if lines != nil || errs != nil {
t.Error("expected nil channels for non-existent stream")
}
}
func TestStreamer_Tail_History(t *testing.T) {
stream := NewMemory()
ctx := context.Background()
// Create a stream
id := int64(1)
err := stream.Create(ctx, id)
if err != nil {
t.Fatalf("failed to create stream: %v", err)
}
// Write some lines before tailing
lines := []*Line{
{Number: 1, Message: "line 1", Timestamp: 1},
{Number: 2, Message: "line 2", Timestamp: 2},
{Number: 3, Message: "line 3", Timestamp: 3},
}
for _, line := range lines {
err = stream.Write(ctx, id, line)
if err != nil {
t.Fatalf("failed to write line: %v", err)
}
}
// Start tailing
lineChan, _ := stream.Tail(ctx, id)
// Should receive all historical lines
for i, expectedLine := range lines {
select {
case receivedLine := <-lineChan:
if receivedLine.Number != expectedLine.Number {
t.Errorf("line %d: expected number %d, got %d", i, expectedLine.Number, receivedLine.Number)
}
if receivedLine.Message != expectedLine.Message {
t.Errorf("line %d: expected message %q, got %q", i, expectedLine.Message, receivedLine.Message)
}
case <-time.After(time.Second):
t.Errorf("timeout waiting for historical line %d", i)
}
}
}
func TestStreamer_Info(t *testing.T) {
stream := NewMemory()
ctx := context.Background()
// Initially should have no streams
info := stream.Info(ctx)
if len(info.Streams) != 0 {
t.Errorf("expected 0 streams, got %d", len(info.Streams))
}
// Create some streams
ids := []int64{1, 2, 3}
for _, id := range ids {
err := stream.Create(ctx, id)
if err != nil {
t.Fatalf("failed to create stream %d: %v", id, err)
}
}
// Check info again
info = stream.Info(ctx)
if len(info.Streams) != len(ids) {
t.Errorf("expected %d streams, got %d", len(ids), len(info.Streams))
}
for _, id := range ids {
if count, exists := info.Streams[id]; !exists {
t.Errorf("stream %d not found in info", id)
} else if count != 0 {
t.Errorf("expected 0 subscribers for stream %d, got %d", id, count)
}
}
}
func TestStreamer_ConcurrentAccess(t *testing.T) {
stream := NewMemory()
ctx := context.Background()
// Create a stream
id := int64(1)
err := stream.Create(ctx, id)
if err != nil {
t.Fatalf("failed to create stream: %v", err)
}
// Start multiple goroutines writing to the stream
done := make(chan bool)
numWriters := 10
linesPerWriter := 100
for i := range numWriters {
go func(writerID int) {
defer func() { done <- true }()
for j := range linesPerWriter {
line := &Line{
Number: writerID*linesPerWriter + j,
Message: "concurrent message",
Timestamp: time.Now().Unix(),
}
err := stream.Write(ctx, id, line)
if err != nil {
t.Errorf("writer %d: failed to write line %d: %v", writerID, j, err)
}
}
}(i)
}
// Wait for all writers to complete
for range numWriters {
<-done
}
// Verify stream still exists
info := stream.Info(ctx)
if _, exists := info.Streams[id]; !exists {
t.Error("stream should still exist after concurrent writes")
}
}
// TestErrStreamNotFound pins the sentinel error's message text.
func TestErrStreamNotFound(t *testing.T) {
	const want = "stream: not found"
	if got := ErrStreamNotFound.Error(); got != want {
		t.Errorf("expected error message %q, got %q", want, got)
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/livelog/wire.go | livelog/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package livelog
import (
"github.com/google/wire"
)
// WireSet provides a wire set for this package.
var WireSet = wire.NewSet(
	ProvideLogStream,
)

// ProvideLogStream provides an implementation of a logs streamer.
// It is backed by the in-memory streamer returned by NewMemory.
func ProvideLogStream() LogStream {
	return NewMemory()
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/livelog/stream.go | livelog/stream.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package livelog
import (
"context"
"sync"
)
// this is the amount of items that are stored in memory
// in the buffer. This should result in approximately 10kb
// of memory allocated per-stream and per-subscriber, not
// including any logdata stored in these structures.
const bufferSize = 5000

// stream is a single live log stream: a bounded history of lines plus
// the set of currently-registered subscribers. The embedded mutex
// guards both fields.
type stream struct {
	sync.Mutex
	hist []*Line
	list map[*subscriber]struct{}
}

// newStream returns an empty stream with no history and no subscribers.
func newStream() *stream {
	return &stream{
		list: map[*subscriber]struct{}{},
	}
}
// write appends the line to the stream history and fans it out to every
// open subscriber. The history is capped at bufferSize entries, with
// the oldest lines dropped first, so memory stays bounded.
func (s *stream) write(line *Line) error {
	s.Lock()
	defer s.Unlock()

	s.hist = append(s.hist, line)
	for sub := range s.list {
		if sub.closed {
			continue
		}
		sub.publish(line)
	}

	// Trim the history in FIFO order once capacity is reached.
	if overflow := len(s.hist) - bufferSize; overflow >= 0 {
		s.hist = s.hist[overflow:]
	}
	return nil
}
// subscribe registers a new subscriber on the stream and returns its
// line channel plus an error channel that is closed when the
// subscription ends (stream closed or context cancelled). All buffered
// history is replayed to the subscriber before it joins the fan-out
// list, so no line is missed or delivered twice.
func (s *stream) subscribe(ctx context.Context) (<-chan *Line, <-chan error) {
	sub := &subscriber{
		handler: make(chan *Line, bufferSize),
		closec:  make(chan struct{}),
	}
	err := make(chan error)
	s.Lock()
	// Replay history and register under the stream lock so lines
	// written concurrently cannot be interleaved out of order.
	for _, line := range s.hist {
		sub.publish(line)
	}
	s.list[sub] = struct{}{}
	s.Unlock()
	go func() {
		// Close the error channel when the subscriber is closed
		// (stream deleted) or when the caller's context ends.
		// NOTE(review): on ctx cancellation the subscriber is closed
		// but never removed from s.list; write() skips closed
		// subscribers, so this appears to leak a map entry until the
		// stream itself is closed — confirm with callers.
		defer close(err)
		select {
		case <-sub.closec:
		case <-ctx.Done():
			sub.close()
		}
	}()
	return sub.handler, err
}
// close tears down the stream, closing and unregistering every
// subscriber. It always returns nil.
func (s *stream) close() error {
	s.Lock()
	defer s.Unlock()
	for sub := range s.list {
		sub.close()
		delete(s.list, sub)
	}
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/livelog/memory.go | livelog/memory.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package livelog
import (
"context"
"errors"
"sync"
)
// ErrStreamNotFound is returned when a stream is not registered with
// the streamer.
var ErrStreamNotFound = errors.New("stream: not found")

// streamer is the in-memory LogStream implementation. The embedded
// mutex guards the streams map.
type streamer struct {
	sync.Mutex
	streams map[int64]*stream
}

// NewMemory returns a new in-memory log streamer.
func NewMemory() LogStream {
	return &streamer{
		streams: make(map[int64]*stream),
	}
}
// Create registers a fresh, empty stream under the given step ID,
// replacing any stream previously stored for that ID. It never fails.
func (s *streamer) Create(_ context.Context, id int64) error {
	s.Lock()
	defer s.Unlock()
	s.streams[id] = newStream()
	return nil
}
// Delete removes the stream registered under the given step ID and
// closes it, releasing all of its subscribers. It returns
// ErrStreamNotFound when no stream exists for the ID.
func (s *streamer) Delete(_ context.Context, id int64) error {
	s.Lock()
	target, found := s.streams[id]
	delete(s.streams, id) // no-op when the key is absent
	s.Unlock()

	if !found {
		return ErrStreamNotFound
	}
	// Close outside the streamer lock to avoid holding it while the
	// stream tears down its subscribers.
	return target.close()
}
// Write appends a line to the stream registered under the given step
// ID. It returns ErrStreamNotFound when no stream exists for the ID.
func (s *streamer) Write(_ context.Context, id int64, line *Line) error {
	s.Lock()
	target, found := s.streams[id]
	s.Unlock()

	if !found {
		return ErrStreamNotFound
	}
	return target.write(line)
}
// Tail subscribes to the stream registered under the given step ID,
// returning a channel of lines (history first, then live writes) and an
// error channel that closes when the subscription ends. When no stream
// exists for the ID both channels are nil — callers must check for nil
// rather than an error.
func (s *streamer) Tail(ctx context.Context, id int64) (<-chan *Line, <-chan error) {
	s.Lock()
	stream, ok := s.streams[id]
	s.Unlock()
	if !ok {
		return nil, nil
	}
	return stream.subscribe(ctx)
}
// Info reports every registered stream keyed by step ID along with its
// current subscriber count. The streamer lock is held for the whole
// walk, and each stream's own lock is taken briefly to read its
// subscriber list (streamer lock before stream lock — keep this order
// consistent to avoid deadlock).
func (s *streamer) Info(_ context.Context) *LogStreamInfo {
	s.Lock()
	defer s.Unlock()
	info := &LogStreamInfo{
		Streams: map[int64]int{},
	}
	for id, stream := range s.streams {
		stream.Lock()
		info.Streams[id] = len(stream.list)
		stream.Unlock()
	}
	return info
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/livelog/stream_test.go | livelog/stream_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package livelog
import (
"context"
"testing"
"time"
)
func TestNewStream(t *testing.T) {
s := newStream()
if s == nil {
t.Fatal("expected non-nil stream")
}
if s.list == nil {
t.Error("expected non-nil subscriber list")
}
if len(s.list) != 0 {
t.Errorf("expected empty subscriber list, got %d subscribers", len(s.list))
}
if len(s.hist) != 0 {
t.Errorf("expected empty history, got %d lines", len(s.hist))
}
}
func TestStream_Write(t *testing.T) {
s := newStream()
tests := []struct {
name string
line *Line
}{
{
name: "basic line",
line: &Line{
Number: 1,
Message: "test message",
Timestamp: time.Now().Unix(),
},
},
{
name: "empty message",
line: &Line{
Number: 2,
Message: "",
Timestamp: time.Now().Unix(),
},
},
{
name: "zero timestamp",
line: &Line{
Number: 3,
Message: "zero timestamp",
Timestamp: 0,
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
err := s.write(test.line)
if err != nil {
t.Errorf("unexpected error writing line: %v", err)
}
})
}
// Verify all lines are in history
if len(s.hist) != len(tests) {
t.Errorf("expected %d lines in history, got %d", len(tests), len(s.hist))
}
}
func TestStream_Write_BufferLimit(t *testing.T) {
s := newStream()
// Write more lines than buffer size
numLines := bufferSize + 100
for i := range numLines {
line := &Line{
Number: i,
Message: "test message",
Timestamp: int64(i),
}
err := s.write(line)
if err != nil {
t.Errorf("unexpected error writing line %d: %v", i, err)
}
}
// History should be capped at buffer size
if len(s.hist) != bufferSize {
t.Errorf("expected history size %d, got %d", bufferSize, len(s.hist))
}
// Should contain the most recent lines
firstLine := s.hist[0]
expectedFirstNumber := numLines - bufferSize
if firstLine.Number != expectedFirstNumber {
t.Errorf("expected first line number %d, got %d", expectedFirstNumber, firstLine.Number)
}
lastLine := s.hist[len(s.hist)-1]
expectedLastNumber := numLines - 1
if lastLine.Number != expectedLastNumber {
t.Errorf("expected last line number %d, got %d", expectedLastNumber, lastLine.Number)
}
}
func TestStream_Subscribe(t *testing.T) {
s := newStream()
ctx := context.Background()
// Subscribe to empty stream
lineChan, errChan := s.subscribe(ctx)
if lineChan == nil {
t.Fatal("expected non-nil line channel")
}
if errChan == nil {
t.Fatal("expected non-nil error channel")
}
// Verify subscriber was added
if len(s.list) != 1 {
t.Errorf("expected 1 subscriber, got %d", len(s.list))
}
}
func TestStream_Subscribe_WithHistory(t *testing.T) {
s := newStream()
ctx := context.Background()
// Write some lines to history
historyLines := []*Line{
{Number: 1, Message: "line 1", Timestamp: 1},
{Number: 2, Message: "line 2", Timestamp: 2},
{Number: 3, Message: "line 3", Timestamp: 3},
}
for _, line := range historyLines {
err := s.write(line)
if err != nil {
t.Fatalf("failed to write line: %v", err)
}
}
// Subscribe and receive history
lineChan, _ := s.subscribe(ctx)
// Should receive all historical lines
for i, expectedLine := range historyLines {
select {
case receivedLine := <-lineChan:
if receivedLine.Number != expectedLine.Number {
t.Errorf("line %d: expected number %d, got %d", i, expectedLine.Number, receivedLine.Number)
}
if receivedLine.Message != expectedLine.Message {
t.Errorf("line %d: expected message %q, got %q", i, expectedLine.Message, receivedLine.Message)
}
case <-time.After(time.Second):
t.Errorf("timeout waiting for historical line %d", i)
}
}
}
func TestStream_Subscribe_NewLines(t *testing.T) {
s := newStream()
ctx := context.Background()
// Subscribe first
lineChan, _ := s.subscribe(ctx)
// Write new lines
newLines := []*Line{
{Number: 1, Message: "new line 1", Timestamp: 1},
{Number: 2, Message: "new line 2", Timestamp: 2},
}
for _, line := range newLines {
err := s.write(line)
if err != nil {
t.Fatalf("failed to write line: %v", err)
}
// Should receive the new line
select {
case receivedLine := <-lineChan:
if receivedLine.Number != line.Number {
t.Errorf("expected number %d, got %d", line.Number, receivedLine.Number)
}
if receivedLine.Message != line.Message {
t.Errorf("expected message %q, got %q", line.Message, receivedLine.Message)
}
case <-time.After(time.Second):
t.Error("timeout waiting for new line")
}
}
}
func TestStream_Subscribe_ContextCancellation(t *testing.T) {
s := newStream()
ctx, cancel := context.WithCancel(context.Background())
// Subscribe
lineChan, errChan := s.subscribe(ctx)
// Cancel context
cancel()
// Error channel should close
select {
case <-errChan:
// Expected - error channel should close
case <-time.After(time.Second):
t.Error("timeout waiting for error channel to close")
}
// Line channel should also be closed eventually
select {
case _, ok := <-lineChan:
if ok {
t.Error("expected line channel to be closed")
}
case <-time.After(time.Second):
t.Error("timeout waiting for line channel to close")
}
}
func TestStream_Close(t *testing.T) {
s := newStream()
ctx := context.Background()
// Add some subscribers
numSubscribers := 3
for range numSubscribers {
s.subscribe(ctx)
}
// Verify subscribers exist
if len(s.list) != numSubscribers {
t.Errorf("expected %d subscribers, got %d", numSubscribers, len(s.list))
}
// Close the stream
err := s.close()
if err != nil {
t.Errorf("unexpected error closing stream: %v", err)
}
// All subscribers should be removed
if len(s.list) != 0 {
t.Errorf("expected 0 subscribers after close, got %d", len(s.list))
}
}
func TestStream_MultipleSubscribers(t *testing.T) {
s := newStream()
ctx := context.Background()
// Create multiple subscribers
numSubscribers := 5
channels := make([]<-chan *Line, numSubscribers)
for i := range numSubscribers {
lineChan, _ := s.subscribe(ctx)
channels[i] = lineChan
}
// Write a line
line := &Line{
Number: 1,
Message: "broadcast message",
Timestamp: time.Now().Unix(),
}
err := s.write(line)
if err != nil {
t.Fatalf("failed to write line: %v", err)
}
// All subscribers should receive the line
for i, ch := range channels {
select {
case receivedLine := <-ch:
if receivedLine.Number != line.Number {
t.Errorf("subscriber %d: expected number %d, got %d", i, line.Number, receivedLine.Number)
}
if receivedLine.Message != line.Message {
t.Errorf("subscriber %d: expected message %q, got %q", i, line.Message, receivedLine.Message)
}
case <-time.After(time.Second):
t.Errorf("subscriber %d: timeout waiting for line", i)
}
}
}
// TestBufferSize pins the per-stream history/subscriber buffer capacity.
func TestBufferSize(t *testing.T) {
	const want = 5000
	if bufferSize != want {
		t.Errorf("expected buffer size %d, got %d", want, bufferSize)
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/livelog/livelog.go | livelog/livelog.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package livelog
import "context"
// Line represents a line in the logs.
type Line struct {
	Number    int    `json:"pos"`
	Message   string `json:"out"`
	Timestamp int64  `json:"time"`
}

// LogStreamInfo provides internal stream information. This can
// be used to monitor the number of registered streams and
// subscribers.
type LogStreamInfo struct {
	// Streams is a key-value pair where the key is the step
	// identifier, and the value is the count of subscribers
	// streaming the logs.
	Streams map[int64]int `json:"streams"`
}

// LogStream manages a live stream of logs.
type LogStream interface {
	// Create creates the log stream for the step ID.
	Create(context.Context, int64) error

	// Delete deletes the log stream for the step ID.
	Delete(context.Context, int64) error

	// Write writes to the log stream.
	Write(context.Context, int64, *Line) error

	// Tail tails the log stream.
	Tail(context.Context, int64) (<-chan *Line, <-chan error)

	// Info returns internal stream information.
	Info(context.Context) *LogStreamInfo
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/livelog/sub_test.go | livelog/sub_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package livelog
import (
"testing"
"time"
)
func TestSubscriber_Publish(t *testing.T) {
sub := &subscriber{
handler: make(chan *Line, bufferSize),
closec: make(chan struct{}),
closed: false,
}
line := &Line{
Number: 1,
Message: "test message",
Timestamp: time.Now().Unix(),
}
// Publish line
sub.publish(line)
// Should receive the line
select {
case receivedLine := <-sub.handler:
if receivedLine.Number != line.Number {
t.Errorf("expected number %d, got %d", line.Number, receivedLine.Number)
}
if receivedLine.Message != line.Message {
t.Errorf("expected message %q, got %q", line.Message, receivedLine.Message)
}
if receivedLine.Timestamp != line.Timestamp {
t.Errorf("expected timestamp %d, got %d", line.Timestamp, receivedLine.Timestamp)
}
case <-time.After(time.Second):
t.Error("timeout waiting for published line")
}
}
func TestSubscriber_Publish_Multiple(t *testing.T) {
sub := &subscriber{
handler: make(chan *Line, bufferSize),
closec: make(chan struct{}),
closed: false,
}
lines := []*Line{
{Number: 1, Message: "line 1", Timestamp: 1},
{Number: 2, Message: "line 2", Timestamp: 2},
{Number: 3, Message: "line 3", Timestamp: 3},
}
// Publish all lines
for _, line := range lines {
sub.publish(line)
}
// Should receive all lines in order
for i, expectedLine := range lines {
select {
case receivedLine := <-sub.handler:
if receivedLine.Number != expectedLine.Number {
t.Errorf("line %d: expected number %d, got %d", i, expectedLine.Number, receivedLine.Number)
}
if receivedLine.Message != expectedLine.Message {
t.Errorf("line %d: expected message %q, got %q", i, expectedLine.Message, receivedLine.Message)
}
case <-time.After(time.Second):
t.Errorf("timeout waiting for line %d", i)
}
}
}
func TestSubscriber_Publish_BufferFull(t *testing.T) {
// Create subscriber with small buffer for testing
sub := &subscriber{
handler: make(chan *Line, 2), // Small buffer
closec: make(chan struct{}),
closed: false,
}
// Fill the buffer
line1 := &Line{Number: 1, Message: "line 1", Timestamp: 1}
line2 := &Line{Number: 2, Message: "line 2", Timestamp: 2}
line3 := &Line{Number: 3, Message: "line 3", Timestamp: 3}
sub.publish(line1)
sub.publish(line2)
// Buffer should be full now, third publish should not block
// (it should be dropped due to default case in select)
sub.publish(line3)
// Should receive first two lines
receivedLine1 := <-sub.handler
if receivedLine1.Number != 1 {
t.Errorf("expected first line number 1, got %d", receivedLine1.Number)
}
receivedLine2 := <-sub.handler
if receivedLine2.Number != 2 {
t.Errorf("expected second line number 2, got %d", receivedLine2.Number)
}
// Channel should be empty now (third line was dropped)
select {
case <-sub.handler:
t.Error("unexpected line received (should have been dropped)")
default:
// Expected - no more lines
}
}
func TestSubscriber_Close(t *testing.T) {
sub := &subscriber{
handler: make(chan *Line, bufferSize),
closec: make(chan struct{}),
closed: false,
}
// Close the subscriber
sub.close()
// Should be marked as closed
if !sub.closed {
t.Error("subscriber should be marked as closed")
}
// Channels should be closed
select {
case <-sub.closec:
// Expected - close channel should be closed
default:
t.Error("close channel should be closed")
}
select {
case _, ok := <-sub.handler:
if ok {
t.Error("handler channel should be closed")
}
default:
t.Error("handler channel should be closed")
}
}
func TestSubscriber_Close_Multiple(t *testing.T) {
sub := &subscriber{
handler: make(chan *Line, bufferSize),
closec: make(chan struct{}),
closed: false,
}
// Close multiple times should not panic
sub.close()
sub.close()
sub.close()
// Should still be marked as closed
if !sub.closed {
t.Error("subscriber should be marked as closed")
}
}
func TestSubscriber_Publish_AfterClose(_ *testing.T) {
sub := &subscriber{
handler: make(chan *Line, bufferSize),
closec: make(chan struct{}),
closed: false,
}
// Close the subscriber
sub.close()
// Publishing after close should not panic (due to recover in publish)
line := &Line{Number: 1, Message: "test", Timestamp: 1}
sub.publish(line) // Should not panic
}
func TestSubscriber_Publish_ClosedChannel(_ *testing.T) {
sub := &subscriber{
handler: make(chan *Line, bufferSize),
closec: make(chan struct{}),
closed: false,
}
// Close the close channel manually to simulate the condition
close(sub.closec)
line := &Line{Number: 1, Message: "test", Timestamp: 1}
// Should not block or panic when closec is closed
sub.publish(line)
}
func TestSubscriber_InitialState(t *testing.T) {
sub := &subscriber{
handler: make(chan *Line, bufferSize),
closec: make(chan struct{}),
closed: false,
}
// Initial state checks
if sub.closed {
t.Error("subscriber should not be closed initially")
}
if sub.handler == nil {
t.Error("handler channel should not be nil")
}
if sub.closec == nil {
t.Error("close channel should not be nil")
}
// Channels should be open
select {
case <-sub.closec:
t.Error("close channel should not be closed initially")
default:
// Expected
}
}
func TestSubscriber_ConcurrentPublish(t *testing.T) {
sub := &subscriber{
handler: make(chan *Line, bufferSize),
closec: make(chan struct{}),
closed: false,
}
// Start multiple goroutines publishing concurrently
numGoroutines := 10
linesPerGoroutine := 100
done := make(chan bool, numGoroutines)
for i := range numGoroutines {
go func(id int) {
defer func() { done <- true }()
for j := range linesPerGoroutine {
line := &Line{
Number: id*linesPerGoroutine + j,
Message: "concurrent message",
Timestamp: int64(j),
}
sub.publish(line)
}
}(i)
}
// Wait for all goroutines to complete
for range numGoroutines {
<-done
}
// Drain the channel and count received lines
receivedCount := 0
for {
select {
case <-sub.handler:
receivedCount++
default:
goto done
}
}
done:
// Should have received some lines (may not be all due to buffer limits)
if receivedCount == 0 {
t.Error("should have received at least some lines")
}
// Should not have received more than total sent
totalSent := numGoroutines * linesPerGoroutine
if receivedCount > totalSent {
t.Errorf("received more lines (%d) than sent (%d)", receivedCount, totalSent)
}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/livelog/livelog_test.go | livelog/livelog_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package livelog
import (
"reflect"
"testing"
)
func TestLine(t *testing.T) {
tests := []struct {
name string
line Line
expected Line
}{
{
name: "basic line",
line: Line{
Number: 1,
Message: "hello world",
Timestamp: 1234567890,
},
expected: Line{
Number: 1,
Message: "hello world",
Timestamp: 1234567890,
},
},
{
name: "empty message",
line: Line{
Number: 0,
Message: "",
Timestamp: 0,
},
expected: Line{
Number: 0,
Message: "",
Timestamp: 0,
},
},
{
name: "negative number",
line: Line{
Number: -1,
Message: "error message",
Timestamp: 9876543210,
},
expected: Line{
Number: -1,
Message: "error message",
Timestamp: 9876543210,
},
},
{
name: "large timestamp",
line: Line{
Number: 999999,
Message: "large timestamp",
Timestamp: 9223372036854775807, // max int64
},
expected: Line{
Number: 999999,
Message: "large timestamp",
Timestamp: 9223372036854775807,
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
line := test.line
if line.Number != test.expected.Number {
t.Errorf("expected number %d, got %d", test.expected.Number, line.Number)
}
if line.Message != test.expected.Message {
t.Errorf("expected message %q, got %q", test.expected.Message, line.Message)
}
if line.Timestamp != test.expected.Timestamp {
t.Errorf("expected timestamp %d, got %d", test.expected.Timestamp, line.Timestamp)
}
})
}
}
func TestLogStreamInfo(t *testing.T) {
tests := []struct {
name string
info LogStreamInfo
expected LogStreamInfo
}{
{
name: "empty streams",
info: LogStreamInfo{
Streams: map[int64]int{},
},
expected: LogStreamInfo{
Streams: map[int64]int{},
},
},
{
name: "single stream",
info: LogStreamInfo{
Streams: map[int64]int{
1: 5,
},
},
expected: LogStreamInfo{
Streams: map[int64]int{
1: 5,
},
},
},
{
name: "multiple streams",
info: LogStreamInfo{
Streams: map[int64]int{
1: 3,
2: 7,
10: 1,
},
},
expected: LogStreamInfo{
Streams: map[int64]int{
1: 3,
2: 7,
10: 1,
},
},
},
{
name: "streams with zero subscribers",
info: LogStreamInfo{
Streams: map[int64]int{
1: 0,
2: 5,
3: 0,
},
},
expected: LogStreamInfo{
Streams: map[int64]int{
1: 0,
2: 5,
3: 0,
},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
info := test.info
if !reflect.DeepEqual(info.Streams, test.expected.Streams) {
t.Errorf("expected streams %v, got %v", test.expected.Streams, info.Streams)
}
})
}
}
func TestLogStreamInfo_NilStreams(t *testing.T) {
info := LogStreamInfo{
Streams: nil,
}
// Should be able to access nil map without panic
if info.Streams != nil {
t.Error("expected nil streams")
}
}
func TestLine_JSONTags(t *testing.T) {
// Test that the struct has the expected JSON tags
lineType := reflect.TypeOf(Line{})
// Check Number field
numberField, found := lineType.FieldByName("Number")
if !found {
t.Fatal("Number field not found")
}
if tag := numberField.Tag.Get("json"); tag != "pos" {
t.Errorf("expected Number field to have json tag 'pos', got %q", tag)
}
// Check Message field
messageField, found := lineType.FieldByName("Message")
if !found {
t.Fatal("Message field not found")
}
if tag := messageField.Tag.Get("json"); tag != "out" {
t.Errorf("expected Message field to have json tag 'out', got %q", tag)
}
// Check Timestamp field
timestampField, found := lineType.FieldByName("Timestamp")
if !found {
t.Fatal("Timestamp field not found")
}
if tag := timestampField.Tag.Get("json"); tag != "time" {
t.Errorf("expected Timestamp field to have json tag 'time', got %q", tag)
}
}
func TestLogStreamInfo_JSONTags(t *testing.T) {
// Test that the struct has the expected JSON tags
infoType := reflect.TypeOf(LogStreamInfo{})
// Check Streams field
streamsField, found := infoType.FieldByName("Streams")
if !found {
t.Fatal("Streams field not found")
}
if tag := streamsField.Tag.Get("json"); tag != "streams" {
t.Errorf("expected Streams field to have json tag 'streams', got %q", tag)
}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/livelog/sub.go | livelog/sub.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package livelog
import (
"sync"
"github.com/rs/zerolog/log"
)
// subscriber is a single consumer of a live log stream. Lines are
// delivered on the buffered handler channel; closec signals closure.
type subscriber struct {
	sync.Mutex
	// handler receives published lines; it is buffered, and slow
	// consumers simply miss lines (see publish).
	handler chan *Line
	// closec is closed (together with handler) exactly once by close().
	closec chan struct{}
	// closed guards against closing the channels twice.
	closed bool
}
// publish delivers a line to the subscriber without blocking.
//
// Delivery is best-effort: if the subscriber is already closed the send
// is skipped, and if the buffered handler channel is full (slow
// consumer) the line is silently dropped. The recover guard handles the
// unlikely case of sending on a channel that was closed concurrently.
func (s *subscriber) publish(line *Line) {
	defer func() {
		r := recover()
		if r != nil {
			log.Debug().Msgf("publishing to closed subscriber")
		}
	}()
	s.Lock()
	defer s.Unlock()
	select {
	case <-s.closec:
		// subscriber already closed - drop the line.
	case s.handler <- line:
	default:
		// lines are sent on a buffered channel. If there
		// is a slow consumer that is not processing events,
		// the buffered channel will fill and newer messages
		// are ignored.
	}
}
// close terminates the subscriber, closing both channels exactly once.
// It is safe to call multiple times.
func (s *subscriber) close() {
	s.Lock()
	defer s.Unlock()
	if s.closed {
		return
	}
	s.closed = true
	close(s.closec)
	close(s.handler)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/cli/cli_test.go | cli/cli_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cli
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
)
func TestGetArguments(t *testing.T) {
tests := []struct {
name string
osArgs []string
expected []string
}{
{
name: "regular command with args",
osArgs: []string{"/path/to/gitness", "server", "start"},
expected: []string{"server", "start"},
},
{
name: "command with no args",
osArgs: []string{"/path/to/gitness"},
expected: []string{},
},
{
name: "command with single arg",
osArgs: []string{"/path/to/gitness", "version"},
expected: []string{"version"},
},
{
name: "command with multiple args",
osArgs: []string{"/path/to/gitness", "repo", "create", "myrepo"},
expected: []string{"repo", "create", "myrepo"},
},
{
name: "command with flags",
osArgs: []string{"/path/to/gitness", "server", "--port", "8080"},
expected: []string{"server", "--port", "8080"},
},
{
name: "command with mixed args and flags",
osArgs: []string{"/path/to/gitness", "repo", "create", "--private", "myrepo"},
expected: []string{"repo", "create", "--private", "myrepo"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Save original os.Args
originalArgs := os.Args
defer func() {
os.Args = originalArgs
}()
// Set test args
os.Args = tt.osArgs
// Call GetArguments
result := GetArguments()
// Verify result
assert.Equal(t, tt.expected, result, "arguments should match")
})
}
}
// TestGetArguments_PreservesOrder ensures argument ordering is untouched.
func TestGetArguments_PreservesOrder(t *testing.T) {
	saved := os.Args
	defer func() { os.Args = saved }()
	os.Args = []string{"/path/to/gitness", "first", "second", "third"}
	got := GetArguments()
	assert.Equal(t, []string{"first", "second", "third"}, got, "argument order should be preserved")
}
// TestGetArguments_ReturnsSlice ensures a non-nil string slice is returned.
func TestGetArguments_ReturnsSlice(t *testing.T) {
	saved := os.Args
	defer func() { os.Args = saved }()
	os.Args = []string{"/path/to/gitness", "arg1", "arg2"}
	got := GetArguments()
	assert.NotNil(t, got, "result should not be nil")
	assert.IsType(t, []string{}, got, "result should be a string slice")
}
// TestGetArguments_EmptyArgs ensures that invoking with only the binary
// name yields an empty (but non-nil) slice.
func TestGetArguments_EmptyArgs(t *testing.T) {
	saved := os.Args
	defer func() { os.Args = saved }()
	os.Args = []string{"/path/to/gitness"}
	got := GetArguments()
	assert.NotNil(t, got, "result should not be nil")
	assert.Empty(t, got, "result should be empty")
	assert.Equal(t, 0, len(got), "result length should be 0")
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/cli/cli.go | cli/cli.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cli
import (
"os"
"github.com/harness/gitness/cli/operations/hooks"
"github.com/harness/gitness/git/hook"
)
// GetArguments returns the process arguments without the binary name.
// When the binary is invoked by git as a server-side hook, the raw git
// arguments are translated into the corresponding gitness hook command.
func GetArguments() []string {
	executable, rest := os.Args[0], os.Args[1:]
	// in case of githooks, translate the arguments coming from git to work with gitness.
	gitArgs, fromGit := hook.SanitizeArgsForGit(executable, rest)
	if !fromGit {
		return rest
	}
	return append([]string{hooks.ParamHooks}, gitArgs...)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/cli/textui/input.go | cli/textui/input.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package textui
import (
"bufio"
"fmt"
"os"
"strings"
"syscall"
"golang.org/x/term"
)
// Registration prompts on stdin and returns, in this order, the user
// ID, display name, email and password.
func Registration() (string, string, string, string) {
	// Prompt order matters: keep it identical to the return order.
	uid := UserID()
	name := DisplayName()
	email := Email()
	pass := Password()
	return uid, name, email, pass
}
// Credentials prompts on stdin and returns the login identifier and
// password.
func Credentials() (string, string) {
	id := LoginIdentifier()
	pass := Password()
	return id, pass
}
// UserID prompts for and returns the user ID read from stdin,
// with surrounding whitespace trimmed.
func UserID() string {
	fmt.Print("Enter User ID: ")
	in := bufio.NewReader(os.Stdin)
	raw, _ := in.ReadString('\n')
	return strings.TrimSpace(raw)
}
// LoginIdentifier prompts for and returns the login identifier (user ID
// or email) read from stdin, with surrounding whitespace trimmed.
func LoginIdentifier() string {
	fmt.Print("Enter User ID or Email: ")
	in := bufio.NewReader(os.Stdin)
	raw, _ := in.ReadString('\n')
	return strings.TrimSpace(raw)
}
// DisplayName prompts for and returns the display name read from stdin,
// with surrounding whitespace trimmed.
func DisplayName() string {
	fmt.Print("Enter Display Name: ")
	in := bufio.NewReader(os.Stdin)
	raw, _ := in.ReadString('\n')
	return strings.TrimSpace(raw)
}
// Email prompts for and returns the email address read from stdin,
// with surrounding whitespace trimmed.
func Email() string {
	fmt.Print("Enter Email: ")
	in := bufio.NewReader(os.Stdin)
	raw, _ := in.ReadString('\n')
	return strings.TrimSpace(raw)
}
// Password prompts for and returns a password read from stdin without
// echoing the typed characters, with surrounding whitespace trimmed.
func Password() string {
	fmt.Print("Enter Password: ")
	raw, _ := term.ReadPassword(syscall.Stdin)
	return strings.TrimSpace(string(raw))
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/cli/operations/user/create_pat.go | cli/operations/user/create_pat.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package user
import (
"context"
"encoding/json"
"os"
"text/template"
"time"
"github.com/harness/gitness/app/api/controller/user"
"github.com/harness/gitness/cli/provide"
"github.com/drone/funcmap"
"github.com/gotidy/ptr"
"gopkg.in/alecthomas/kingpin.v2"
)
// tokenTmpl is the default text output template for a freshly created
// personal access token; it is rendered against the response returned
// by UserCreatePAT (see run below).
const tokenTmpl = `
principalID: {{ .Token.PrincipalID }}
identifier: {{ .Token.Identifier }}
expiresAt: {{ .Token.ExpiresAt }}
token: {{ .AccessToken }}
` //#nosec G101
// createPATCommand holds the CLI inputs for creating a personal access token.
type createPATCommand struct {
	identifier string // identifier of the token to create
	lifetimeInS int64 // desired lifetime in seconds; <=0 leaves the lifetime unset
	json bool // emit JSON instead of rendering the text template
	tmpl bool // NOTE: see next line; tmpl is the Go template used for text output
}
// run creates a personal access token and prints the response either as
// JSON or rendered through the configured Go template.
func (c *createPATCommand) run(*kingpin.ParseContext) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	// Only pass a lifetime when one was explicitly requested.
	var lifetime *time.Duration
	if c.lifetimeInS > 0 {
		lifetime = ptr.Duration(time.Second * time.Duration(c.lifetimeInS))
	}
	resp, err := provide.Client().UserCreatePAT(ctx, user.CreateTokenInput{
		Identifier: c.identifier,
		Lifetime:   lifetime,
	})
	if err != nil {
		return err
	}
	if c.json {
		enc := json.NewEncoder(os.Stdout)
		enc.SetIndent("", " ")
		return enc.Encode(resp)
	}
	t, err := template.New("_").Funcs(funcmap.Funcs).Parse(c.tmpl)
	if err != nil {
		return err
	}
	return t.Execute(os.Stdout, resp)
}
// registerCreatePAT wires the "pat" sub-command used to create a
// personal access token for the authenticated user.
func registerCreatePAT(app *kingpin.CmdClause) {
	c := &createPATCommand{}
	cmd := app.Command("pat", "create personal access token").
		Action(c.run)
	cmd.Arg("identifier", "the identifier of the token").
		Required().StringVar(&c.identifier)
	cmd.Arg("lifetime", "the lifetime of the token in seconds").
		Int64Var(&c.lifetimeInS)
	cmd.Flag("json", "json encode the output").
		BoolVar(&c.json)
	// Hidden flag: the output template defaults to tokenTmpl.
	cmd.Flag("format", "format the output using a Go template").
		Default(tokenTmpl).
		Hidden().
		StringVar(&c.tmpl)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/cli/operations/user/self.go | cli/operations/user/self.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package user
import (
"context"
"encoding/json"
"os"
"text/template"
"time"
"github.com/harness/gitness/cli/provide"
"github.com/drone/funcmap"
"gopkg.in/alecthomas/kingpin.v2"
)
// userTmpl is the default text output template used to print the
// authenticated user returned by Self (see run below).
const userTmpl = `
uid: {{ .UID }}
name: {{ .DisplayName }}
email: {{ .Email }}
admin: {{ .Admin }}
`
// command holds the CLI inputs for displaying the authenticated user.
type command struct {
	tmpl string // Go template used for text output
	json bool // emit JSON instead of rendering the text template
}
// run fetches the currently authenticated user and prints it either as
// JSON or rendered through the configured Go template.
func (c *command) run(*kingpin.ParseContext) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	self, err := provide.Client().Self(ctx)
	if err != nil {
		return err
	}
	if c.json {
		enc := json.NewEncoder(os.Stdout)
		enc.SetIndent("", " ")
		return enc.Encode(self)
	}
	t, err := template.New("_").Funcs(funcmap.Funcs).Parse(c.tmpl)
	if err != nil {
		return err
	}
	return t.Execute(os.Stdout, self)
}
// registerSelf wires the "self" sub-command that displays the
// currently authenticated user.
func registerSelf(app *kingpin.CmdClause) {
	c := &command{}
	cmd := app.Command("self", "display authenticated user").
		Action(c.run)
	cmd.Flag("json", "json encode the output").
		BoolVar(&c.json)
	// Hidden flag: the output template defaults to userTmpl.
	cmd.Flag("format", "format the output using a Go template").
		Default(userTmpl).
		Hidden().
		StringVar(&c.tmpl)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/cli/operations/user/users.go | cli/operations/user/users.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package user
import (
"gopkg.in/alecthomas/kingpin.v2"
)
// Register wires the "user" command group for managing the currently
// logged-in user (self display, PAT creation).
func Register(app *kingpin.Application) {
	cmd := app.Command("user", "manage currently logged-in user")
	registerSelf(cmd)
	registerCreatePAT(cmd)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/cli/operations/hooks/hooks.go | cli/operations/hooks/hooks.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hooks
import (
gitnessgithook "github.com/harness/gitness/app/githook"
"github.com/harness/gitness/git/hook"
"gopkg.in/alecthomas/kingpin.v2"
)
const (
	// ParamHooks defines the parameter (first CLI argument) under which
	// all the git server hook sub-commands are registered.
	ParamHooks = "hooks"
)
// Register wires the githook sub-commands that are executed when the
// binary is invoked by git as a server-side hook.
func Register(app *kingpin.Application) {
	subCmd := app.Command(ParamHooks, "manage git server hooks")
	hook.RegisterAll(subCmd, gitnessgithook.LoadFromEnvironment)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/cli/operations/account/register.go | cli/operations/account/register.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package account
import (
"context"
"time"
"github.com/harness/gitness/app/api/controller/user"
"github.com/harness/gitness/cli/provide"
"github.com/harness/gitness/cli/session"
"github.com/harness/gitness/cli/textui"
"gopkg.in/alecthomas/kingpin.v2"
)
// Session abstracts the persisted CLI session (server URI, access token
// and token expiry). The Set* methods return the updated session so
// calls can be chained.
type Session interface {
	// SetURI sets the remote server URI on the session.
	SetURI(uri string) session.Session
	// SetExpiresAt sets the access-token expiry timestamp.
	SetExpiresAt(expiresAt int64) session.Session
	// SetAccessToken sets the access token used for authentication.
	SetAccessToken(token string) session.Session
	// Path returns the location where the session is persisted.
	Path() string
	// Store persists the session.
	Store() error
}
// registerCommand holds the CLI inputs for the "register" command.
type registerCommand struct {
	server string // address of the remote server to register against
}
// run interactively registers a new user against the target server and
// persists the returned token as the local CLI session.
func (c *registerCommand) run(*kingpin.ParseContext) error {
	ss := provide.NewSession()
	uid, displayName, email, password := textui.Registration()
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	ts, err := provide.OpenClient(c.server).Register(ctx, &user.RegisterInput{
		UID:         uid,
		Email:       email,
		DisplayName: displayName,
		Password:    password,
	})
	if err != nil {
		return err
	}
	updated := ss.
		SetURI(c.server).
		// register token always has an expiry date
		SetExpiresAt(*ts.Token.ExpiresAt).
		SetAccessToken(ts.AccessToken)
	return updated.Store()
}
// RegisterRegister is a helper function that wires the "register"
// command used to create a new user account on a remote server.
func RegisterRegister(app *kingpin.Application) {
	c := &registerCommand{}
	cmd := app.Command("register", "register a user").
		Action(c.run)
	cmd.Arg("server", "server address").
		Default(provide.DefaultServerURI).
		StringVar(&c.server)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/cli/operations/account/logout.go | cli/operations/account/logout.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package account
import (
"os"
"github.com/harness/gitness/cli/provide"
"gopkg.in/alecthomas/kingpin.v2"
)
// logoutCommand implements the "logout" CLI command; it has no options.
type logoutCommand struct{}
// run logs the user out locally by deleting the stored session file.
func (c *logoutCommand) run(*kingpin.ParseContext) error {
	sessionPath := provide.Session().Path()
	return os.Remove(sessionPath)
}
// RegisterLogout is a helper function that wires the "logout" command.
func RegisterLogout(app *kingpin.Application) {
	c := &logoutCommand{}
	app.Command("logout", "logout from the remote server").
		Action(c.run)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/cli/operations/account/login.go | cli/operations/account/login.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package account
import (
"context"
"time"
"github.com/harness/gitness/app/api/controller/user"
"github.com/harness/gitness/cli/provide"
"github.com/harness/gitness/cli/textui"
"gopkg.in/alecthomas/kingpin.v2"
)
// loginCommand holds the CLI inputs for the "login" command.
type loginCommand struct {
	server string // address of the remote server to log in to
}
// run prompts for credentials, logs in against the target server, and
// persists the returned token as the local CLI session.
func (c *loginCommand) run(*kingpin.ParseContext) error {
	ss := provide.NewSession()
	loginIdentifier, password := textui.Credentials()
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	ts, err := provide.OpenClient(c.server).Login(ctx, &user.LoginInput{
		LoginIdentifier: loginIdentifier,
		Password:        password,
	})
	if err != nil {
		return err
	}
	updated := ss.
		SetURI(c.server).
		// login token always has an expiry date
		SetExpiresAt(*ts.Token.ExpiresAt).
		SetAccessToken(ts.AccessToken)
	return updated.Store()
}
// RegisterLogin is a helper function that wires the "login" command.
// (The previous comment incorrectly said "logout".)
func RegisterLogin(app *kingpin.Application) {
	c := &loginCommand{}
	cmd := app.Command("login", "login to the remote server").
		Action(c.run)
	cmd.Arg("server", "server address").
		Default(provide.DefaultServerURI).
		StringVar(&c.server)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/cli/operations/swagger/swagger.go | cli/operations/swagger/swagger.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package swagger
import (
"os"
"github.com/harness/gitness/app/api/openapi"
"gopkg.in/alecthomas/kingpin.v2"
)
// command holds the inputs for the hidden "swagger" command.
type command struct {
	openAPIService openapi.Service // generator producing the OpenAPI spec
	path string // output file path; empty means write to stdout
}
// run generates the OpenAPI specification and writes it as YAML either
// to stdout (when no path is configured) or to the configured file.
func (c *command) run(*kingpin.ParseContext) error {
	spec := c.openAPIService.Generate()
	data, err := spec.MarshalYAML()
	if err != nil {
		// Previously this error was silently discarded, which could
		// write an empty or partial swagger file without any warning.
		return err
	}
	if c.path == "" {
		_, err = os.Stdout.Write(data)
		return err
	}
	return os.WriteFile(c.path, data, 0o600)
}
// Register wires the hidden "swagger" command that generates the
// OpenAPI/Swagger specification file.
func Register(app *kingpin.Application, openAPIService openapi.Service) {
	c := &command{
		openAPIService: openAPIService,
	}
	cmd := app.Command("swagger", "generate swagger file").
		Hidden().
		Action(c.run)
	cmd.Arg("path", "path to save swagger file").
		StringVar(&c.path)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/cli/operations/users/create.go | cli/operations/users/create.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package users
import (
"context"
"encoding/json"
"os"
"text/template"
"time"
"github.com/harness/gitness/cli/provide"
"github.com/harness/gitness/cli/textui"
"github.com/harness/gitness/types"
"github.com/drone/funcmap"
"gopkg.in/alecthomas/kingpin.v2"
)
// createCommand holds the CLI inputs for creating a user.
type createCommand struct {
	email string // email of the user to create
	admin bool // whether the new user is an administrator
	tmpl string // Go template used for text output
	json bool // emit JSON instead of rendering the text template
}
// run prompts for a password, creates the user, and prints the result
// either as JSON or rendered through the configured Go template.
func (c *createCommand) run(*kingpin.ParseContext) error {
	input := &types.User{
		Admin:    c.admin,
		Email:    c.email,
		Password: textui.Password(),
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	created, err := provide.Client().UserCreate(ctx, input)
	if err != nil {
		return err
	}
	if c.json {
		enc := json.NewEncoder(os.Stdout)
		enc.SetIndent("", " ")
		return enc.Encode(created)
	}
	t, err := template.New("_").Funcs(funcmap.Funcs).Parse(c.tmpl)
	if err != nil {
		return err
	}
	return t.Execute(os.Stdout, created)
}
// registerCreate wires the "create" sub-command for creating a user.
func registerCreate(app *kingpin.CmdClause) {
	c := &createCommand{}
	cmd := app.Command("create", "create a user").
		Action(c.run)
	cmd.Arg("email", "user email").
		Required().
		StringVar(&c.email)
	cmd.Arg("admin", "user is admin").
		BoolVar(&c.admin)
	cmd.Flag("json", "json encode the output").
		BoolVar(&c.json)
	// Hidden flag: the output template defaults to userTmpl.
	cmd.Flag("format", "format the output using a Go template").
		Default(userTmpl).
		Hidden().
		StringVar(&c.tmpl)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/cli/operations/users/delete.go | cli/operations/users/delete.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package users
import (
"context"
"time"
"github.com/harness/gitness/cli/provide"
"gopkg.in/alecthomas/kingpin.v2"
)
// deleteCommand holds the CLI inputs for deleting a user.
type deleteCommand struct {
	email string // id or email identifying the user to delete
}
// run deletes the user identified by id or email.
func (c *deleteCommand) run(*kingpin.ParseContext) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	client := provide.Client()
	return client.UserDelete(ctx, c.email)
}
// registerDelete wires the "delete" sub-command for deleting a user.
func registerDelete(app *kingpin.CmdClause) {
	c := &deleteCommand{}
	cmd := app.Command("delete", "delete a user").
		Action(c.run)
	cmd.Arg("id or email", "user id or email").
		Required().
		StringVar(&c.email)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/cli/operations/users/find.go | cli/operations/users/find.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package users
import (
"context"
"encoding/json"
"os"
"text/template"
"time"
"github.com/harness/gitness/cli/provide"
"github.com/drone/funcmap"
"gopkg.in/alecthomas/kingpin.v2"
)
// findCommand holds the CLI inputs for displaying a single user.
type findCommand struct {
	email string // id or email identifying the user to look up
	tmpl string // Go template used for text output
	json bool // emit JSON instead of rendering the text template
}
// run fetches a user by id or email and prints it either as JSON or
// rendered through the configured Go template (with trailing newline).
func (c *findCommand) run(*kingpin.ParseContext) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	found, err := provide.Client().User(ctx, c.email)
	if err != nil {
		return err
	}
	if c.json {
		enc := json.NewEncoder(os.Stdout)
		enc.SetIndent("", " ")
		return enc.Encode(found)
	}
	t, err := template.New("_").Funcs(funcmap.Funcs).Parse(c.tmpl + "\n")
	if err != nil {
		return err
	}
	return t.Execute(os.Stdout, found)
}
// registerFind wires the "find" sub-command for displaying a user.
func registerFind(app *kingpin.CmdClause) {
	c := &findCommand{}
	cmd := app.Command("find", "display user details").
		Action(c.run)
	cmd.Arg("id or email", "user id or email").
		Required().
		StringVar(&c.email)
	cmd.Flag("json", "json encode the output").
		BoolVar(&c.json)
	// Hidden flag: the output template defaults to userTmpl.
	cmd.Flag("format", "format the output using a Go template").
		Default(userTmpl).
		Hidden().
		StringVar(&c.tmpl)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/cli/operations/users/list.go | cli/operations/users/list.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package users
import (
"context"
"encoding/json"
"os"
"text/template"
"time"
"github.com/harness/gitness/cli/provide"
"github.com/harness/gitness/types"
"github.com/drone/funcmap"
"gopkg.in/alecthomas/kingpin.v2"
)
// userTmpl is the default text output template for a single user entry;
// it is shared by the find, create, update and ls sub-commands.
const userTmpl = `
id: {{ .ID }}
email: {{ .Email }}
admin: {{ .Admin }}
`
// listCommand holds the CLI inputs for listing users.
type listCommand struct {
	tmpl string // Go template rendered once per user for text output
	page int // page number to fetch
	size int // page size
	json bool // emit JSON instead of rendering the text template
}
// run lists users and prints them either as one JSON document or by
// rendering each entry through the configured Go template.
func (c *listCommand) run(*kingpin.ParseContext) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	list, err := provide.Client().UserList(ctx, types.UserFilter{
		Size: c.size,
		Page: c.page,
	})
	if err != nil {
		return err
	}
	// Handle JSON output before parsing the template so --json works even
	// with an invalid --format value; this matches the behavior of the
	// sibling find/create/update sub-commands.
	if c.json {
		enc := json.NewEncoder(os.Stdout)
		enc.SetIndent("", " ")
		return enc.Encode(list)
	}
	tmpl, err := template.New("_").Funcs(funcmap.Funcs).Parse(c.tmpl + "\n")
	if err != nil {
		return err
	}
	for _, item := range list {
		if err = tmpl.Execute(os.Stdout, item); err != nil {
			return err
		}
	}
	return nil
}
// registerList wires the "ls" sub-command for listing users.
func registerList(app *kingpin.CmdClause) {
	c := &listCommand{}
	cmd := app.Command("ls", "display a list of users").
		Action(c.run)
	cmd.Flag("page", "page number").
		IntVar(&c.page)
	cmd.Flag("per-page", "page size").
		IntVar(&c.size)
	cmd.Flag("json", "json encode the output").
		BoolVar(&c.json)
	// Hidden flag: the output template defaults to userTmpl.
	cmd.Flag("format", "format the output using a Go template").
		Default(userTmpl).
		Hidden().
		StringVar(&c.tmpl)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/cli/operations/users/users.go | cli/operations/users/users.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package users
import (
"gopkg.in/alecthomas/kingpin.v2"
)
// Register wires the "users" command group for administering users
// (find, list, create, update, delete).
func Register(app *kingpin.Application) {
	cmd := app.Command("users", "manage users")
	registerFind(cmd)
	registerList(cmd)
	registerCreate(cmd)
	registerUpdate(cmd)
	registerDelete(cmd)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/cli/operations/users/update.go | cli/operations/users/update.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package users
import (
"context"
"encoding/json"
"fmt"
"os"
"text/template"
"time"
"github.com/harness/gitness/cli/provide"
"github.com/harness/gitness/types"
"github.com/dchest/uniuri"
"github.com/drone/funcmap"
"github.com/gotidy/ptr"
"gopkg.in/alecthomas/kingpin.v2"
)
type updateCommand struct {
id string
email string
admin bool
demote bool
passgen bool
pass string
tmpl string
json bool
}
func (c *updateCommand) run(*kingpin.ParseContext) error {
in := new(types.UserInput)
if v := c.email; v != "" {
in.Email = ptr.String(v)
}
if v := c.pass; v != "" {
in.Password = ptr.String(v)
}
if v := c.admin; v {
in.Admin = ptr.Bool(v)
}
if v := c.demote; v {
in.Admin = ptr.Bool(false)
}
if c.passgen {
const maxRandomChars = 8
v := uniuri.NewLen(maxRandomChars)
in.Password = ptr.String(v)
fmt.Printf("generated temporary password: %s\n", v)
}
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
user, err := provide.Client().UserUpdate(ctx, c.id, in)
if err != nil {
return err
}
if c.json {
enc := json.NewEncoder(os.Stdout)
enc.SetIndent("", " ")
return enc.Encode(user)
}
tmpl, err := template.New("_").Funcs(funcmap.Funcs).Parse(c.tmpl)
if err != nil {
return err
}
return tmpl.Execute(os.Stdout, user)
}
// helper function registers the user update command.
func registerUpdate(app *kingpin.CmdClause) {
c := &updateCommand{}
cmd := app.Command("update", "update a user").
Action(c.run)
cmd.Arg("id or email", "user id or email").
Required().
StringVar(&c.id)
cmd.Flag("email", "update user email").
StringVar(&c.email)
cmd.Flag("password", "update user password").
StringVar(&c.pass)
cmd.Flag("password-gen", "generate and update user password").
BoolVar(&c.passgen)
cmd.Flag("promote", "promote user to admin").
BoolVar(&c.admin)
cmd.Flag("demote", "demote user from admin").
BoolVar(&c.demote)
cmd.Flag("json", "json encode the output").
BoolVar(&c.json)
cmd.Flag("format", "format the output using a Go template").
Default(userTmpl).
Hidden().
StringVar(&c.tmpl)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/cli/operations/server/redis.go | cli/operations/server/redis.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"strings"
"github.com/harness/gitness/types"
"github.com/go-redis/redis/v8"
)
// ProvideRedis provides a redis client based on the configuration.
// TODO: add support for TLS.
func ProvideRedis(config *types.Config) (redis.UniversalClient, error) {
if config.Redis.SentinelMode {
addrs := strings.Split(config.Redis.SentinelEndpoint, ",")
failoverOptions := &redis.FailoverOptions{
MasterName: config.Redis.SentinelMaster,
SentinelAddrs: addrs,
MaxRetries: config.Redis.MaxRetries,
MinIdleConns: config.Redis.MinIdleConnections,
}
if config.Redis.Password != "" {
failoverOptions.Password = config.Redis.Password
}
return redis.NewFailoverClient(failoverOptions), nil
}
options := &redis.Options{
Addr: config.Redis.Endpoint,
MaxRetries: config.Redis.MaxRetries,
MinIdleConns: config.Redis.MinIdleConnections,
}
if config.Redis.Password != "" {
options.Password = config.Redis.Password
}
return redis.NewClient(options), nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/cli/operations/server/config.go | cli/operations/server/config.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"fmt"
"net/url"
"os"
"path/filepath"
"strings"
"unicode"
"github.com/harness/gitness/app/gitspace/infrastructure"
"github.com/harness/gitness/app/gitspace/orchestrator"
"github.com/harness/gitness/app/gitspace/orchestrator/ide"
"github.com/harness/gitness/app/services/branch"
"github.com/harness/gitness/app/services/cleanup"
"github.com/harness/gitness/app/services/codeowners"
"github.com/harness/gitness/app/services/gitspacedeleteevent"
"github.com/harness/gitness/app/services/gitspaceevent"
"github.com/harness/gitness/app/services/keywordsearch"
"github.com/harness/gitness/app/services/notification"
"github.com/harness/gitness/app/services/trigger"
"github.com/harness/gitness/app/services/webhook"
"github.com/harness/gitness/blob"
"github.com/harness/gitness/events"
gittypes "github.com/harness/gitness/git/types"
"github.com/harness/gitness/infraprovider"
"github.com/harness/gitness/job"
"github.com/harness/gitness/lock"
"github.com/harness/gitness/pubsub"
"github.com/harness/gitness/store/database"
"github.com/harness/gitness/types"
"github.com/kelseyhightower/envconfig"
"golang.org/x/text/runes"
"golang.org/x/text/transform"
"golang.org/x/text/unicode/norm"
)
const (
schemeHTTP = "http"
schemeHTTPS = "https"
schemeSSH = "ssh"
gitnessHomeDir = ".gitness"
blobDir = "blob"
)
// LoadConfig returns the system configuration from the
// host environment.
func LoadConfig() (*types.Config, error) {
config := new(types.Config)
err := envconfig.Process("", config)
if err != nil {
return nil, err
}
config.InstanceID, err = getSanitizedMachineName()
if err != nil {
return nil, fmt.Errorf("unable to ensure that instance ID is set in config: %w", err)
}
err = backfillURLs(config)
if err != nil {
return nil, fmt.Errorf("failed to backfil urls: %w", err)
}
if config.Git.HookPath == "" {
executablePath, err := os.Executable()
if err != nil {
return nil, fmt.Errorf("failed to get path of current executable: %w", err)
}
config.Git.HookPath = executablePath
}
if config.Git.Root == "" {
homedir, err := os.UserHomeDir()
if err != nil {
return nil, err
}
newPath := filepath.Join(homedir, gitnessHomeDir)
config.Git.Root = newPath
oldPath := filepath.Join(homedir, ".gitrpc")
if _, err := os.Stat(oldPath); err == nil {
if err := os.Rename(oldPath, newPath); err != nil {
config.Git.Root = oldPath
}
}
}
return config, nil
}
//nolint:gocognit // refactor if required
func backfillURLs(config *types.Config) error {
// default values for HTTP
// TODO: once we actually use the config.HTTP.Proto, we have to update that here.
scheme, host, port, path := schemeHTTP, "localhost", "", ""
if config.HTTP.Host != "" {
host = config.HTTP.Host
}
// by default drop scheme's default port
if config.HTTP.Port > 0 &&
(scheme != schemeHTTP || config.HTTP.Port != 80) &&
(scheme != schemeHTTPS || config.HTTP.Port != 443) {
port = fmt.Sprint(config.HTTP.Port)
}
// default values for SSH
sshHost, sshPort := "localhost", ""
if config.SSH.Host != "" {
sshHost = config.SSH.Host
}
// by default drop scheme's default port
if config.SSH.Port > 0 && config.SSH.Port != 22 {
sshPort = fmt.Sprint(config.SSH.Port)
}
// backfil internal URLS before continuing override with user provided base (which is external facing)
if config.URL.Internal == "" {
config.URL.Internal = combineToRawURL(scheme, "localhost", port, "")
}
if config.URL.Container == "" {
config.URL.Container = combineToRawURL(scheme, "host.docker.internal", port, "")
}
// override base with whatever user explicit override
//nolint:nestif // simple conditional override of all elements
if config.URL.Base != "" {
u, err := url.Parse(config.URL.Base)
if err != nil {
return fmt.Errorf("failed to parse base url '%s': %w", config.URL.Base, err)
}
if u.Scheme != schemeHTTP && u.Scheme != schemeHTTPS {
return fmt.Errorf(
"base url scheme '%s' is not supported (valid values: %v)",
u.Scheme,
[]string{
schemeHTTP,
schemeHTTPS,
},
)
}
// url parsing allows empty hostname - we don't want that
if u.Hostname() == "" {
return fmt.Errorf("a non-empty base url host has to be provided")
}
// take everything as is (e.g. if user explicitly adds port 80 for http we take it)
scheme = u.Scheme
host = u.Hostname()
port = u.Port()
path = u.Path
// overwrite sshhost with base url host, but keep port as is
sshHost = u.Hostname()
}
// backfill external facing URLs
if config.URL.GitSSH == "" {
config.URL.GitSSH = combineToRawURL(schemeSSH, sshHost, sshPort, "")
}
// create base URL object
baseURLRaw := combineToRawURL(scheme, host, port, path)
baseURL, err := url.Parse(baseURLRaw)
if err != nil {
return fmt.Errorf("failed to parse derived base url '%s': %w", baseURLRaw, err)
}
// backfill all external URLs that weren't explicitly overwritten
if config.URL.Base == "" {
config.URL.Base = baseURL.String()
}
if config.URL.API == "" {
config.URL.API = baseURL.JoinPath("api").String()
}
if config.URL.Git == "" {
config.URL.Git = baseURL.JoinPath("git").String()
}
if config.URL.UI == "" {
config.URL.UI = baseURL.String()
}
if config.URL.Registry == "" {
config.URL.Registry = combineToRawURL(scheme, "host.docker.internal", port, "")
}
return nil
}
func combineToRawURL(scheme, host, port, path string) string {
urlRAW := scheme + "://" + host
// only add port if explicitly provided
if port != "" {
urlRAW += ":" + port
}
// only add path if it's not empty and non-root
path = strings.Trim(path, "/")
if path != "" {
urlRAW += "/" + path
}
return urlRAW
}
// getSanitizedMachineName gets the name of the machine and returns it in sanitized format.
func getSanitizedMachineName() (string, error) {
// use the hostname as default id of the instance
hostName, err := os.Hostname()
if err != nil {
return "", err
}
// Always cast to lower and remove all unwanted chars
// NOTE: this could theoretically lead to overlaps, then it should be passed explicitly
// NOTE: for k8s names/ids below modifications are all noops
// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/
// The following code will:
// * remove invalid runes
// * remove diacritical marks (ie "smörgåsbord" to "smorgasbord")
// * lowercase A-Z to a-z
// * leave only a-z, 0-9, '-', '.' and replace everything else with '_'
hostName, _, err = transform.String(
transform.Chain(
norm.NFD,
runes.ReplaceIllFormed(),
runes.Remove(runes.In(unicode.Mn)),
runes.Map(
func(r rune) rune {
switch {
case 'A' <= r && r <= 'Z':
return r + 32
case 'a' <= r && r <= 'z':
return r
case '0' <= r && r <= '9':
return r
case r == '-', r == '.':
return r
default:
return '_'
}
},
),
norm.NFC,
),
hostName,
)
if err != nil {
return "", err
}
return hostName, nil
}
// ProvideDatabaseConfig loads the database config from the main config.
func ProvideDatabaseConfig(config *types.Config) database.Config {
return database.Config{
Driver: config.Database.Driver,
Datasource: config.Database.Datasource,
}
}
// ProvideBlobStoreConfig loads the blob store config from the main config.
func ProvideBlobStoreConfig(config *types.Config) (blob.Config, error) {
// Prefix home directory in case of filesystem blobstore
if config.BlobStore.Provider == blob.ProviderFileSystem && config.BlobStore.Bucket == "" {
var homedir string
homedir, err := os.UserHomeDir()
if err != nil {
return blob.Config{}, err
}
config.BlobStore.Bucket = filepath.Join(homedir, gitnessHomeDir, blobDir)
}
return blob.Config{
Provider: config.BlobStore.Provider,
Bucket: config.BlobStore.Bucket,
KeyPath: config.BlobStore.KeyPath,
TargetPrincipal: config.BlobStore.TargetPrincipal,
ImpersonationLifetime: config.BlobStore.ImpersonationLifetime,
}, nil
}
// ProvideGitConfig loads the git config from the main config.
func ProvideGitConfig(config *types.Config) gittypes.Config {
return gittypes.Config{
Trace: config.Git.Trace,
Root: config.Git.Root,
TmpDir: config.Git.TmpDir,
HookPath: config.Git.HookPath,
LastCommitCache: gittypes.LastCommitCacheConfig{
Mode: config.Git.LastCommitCache.Mode,
Duration: config.Git.LastCommitCache.Duration,
},
}
}
// ProvideEventsConfig loads the events config from the main config.
func ProvideEventsConfig(config *types.Config) events.Config {
return events.Config{
Mode: config.Events.Mode,
Namespace: config.Events.Namespace,
MaxStreamLength: config.Events.MaxStreamLength,
ApproxMaxStreamLength: config.Events.ApproxMaxStreamLength,
}
}
// ProvideWebhookConfig loads the webhook service config from the main config.
func ProvideWebhookConfig(config *types.Config) webhook.Config {
return webhook.Config{
UserAgentIdentity: config.Webhook.UserAgentIdentity,
HeaderIdentity: config.Webhook.HeaderIdentity,
EventReaderName: config.InstanceID,
Concurrency: config.Webhook.Concurrency,
MaxRetries: config.Webhook.MaxRetries,
AllowPrivateNetwork: config.Webhook.AllowPrivateNetwork,
AllowLoopback: config.Webhook.AllowLoopback,
InternalSecret: config.Webhook.InternalSecret,
}
}
func ProvideNotificationConfig(config *types.Config) notification.Config {
return notification.Config{
EventReaderName: config.InstanceID,
Concurrency: config.Notification.Concurrency,
MaxRetries: config.Notification.MaxRetries,
}
}
// ProvideTriggerConfig loads the trigger service config from the main config.
func ProvideTriggerConfig(config *types.Config) trigger.Config {
return trigger.Config{
EventReaderName: config.InstanceID,
Concurrency: config.Webhook.Concurrency,
MaxRetries: config.Webhook.MaxRetries,
}
}
func ProvideBranchConfig(config *types.Config) branch.Config {
return branch.Config{
EventReaderName: config.InstanceID,
Concurrency: config.Branch.Concurrency,
MaxRetries: config.Branch.MaxRetries,
}
}
// ProvideLockConfig generates the `lock` package config from the Harness config.
func ProvideLockConfig(config *types.Config) lock.Config {
return lock.Config{
App: config.Lock.AppNamespace,
Namespace: config.Lock.DefaultNamespace,
Provider: config.Lock.Provider,
Expiry: config.Lock.Expiry,
Tries: config.Lock.Tries,
RetryDelay: config.Lock.RetryDelay,
DriftFactor: config.Lock.DriftFactor,
TimeoutFactor: config.Lock.TimeoutFactor,
}
}
// ProvidePubsubConfig loads the pubsub config from the main config.
func ProvidePubsubConfig(config *types.Config) pubsub.Config {
return pubsub.Config{
App: config.PubSub.AppNamespace,
Namespace: config.PubSub.DefaultNamespace,
Provider: config.PubSub.Provider,
HealthInterval: config.PubSub.HealthInterval,
SendTimeout: config.PubSub.SendTimeout,
ChannelSize: config.PubSub.ChannelSize,
}
}
// ProvideCleanupConfig loads the cleanup service config from the main config.
func ProvideCleanupConfig(config *types.Config) cleanup.Config {
return cleanup.Config{
WebhookExecutionsRetentionTime: config.Webhook.RetentionTime,
DeletedRepositoriesRetentionTime: config.Repos.DeletedRetentionTime,
}
}
// ProvideCodeOwnerConfig loads the codeowner config from the main config.
func ProvideCodeOwnerConfig(config *types.Config) codeowners.Config {
return codeowners.Config{
FilePaths: config.CodeOwners.FilePaths,
}
}
// ProvideKeywordSearchConfig loads the keyword search service config from the main config.
func ProvideKeywordSearchConfig(config *types.Config) keywordsearch.Config {
return keywordsearch.Config{
EventReaderName: config.InstanceID,
Concurrency: config.KeywordSearch.Concurrency,
MaxRetries: config.KeywordSearch.MaxRetries,
}
}
func ProvideJobsConfig(config *types.Config) job.Config {
return job.Config{
InstanceID: config.InstanceID,
BackgroundJobsMaxRunning: config.BackgroundJobs.MaxRunning,
BackgroundJobsRetentionTime: config.BackgroundJobs.RetentionTime,
}
}
// ProvideDockerConfig loads config for Docker.
func ProvideDockerConfig(config *types.Config) (*infraprovider.DockerConfig, error) {
if config.Docker.MachineHostName == "" {
gitnessBaseURL, err := url.Parse(config.URL.Base)
if err != nil {
return nil, fmt.Errorf("unable to parse Harness base URL %s: %w", gitnessBaseURL, err)
}
config.Docker.MachineHostName = gitnessBaseURL.Hostname()
}
return &infraprovider.DockerConfig{
DockerHost: config.Docker.Host,
DockerAPIVersion: config.Docker.APIVersion,
DockerCertPath: config.Docker.CertPath,
DockerTLSVerify: config.Docker.TLSVerify,
DockerMachineHostName: config.Docker.MachineHostName,
}, nil
}
// ProvideIDEVSCodeWebConfig loads the VSCode Web IDE config from the main config.
func ProvideIDEVSCodeWebConfig(config *types.Config) *ide.VSCodeWebConfig {
return &ide.VSCodeWebConfig{
Port: config.IDE.VSCodeWeb.Port,
}
}
// ProvideIDEVSCodeConfig loads the VSCode IDE config from the main config.
func ProvideIDEVSCodeConfig(config *types.Config) *ide.VSCodeConfig {
return &ide.VSCodeConfig{
Port: config.IDE.VSCode.Port,
PluginName: config.IDE.VSCode.PluginName,
}
}
// ProvideIDECursorConfig loads the Cursor IDE config from the main config.
func ProvideIDECursorConfig(config *types.Config) *ide.CursorConfig {
return &ide.CursorConfig{
Port: config.IDE.Cursor.Port,
}
}
// ProvideIDEWindsurfConfig loads the Windsurf IDE config from the main config.
func ProvideIDEWindsurfConfig(config *types.Config) *ide.WindsurfConfig {
return &ide.WindsurfConfig{
Port: config.IDE.Windsurf.Port,
}
}
// ProvideIDEJetBrainsConfig loads the IdeType IDE config from the main config.
func ProvideIDEJetBrainsConfig(config *types.Config) *ide.JetBrainsIDEConfig {
return &ide.JetBrainsIDEConfig{
IntelliJPort: config.IDE.Intellij.Port,
GolandPort: config.IDE.Goland.Port,
PyCharmPort: config.IDE.PyCharm.Port,
WebStormPort: config.IDE.WebStorm.Port,
PHPStormPort: config.IDE.PHPStorm.Port,
CLionPort: config.IDE.CLion.Port,
RubyMinePort: config.IDE.RubyMine.Port,
RiderPort: config.IDE.Rider.Port,
}
}
// ProvideGitspaceOrchestratorConfig loads the Gitspace orchestrator config from the main config.
func ProvideGitspaceOrchestratorConfig(config *types.Config) *orchestrator.Config {
return &orchestrator.Config{
DefaultBaseImage: config.Gitspace.DefaultBaseImage,
}
}
// ProvideGitspaceInfraProvisionerConfig loads the Gitspace infra provisioner config from the main config.
func ProvideGitspaceInfraProvisionerConfig(config *types.Config) *infrastructure.Config {
return &infrastructure.Config{
AgentPort: config.Gitspace.AgentPort,
}
}
// ProvideGitspaceEventConfig loads the gitspace event service config from the main config.
func ProvideGitspaceEventConfig(config *types.Config) *gitspaceevent.Config {
return &gitspaceevent.Config{
EventReaderName: config.InstanceID,
Concurrency: config.Gitspace.Events.Concurrency,
MaxRetries: config.Gitspace.Events.MaxRetries,
TimeoutInMins: config.Gitspace.Events.TimeoutInMins,
}
}
// ProvideGitspaceDeleteEventConfig loads the gitspace delete event service config from the main config.
func ProvideGitspaceDeleteEventConfig(config *types.Config) *gitspacedeleteevent.Config {
return &gitspacedeleteevent.Config{
EventReaderName: config.InstanceID,
Concurrency: config.Gitspace.Events.Concurrency,
MaxRetries: config.Gitspace.Events.MaxRetries,
TimeoutInMins: config.Gitspace.Events.TimeoutInMins,
}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/cli/operations/server/system.go | cli/operations/server/system.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"github.com/harness/gitness/app/bootstrap"
"github.com/harness/gitness/app/pipeline/resolver"
"github.com/harness/gitness/app/server"
"github.com/harness/gitness/app/services"
"github.com/harness/gitness/ssh"
"github.com/drone/runner-go/poller"
)
// System stores high level System sub-routines.
type System struct {
bootstrap bootstrap.Bootstrap
server *server.Server
sshServer *ssh.Server
resolverManager *resolver.Manager
poller *poller.Poller
services services.Services
}
// NewSystem returns a new system structure.
func NewSystem(
bootstrap bootstrap.Bootstrap,
server *server.Server,
sshServer *ssh.Server,
poller *poller.Poller,
resolverManager *resolver.Manager,
services services.Services,
) *System {
return &System{
bootstrap: bootstrap,
server: server,
sshServer: sshServer,
poller: poller,
resolverManager: resolverManager,
services: services,
}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/cli/operations/server/config_test.go | cli/operations/server/config_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"testing"
"github.com/harness/gitness/types"
"github.com/stretchr/testify/require"
)
func TestBackfillURLsHTTPEmptyPort(t *testing.T) {
config := &types.Config{}
err := backfillURLs(config)
require.NoError(t, err)
require.Equal(t, "http://localhost", config.URL.Internal)
require.Equal(t, "http://host.docker.internal", config.URL.Container)
require.Equal(t, "http://localhost/api", config.URL.API)
require.Equal(t, "http://localhost/git", config.URL.Git)
require.Equal(t, "http://localhost", config.URL.UI)
}
func TestBackfillURLsSSHEmptyPort(t *testing.T) {
config := &types.Config{}
err := backfillURLs(config)
require.NoError(t, err)
require.Equal(t, "ssh://localhost", config.URL.GitSSH)
}
func TestBackfillURLsHTTPHostPort(t *testing.T) {
config := &types.Config{}
config.HTTP.Host = "myhost"
config.HTTP.Port = 1234
err := backfillURLs(config)
require.NoError(t, err)
require.Equal(t, "http://localhost:1234", config.URL.Internal)
require.Equal(t, "http://host.docker.internal:1234", config.URL.Container)
require.Equal(t, "http://myhost:1234/api", config.URL.API)
require.Equal(t, "http://myhost:1234/git", config.URL.Git)
require.Equal(t, "http://myhost:1234", config.URL.UI)
}
func TestBackfillURLsSSHHostPort(t *testing.T) {
config := &types.Config{}
config.SSH.Host = "myhost"
config.SSH.Port = 1234
err := backfillURLs(config)
require.NoError(t, err)
require.Equal(t, "ssh://myhost:1234", config.URL.GitSSH)
}
func TestBackfillURLsHTTPPortStripsDefaultHTTP(t *testing.T) {
config := &types.Config{}
config.HTTP.Port = 80
err := backfillURLs(config)
require.NoError(t, err)
require.Equal(t, "http://localhost", config.URL.Internal)
require.Equal(t, "http://host.docker.internal", config.URL.Container)
require.Equal(t, "http://localhost/api", config.URL.API)
require.Equal(t, "http://localhost/git", config.URL.Git)
require.Equal(t, "http://localhost", config.URL.UI)
}
// TODO: Update once we add proper https support - as of now nothing is stripped!
func TestBackfillURLsHTTPPortStripsDefaultHTTPS(t *testing.T) {
config := &types.Config{}
config.HTTP.Port = 443
err := backfillURLs(config)
require.NoError(t, err)
require.Equal(t, "http://localhost:443", config.URL.Internal)
require.Equal(t, "http://host.docker.internal:443", config.URL.Container)
require.Equal(t, "http://localhost:443/api", config.URL.API)
require.Equal(t, "http://localhost:443/git", config.URL.Git)
require.Equal(t, "http://localhost:443", config.URL.UI)
}
func TestBackfillURLsSSHPortStripsDefault(t *testing.T) {
config := &types.Config{}
config.SSH.Port = 22
err := backfillURLs(config)
require.NoError(t, err)
require.Equal(t, "ssh://localhost", config.URL.GitSSH)
}
func TestBackfillURLsBaseInvalidProtocol(t *testing.T) {
config := &types.Config{}
config.URL.Base = "abc://xyz:4321/test"
err := backfillURLs(config)
require.ErrorContains(t, err, "base url scheme 'abc' is not supported")
}
func TestBackfillURLsBaseNoHost(t *testing.T) {
config := &types.Config{}
config.URL.Base = "http:///test"
err := backfillURLs(config)
require.ErrorContains(t, err, "a non-empty base url host has to be provided")
}
func TestBackfillURLsBaseNoHostWithPort(t *testing.T) {
config := &types.Config{}
config.URL.Base = "http://:4321/test"
err := backfillURLs(config)
require.ErrorContains(t, err, "a non-empty base url host has to be provided")
}
func TestBackfillURLsBaseInvalidPort(t *testing.T) {
config := &types.Config{}
config.URL.Base = "http://localhost:abc/test"
err := backfillURLs(config)
require.ErrorContains(t, err, "invalid port \":abc\" after host")
}
func TestBackfillURLsBase(t *testing.T) {
config := &types.Config{}
config.HTTP.Host = "xyz"
config.HTTP.Port = 1234
config.SSH.Host = "kmno"
config.SSH.Port = 421
config.URL.Base = "https://xyz:4321/test"
err := backfillURLs(config)
require.NoError(t, err)
require.Equal(t, "http://localhost:1234", config.URL.Internal)
require.Equal(t, "http://host.docker.internal:1234", config.URL.Container)
require.Equal(t, "https://xyz:4321/test/api", config.URL.API)
require.Equal(t, "https://xyz:4321/test/git", config.URL.Git)
require.Equal(t, "https://xyz:4321/test", config.URL.UI)
require.Equal(t, "ssh://xyz:421", config.URL.GitSSH)
}
func TestBackfillURLsBaseDefaultPortHTTP(t *testing.T) {
config := &types.Config{}
config.HTTP.Port = 1234
config.URL.Base = "http://xyz/test"
err := backfillURLs(config)
require.NoError(t, err)
require.Equal(t, "http://localhost:1234", config.URL.Internal)
require.Equal(t, "http://host.docker.internal:1234", config.URL.Container)
require.Equal(t, "http://xyz/test/api", config.URL.API)
require.Equal(t, "http://xyz/test/git", config.URL.Git)
require.Equal(t, "http://xyz/test", config.URL.UI)
}
func TestBackfillURLsBaseDefaultPortHTTPExplicit(t *testing.T) {
config := &types.Config{}
config.HTTP.Port = 1234
config.URL.Base = "http://xyz:80/test"
err := backfillURLs(config)
require.NoError(t, err)
require.Equal(t, "http://localhost:1234", config.URL.Internal)
require.Equal(t, "http://host.docker.internal:1234", config.URL.Container)
require.Equal(t, "http://xyz:80/test/api", config.URL.API)
require.Equal(t, "http://xyz:80/test/git", config.URL.Git)
require.Equal(t, "http://xyz:80/test", config.URL.UI)
}
func TestBackfillURLsBaseDefaultPortHTTPS(t *testing.T) {
config := &types.Config{}
config.HTTP.Port = 1234
config.URL.Base = "https://xyz/test"
err := backfillURLs(config)
require.NoError(t, err)
require.Equal(t, "http://localhost:1234", config.URL.Internal)
require.Equal(t, "http://host.docker.internal:1234", config.URL.Container)
require.Equal(t, "https://xyz/test/api", config.URL.API)
require.Equal(t, "https://xyz/test/git", config.URL.Git)
require.Equal(t, "https://xyz/test", config.URL.UI)
}
func TestBackfillURLsBaseDefaultPortHTTPSExplicit(t *testing.T) {
config := &types.Config{}
config.HTTP.Port = 1234
config.URL.Base = "https://xyz:443/test"
err := backfillURLs(config)
require.NoError(t, err)
require.Equal(t, "http://localhost:1234", config.URL.Internal)
require.Equal(t, "http://host.docker.internal:1234", config.URL.Container)
require.Equal(t, "https://xyz:443/test/api", config.URL.API)
require.Equal(t, "https://xyz:443/test/git", config.URL.Git)
require.Equal(t, "https://xyz:443/test", config.URL.UI)
}
func TestBackfillURLsBaseRootPathStripped(t *testing.T) {
config := &types.Config{}
config.HTTP.Port = 1234
config.URL.Base = "https://xyz:4321/"
err := backfillURLs(config)
require.NoError(t, err)
require.Equal(t, "http://localhost:1234", config.URL.Internal)
require.Equal(t, "http://host.docker.internal:1234", config.URL.Container)
require.Equal(t, "https://xyz:4321/api", config.URL.API)
require.Equal(t, "https://xyz:4321/git", config.URL.Git)
require.Equal(t, "https://xyz:4321", config.URL.UI)
}
func TestBackfillURLsSSHBasePathIgnored(t *testing.T) {
config := &types.Config{}
config.SSH.Port = 1234
config.URL.Base = "https://xyz:4321/abc"
err := backfillURLs(config)
require.NoError(t, err)
require.Equal(t, "ssh://xyz:1234", config.URL.GitSSH)
}
func TestBackfillURLsCustom(t *testing.T) {
config := &types.Config{}
config.HTTP.Host = "abc"
config.HTTP.Port = 1234
config.SSH.Host = "abc"
config.SSH.Port = 1234
config.URL.Internal = "http://APIInternal/APIInternal/p"
config.URL.Container = "https://GitContainer/GitContainer/p"
config.URL.Base = "https://xyz:4321/test"
config.URL.API = "http://API:1111/API/p"
config.URL.Git = "https://GIT:443/GIT/p"
config.URL.UI = "http://UI:80/UI/p"
config.URL.GitSSH = "ssh://GITSSH:21/GITSSH/p"
err := backfillURLs(config)
require.NoError(t, err)
require.Equal(t, "http://APIInternal/APIInternal/p", config.URL.Internal)
require.Equal(t, "https://GitContainer/GitContainer/p", config.URL.Container)
require.Equal(t, "http://API:1111/API/p", config.URL.API)
require.Equal(t, "https://GIT:443/GIT/p", config.URL.Git)
require.Equal(t, "http://UI:80/UI/p", config.URL.UI)
require.Equal(t, "ssh://GITSSH:21/GITSSH/p", config.URL.GitSSH)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/cli/operations/server/server.go | cli/operations/server/server.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"fmt"
"os"
"os/signal"
"syscall"
"time"
"github.com/harness/gitness/app/pipeline/logger"
"github.com/harness/gitness/profiler"
"github.com/harness/gitness/types"
"github.com/harness/gitness/version"
"github.com/joho/godotenv"
"github.com/mattn/go-isatty"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"golang.org/x/sync/errgroup"
"gopkg.in/alecthomas/kingpin.v2"
)
type command struct {
envfile string
enableCI bool
initializer func(context.Context, *types.Config) (*System, error)
}
// run is the kingpin action of the "server" command. It loads configuration,
// wires and bootstraps the system, starts background services plus the HTTP
// (and optionally SSH) server and CI workers, then blocks until an OS
// interrupt/termination signal arrives or a background routine fails, at
// which point it shuts everything down gracefully (bounded by
// config.GracefulShutdownTime).
func (c *command) run(*kingpin.ParseContext) error {
	// Create context that listens for the interrupt signal from the OS.
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer stop()

	// load environment variables from file.
	// no error handling needed when file is not present
	_ = godotenv.Load(c.envfile)

	// create the system configuration store by loading
	// data from the environment.
	config, err := LoadConfig()
	if err != nil {
		return fmt.Errorf("encountered an error while loading configuration: %w", err)
	}

	// configure the log level
	SetupLogger(config)

	// configure profiler
	SetupProfiler(config)

	// add logger to context
	// NOTE: this local deliberately shadows the imported zerolog/log package.
	log := log.Logger.With().Logger()
	ctx = log.WithContext(ctx)

	// initialize system
	system, err := c.initializer(ctx, config)
	if err != nil {
		return fmt.Errorf("encountered an error while wiring the system: %w", err)
	}

	// bootstrap the system
	err = system.bootstrap(ctx)
	if err != nil {
		return fmt.Errorf("encountered an error while bootstrapping the system: %w", err)
	}

	// gCtx is canceled if any of the following occurs:
	// - any go routine launched with g encounters an error
	// - ctx is canceled
	g, gCtx := errgroup.WithContext(ctx)

	// register background services and run the job scheduler.
	g.Go(func() error {
		// initialize metric collector
		if system.services.MetricCollector != nil {
			if err := system.services.MetricCollector.Register(gCtx); err != nil {
				log.Error().Err(err).Msg("failed to register metric collector")
				return err
			}
		}

		if system.services.RepoSizeCalculator != nil {
			if err := system.services.RepoSizeCalculator.Register(gCtx); err != nil {
				log.Error().Err(err).Msg("failed to register repo size calculator")
				return err
			}
		}

		if err := system.services.Cleanup.Register(gCtx); err != nil {
			log.Error().Err(err).Msg("failed to register cleanup service")
			return err
		}

		return system.services.JobScheduler.Run(gCtx)
	})

	// start server
	gHTTP, shutdownHTTP := system.server.ListenAndServe()
	g.Go(gHTTP.Wait)

	if c.enableCI {
		// start populating plugins
		// NOTE: a populate failure is logged but does not abort the server.
		g.Go(func() error {
			err := system.resolverManager.Populate(ctx)
			if err != nil {
				log.Error().Err(err).Msg("could not populate plugins")
			}
			return nil
		})

		// start poller for CI build executions.
		g.Go(func() error {
			system.poller.Poll(
				logger.WithWrappedZerolog(ctx),
				config.CI.ParallelWorkers,
			)
			return nil
		})
	}

	if config.SSH.Enable {
		g.Go(func() error {
			log.Err(system.sshServer.ListenAndServe()).Send()
			return nil
		})
	}

	log.Info().
		Str("host", config.HTTP.Host).
		Int("port", config.HTTP.Port).
		Str("revision", version.GitCommit).
		Str("repository", version.GitRepository).
		Stringer("version", version.Version).
		Msg("server started")

	// wait until the error group context is done
	<-gCtx.Done()

	// restore default behavior on the interrupt signal and notify user of shutdown.
	stop()
	log.Info().Msg("shutting down gracefully (press Ctrl+C again to force)")

	// shutdown servers gracefully
	shutdownCtx, cancel := context.WithTimeout(context.Background(), config.GracefulShutdownTime)
	defer cancel()

	if sErr := shutdownHTTP(shutdownCtx); sErr != nil {
		log.Err(sErr).Msg("failed to shutdown http server gracefully")
	}

	if config.SSH.Enable {
		if err := system.sshServer.Shutdown(shutdownCtx); err != nil {
			log.Err(err).Msg("failed to shutdown ssh server gracefully")
		}
	}

	// shutdown instrumentation
	err = system.services.Instrumentation.Close(shutdownCtx)
	if err != nil {
		log.Err(err).Msg("failed to close instrumentation gracefully")
	}

	// shutdown job scheduler
	system.services.JobScheduler.WaitJobsDone(shutdownCtx)

	log.Info().Msg("wait for subroutines to complete")
	err = g.Wait()

	return err
}
// SetupLogger configures the global logger from the loaded configuration:
// log level (trace > debug > info), timestamp format, and pretty console
// output when attached to a terminal.
func SetupLogger(config *types.Config) {
	// pick the most verbose level the configuration asks for.
	level := zerolog.InfoLevel
	if config.Debug {
		level = zerolog.DebugLevel
	}
	if config.Trace {
		level = zerolog.TraceLevel
	}
	zerolog.SetGlobalLevel(level)

	// configure time format (ignored if running in terminal)
	zerolog.TimeFieldFormat = time.RFC3339Nano

	// pretty-print logs when stdout is attached to a tty.
	if isatty.IsTerminal(os.Stdout.Fd()) {
		writer := zerolog.ConsoleWriter{
			Out:        os.Stderr,
			NoColor:    false,
			TimeFormat: "15:04:05.999",
		}
		log.Logger = log.Output(writer)
	}
}
// SetupProfiler starts the configured profiler, if any. An unknown or empty
// profiler type is not an error - profiling is simply skipped.
func SetupProfiler(config *types.Config) {
	profilerType, parsed := profiler.ParseType(config.Profiler.Type)
	if !parsed {
		log.Info().Msgf("No valid profiler so skipping profiling ['%s']", config.Profiler.Type)
		return
	}
	gitnessProfiler, err := profiler.New(profilerType)
	if err != nil {
		// Previously this error was silently discarded and StartProfiling was
		// called regardless; skip profiling instead of risking a nil profiler.
		log.Warn().Err(err).Msgf("failed to create profiler ['%s'], skipping profiling", config.Profiler.Type)
		return
	}
	gitnessProfiler.StartProfiling(config.Profiler.ServiceName, version.Version.String())
}
// Register the server command.
func Register(app *kingpin.Application, initializer func(context.Context, *types.Config) (*System, error)) {
	c := &command{initializer: initializer}

	cmd := app.Command("server", "starts the server").Action(c.run)

	cmd.Arg("envfile", "load the environment variable file").
		Default("").
		StringVar(&c.envfile)

	cmd.Flag("enable-ci", "start ci runners for build executions").
		Default("true").
		Envar("ENABLE_CI").
		BoolVar(&c.enableCI)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/cli/operations/migrate/current.go | cli/operations/migrate/current.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package migrate
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/app/store/database/migrate"
"gopkg.in/alecthomas/kingpin.v2"
)
// commandCurrent holds the CLI arguments of the "migrate current" sub-command.
type commandCurrent struct {
	envfile string // optional path to an env file loaded before reading config
}
// run prints the current database schema version to stdout.
func (c *commandCurrent) run(*kingpin.ParseContext) error {
	// bound the whole operation (config load, connect, query) to one minute.
	ctx, cancel := context.WithTimeout(setupLoggingContext(context.Background()), time.Minute)
	defer cancel()

	db, err := getDB(ctx, c.envfile)
	if err != nil {
		return err
	}

	version, err := migrate.Current(ctx, db)
	if err != nil {
		return err
	}

	fmt.Println(version)

	return nil
}
// registerCurrent wires the "current" sub-command under the migrate command.
func registerCurrent(app *kingpin.CmdClause) {
	c := new(commandCurrent)

	cmd := app.Command("current", "display the current version of the database").
		Action(c.run)

	cmd.Arg("envfile", "load the environment variable file").
		Default("").
		StringVar(&c.envfile)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/cli/operations/migrate/migrate.go | cli/operations/migrate/migrate.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package migrate
import (
"context"
"fmt"
"github.com/harness/gitness/cli/operations/server"
"github.com/harness/gitness/store/database"
"github.com/jmoiron/sqlx"
"github.com/joho/godotenv"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"gopkg.in/alecthomas/kingpin.v2"
)
// Register the migrate command and its sub-commands ("current", "to").
func Register(app *kingpin.Application) {
	cmd := app.Command("migrate", "database migration tool")
	registerCurrent(cmd)
	registerTo(cmd)
}
// getDB loads the server configuration from the environment (optionally
// seeded from the given env file) and opens a database handle.
func getDB(ctx context.Context, envfile string) (*sqlx.DB, error) {
	// best effort: a missing env file is not an error.
	_ = godotenv.Load(envfile)

	config, err := server.LoadConfig()
	if err != nil {
		return nil, fmt.Errorf("failed to load configuration: %w", err)
	}

	db, err := database.Connect(ctx, config.Database.Driver, config.Database.Datasource)
	if err != nil {
		return nil, fmt.Errorf("failed to create database handle: %w", err)
	}

	return db, nil
}
// setupLoggingContext enables trace-level logging and returns a context that
// carries the default logger.
// NOTE: this raises the *global* zerolog level for the whole process.
func setupLoggingContext(ctx context.Context) context.Context {
	zerolog.SetGlobalLevel(zerolog.TraceLevel)
	log := log.Logger.With().Logger()
	return log.WithContext(ctx)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/cli/operations/migrate/to.go | cli/operations/migrate/to.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package migrate
import (
"context"
"time"
"github.com/harness/gitness/app/store/database/migrate"
"gopkg.in/alecthomas/kingpin.v2"
)
// commandTo holds the CLI arguments of the "migrate to" sub-command.
type commandTo struct {
	envfile string // optional path to an env file loaded before reading config
	version string // target database schema version
}
// run migrates the database schema to the requested version.
func (c *commandTo) run(_ *kingpin.ParseContext) error {
	// bound the whole operation (config load, connect, migrate) to one minute.
	ctx, cancel := context.WithTimeout(setupLoggingContext(context.Background()), time.Minute)
	defer cancel()

	db, err := getDB(ctx, c.envfile)
	if err != nil {
		return err
	}

	return migrate.To(ctx, db, c.version)
}
// registerTo wires the "to" sub-command under the migrate command.
func registerTo(app *kingpin.CmdClause) {
	c := new(commandTo)

	cmd := app.Command("to", "migrates the database to the provided version").
		Action(c.run)

	cmd.Arg("version", "database version to migrate to").
		Required().
		StringVar(&c.version)

	cmd.Arg("envfile", "load the environment variable file").
		Default("").
		StringVar(&c.envfile)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/cli/session/session.go | cli/session/session.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package session
import (
"encoding/json"
"errors"
"fmt"
"os"
"time"
)
var (
	// ErrTokenExpired indicates that the stored access token passed its
	// expiry time and the user has to log in again.
	ErrTokenExpired = errors.New("token is expired, please login")
)

// Session is a CLI session persisted as a JSON file on disk.
type Session struct {
	path string // backing file location; not serialized

	URI         string `json:"uri"`
	ExpiresAt   int64  `json:"expires_at"` // unix seconds
	AccessToken string `json:"access_token"`
}

// New creates a new session that will be stored at the provided path.
func New(path string) Session {
	return Session{path: path}
}

// LoadFromPath loads an existing session from a file and verifies that its
// token has not yet expired.
func LoadFromPath(path string) (Session, error) {
	out := Session{path: path}

	raw, err := os.ReadFile(path)
	if err != nil {
		return out, fmt.Errorf("failed to read session from file: %w", err)
	}

	if err = json.Unmarshal(raw, &out); err != nil {
		return out, fmt.Errorf("failed to deserialize session: %w", err)
	}

	if out.ExpiresAt < time.Now().Unix() {
		return out, ErrTokenExpired
	}

	return out, nil
}

// Store writes the session to its backing file (owner read/write only).
func (s Session) Store() error {
	raw, err := json.Marshal(s)
	if err != nil {
		return fmt.Errorf("failed to serialize session: %w", err)
	}

	if err = os.WriteFile(s.path, raw, 0o600); err != nil {
		return fmt.Errorf("failed to write session to file: %w", err)
	}

	return nil
}

// SetURI returns a copy of the session with the server URI replaced.
func (s Session) SetURI(uri string) Session {
	s.URI = uri
	return s
}

// SetExpiresAt returns a copy of the session with the expiry time replaced.
func (s Session) SetExpiresAt(expiresAt int64) Session {
	s.ExpiresAt = expiresAt
	return s
}

// SetAccessToken returns a copy of the session with the access token replaced.
func (s Session) SetAccessToken(token string) Session {
	s.AccessToken = token
	return s
}

// Path returns the file path backing this session.
func (s Session) Path() string {
	return s.path
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/cli/provide/provider.go | cli/provide/provider.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package provide
import (
"errors"
"io/fs"
"os"
"path/filepath"
"github.com/harness/gitness/cli/session"
"github.com/harness/gitness/client"
"github.com/adrg/xdg"
"github.com/rs/zerolog/log"
)
// DefaultServerURI is the server address used when a session has no URI.
const DefaultServerURI = "http://localhost:3000"

// NewSession creates a fresh session pointing at the default server.
// On failure the error is logged and the process exits with code 1.
func NewSession() session.Session {
	ss, err := newSession()
	if err != nil {
		log.Err(err).Msg("failed to get active session")
		os.Exit(1)
	}
	return ss
}

// Session loads the active (persisted) session.
// On failure the error is logged and the process exits with code 1.
func Session() session.Session {
	ss, err := loadSession()
	if err != nil {
		log.Err(err).Msg("failed to get active session")
		os.Exit(1)
	}
	return ss
}

// Client returns an API client authenticated with the active session.
func Client() client.Client {
	return newClient(Session())
}

// OpenClient returns an unauthenticated API client for the given server URI.
func OpenClient(uri string) client.Client {
	return newClient(session.Session{URI: uri})
}
// sessionPath returns the location of the session config file inside the
// user's XDG config directory.
func sessionPath() (string, error) {
	return xdg.ConfigFile(filepath.Join("app", "config.json"))
}

// newSession creates a fresh (not yet persisted) session pointing at the
// default server.
func newSession() (session.Session, error) {
	path, err := sessionPath()
	// NOTE(review): fs.ErrNotExist is tolerated here - presumably the file is
	// created later by Store; confirm xdg.ConfigFile's error semantics.
	if err != nil && !errors.Is(err, fs.ErrNotExist) {
		return session.Session{}, err
	}
	return session.New(path).SetURI(DefaultServerURI), nil
}

// loadSession loads the persisted session, falling back to an in-memory
// session against the default server when the config file path doesn't exist.
func loadSession() (session.Session, error) {
	path, err := sessionPath()
	if err != nil {
		if errors.Is(err, fs.ErrNotExist) {
			return session.Session{URI: DefaultServerURI}, nil
		}
		return session.Session{}, err
	}
	ss, err := session.LoadFromPath(path)
	if err != nil {
		return session.Session{}, err
	}
	// sessions written without a URI fall back to the default server.
	if ss.URI == "" {
		ss = ss.SetURI(DefaultServerURI)
	}
	return ss, nil
}

// newClient creates an API client for the session's server; setting DEBUG=true
// in the environment enables request/response debug logging.
func newClient(ss session.Session) client.Client {
	httpClient := client.NewToken(ss.URI, ss.AccessToken)
	if os.Getenv("DEBUG") == "true" {
		httpClient.SetDebug(true)
	}
	return httpClient
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/client/client.go | client/client.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/http/httputil"
"net/url"
"github.com/harness/gitness/app/api/controller/user"
"github.com/harness/gitness/types"
"github.com/harness/gitness/version"
"github.com/rs/zerolog/log"
)
// ensure HTTPClient implements Client interface.
var _ Client = (*HTTPClient)(nil)

// HTTPClient provides an HTTP client for interacting
// with the remote API.
type HTTPClient struct {
	client *http.Client // underlying transport; replaceable via SetClient
	base   string       // base server URI, e.g. "http://localhost:3000"
	token  string       // bearer token attached to authenticated requests
	debug  bool         // when true, response dumps are logged
}
// New returns a client at the specified url.
func New(uri string) *HTTPClient {
	return NewToken(uri, "")
}

// NewToken returns a client at the specified url that
// authenticates all outbound requests with the given token.
func NewToken(uri, token string) *HTTPClient {
	return &HTTPClient{
		client: http.DefaultClient,
		base:   uri,
		token:  token,
		debug:  false,
	}
}
// SetClient sets the default http client. This can be
// used in conjunction with golang.org/x/oauth2 to
// authenticate requests to the server.
func (c *HTTPClient) SetClient(client *http.Client) {
	c.client = client
}

// SetDebug sets the debug flag. When the debug flag is
// true, the http.Response is dumped to the debug log, which can be
// helpful when debugging.
func (c *HTTPClient) SetDebug(debug bool) {
	c.debug = debug
}
// Login authenticates the user and returns a JWT token.
// The request is sent without a bearer token (noToken=true) since the
// caller is not authenticated yet.
func (c *HTTPClient) Login(ctx context.Context, input *user.LoginInput) (*types.TokenResponse, error) {
	out := new(types.TokenResponse)
	uri := fmt.Sprintf("%s/api/v1/login", c.base)
	err := c.post(ctx, uri, true, input, out)
	return out, err
}

// Register registers a new user and returns a JWT token.
// Like Login, this endpoint is called unauthenticated.
func (c *HTTPClient) Register(ctx context.Context, input *user.RegisterInput) (*types.TokenResponse, error) {
	out := new(types.TokenResponse)
	uri := fmt.Sprintf("%s/api/v1/register", c.base)
	err := c.post(ctx, uri, true, input, out)
	return out, err
}
//
// User Endpoints
//

// Self returns the currently authenticated user.
func (c *HTTPClient) Self(ctx context.Context) (*types.User, error) {
	out := new(types.User)
	uri := fmt.Sprintf("%s/api/v1/user", c.base)
	err := c.get(ctx, uri, out)
	return out, err
}

// UserCreatePAT creates a new PAT (personal access token) for the user.
func (c *HTTPClient) UserCreatePAT(ctx context.Context, in user.CreateTokenInput) (*types.TokenResponse, error) {
	out := new(types.TokenResponse)
	uri := fmt.Sprintf("%s/api/v1/user/tokens", c.base)
	err := c.post(ctx, uri, false, in, out)
	return out, err
}

// User returns a user by ID or email.
// NOTE(review): key is interpolated into the URL path unescaped; confirm
// callers never pass values containing '/', '?', or '#'.
func (c *HTTPClient) User(ctx context.Context, key string) (*types.User, error) {
	out := new(types.User)
	uri := fmt.Sprintf("%s/api/v1/users/%s", c.base, key)
	err := c.get(ctx, uri, out)
	return out, err
}

// UserList returns a list of all registered users.
func (c *HTTPClient) UserList(ctx context.Context, params types.UserFilter) ([]types.User, error) {
	out := []types.User{}
	uri := fmt.Sprintf("%s/api/v1/users?page=%d&limit=%d", c.base, params.Page, params.Size)
	err := c.get(ctx, uri, &out)
	return out, err
}

// UserCreate creates a new user account.
func (c *HTTPClient) UserCreate(ctx context.Context, user *types.User) (*types.User, error) {
	out := new(types.User)
	uri := fmt.Sprintf("%s/api/v1/users", c.base)
	err := c.post(ctx, uri, false, user, out)
	return out, err
}

// UserUpdate updates a user account by ID or email.
func (c *HTTPClient) UserUpdate(ctx context.Context, key string, user *types.UserInput) (*types.User, error) {
	out := new(types.User)
	uri := fmt.Sprintf("%s/api/v1/users/%s", c.base, key)
	err := c.patch(ctx, uri, user, out)
	return out, err
}

// UserDelete deletes a user account by ID or email.
func (c *HTTPClient) UserDelete(ctx context.Context, key string) error {
	uri := fmt.Sprintf("%s/api/v1/users/%s", c.base, key)
	err := c.delete(ctx, uri)
	return err
}
//
// http request helper functions
//

// get issues a GET request and decodes the JSON response into out.
func (c *HTTPClient) get(ctx context.Context, rawurl string, out any) error {
	return c.do(ctx, rawurl, http.MethodGet, false, nil, out)
}

// post issues a POST request with a JSON body; noToken suppresses the
// Authorization header for unauthenticated endpoints.
func (c *HTTPClient) post(ctx context.Context, rawurl string, noToken bool, in, out any) error {
	return c.do(ctx, rawurl, http.MethodPost, noToken, in, out)
}

// patch issues a PATCH request with a JSON body.
func (c *HTTPClient) patch(ctx context.Context, rawurl string, in, out any) error {
	return c.do(ctx, rawurl, http.MethodPatch, false, in, out)
}

// delete issues a DELETE request with no body.
func (c *HTTPClient) delete(ctx context.Context, rawurl string) error {
	return c.do(ctx, rawurl, http.MethodDelete, false, nil, nil)
}
// helper function to make an http request, decoding the JSON response into
// out when out is non-nil. The response body is always closed.
func (c *HTTPClient) do(ctx context.Context, rawurl, method string, noToken bool, in, out any) error {
	// executes the http request and returns the body as
	// an io.ReadCloser
	body, err := c.stream(ctx, rawurl, method, noToken, in, out)
	if body != nil {
		defer func(body io.ReadCloser) {
			_ = body.Close()
		}(body)
	}
	if err != nil {
		return err
	}

	// if a json response is expected, parse and return
	// the json response.
	if out != nil {
		return json.NewDecoder(body).Decode(out)
	}

	return nil
}
// helper function to stream an http request. On success the caller owns the
// returned body and must close it; on a 3xx+ status the body is decoded into
// a remoteError, closed, and returned as the error.
func (c *HTTPClient) stream(ctx context.Context, rawurl, method string, noToken bool,
	in, _ any) (io.ReadCloser, error) {
	uri, err := url.Parse(rawurl)
	if err != nil {
		return nil, err
	}

	// if we are posting or putting data, we need to
	// write it to the body of the request.
	var buf io.ReadWriter
	if in != nil {
		buf = &bytes.Buffer{}
		if err = json.NewEncoder(buf).Encode(in); err != nil {
			return nil, err
		}
	}

	// creates a new http request.
	req, err := http.NewRequestWithContext(ctx, method, uri.String(), buf)
	if err != nil {
		return nil, err
	}
	if in != nil {
		req.Header.Set("Content-Type", "application/json")
	}
	if !noToken && c.token != "" {
		req.Header.Set("Authorization", "Bearer "+c.token)
	}
	// form payloads override the JSON content type set above.
	if _, ok := in.(*url.Values); ok {
		req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	}

	// include the client version information in the
	// http accept header for debugging purposes.
	req.Header.Set("Accept", "application/json;version="+version.Version.String())

	// send the http request.
	// the body is deliberately left open here: it is either handed to the
	// caller below or closed in the error branch.
	resp, err := c.client.Do(req) //nolint:bodyclose
	if err != nil {
		return nil, err
	}
	if c.debug {
		dump, _ := httputil.DumpResponse(resp, true)
		log.Debug().Msgf("method %s, url %s", method, rawurl)
		log.Debug().Msg(string(dump))
	}
	if resp.StatusCode >= http.StatusMultipleChoices {
		defer func(Body io.ReadCloser) {
			_ = Body.Close()
		}(resp.Body)
		err = &remoteError{}
		if decodeErr := json.NewDecoder(resp.Body).Decode(err); decodeErr != nil {
			return nil, decodeErr
		}
		return nil, err
	}
	return resp.Body, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/client/interface.go | client/interface.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"context"
"github.com/harness/gitness/app/api/controller/user"
"github.com/harness/gitness/types"
)
// Client to access the remote APIs.
// The canonical implementation is HTTPClient.
type Client interface {
	// Login authenticates the user and returns a JWT token.
	Login(ctx context.Context, input *user.LoginInput) (*types.TokenResponse, error)

	// Register registers a new user and returns a JWT token.
	Register(ctx context.Context, input *user.RegisterInput) (*types.TokenResponse, error)

	// Self returns the currently authenticated user.
	Self(ctx context.Context) (*types.User, error)

	// User returns a user by ID or email.
	User(ctx context.Context, key string) (*types.User, error)

	// UserList returns a list of all registered users.
	UserList(ctx context.Context, params types.UserFilter) ([]types.User, error)

	// UserCreate creates a new user account.
	UserCreate(ctx context.Context, user *types.User) (*types.User, error)

	// UserUpdate updates a user account by ID or email.
	UserUpdate(ctx context.Context, key string, input *types.UserInput) (*types.User, error)

	// UserDelete deletes a user account by ID or email.
	UserDelete(ctx context.Context, key string) error

	// UserCreatePAT creates a new PAT for the user.
	UserCreatePAT(ctx context.Context, in user.CreateTokenInput) (*types.TokenResponse, error)
}
// remoteError stores the error payload returned
// from the remote API.
type remoteError struct {
	Message string `json:"message"`
}

// Error returns the error message.
func (e *remoteError) Error() string {
	return e.Message
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/encrypt/wire.go | encrypt/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package encrypt
import (
"github.com/harness/gitness/types"
"github.com/google/wire"
)
// WireSet provides a wire set for this package.
var WireSet = wire.NewSet(
	ProvideEncrypter,
)

// ProvideEncrypter returns the configured encrypter: a no-op implementation
// when no secret is configured, otherwise the encrypter built by New from
// the secret (with mixed-content/compat support).
func ProvideEncrypter(config *types.Config) (Encrypter, error) {
	if config.Encrypter.Secret == "" {
		return &none{}, nil
	}
	return New(config.Encrypter.Secret, config.Encrypter.MixedContent)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/encrypt/aesgcm_test.go | encrypt/aesgcm_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package encrypt
import (
"strings"
"testing"
)
// testKey32Bytes is a valid 32-byte key used across the tests.
const testKey32Bytes = "12345678901234567890123456789012"

// TestNew verifies key-length validation of the constructor: only exactly
// 32-byte keys are accepted, in both normal and compat mode.
func TestNew(t *testing.T) {
	tests := []struct {
		name      string
		key       string
		compat    bool
		expectErr bool
	}{
		{
			name:      "valid 32-byte key",
			key:       testKey32Bytes,
			compat:    false,
			expectErr: false,
		},
		{
			name:      "valid 32-byte key with compat mode",
			key:       testKey32Bytes,
			compat:    true,
			expectErr: false,
		},
		{
			name:      "invalid key - too short",
			key:       "short",
			compat:    false,
			expectErr: true,
		},
		{
			name:      "invalid key - too long",
			key:       "123456789012345678901234567890123",
			compat:    false,
			expectErr: true,
		},
		{
			name:      "empty key",
			key:       "",
			compat:    false,
			expectErr: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			encrypter, err := New(tt.key, tt.compat)
			if tt.expectErr {
				if err == nil {
					t.Errorf("expected error but got none")
				}
				if encrypter != nil {
					t.Errorf("expected nil encrypter but got %v", encrypter)
				}
				return
			}
			if err != nil {
				t.Errorf("unexpected error: %v", err)
			}
			if encrypter == nil {
				t.Errorf("expected encrypter but got nil")
			}
		})
	}
}
// TestAesgcmEncryptDecrypt verifies that a round trip (Encrypt then Decrypt)
// restores the original plaintext for a variety of inputs, including empty,
// long, special-character, and multi-byte unicode strings.
func TestAesgcmEncryptDecrypt(t *testing.T) {
	key := testKey32Bytes
	encrypter, err := New(key, false)
	if err != nil {
		t.Fatalf("failed to create encrypter: %v", err)
	}

	tests := []struct {
		name      string
		plaintext string
	}{
		{
			name:      "simple text",
			plaintext: "hello world",
		},
		{
			name:      "empty string",
			plaintext: "",
		},
		{
			name:      "long text",
			plaintext: strings.Repeat("a", 1000),
		},
		{
			name:      "special characters",
			plaintext: "!@#$%^&*()_+-=[]{}|;':\",./<>?",
		},
		{
			name:      "unicode text",
			plaintext: "Hello 世界 🌍",
		},
		{
			name:      "newlines and tabs",
			plaintext: "line1\nline2\tline3",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Encrypt
			ciphertext, err := encrypter.Encrypt(tt.plaintext)
			if err != nil {
				t.Fatalf("encryption failed: %v", err)
			}

			// Verify ciphertext is not empty
			if len(ciphertext) == 0 {
				t.Errorf("ciphertext is empty")
			}

			// Decrypt
			decrypted, err := encrypter.Decrypt(ciphertext)
			if err != nil {
				t.Fatalf("decryption failed: %v", err)
			}

			// Verify decrypted matches original
			if decrypted != tt.plaintext {
				t.Errorf("decrypted text does not match original\nexpected: %q\ngot: %q", tt.plaintext, decrypted)
			}
		})
	}
}
// TestAesgcmEncryptUniqueness verifies that encrypting the same plaintext
// twice yields different ciphertexts (random nonce) while both still decrypt
// to the original value.
func TestAesgcmEncryptUniqueness(t *testing.T) {
	key := testKey32Bytes
	encrypter, err := New(key, false)
	if err != nil {
		t.Fatalf("failed to create encrypter: %v", err)
	}

	plaintext := "test message"

	// Encrypt the same plaintext multiple times
	ciphertext1, err := encrypter.Encrypt(plaintext)
	if err != nil {
		t.Fatalf("encryption 1 failed: %v", err)
	}

	ciphertext2, err := encrypter.Encrypt(plaintext)
	if err != nil {
		t.Fatalf("encryption 2 failed: %v", err)
	}

	// Verify ciphertexts are different (due to random nonce)
	if string(ciphertext1) == string(ciphertext2) {
		t.Errorf("ciphertexts should be different due to random nonce")
	}

	// But both should decrypt to the same plaintext
	decrypted1, err := encrypter.Decrypt(ciphertext1)
	if err != nil {
		t.Fatalf("decryption 1 failed: %v", err)
	}

	decrypted2, err := encrypter.Decrypt(ciphertext2)
	if err != nil {
		t.Fatalf("decryption 2 failed: %v", err)
	}

	if decrypted1 != plaintext || decrypted2 != plaintext {
		t.Errorf("decrypted texts should match original plaintext")
	}
}
// TestAesgcmDecryptInvalidCiphertext verifies that, without compat mode,
// empty, truncated, or corrupted ciphertext produces a decryption error.
func TestAesgcmDecryptInvalidCiphertext(t *testing.T) {
	key := testKey32Bytes
	encrypter, err := New(key, false)
	if err != nil {
		t.Fatalf("failed to create encrypter: %v", err)
	}

	tests := []struct {
		name       string
		ciphertext []byte
		expectErr  bool
	}{
		{
			name:       "empty ciphertext",
			ciphertext: []byte{},
			expectErr:  true,
		},
		{
			name:       "too short ciphertext",
			ciphertext: []byte{1, 2, 3},
			expectErr:  true,
		},
		{
			name:       "corrupted ciphertext",
			ciphertext: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
			expectErr:  true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			_, err := encrypter.Decrypt(tt.ciphertext)
			if tt.expectErr && err == nil {
				t.Errorf("expected error but got none")
			}
		})
	}
}
// TestAesgcmCompatMode verifies compat-mode behavior: undecryptable input is
// returned verbatim as plaintext, while valid ciphertext still round-trips.
func TestAesgcmCompatMode(t *testing.T) {
	key := testKey32Bytes
	encrypter, err := New(key, true)
	if err != nil {
		t.Fatalf("failed to create encrypter: %v", err)
	}

	// Previously the type assertion ignored the ok flag, so a non-*Aesgcm
	// encrypter would cause a nil-pointer panic instead of a clean failure.
	aesgcm, ok := encrypter.(*Aesgcm)
	if !ok {
		t.Fatalf("expected *Aesgcm encrypter, got %T", encrypter)
	}
	if !aesgcm.Compat {
		t.Errorf("compat mode should be enabled")
	}

	// Test that invalid ciphertext returns the ciphertext as plaintext in compat mode
	invalidCiphertext := []byte("not encrypted data")
	decrypted, err := encrypter.Decrypt(invalidCiphertext)
	if err != nil {
		t.Errorf("compat mode should not return error for invalid ciphertext: %v", err)
	}
	if decrypted != string(invalidCiphertext) {
		t.Errorf(
			"compat mode should return ciphertext as plaintext\nexpected: %q\ngot: %q",
			string(invalidCiphertext),
			decrypted,
		)
	}

	// Test that valid encrypted data still works in compat mode
	plaintext := "test message"
	ciphertext, err := encrypter.Encrypt(plaintext)
	if err != nil {
		t.Fatalf("encryption failed: %v", err)
	}

	decrypted, err = encrypter.Decrypt(ciphertext)
	if err != nil {
		t.Fatalf("decryption failed: %v", err)
	}

	if decrypted != plaintext {
		t.Errorf("decrypted text does not match original\nexpected: %q\ngot: %q", plaintext, decrypted)
	}
}
// TestAesgcmCompatModeShortCiphertext verifies that compat mode also returns
// input shorter than the nonce size verbatim instead of erroring.
func TestAesgcmCompatModeShortCiphertext(t *testing.T) {
	key := testKey32Bytes
	encrypter, err := New(key, true)
	if err != nil {
		t.Fatalf("failed to create encrypter: %v", err)
	}

	// Test with very short ciphertext (less than nonce size)
	shortCiphertext := []byte("short")
	decrypted, err := encrypter.Decrypt(shortCiphertext)
	if err != nil {
		t.Errorf("compat mode should not return error for short ciphertext: %v", err)
	}
	if decrypted != string(shortCiphertext) {
		t.Errorf(
			"compat mode should return ciphertext as plaintext\nexpected: %q\ngot: %q",
			string(shortCiphertext),
			decrypted,
		)
	}
}
// TestAesgcmNonCompatModeInvalidCiphertext verifies that without compat mode
// undecryptable input is rejected with an error rather than passed through.
func TestAesgcmNonCompatModeInvalidCiphertext(t *testing.T) {
	key := testKey32Bytes
	encrypter, err := New(key, false)
	if err != nil {
		t.Fatalf("failed to create encrypter: %v", err)
	}

	// Test that invalid ciphertext returns error in non-compat mode
	invalidCiphertext := []byte("not encrypted data")
	_, err = encrypter.Decrypt(invalidCiphertext)
	if err == nil {
		t.Errorf("non-compat mode should return error for invalid ciphertext")
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/encrypt/encrypt.go | encrypt/encrypt.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package encrypt
import (
"errors"
)
// errKeySize indicates that the provided encryption key is not exactly
// 32 bytes long (as required by New).
var errKeySize = errors.New("encryption key must be 32 bytes")

// Encrypter provides field encryption and decryption.
// Encrypted values are currently limited to strings, which is
// reflected in the interface design.
type Encrypter interface {
	// Encrypt converts the plaintext into an opaque byte slice.
	Encrypt(plaintext string) ([]byte, error)
	// Decrypt reverses Encrypt, returning the original string.
	Decrypt(ciphertext []byte) (string, error)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/encrypt/aesgcm.go | encrypt/aesgcm.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package encrypt
import (
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"errors"
"io"
)
// Aesgcm provides an encrypter that uses the aesgcm encryption
// algorithm.
type Aesgcm struct {
	block cipher.Block // AES block cipher the GCM mode is built from
	// Compat enables mixed-mode operation: on decryption failure the raw
	// ciphertext is returned as plaintext instead of an error.
	Compat bool
}
// Encrypt encrypts the plaintext using aesgcm.
// A fresh random nonce is generated per call and prepended to the
// returned ciphertext so Decrypt can recover it.
func (e *Aesgcm) Encrypt(plaintext string) ([]byte, error) {
	gcm, err := cipher.NewGCM(e.block)
	if err != nil {
		return nil, err
	}

	// Draw a cryptographically random nonce of the required size.
	nonce := make([]byte, gcm.NonceSize())
	if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
		return nil, err
	}

	// Seal appends the encrypted payload to the nonce slice, producing
	// nonce||ciphertext in a single buffer.
	sealed := gcm.Seal(nonce, nonce, []byte(plaintext), nil)
	return sealed, nil
}
// Decrypt decrypts the ciphertext using aesgcm.
//
// The nonce is expected as the prefix of ciphertext (see Encrypt). When
// Compat is set, any malformed or unauthenticated input is returned as-is
// rather than failing.
func (e *Aesgcm) Decrypt(ciphertext []byte) (string, error) {
	gcm, err := cipher.NewGCM(e.block)
	if err != nil {
		return "", err
	}
	// Input too short to even contain a nonce cannot be valid ciphertext.
	if len(ciphertext) < gcm.NonceSize() {
		// if the decryption utility is running in compatibility
		// mode, it will return the ciphertext as plain text if
		// decryption fails. This should be used when running the
		// database in mixed-mode, where there is a mix of encrypted
		// and unencrypted content.
		if e.Compat {
			return string(ciphertext), nil
		}
		return "", errors.New("malformed ciphertext")
	}
	// Split nonce prefix from the sealed payload and authenticate/decrypt.
	plaintext, err := gcm.Open(nil,
		ciphertext[:gcm.NonceSize()],
		ciphertext[gcm.NonceSize():],
		nil,
	)
	// if the decryption utility is running in compatibility
	// mode, it will return the ciphertext as plain text if
	// decryption fails. This should be used when running the
	// database in mixed-mode, where there is a mix of encrypted
	// and unencrypted content.
	if err != nil && e.Compat {
		return string(ciphertext), nil
	}
	return string(plaintext), err
}
// New provides a new aesgcm encrypter.
//
// key must be exactly 32 bytes (enforced below; shorter/longer keys yield
// errKeySize). compat enables the mixed-mode fallback documented on
// Aesgcm.Decrypt.
func New(key string, compat bool) (Encrypter, error) {
	if len(key) != 32 {
		return nil, errKeySize
	}
	b := []byte(key)
	block, err := aes.NewCipher(b)
	if err != nil {
		return nil, err
	}
	return &Aesgcm{block: block, Compat: compat}, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/encrypt/none_test.go | encrypt/none_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package encrypt
import (
"bytes"
"testing"
)
// TestNone_Encrypt verifies that the no-op encrypter returns the plaintext
// bytes unchanged for simple, empty, special-character and unicode inputs.
func TestNone_Encrypt(t *testing.T) {
	encrypter := &none{}
	t.Run("encrypt simple string", func(t *testing.T) {
		plaintext := "hello world"
		ciphertext, err := encrypter.Encrypt(plaintext)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if !bytes.Equal(ciphertext, []byte(plaintext)) {
			t.Errorf("expected ciphertext to be %v, got %v", []byte(plaintext), ciphertext)
		}
	})
	t.Run("encrypt empty string", func(t *testing.T) {
		plaintext := ""
		ciphertext, err := encrypter.Encrypt(plaintext)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if !bytes.Equal(ciphertext, []byte(plaintext)) {
			t.Errorf("expected ciphertext to be empty, got %v", ciphertext)
		}
	})
	t.Run("encrypt special characters", func(t *testing.T) {
		plaintext := "!@#$%^&*()_+-=[]{}|;':\",./<>?"
		ciphertext, err := encrypter.Encrypt(plaintext)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if !bytes.Equal(ciphertext, []byte(plaintext)) {
			t.Errorf("expected ciphertext to be %v, got %v", []byte(plaintext), ciphertext)
		}
	})
	t.Run("encrypt unicode characters", func(t *testing.T) {
		plaintext := "こんにちは世界 🌍"
		ciphertext, err := encrypter.Encrypt(plaintext)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if !bytes.Equal(ciphertext, []byte(plaintext)) {
			t.Errorf("expected ciphertext to be %v, got %v", []byte(plaintext), ciphertext)
		}
	})
}
// TestNone_Decrypt verifies that the no-op decrypter returns the stored
// bytes unchanged, including for empty and nil inputs.
func TestNone_Decrypt(t *testing.T) {
	encrypter := &none{}
	t.Run("decrypt simple bytes", func(t *testing.T) {
		ciphertext := []byte("hello world")
		plaintext, err := encrypter.Decrypt(ciphertext)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if plaintext != string(ciphertext) {
			t.Errorf("expected plaintext to be %s, got %s", string(ciphertext), plaintext)
		}
	})
	t.Run("decrypt empty bytes", func(t *testing.T) {
		ciphertext := []byte("")
		plaintext, err := encrypter.Decrypt(ciphertext)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if plaintext != "" {
			t.Errorf("expected plaintext to be empty, got %s", plaintext)
		}
	})
	t.Run("decrypt nil bytes", func(t *testing.T) {
		var ciphertext []byte
		plaintext, err := encrypter.Decrypt(ciphertext)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if plaintext != "" {
			t.Errorf("expected plaintext to be empty, got %s", plaintext)
		}
	})
	t.Run("decrypt special characters", func(t *testing.T) {
		ciphertext := []byte("!@#$%^&*()_+-=[]{}|;':\",./<>?")
		plaintext, err := encrypter.Decrypt(ciphertext)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if plaintext != string(ciphertext) {
			t.Errorf("expected plaintext to be %s, got %s", string(ciphertext), plaintext)
		}
	})
}
// TestNone_EncryptDecrypt_RoundTrip checks that Encrypt followed by
// Decrypt is the identity for a range of strings.
func TestNone_EncryptDecrypt_RoundTrip(t *testing.T) {
	encrypter := &none{}
	testCases := []string{
		"hello world",
		"",
		"!@#$%^&*()",
		"こんにちは世界",
		"multi\nline\nstring",
		"tab\tseparated\tvalues",
	}
	for _, tc := range testCases {
		t.Run(tc, func(t *testing.T) {
			ciphertext, err := encrypter.Encrypt(tc)
			if err != nil {
				t.Fatalf("encrypt failed: %v", err)
			}
			plaintext, err := encrypter.Decrypt(ciphertext)
			if err != nil {
				t.Fatalf("decrypt failed: %v", err)
			}
			if plaintext != tc {
				t.Errorf("round trip failed: expected %s, got %s", tc, plaintext)
			}
		})
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/encrypt/none.go | encrypt/none.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package encrypt
// none is an encryption strategy that stores secret
// values in plain text. This is the default strategy
// when no key is specified.
type none struct {
}

// Encrypt returns the plaintext unchanged, as raw bytes.
func (*none) Encrypt(plaintext string) ([]byte, error) {
	raw := []byte(plaintext)
	return raw, nil
}

// Decrypt returns the stored bytes unchanged, as a string.
func (*none) Decrypt(ciphertext []byte) (string, error) {
	value := string(ciphertext)
	return value, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/lock/memory_test.go | lock/memory_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lock
import (
"context"
"errors"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
)
// Test_inMemMutex_Lock verifies that a second goroutine can acquire the
// same key after the first holder releases it.
func Test_inMemMutex_Lock(t *testing.T) {
	manager := NewInMemory(Config{
		App:        "gitness",
		Namespace:  "pullreq",
		Expiry:     3 * time.Second,
		Tries:      10,
		RetryDelay: 300 * time.Millisecond,
	})
	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		// Give the main goroutine time to grab the lock first.
		time.Sleep(500 * time.Millisecond)
		mx, err := manager.NewMutex("key1")
		if err != nil {
			t.Errorf("mutex not created, err: %v", err)
			return
		}
		if err := mx.Lock(context.Background()); err != nil {
			t.Errorf("error from go routine while locking %s, err: %v", mx.Key(), err)
			return
		}
		err = mx.Unlock(context.Background())
		require.NoError(t, err)
	}()
	mx, err := manager.NewMutex("key1")
	if err != nil {
		t.Errorf("mutex not created, err: %v", err)
		return
	}
	if err := mx.Lock(context.Background()); err != nil {
		t.Errorf("error while locking %v, err: %v", mx.Key(), err)
	}
	// Hold the lock long enough that the goroutine has to retry.
	time.Sleep(1 * time.Second)
	err = mx.Unlock(context.Background())
	require.NoError(t, err)
	wg.Wait()
}
// Test_inMemMutex_MaxTries verifies that a competing goroutine gives up
// with ErrorKindMaxRetriesExceeded when the holder keeps the lock longer
// than the configured number of tries allows.
func Test_inMemMutex_MaxTries(t *testing.T) {
	manager := NewInMemory(Config{
		App:        "gitness",
		Namespace:  "pullreq",
		Expiry:     1 * time.Second,
		Tries:      2, // low so the contender exhausts retries quickly
		RetryDelay: 300 * time.Millisecond,
	})
	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		time.Sleep(500 * time.Millisecond)
		mx, err := manager.NewMutex("key1")
		if err != nil {
			t.Errorf("mutex not created, err: %v", err)
			return
		}
		err = mx.Lock(context.Background())
		if err == nil {
			t.Errorf("error should be returned while locking %s instead of nil", mx.Key())
			return
		}
		var errLock *Error
		if !errors.As(err, &errLock) {
			t.Errorf("expected error lock.Error, got: %v", err)
			return
		}
		if errLock.Kind != ErrorKindMaxRetriesExceeded {
			t.Errorf("expected lock.MaxRetriesExceeded, got: %v", err)
			return
		}
	}()
	mx, err := manager.NewMutex("key1")
	if err != nil {
		t.Errorf("mutex not created, err: %v", err)
		return
	}
	if err := mx.Lock(context.Background()); err != nil {
		t.Errorf("error while locking %v, err: %v", mx.Key(), err)
	}
	time.Sleep(1 * time.Second)
	err = mx.Unlock(context.Background())
	require.NoError(t, err)
	wg.Wait()
}
// Test_inMemMutex_LockAndWait runs three goroutines contending on one key
// and checks that each eventually locks and unlocks successfully.
//
// NOTE(review): if NewMutex fails, fn returns before the deferred wg.Done
// is registered, which would hang wg.Wait — confirm this path is
// acceptable for a test helper.
func Test_inMemMutex_LockAndWait(t *testing.T) {
	wg := &sync.WaitGroup{}
	manager := NewInMemory(Config{
		App:        "gitness",
		Namespace:  "pullreq",
		Expiry:     3 * time.Second,
		Tries:      10,
		RetryDelay: 300 * time.Millisecond,
	})
	fn := func(n int) {
		mx, err := manager.NewMutex("Key1")
		if err != nil {
			t.Errorf("mutex not created routine %d, err: %v", n, err)
			return
		}
		defer func() {
			if err := mx.Unlock(context.Background()); err != nil {
				t.Errorf("failed to unlock %d", n)
			}
			wg.Done()
		}()
		if err := mx.Lock(context.Background()); err != nil {
			t.Errorf("failed to lock %d", n)
		}
		// Hold briefly so the other goroutines actually contend.
		time.Sleep(50 * time.Millisecond)
	}
	wg.Add(3)
	go fn(1)
	go fn(2)
	go fn(3)
	wg.Wait()
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/lock/wire.go | lock/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lock
import (
"github.com/go-redis/redis/v8"
"github.com/google/wire"
)
// WireSet exposes the lock package's providers to the wire dependency
// injection graph.
var WireSet = wire.NewSet(
	ProvideMutexManager,
)

// ProvideMutexManager constructs the MutexManager implementation selected
// by config.Provider.
//
// NOTE(review): an unrecognized provider yields a nil MutexManager, which
// would panic on first use — confirm callers validate config.Provider.
func ProvideMutexManager(config Config, client redis.UniversalClient) MutexManager {
	switch config.Provider {
	case MemoryProvider:
		return NewInMemory(config)
	case RedisProvider:
		return NewRedis(config, client)
	}
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/lock/memory.go | lock/memory.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lock
import (
"context"
"crypto/rand"
"encoding/base64"
"sync"
"time"
)
// InMemory is a local implementation of a MutexManager that it's intended to be used during development.
type InMemory struct {
	config Config // force value copy
	mutex  sync.Mutex
	keys   map[string]inMemEntry // key -> current claim; guarded by mutex
}

// NewInMemory creates a new InMemory instance only used for development.
func NewInMemory(config Config) *InMemory {
	keys := make(map[string]inMemEntry)
	return &InMemory{
		config: config,
		keys:   keys,
	}
}
// NewMutex creates a mutex for the given key. The returned mutex is not held
// and must be acquired with a call to .Lock.
//
// Per-call options are applied on top of a copy of the manager's default
// configuration, so they never mutate manager-wide state.
func (m *InMemory) NewMutex(key string, options ...Option) (Mutex, error) {
	var (
		token string
		err   error
	)
	// copy default values
	config := m.config
	// set default delayFunc
	if config.DelayFunc == nil {
		config.DelayFunc = func(_ int) time.Duration {
			return config.RetryDelay
		}
	}
	// override config with custom options
	for _, opt := range options {
		opt.Apply(&config)
	}
	// format key
	key = formatKey(config.App, config.Namespace, key)
	// pick the ownership token: explicit value, custom generator, or random.
	switch {
	case config.Value != "":
		token = config.Value
	case config.GenValueFunc != nil:
		token, err = config.GenValueFunc()
	default:
		token, err = randstr(32)
	}
	if err != nil {
		return nil, NewError(ErrorKindGenerateTokenFailed, key, nil)
	}
	// waitTime logic is similar to redis implementation:
	// https://github.com/go-redsync/redsync/blob/e1e5da6654c81a2069d6a360f1a31c21f05cd22d/mutex.go#LL81C4-L81C100
	waitTime := config.Expiry
	if config.TimeoutFactor > 0 {
		waitTime = time.Duration(int64(float64(config.Expiry) * config.TimeoutFactor))
	}
	lock := inMemMutex{
		expiry:    config.Expiry,
		waitTime:  waitTime,
		tries:     config.Tries,
		delayFunc: config.DelayFunc,
		provider:  m,
		key:       key,
		token:     token,
	}
	return &lock, nil
}
// acquire attempts to claim key for the duration ttl, recording token as
// the owner. It succeeds when the key is unclaimed or the previous claim
// has expired, and reports whether the lock was taken.
func (m *InMemory) acquire(key, token string, ttl time.Duration) bool {
	m.mutex.Lock()
	defer m.mutex.Unlock()

	now := time.Now()
	if existing, found := m.keys[key]; found && existing.validUntil.After(now) {
		// Another holder still has an unexpired claim on this key.
		return false
	}

	m.keys[key] = inMemEntry{token: token, validUntil: now.Add(ttl)}
	return true
}
// release frees the lock on key, but only when token still owns it.
// It reports whether the entry was removed.
func (m *InMemory) release(key, token string) bool {
	m.mutex.Lock()
	defer m.mutex.Unlock()

	entry, found := m.keys[key]
	if !found {
		return false
	}
	if entry.token != token {
		// Owned by someone else (e.g. re-acquired after expiry); leave it.
		return false
	}

	delete(m.keys, key)
	return true
}
// inMemEntry records which token owns a key and until when the claim is
// considered valid.
type inMemEntry struct {
	token      string
	validUntil time.Time
}

// inMemMutex is the Mutex implementation handed out by InMemory.NewMutex.
type inMemMutex struct {
	mutex     sync.Mutex // Used while manipulating the internal state of the lock itself
	provider  *InMemory
	expiry    time.Duration
	waitTime  time.Duration // upper bound on the total time spent retrying in Lock
	tries     int
	delayFunc DelayFunc
	key       string
	token     string // A random string used to safely release the lock
	isHeld    bool
}
// Key returns the key to be locked.
func (m *inMemMutex) Key() string {
	return m.key
}

// Lock acquires the lock. It fails with error if the lock is already held.
func (m *inMemMutex) Lock(ctx context.Context) error {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	// Re-entrant locking is not supported.
	if m.isHeld {
		return NewError(ErrorKindLockHeld, m.key, nil)
	}
	// Fast path: try to claim the key immediately.
	if m.provider.acquire(m.key, m.token, m.expiry) {
		m.isHeld = true
		return nil
	}
	// Slow path: retry with delays until waitTime elapses or the maximum
	// number of tries is reached (see retry).
	timeout := time.NewTimer(m.waitTime)
	defer timeout.Stop()
	for i := 1; !m.isHeld && i <= m.tries; i++ {
		if err := m.retry(ctx, i, timeout); err != nil {
			return err
		}
	}
	return nil
}
// retry performs one delayed acquisition attempt. attempt is 1-based;
// when it equals the configured maximum the attempt is abandoned with
// ErrorKindMaxRetriesExceeded. The shared timeout timer bounds the total
// wait across all attempts.
func (m *inMemMutex) retry(ctx context.Context, attempt int, timeout *time.Timer) error {
	if m.isHeld {
		return nil
	}
	if attempt == m.tries {
		return NewError(ErrorKindMaxRetriesExceeded, m.key, nil)
	}
	delay := time.NewTimer(m.delayFunc(attempt))
	defer delay.Stop()
	// Wait for the retry delay, but abort early on context cancellation
	// or overall timeout.
	select {
	case <-ctx.Done():
		return NewError(ErrorKindContext, m.key, ctx.Err())
	case <-timeout.C:
		return NewError(ErrorKindCannotLock, m.key, nil)
	case <-delay.C: // just wait
	}
	if m.provider.acquire(m.key, m.token, m.expiry) {
		m.isHeld = true
	}
	return nil
}
// Unlock releases the lock. It fails with error if the lock is not currently held.
func (m *inMemMutex) Unlock(_ context.Context) error {
	m.mutex.Lock()
	defer m.mutex.Unlock()

	if !m.isHeld {
		return NewError(ErrorKindLockNotHeld, m.key, nil)
	}
	if ok := m.provider.release(m.key, m.token); !ok {
		// The provider no longer considers us the owner (e.g. the claim
		// expired and was taken over).
		return NewError(ErrorKindLockNotHeld, m.key, nil)
	}

	m.isHeld = false
	return nil
}
// randstr returns a URL-safe base64 string derived from size bytes of
// cryptographically secure random data.
func randstr(size int) (string, error) {
	raw := make([]byte, size)
	_, err := rand.Read(raw)
	if err != nil {
		return "", err
	}
	encoded := base64.URLEncoding.EncodeToString(raw)
	return encoded, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/lock/redis.go | lock/redis.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lock
import (
"context"
"github.com/harness/gitness/errors"
redislib "github.com/go-redis/redis/v8"
"github.com/go-redsync/redsync/v4"
"github.com/go-redsync/redsync/v4/redis/goredis/v8"
)
// Redis wrapper for redsync.
type Redis struct {
	config Config // default mutex configuration, copied per NewMutex call
	rs     *redsync.Redsync
}

// NewRedis create an instance of redisync to be used to obtain a mutual exclusion
// lock.
func NewRedis(config Config, client redislib.UniversalClient) *Redis {
	// goredis adapts the go-redis client into a redsync connection pool.
	pool := goredis.NewPool(client)
	return &Redis{
		config: config,
		rs:     redsync.New(pool),
	}
}
// Acquire new lock.
// NewMutex builds a redsync-backed mutex for key, translating this
// package's Config (with any per-call options applied) into redsync
// options. The returned mutex is not held until Lock is called.
func (r *Redis) NewMutex(key string, options ...Option) (Mutex, error) {
	// copy default values
	config := r.config
	// customize config
	for _, opt := range options {
		opt.Apply(&config)
	}
	// convert to redis helper functions
	args := make([]redsync.Option, 0, 8)
	args = append(args,
		redsync.WithExpiry(config.Expiry),
		redsync.WithTimeoutFactor(config.TimeoutFactor),
		redsync.WithTries(config.Tries),
		redsync.WithRetryDelay(config.RetryDelay),
		redsync.WithDriftFactor(config.DriftFactor),
	)
	// Optional overrides are only forwarded when set, so redsync keeps its
	// own defaults otherwise.
	if config.DelayFunc != nil {
		args = append(args, redsync.WithRetryDelayFunc(redsync.DelayFunc(config.DelayFunc)))
	}
	if config.GenValueFunc != nil {
		args = append(args, redsync.WithGenValueFunc(config.GenValueFunc))
	}
	uniqKey := formatKey(config.App, config.Namespace, key)
	mutex := r.rs.NewMutex(uniqKey, args...)
	return &RedisMutex{
		mutex: mutex,
	}, nil
}
// RedisMutex adapts a redsync.Mutex to this package's Mutex interface,
// translating redsync errors into lock.Error values.
type RedisMutex struct {
	mutex *redsync.Mutex
}

// Key returns the key to be locked.
func (l *RedisMutex) Key() string {
	return l.mutex.Name()
}

// Lock acquires the lock. It fails with error if the lock is already held.
func (l *RedisMutex) Lock(ctx context.Context) error {
	err := l.mutex.LockContext(ctx)
	if err != nil {
		return translateRedisErr(err, l.Key())
	}
	return nil
}

// Unlock releases the lock. It fails with error if the lock is not currently held.
func (l *RedisMutex) Unlock(ctx context.Context) error {
	_, err := l.mutex.UnlockContext(ctx)
	if err != nil {
		return translateRedisErr(err, l.Key())
	}
	return nil
}
// translateRedisErr maps redsync errors onto lock.Error kinds.
//
// NOTE(review): errors matching no case keep the zero-value ErrorKind
// (""), so the resulting message carries no kind text — confirm whether a
// generic fallback (e.g. ErrorKindProviderError) was intended.
func translateRedisErr(err error, key string) error {
	var kind ErrorKind
	switch {
	case errors.Is(err, redsync.ErrFailed):
		kind = ErrorKindCannotLock
	case errors.Is(err, redsync.ErrExtendFailed), errors.IsType[*redsync.RedisError](err):
		kind = ErrorKindProviderError
	case errors.IsType[*redsync.ErrTaken](err), errors.IsType[*redsync.ErrNodeTaken](err):
		kind = ErrorKindLockHeld
	}
	return NewError(kind, key, err)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/lock/config.go | lock/config.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lock
import "time"
// Provider selects which MutexManager implementation is used
// (see ProvideMutexManager).
type Provider string

const (
	MemoryProvider Provider = "inmemory"
	RedisProvider  Provider = "redis"
)

// A DelayFunc is used to decide the amount of time to wait between retries.
type DelayFunc func(tries int) time.Duration

// Config carries the default settings applied to every mutex created by a
// manager; individual values can be overridden per mutex via Option.
type Config struct {
	App           string // app namespace prefix
	Namespace     string
	Provider      Provider
	Expiry        time.Duration
	Tries         int
	RetryDelay    time.Duration
	DelayFunc     DelayFunc
	DriftFactor   float64
	TimeoutFactor float64
	GenValueFunc  func() (string, error)
	Value         string
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/lock/lock.go | lock/lock.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lock
import (
"context"
"fmt"
)
// ErrorKind enum displays human readable message
// in error.
type ErrorKind string

const (
	ErrorKindLockHeld            ErrorKind = "lock already held"
	ErrorKindLockNotHeld         ErrorKind = "lock not held"
	ErrorKindProviderError       ErrorKind = "lock provider error"
	ErrorKindCannotLock          ErrorKind = "timeout while trying to acquire lock"
	ErrorKindContext             ErrorKind = "context error while trying to acquire lock"
	ErrorKindMaxRetriesExceeded  ErrorKind = "max retries exceeded to acquire lock"
	ErrorKindGenerateTokenFailed ErrorKind = "token generation failed"
)

// Error is custom unique type for all type of errors.
type Error struct {
	Kind ErrorKind
	Key  string
	Err  error
}

// NewError builds an *Error carrying the failure kind, the lock key, and
// an optional underlying cause.
func NewError(kind ErrorKind, key string, err error) *Error {
	lockErr := Error{
		Kind: kind,
		Key:  key,
		Err:  err,
	}
	return &lockErr
}

// Error implements error interface.
func (e Error) Error() string {
	base := fmt.Sprintf("%s on key %s", e.Kind, e.Key)
	if e.Err == nil {
		return base
	}
	return base + fmt.Sprintf(" with err: %v", e.Err)
}
// MutexManager describes a Distributed Lock Manager.
type MutexManager interface {
	// NewMutex creates a mutex for the given key. The returned mutex is not held
	// and must be acquired with a call to .Lock.
	NewMutex(key string, options ...Option) (Mutex, error)
}

// Mutex is a single lock bound to one key, obtained from a MutexManager.
type Mutex interface {
	// Key returns the key to be locked.
	Key() string
	// Lock acquires the lock. It fails with error if the lock is already held.
	Lock(ctx context.Context) error
	// Unlock releases the lock. It fails with error if the lock is not currently held.
	Unlock(ctx context.Context) error
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/lock/util.go | lock/util.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lock
import "strings"
// formatKey builds the canonical lock key "<app>:<ns>:<key>".
func formatKey(app, ns, key string) string {
	return strings.Join([]string{app, ns, key}, ":")
}
// SplitKey extracts the namespace and key portions from a formatted
// "<app>:<namespace>:<key>" string. Inputs with fewer than three
// colon-separated parts come back whole as the key with an empty
// namespace; any parts beyond the third are dropped.
func SplitKey(uniqKey string) (namespace, key string) {
	parts := strings.Split(uniqKey, ":")
	if len(parts) <= 2 {
		return "", uniqKey
	}
	return parts[1], parts[2]
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/lock/options.go | lock/options.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lock
import (
"time"
)
// An Option configures a mutex.
type Option interface {
	Apply(*Config)
}

// OptionFunc is a function that configures a mutex.
type OptionFunc func(*Config)

// Apply calls f(config).
func (f OptionFunc) Apply(config *Config) {
	f(config)
}

// WithNamespace returns an option that configures Mutex.ns.
func WithNamespace(ns string) Option {
	return OptionFunc(func(m *Config) {
		m.Namespace = ns
	})
}

// WithExpiry can be used to set the expiry of a mutex to the given value.
func WithExpiry(expiry time.Duration) Option {
	return OptionFunc(func(m *Config) {
		m.Expiry = expiry
	})
}

// WithTries can be used to set the number of times lock acquire is attempted.
func WithTries(tries int) Option {
	return OptionFunc(func(m *Config) {
		m.Tries = tries
	})
}

// WithRetryDelay can be used to set the amount of time to wait between retries.
// It installs a constant-delay DelayFunc, overriding any previous one.
func WithRetryDelay(delay time.Duration) Option {
	return OptionFunc(func(m *Config) {
		m.DelayFunc = func(_ int) time.Duration {
			return delay
		}
	})
}

// WithRetryDelayFunc can be used to override default delay behavior.
func WithRetryDelayFunc(delayFunc DelayFunc) Option {
	return OptionFunc(func(m *Config) {
		m.DelayFunc = delayFunc
	})
}

// WithDriftFactor can be used to set the clock drift factor.
func WithDriftFactor(factor float64) Option {
	return OptionFunc(func(m *Config) {
		m.DriftFactor = factor
	})
}

// WithTimeoutFactor can be used to set the timeout factor.
func WithTimeoutFactor(factor float64) Option {
	return OptionFunc(func(m *Config) {
		m.TimeoutFactor = factor
	})
}

// WithGenValueFunc can be used to set the custom value generator.
func WithGenValueFunc(genValueFunc func() (string, error)) Option {
	return OptionFunc(func(m *Config) {
		m.GenValueFunc = genValueFunc
	})
}

// WithValue can be used to assign the random value without having to call lock.
// This allows the ownership of a lock to be "transferred" and allows the lock to be unlocked from elsewhere.
func WithValue(v string) Option {
	return OptionFunc(func(m *Config) {
		m.Value = v
	})
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/lock/util_test.go | lock/util_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lock
import "testing"
// myKey is the common key fixture shared by the tests below.
const myKey = "mykey"

// TestFormatKey checks formatKey output for normal, empty and
// special-character inputs.
func TestFormatKey(t *testing.T) {
	t.Run("format with all parts", func(t *testing.T) {
		result := formatKey("myapp", "mynamespace", myKey)
		expected := "myapp:mynamespace:mykey"
		if result != expected {
			t.Errorf("expected %s, got %s", expected, result)
		}
	})
	t.Run("format with empty app", func(t *testing.T) {
		result := formatKey("", "mynamespace", myKey)
		expected := ":mynamespace:mykey"
		if result != expected {
			t.Errorf("expected %s, got %s", expected, result)
		}
	})
	t.Run("format with empty namespace", func(t *testing.T) {
		result := formatKey("myapp", "", myKey)
		expected := "myapp::mykey"
		if result != expected {
			t.Errorf("expected %s, got %s", expected, result)
		}
	})
	t.Run("format with empty key", func(t *testing.T) {
		result := formatKey("myapp", "mynamespace", "")
		expected := "myapp:mynamespace:"
		if result != expected {
			t.Errorf("expected %s, got %s", expected, result)
		}
	})
	t.Run("format with all empty", func(t *testing.T) {
		result := formatKey("", "", "")
		expected := "::"
		if result != expected {
			t.Errorf("expected %s, got %s", expected, result)
		}
	})
	t.Run("format with special characters", func(t *testing.T) {
		result := formatKey("app-1", "ns_2", "key.3")
		expected := "app-1:ns_2:key.3"
		if result != expected {
			t.Errorf("expected %s, got %s", expected, result)
		}
	})
}
// TestSplitKey checks namespace/key extraction for well-formed and
// degenerate inputs.
func TestSplitKey(t *testing.T) {
	t.Run("split valid key with three parts", func(t *testing.T) {
		namespace, key := SplitKey("myapp:mynamespace:mykey")
		if namespace != "mynamespace" {
			t.Errorf("expected namespace to be 'mynamespace', got '%s'", namespace)
		}
		if key != myKey {
			t.Errorf("expected key to be 'mykey', got '%s'", key)
		}
	})
	t.Run("split key with more than three parts", func(t *testing.T) {
		namespace, key := SplitKey("myapp:mynamespace:mykey:extra")
		if namespace != "mynamespace" {
			t.Errorf("expected namespace to be 'mynamespace', got '%s'", namespace)
		}
		// SplitKey only takes the third part, not everything after
		if key != myKey {
			t.Errorf("expected key to be 'mykey', got '%s'", key)
		}
	})
	t.Run("split key with two parts", func(t *testing.T) {
		namespace, key := SplitKey("myapp:mynamespace")
		if namespace != "" {
			t.Errorf("expected namespace to be empty, got '%s'", namespace)
		}
		if key != "myapp:mynamespace" {
			t.Errorf("expected key to be 'myapp:mynamespace', got '%s'", key)
		}
	})
	t.Run("split key with one part", func(t *testing.T) {
		namespace, key := SplitKey(myKey)
		if namespace != "" {
			t.Errorf("expected namespace to be empty, got '%s'", namespace)
		}
		if key != myKey {
			t.Errorf("expected key to be 'mykey', got '%s'", key)
		}
	})
	t.Run("split empty key", func(t *testing.T) {
		namespace, key := SplitKey("")
		if namespace != "" {
			t.Errorf("expected namespace to be empty, got '%s'", namespace)
		}
		if key != "" {
			t.Errorf("expected key to be empty, got '%s'", key)
		}
	})
	t.Run("split key with empty parts", func(t *testing.T) {
		namespace, key := SplitKey("myapp::mykey")
		if namespace != "" {
			t.Errorf("expected namespace to be empty, got '%s'", namespace)
		}
		if key != myKey {
			t.Errorf("expected key to be 'mykey', got '%s'", key)
		}
	})
}
func TestFormatAndSplitKey_RoundTrip(t *testing.T) {
testCases := []struct {
app string
namespace string
key string
}{
{"myapp", "mynamespace", myKey},
{"app1", "ns1", "key1"},
{"", "ns", "key"},
{"app", "", "key"},
{"app", "ns", ""},
}
for _, tc := range testCases {
t.Run(tc.app+":"+tc.namespace+":"+tc.key, func(t *testing.T) {
formatted := formatKey(tc.app, tc.namespace, tc.key)
ns, k := SplitKey(formatted)
if ns != tc.namespace {
t.Errorf("namespace mismatch: expected '%s', got '%s'", tc.namespace, ns)
}
if k != tc.key {
t.Errorf("key mismatch: expected '%s', got '%s'", tc.key, k)
}
})
}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/validation/helpers.go | registry/validation/helpers.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package validation
import (
"net/url"
"strings"
)
// IsValidURL checks if URL is valid.
func IsValidURL(uri string) bool {
if u, err := url.ParseRequestURI(uri); err != nil ||
(u.Scheme != "http" && u.Scheme != "https") ||
!validPort(portOnly(u.Host)) {
return false
}
return true
}
func validPort(p string) bool {
for _, r := range []byte(p) {
if r < '0' || r > '9' {
return false
}
}
return true
}
func portOnly(hostport string) string {
colon := strings.IndexByte(hostport, ':')
if colon == -1 {
return ""
}
if i := strings.Index(hostport, "]:"); i != -1 {
return hostport[i+len("]:"):]
}
if strings.Contains(hostport, "]") {
return ""
}
return hostport[colon+len(":"):]
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/services/asyncprocessing/wire.go | registry/services/asyncprocessing/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package asyncprocessing
import (
"context"
"github.com/harness/gitness/app/services/locker"
"github.com/harness/gitness/app/services/refcache"
"github.com/harness/gitness/events"
"github.com/harness/gitness/registry/app/api/interfaces"
"github.com/harness/gitness/registry/app/events/asyncprocessing"
"github.com/harness/gitness/registry/app/pkg/filemanager"
"github.com/harness/gitness/registry/app/store"
"github.com/harness/gitness/registry/app/utils/cargo"
"github.com/harness/gitness/registry/app/utils/gopackage"
"github.com/harness/gitness/secret"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/types"
"github.com/google/wire"
)
// WireSet provides a wire set for this package.
var WireSet = wire.NewSet(
ProvideRegistryPostProcessingConfig,
ProvideService,
ProvideRpmHelper,
)
func ProvideService(
ctx context.Context,
tx dbtx.Transactor,
rpmRegistryHelper RpmHelper,
cargoRegistryHelper cargo.RegistryHelper,
gopackageRegistryHelper gopackage.RegistryHelper,
locker *locker.Locker,
artifactsReaderFactory *events.ReaderFactory[*asyncprocessing.Reader],
config Config,
registryDao store.RegistryRepository,
taskRepository store.TaskRepository,
taskSourceRepository store.TaskSourceRepository,
taskEventRepository store.TaskEventRepository,
eventsSystem *events.System,
postProcessingReporter *asyncprocessing.Reporter,
packageWrapper interfaces.PackageWrapper,
) (*Service, error) {
return NewService(
ctx,
tx,
rpmRegistryHelper,
cargoRegistryHelper,
gopackageRegistryHelper,
locker,
artifactsReaderFactory,
config,
registryDao,
taskRepository,
taskSourceRepository,
taskEventRepository,
eventsSystem,
postProcessingReporter,
packageWrapper,
)
}
func ProvideRegistryPostProcessingConfig(config *types.Config) Config {
return Config{
EventReaderName: config.InstanceID,
Concurrency: config.Registry.PostProcessing.Concurrency,
MaxRetries: config.Registry.PostProcessing.MaxRetries,
AllowLoopback: config.Registry.PostProcessing.AllowLoopback,
}
}
func ProvideRpmHelper(
fileManager filemanager.FileManager,
artifactDao store.ArtifactRepository,
upstreamProxyStore store.UpstreamProxyConfigRepository,
spaceFinder refcache.SpaceFinder,
secretService secret.Service,
registryDao store.RegistryRepository,
) RpmHelper {
return NewRpmHelper(
fileManager,
artifactDao,
upstreamProxyStore,
spaceFinder,
secretService,
registryDao,
)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/services/asyncprocessing/service.go | registry/services/asyncprocessing/service.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package asyncprocessing
import (
"context"
"encoding/json"
"errors"
"fmt"
"strconv"
"time"
"github.com/harness/gitness/app/api/request"
"github.com/harness/gitness/app/auth"
"github.com/harness/gitness/app/services/locker"
"github.com/harness/gitness/events"
"github.com/harness/gitness/registry/app/api/interfaces"
"github.com/harness/gitness/registry/app/api/openapi/contracts/artifact"
"github.com/harness/gitness/registry/app/events/asyncprocessing"
"github.com/harness/gitness/registry/app/store"
"github.com/harness/gitness/registry/app/utils/cargo"
"github.com/harness/gitness/registry/app/utils/gopackage"
"github.com/harness/gitness/registry/types"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/stream"
coretypes "github.com/harness/gitness/types"
"github.com/rs/zerolog/log"
)
const (
timeout = 1 * time.Hour
eventsReaderGroupName = "registry:postprocessing"
)
// TaskHandler is a function type for handling tasks.
type TaskHandler func(ctx context.Context, task *types.Task, eventID string) error
type Service struct {
tx dbtx.Transactor
rpmRegistryHelper RpmHelper
cargoRegistryHelper cargo.RegistryHelper
gopackageRegistryHelper gopackage.RegistryHelper
locker *locker.Locker
registryDao store.RegistryRepository
taskRepository store.TaskRepository
taskSourceRepository store.TaskSourceRepository
taskEventRepository store.TaskEventRepository
innerReporter *events.GenericReporter
postProcessingReporter *asyncprocessing.Reporter
packageWrapper interfaces.PackageWrapper
taskHandlers map[types.TaskKind]TaskHandler
}
func NewService(
ctx context.Context,
tx dbtx.Transactor,
rpmRegistryHelper RpmHelper,
cargoRegistryHelper cargo.RegistryHelper,
gopackageRegistryHelper gopackage.RegistryHelper,
locker *locker.Locker,
artifactsReaderFactory *events.ReaderFactory[*asyncprocessing.Reader],
config Config,
registryDao store.RegistryRepository,
taskRepository store.TaskRepository,
taskSourceRepository store.TaskSourceRepository,
taskEventRepository store.TaskEventRepository,
eventsSystem *events.System,
postProcessingReporter *asyncprocessing.Reporter,
packageWrapper interfaces.PackageWrapper,
) (*Service, error) {
if err := config.Prepare(); err != nil {
return nil, fmt.Errorf("provided postprocessing service config is invalid: %w", err)
}
innerReporter, err := events.NewReporter(eventsSystem, asyncprocessing.RegistryAsyncProcessing)
if err != nil {
return nil, errors.New("failed to create new GenericReporter for registry async processing from event system")
}
s := &Service{
rpmRegistryHelper: rpmRegistryHelper,
cargoRegistryHelper: cargoRegistryHelper,
gopackageRegistryHelper: gopackageRegistryHelper,
locker: locker,
tx: tx,
registryDao: registryDao,
taskRepository: taskRepository,
taskSourceRepository: taskSourceRepository,
taskEventRepository: taskEventRepository,
innerReporter: innerReporter,
postProcessingReporter: postProcessingReporter,
packageWrapper: packageWrapper,
taskHandlers: make(map[types.TaskKind]TaskHandler),
}
_, err = artifactsReaderFactory.Launch(ctx, eventsReaderGroupName, config.EventReaderName,
func(r *asyncprocessing.Reader) error {
const idleTimeout = 1 * time.Minute
r.Configure(
stream.WithConcurrency(config.Concurrency),
stream.WithHandlerOptions(
stream.WithIdleTimeout(idleTimeout),
stream.WithMaxRetries(config.MaxRetries),
))
// register events with common wrapper
_ = r.RegisterExecuteAsyncTask(wrapHandler(
s.handleEventExecuteAsyncTask,
))
return nil
})
if err != nil {
return nil, fmt.Errorf("failed to launch registry event reader for postprocessing: %w", err)
}
return s, nil
}
type Config struct {
EventReaderName string
Concurrency int
MaxRetries int
AllowLoopback bool
}
func (c *Config) Prepare() error {
if c == nil {
return errors.New("config is required")
}
if c.EventReaderName == "" {
return errors.New("Config.EventReaderName is required")
}
if c.Concurrency < 1 {
return errors.New("Config.Concurrency has to be a positive number")
}
if c.MaxRetries < 0 {
return errors.New("Config.MaxRetries can't be negative")
}
return nil
}
func wrapHandler[T any](
handler events.HandlerFunc[T],
) events.HandlerFunc[T] {
return func(ctx context.Context, e *events.Event[T]) error {
return handler(ctx, e)
}
}
func (s *Service) handleEventExecuteAsyncTask(
ctx context.Context,
e *events.Event[*asyncprocessing.ExecuteAsyncTaskPayload],
) error {
unlock, err := s.locker.LockResource(ctx, e.Payload.TaskKey, timeout)
if err != nil {
log.Ctx(ctx).Error().Msgf("failed acquire lock by key, eventID: %s, eventId:%s err: %v", e.Payload.TaskKey, e.ID, err)
return err
}
defer unlock()
task, err := s.taskRepository.Find(ctx, e.Payload.TaskKey)
if err != nil {
return err
}
err = s.ProcessingStatusUpdate(ctx, task, e.ID)
if err != nil {
return fmt.Errorf("failed to update task status: %w", err)
}
var processingErr error
//nolint:nestif
switch task.Kind {
case types.TaskKindBuildRegistryIndex:
err := s.handleBuildRegistryIndex(ctx, task, e.ID)
if err != nil {
processingErr = fmt.Errorf("failed to build registry index: %w", err)
}
case types.TaskKindBuildPackageIndex:
err := s.handleBuildPackageIndex(ctx, task, e.ID)
if err != nil {
processingErr = fmt.Errorf("failed to build package index: %w", err)
}
case types.TaskKindBuildPackageMetadata:
err := s.handleBuildPackageMetadata(ctx, task, e.ID)
if err != nil {
processingErr = fmt.Errorf("failed to build package metadata: %w", err)
}
default:
if handler, exists := s.taskHandlers[task.Kind]; exists {
err = handler(ctx, task, e.ID)
if err != nil {
processingErr = fmt.Errorf("failed to handle task kind [%s]: %w", task.Kind, err)
}
} else {
processingErr = fmt.Errorf("unsupported task kind [%s] for task [%s]", task.Kind, task.Key)
}
}
runAgain, err := s.finalStatusUpdate(ctx, e, task, processingErr)
if err != nil {
log.Ctx(ctx).Error().Msgf("failed to update final status for task [%s]: %v", task.Key, err)
}
if runAgain {
eventID, err := events.ReporterSendEvent(s.innerReporter, ctx, asyncprocessing.ExecuteAsyncTask, e.Payload)
if err != nil {
log.Ctx(ctx).Err(err).Msgf("failed to send execute async task event")
return err
}
log.Ctx(ctx).Debug().Msgf("reported execute async task event with id '%s'", eventID)
}
return nil
}
func (s *Service) handleBuildRegistryIndex(ctx context.Context, task *types.Task, eventID string) error {
var processingErr error
var payload types.BuildRegistryIndexTaskPayload
err := json.Unmarshal(task.Payload, &payload)
if err != nil {
log.Ctx(ctx).Error().Msgf("failed to unmarshal task payload for task [%s]: %v", task.Key, err)
return fmt.Errorf("failed to unmarshal task payload: %w", err)
}
registry, err := s.registryDao.Get(ctx, payload.RegistryID)
if err != nil {
log.Ctx(ctx).Error().Msgf("failed to get registry [%d] for registry build index event: %s, err: %v",
payload.RegistryID, eventID, err)
return fmt.Errorf("failed to get registry: %w", err)
}
//nolint:exhaustive
switch registry.PackageType {
case artifact.PackageTypeRPM:
err := s.rpmRegistryHelper.BuildRegistryFiles(ctx, *registry, payload.PrincipalID)
if err != nil {
processingErr = fmt.Errorf("failed to build RPM registry files for registry [%d]: %w",
payload.RegistryID, err)
}
registryIDs, err2 := s.registryDao.FetchRegistriesIDByUpstreamProxyID(
ctx, strconv.FormatInt(registry.ID, 10), registry.RootParentID,
)
if err2 != nil {
log.Ctx(ctx).Error().Msgf("failed to fetch registries whyle building registry "+
"files by upstream proxy ID for registry [%d]: %v", payload.RegistryID, err2)
}
if len(registryIDs) > 0 {
for _, id := range registryIDs {
s.postProcessingReporter.BuildRegistryIndexWithPrincipal(
ctx, id, make([]types.SourceRef, 0), payload.PrincipalID,
)
}
}
default:
err := s.packageWrapper.BuildRegistryIndexAsync(ctx, payload)
if err != nil {
processingErr = fmt.Errorf("failed to build registry index for registry [%d]: %w", payload.RegistryID, err)
}
}
return processingErr
}
func (s *Service) handleBuildPackageIndex(
ctx context.Context,
task *types.Task,
eventID string,
) error {
var processingErr error
var payload types.BuildPackageIndexTaskPayload
err := json.Unmarshal(task.Payload, &payload)
if err != nil {
log.Ctx(ctx).Error().Msgf("failed to unmarshal task payload for task [%s]: %v", task.Key, err)
return fmt.Errorf("failed to unmarshal task payload: %w", err)
}
registry, err := s.registryDao.Get(ctx, payload.RegistryID)
if err != nil {
log.Ctx(ctx).Error().Msgf("failed to get registry [%d] for registry build index event: %s, err: %v",
payload.RegistryID, eventID, err)
return fmt.Errorf("failed to get registry: %w", err)
}
//nolint:exhaustive
switch registry.PackageType {
case artifact.PackageTypeGO:
err := s.gopackageRegistryHelper.UpdatePackageIndex(
ctx, payload.PrincipalID, registry.RootParentID, registry.ID, payload.Image,
)
if err != nil {
processingErr = fmt.Errorf("failed to build GO package index for registry [%d] package [%s]: %w",
payload.RegistryID, payload.Image, err)
}
default:
err := s.packageWrapper.BuildPackageIndexAsync(ctx, payload)
if err != nil {
processingErr = fmt.Errorf("failed to build package index for registry [%d]: %w", payload.RegistryID, err)
}
}
return processingErr
}
func (s *Service) handleBuildPackageMetadata(
ctx context.Context,
task *types.Task,
eventID string,
) error {
var processingErr error
var payload types.BuildPackageMetadataTaskPayload
err := json.Unmarshal(task.Payload, &payload)
if err != nil {
log.Ctx(ctx).Error().Msgf("failed to unmarshal task payload for task [%s]: %v", task.Key, err)
return fmt.Errorf("failed to unmarshal task payload: %w", err)
}
// Set auth session
ctx2 := request.WithAuthSession(ctx, &auth.Session{
Principal: coretypes.Principal{
ID: payload.PrincipalID,
},
})
// Get registry
registry, err := s.registryDao.Get(ctx2, payload.RegistryID)
if err != nil {
log.Ctx(ctx2).Error().Msgf("failed to get registry [%d] for registry build index event: %s, err: %v",
payload.RegistryID, eventID, err)
return fmt.Errorf("failed to get registry: %w", err)
}
//nolint:exhaustive
switch registry.PackageType {
case artifact.PackageTypeGO:
err := s.gopackageRegistryHelper.UpdatePackageMetadata(
ctx2, registry.RootParentID, registry.ID, payload.Image, payload.Version,
)
if err != nil {
processingErr = fmt.Errorf("failed to build GO package metadata for registry [%d] package [%s]: %w",
payload.RegistryID, payload.Image, err)
}
default:
err := s.packageWrapper.BuildPackageMetadataAsync(ctx, payload)
if err != nil {
processingErr = fmt.Errorf("failed to build package metadata for registry [%d]: %w", payload.RegistryID, err)
}
}
return processingErr
}
//nolint:nestif
func (s *Service) finalStatusUpdate(
ctx context.Context,
e *events.Event[*asyncprocessing.ExecuteAsyncTaskPayload],
task *types.Task,
processingErr error,
) (bool, error) {
var runAgain bool
err := s.tx.WithTx(
ctx, func(ctx context.Context) error {
_, err := s.taskRepository.LockForUpdate(ctx, task)
if err != nil {
log.Ctx(ctx).Error().Msgf("failed to lock task [%s] for update: %v", task.Key, err)
return fmt.Errorf("failed to lock task for update: %w", err)
}
if processingErr != nil {
log.Error().Ctx(ctx).Msgf("processing error for task [%s]: %v", task.Key, processingErr)
err = s.taskSourceRepository.UpdateSourceStatus(ctx, e.ID, types.TaskStatusFailure, processingErr.Error())
if err != nil {
return err
}
runAgain, err = s.taskRepository.CompleteTask(ctx, task.Key, types.TaskStatusFailure)
if err != nil {
return err
}
} else {
err = s.taskSourceRepository.UpdateSourceStatus(ctx, e.ID, types.TaskStatusSuccess, "")
if err != nil {
return err
}
runAgain, err = s.taskRepository.CompleteTask(ctx, task.Key, types.TaskStatusSuccess)
if err != nil {
return err
}
}
return err
})
if err != nil {
return false, fmt.Errorf("failed to update final statuses of task and sources, eventID:%s, task key: %s, err: %w",
e.ID, task.Key, err)
}
return runAgain, nil
}
// RegisterTaskHandler registers a custom handler for a specific task kind.
func (s *Service) RegisterTaskHandler(kind types.TaskKind, handler TaskHandler) {
s.taskHandlers[kind] = handler
}
func (s *Service) ProcessingStatusUpdate(ctx context.Context, task *types.Task, runID string) error {
err := s.tx.WithTx(
ctx, func(ctx context.Context) error {
_, err := s.taskRepository.LockForUpdate(ctx, task)
if err != nil {
log.Ctx(ctx).Error().Msgf("failed to lock task [%s] for update: %v", task.Key, err)
return fmt.Errorf("failed to lock task for update: %w", err)
}
err = s.taskRepository.UpdateStatus(ctx, task.Key, types.TaskStatusProcessing)
if err != nil {
return err
}
err = s.taskRepository.SetRunAgain(ctx, task.Key, false)
if err != nil {
log.Ctx(ctx).Error().Msgf("failed to set task [%s] to run again: %v", task.Key, err)
return fmt.Errorf("failed to set task to run again: %w", err)
}
err = s.taskSourceRepository.ClaimSources(ctx, task.Key, runID)
if err != nil {
return err
}
err = s.taskEventRepository.LogTaskEvent(ctx, task.Key, "started", task.Payload)
if err != nil {
log.Ctx(ctx).Error().Msgf("failed to log task event for task [%s]: %v", task.Key, err)
}
return nil
},
)
return err
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/services/asyncprocessing/rpm_helper.go | registry/services/asyncprocessing/rpm_helper.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package asyncprocessing
//nolint:gosec
import (
"bytes"
"compress/gzip"
"context"
"encoding/json"
"encoding/xml"
"errors"
"fmt"
"io"
"net/url"
"strings"
"time"
"github.com/harness/gitness/app/services/refcache"
"github.com/harness/gitness/registry/app/api/openapi/contracts/artifact"
rpmmetadata "github.com/harness/gitness/registry/app/metadata/rpm"
"github.com/harness/gitness/registry/app/pkg/base"
"github.com/harness/gitness/registry/app/pkg/filemanager"
"github.com/harness/gitness/registry/app/pkg/rpm"
"github.com/harness/gitness/registry/app/store"
rpmtypes "github.com/harness/gitness/registry/app/utils/rpm/types"
"github.com/harness/gitness/registry/types"
"github.com/harness/gitness/secret"
"github.com/klauspost/compress/zstd"
"github.com/rs/zerolog/log"
"github.com/ulikunitz/xz"
)
const (
RepoMdFile = "repomd.xml"
RepoDataPrefix = "repodata/"
PrimaryFile = "primary.xml.gz"
OtherFile = "other.xml.gz"
FileListsFile = "filelists.xml.gz"
artifactBatchLimit = 50
packageStartElements = "package"
)
type RpmHelper interface {
BuildRegistryFiles(
ctx context.Context,
registry types.Registry,
principalID int64,
) error
}
type rpmHelper struct {
fileManager filemanager.FileManager
artifactDao store.ArtifactRepository
upstreamProxyStore store.UpstreamProxyConfigRepository
spaceFinder refcache.SpaceFinder
secretService secret.Service
registryDao store.RegistryRepository
}
type registryData interface {
getReader(ctx context.Context) (io.ReadCloser, error)
getFileRef() string
}
func (l localRepoData) getReader(ctx context.Context) (io.ReadCloser, error) {
primaryReader, _, _, err := l.fileManager.DownloadFile(
ctx, "/"+l.fileRef, l.registryID, l.registryIdentifier, l.rootIdentifier, false,
)
if err != nil {
return nil, fmt.Errorf("failed to get primary.xml.gz: %w", err)
}
return primaryReader, nil
}
func (l localRepoData) getFileRef() string {
return l.fileRef
}
type localRepoData struct {
fileManager filemanager.FileManager
fileRef string
registryID int64
registryIdentifier string
rootIdentifier string
}
type remoteRepoData struct {
helper rpm.RemoteRegistryHelper
fileRef string
}
func (r remoteRepoData) getReader(ctx context.Context) (io.ReadCloser, error) {
readCloser, err := r.helper.GetMetadataFile(ctx, "/"+r.fileRef)
if err != nil {
return nil, fmt.Errorf("failed to get primary.xml.gz: %w", err)
}
return readCloser, nil
}
func (r remoteRepoData) getFileRef() string {
return r.fileRef
}
func NewRpmHelper(
fileManager filemanager.FileManager,
artifactDao store.ArtifactRepository,
upstreamProxyStore store.UpstreamProxyConfigRepository,
spaceFinder refcache.SpaceFinder,
secretService secret.Service,
registryDao store.RegistryRepository,
) RpmHelper {
return &rpmHelper{
fileManager: fileManager,
artifactDao: artifactDao,
upstreamProxyStore: upstreamProxyStore,
spaceFinder: spaceFinder,
secretService: secretService,
registryDao: registryDao,
}
}
func (l *rpmHelper) BuildRegistryFiles(
ctx context.Context,
registry types.Registry,
principalID int64,
) error {
rootSpace, err := l.spaceFinder.FindByID(ctx, registry.RootParentID)
if err != nil {
return fmt.Errorf("failed to find root space by ID: %w", err)
}
existingPackageInfos, err := l.getExistingArtifactInfos(ctx, registry.ID)
if err != nil {
return err
}
if registry.Type == artifact.RegistryTypeUPSTREAM {
return l.buildForUpstream(ctx, registry.ID, registry.RootParentID, existingPackageInfos,
rootSpace.Identifier, principalID)
}
return l.buildForVirtual(ctx, registry.ID, registry.Name, registry.RootParentID, registry.ParentID,
rootSpace.Identifier, existingPackageInfos, principalID)
}
func (l *rpmHelper) buildForVirtual(
ctx context.Context,
registryID int64,
registryIdentifier string,
rootParentID int64,
parentID int64,
rootIdentifier string,
existingPackageInfos []*rpmtypes.PackageInfo,
principalID int64,
) error {
registries, err := base.GetOrderedRepos(ctx, l.registryDao, registryIdentifier, parentID, true)
if err != nil {
return err
}
var primary, fileLists, other *rpmtypes.RepoData
primaryRegistryData, err := l.getRegistryData(ctx, registries, rootIdentifier, "primary")
if err != nil {
return err
}
primary, err = l.buildPrimary(ctx, existingPackageInfos, registryID, rootParentID, rootIdentifier,
registryIdentifier, false, primaryRegistryData, principalID)
if err != nil {
return err
}
fileListsRegistryData, err := l.getRegistryData(ctx, registries, rootIdentifier, "filelists")
if err != nil {
return err
}
fileLists, err = l.buildFileLists(ctx, existingPackageInfos, registryID, rootParentID,
rootIdentifier, fileListsRegistryData, principalID)
if err != nil {
return err
}
otherRegistryData, err := l.getRegistryData(ctx, registries, rootIdentifier, "other")
if err != nil {
return err
}
other, err = l.buildOther(ctx, existingPackageInfos, registryID, rootParentID,
rootIdentifier, otherRegistryData, principalID)
if err != nil {
return err
}
err = l.buildRepoMDFile(ctx, registryID, rootParentID,
primary, fileLists, other, rootIdentifier, principalID)
return err
}
func (l *rpmHelper) buildForUpstream(
ctx context.Context,
registryID int64,
rootParentID int64,
existingPackageInfos []*rpmtypes.PackageInfo,
rootIdentifier string,
principalID int64,
) error {
var primary, fileLists, other *rpmtypes.RepoData
upstream, err := l.upstreamProxyStore.Get(ctx, registryID)
if err != nil {
return err
}
helper, err := rpm.NewRemoteRegistryHelper(ctx, l.spaceFinder, *upstream, l.secretService)
if err != nil {
return err
}
primaryRef, fileListsRef, otherRef, err := l.getRefs(ctx, helper)
if err != nil {
return err
}
prd := &remoteRepoData{
helper: helper,
fileRef: primaryRef,
}
primary, err = l.buildPrimary(ctx, existingPackageInfos, registryID, rootParentID, rootIdentifier,
upstream.RepoKey, true, []registryData{prd}, principalID)
if err != nil {
return err
}
if fileListsRef != "" {
frd := &remoteRepoData{
helper: helper,
fileRef: fileListsRef,
}
fileLists, err = l.buildFileLists(ctx, existingPackageInfos, registryID, rootParentID,
rootIdentifier, []registryData{frd}, principalID)
if err != nil {
return err
}
}
if otherRef != "" {
ord := &remoteRepoData{
helper: helper,
fileRef: otherRef,
}
other, err = l.buildOther(ctx, existingPackageInfos, registryID, rootParentID,
rootIdentifier, []registryData{ord}, principalID)
if err != nil {
return err
}
}
// Build the repodata file
err = l.buildRepoMDFile(ctx, registryID, rootParentID, primary, fileLists,
other, rootIdentifier, principalID)
if err != nil {
return err
}
return nil
}
func (l *rpmHelper) getRegistryData(
ctx context.Context,
registries []types.Registry,
rootIdentifier string,
refType string,
) ([]registryData, error) {
var rd = make([]registryData, 0)
for i := 1; i < len(registries); i++ {
r := registries[i]
fileRef, err := l.getRefsForHarnessRepos(ctx, r.ID, r.Name, rootIdentifier, refType, r.Type)
if err != nil {
return nil, err
}
if fileRef != "" {
rd = append(rd, &localRepoData{
l.fileManager,
fileRef,
r.ID,
r.Name,
rootIdentifier,
})
}
}
return rd, nil
}
func (l *rpmHelper) getExistingArtifactInfos(
ctx context.Context,
registryID int64,
) ([]*rpmtypes.PackageInfo, error) {
lastArtifactID := int64(0)
var packageInfos []*rpmtypes.PackageInfo
for {
artifacts, err := l.artifactDao.GetAllArtifactsByRepo(ctx, registryID, artifactBatchLimit, lastArtifactID)
if err != nil {
return nil, err
}
for _, a := range *artifacts {
metadata := rpmmetadata.RpmMetadata{}
err := json.Unmarshal(a.Metadata, &metadata)
if err != nil {
return nil, err
}
packageInfos = append(packageInfos, &rpmtypes.PackageInfo{
Name: a.Name,
Sha256: metadata.GetFiles()[0].Sha256,
Size: metadata.GetFiles()[0].Size,
VersionMetadata: &metadata.VersionMetadata,
FileMetadata: &metadata.FileMetadata,
})
if a.ID > lastArtifactID {
lastArtifactID = a.ID
}
}
if len(*artifacts) < artifactBatchLimit {
break
}
}
return packageInfos, nil
}
func (l *rpmHelper) buildRepoMDFile(
ctx context.Context,
registryID int64,
rootParentID int64,
primary *rpmtypes.RepoData,
fileLists *rpmtypes.RepoData,
other *rpmtypes.RepoData,
rootIdentifier string,
principalID int64,
) error {
err := l.buildRepomd(ctx, []*rpmtypes.RepoData{
primary,
fileLists,
other,
}, registryID, rootParentID, rootIdentifier, principalID)
return err
}
func (l *rpmHelper) getRefsForHarnessRepos(
ctx context.Context,
registryID int64,
registryIdentifier string,
rootIdentifier string,
refType string,
regType artifact.RegistryType,
) (string, error) {
fileReader, _, _, err := l.fileManager.DownloadFile(
ctx, "/repodata/repomd.xml", registryID, registryIdentifier, rootIdentifier, false,
)
if err != nil {
if regType == artifact.RegistryTypeVIRTUAL && strings.Contains(err.Error(), "file not found") {
log.Ctx(ctx).Warn().Err(err).Msgf("unable to find repomd files for registry: [%s]", registryIdentifier)
return "", nil
}
return "", err
}
defer fileReader.Close()
var repoMD rpmtypes.Repomd
if err := xml.NewDecoder(fileReader).Decode(&repoMD); err != nil {
return "", fmt.Errorf("failed to parse repomd.xml: %w", err)
}
for _, data := range repoMD.Data {
if refType == data.Type {
return data.Location.Href, nil
}
}
return "", nil
}
func (l *rpmHelper) getRefs(ctx context.Context, helper rpm.RemoteRegistryHelper) (string, string, string, error) {
closer, err := helper.GetMetadataFile(ctx, "/repodata/repomd.xml")
if err != nil {
return "", "", "", err
}
defer closer.Close()
var repoMD rpmtypes.Repomd
var primaryRef string
var filelistsRef string
var otherRef string
if err := xml.NewDecoder(closer).Decode(&repoMD); err != nil {
return "", "", "", fmt.Errorf("failed to parse repomd.xml: %w", err)
}
for _, data := range repoMD.Data {
switch data.Type {
case "primary":
primaryRef = data.Location.Href
case "filelists":
filelistsRef = data.Location.Href
case "other":
otherRef = data.Location.Href
}
}
return primaryRef, filelistsRef, otherRef, nil
}
func (l *rpmHelper) validateRootElement(fileListsDecoder *xml.Decoder, rootElement string) error {
var startElem xml.StartElement
for {
token, err := fileListsDecoder.Token()
if err != nil {
return fmt.Errorf("failed to read start element: %w", err)
}
if se, ok := token.(xml.StartElement); ok {
startElem = se
break
}
}
if startElem.Name.Local != rootElement {
return fmt.Errorf("unexpected root element: %v", startElem.Name.Local)
}
return nil
}
// buildPrimary regenerates the primary.xml.gz metadata for a registry.
// Locally known packages (existingPis) are encoded first; packages parsed
// from each source in repoDataList are then merged in, skipping entries
// already emitted (de-duplicated by "name:version-release:arch"). The XML is
// gzip compressed and streamed through an io.Pipe into a temp upload, which
// is then moved to its content-addressed repodata path.
//
// overridePath, when true, rewrites upstream package hrefs so downloads route
// through this registry's package endpoint.
func (l *rpmHelper) buildPrimary(
	ctx context.Context,
	existingPis []*rpmtypes.PackageInfo,
	registryID int64,
	rootParentID int64,
	rootIdentifier string,
	repoKey string,
	overridePath bool,
	repoDataList []registryData,
	principalID int64,
) (*rpmtypes.RepoData, error) {
	// Producer goroutine writes gzip'd XML to pw while UploadTempFile below
	// consumes pr; errors are propagated to the reader via pw.CloseWithError.
	pr, pw := io.Pipe()
	go func() {
		defer pw.Close()
		// set tracks packages already written ("name:version-release:arch")
		// so upstream duplicates are skipped.
		set := make(map[string]struct{})
		gzw := gzip.NewWriter(pw)
		defer gzw.Close()
		encoder := xml.NewEncoder(gzw)
		if _, err := gzw.Write([]byte(xml.Header)); err != nil {
			pw.CloseWithError(fmt.Errorf("failed to write XML header: %w", err))
			return
		}
		if err := encoder.EncodeToken(xml.StartElement{Name: xml.Name{Local: "metadata"},
			Attr: []xml.Attr{
				{Name: xml.Name{Local: "xmlns"}, Value: "http://linux.duke.edu/metadata/common"},
				{Name: xml.Name{Local: "xmlns:rpm"}, Value: "http://linux.duke.edu/metadata/rpm"},
				{Name: xml.Name{Local: "packages"}, Value: fmt.Sprintf("%d", 0)}, // TODO fix size
			}}); err != nil {
			pw.CloseWithError(fmt.Errorf("failed to encode metadata start: %w", err))
			return
		}
		// Local packages first: they take precedence over upstream copies
		// because the de-dup set is populated here.
		for _, pi := range existingPis {
			rootPackagePath := fmt.Sprintf("../../%s/rpm/package", repoKey)
			_, pp := getPrimaryPackage(pi, rootPackagePath)
			if err := encoder.Encode(pp); err != nil {
				pw.CloseWithError(fmt.Errorf("failed to encode package: %w", err))
				return
			}
			set[fmt.Sprintf("%s:%s-%s:%s", pp.Name, pp.Version.Version, pp.Version.Release,
				pp.Architecture)] = struct{}{}
		}
		for _, rd := range repoDataList {
			readCloser, err := rd.getReader(ctx)
			if err != nil {
				pw.CloseWithError(fmt.Errorf("failed to get primary.xml.gz: %w", err))
				return
			}
			// NOTE(review): defer inside this loop keeps every source open
			// until the goroutine exits — fine for small lists; confirm
			// repoDataList stays small.
			defer readCloser.Close()
			reader, err := getReader(rd.getFileRef(), readCloser)
			if err != nil {
				pw.CloseWithError(fmt.Errorf("failed to create reader: %w", err))
				return
			}
			if closer, ok := reader.(io.Closer); ok {
				defer closer.Close()
			}
			decoder := xml.NewDecoder(reader)
			err = l.validateRootElement(decoder, "metadata")
			if err != nil {
				pw.CloseWithError(fmt.Errorf("failed to validate primary.xml root element: %w", err))
				return
			}
			// Stream <package> elements one by one to bound memory use.
			for {
				token, err := decoder.Token()
				if errors.Is(err, io.EOF) {
					break
				}
				if err != nil {
					pw.CloseWithError(fmt.Errorf("error reading primary.xml: %w", err))
					return
				}
				packageStartElement, ok := token.(xml.StartElement)
				if !ok || packageStartElement.Name.Local != packageStartElements {
					continue
				}
				var pkg rpmtypes.PrimaryPackage
				if err := decoder.DecodeElement(&pkg, &packageStartElement); err != nil {
					pw.CloseWithError(fmt.Errorf("failed to decode package: %w", err))
					return
				}
				if overridePath {
					// Rewrite the href to this registry's package path,
					// appending the original href as the trailing segment.
					packageVersion := fmt.Sprintf("%s-%s", pkg.Version.Version, pkg.Version.Release)
					pkg.Location.Href = fmt.Sprintf("../../%s/rpm/package/%s/%s/%s/%s/%s", repoKey,
						url.PathEscape(pkg.Name),
						url.PathEscape(packageVersion),
						url.PathEscape(pkg.Architecture),
						url.PathEscape(fmt.Sprintf("%s-%s.%s.rpm", pkg.Name, packageVersion, pkg.Architecture)),
						pkg.Location.Href)
				}
				// If the source is a local registry (in a virtual aggregate), ensure hrefs point via that registry.
				// For local upstreams in a virtual registry, if href doesn't already start with ../../<registryIdentifier>/rpm/
				// then prefix it so downloads route through the child registry endpoint.
				if lrd, ok := rd.(*localRepoData); ok {
					prefix := "../../" + lrd.registryIdentifier + "/rpm/"
					if !strings.HasPrefix(pkg.Location.Href, prefix) {
						pkg.Location.Href = prefix + pkg.Location.Href
					}
				}
				pkgref := fmt.Sprintf("%s:%s-%s:%s", pkg.Name, pkg.Version.Version,
					pkg.Version.Release, pkg.Architecture)
				if _, exists := set[pkgref]; !exists {
					if err := encoder.Encode(pkg); err != nil {
						pw.CloseWithError(fmt.Errorf("failed to encode package: %w", err))
						return
					}
					set[fmt.Sprintf("%s:%s-%s:%s", pkg.Name, pkg.Version.Version, pkg.Version.Release,
						pkg.Architecture)] = struct{}{}
				}
			}
		}
		if err := encoder.EncodeToken(xml.EndElement{Name: xml.Name{Local: "metadata"}}); err != nil {
			pw.CloseWithError(fmt.Errorf("failed to encode metadata end: %w", err))
			return
		}
		if err := encoder.Flush(); err != nil {
			pw.CloseWithError(fmt.Errorf("failed to flush encoder: %w", err))
			return
		}
	}()
	// Upload the streamed content, then move it to its sha256-addressed path.
	info, tempFileName, err := l.fileManager.UploadTempFile(ctx, rootIdentifier, nil, PrimaryFile, pr)
	if err != nil {
		return nil, fmt.Errorf("failed to upload file: %w", err)
	}
	filePath := RepoDataPrefix + info.Sha256 + "-" + PrimaryFile
	err = l.fileManager.MoveTempFile(ctx, filePath, registryID, rootParentID, rootIdentifier,
		info, tempFileName, principalID)
	if err != nil {
		return nil, fmt.Errorf("failed to move temp file [%s] to [%s]: %w", tempFileName, filePath, err)
	}
	return getRepoData(info, filePath, "primary"), nil
}
// getReader wraps readCloser in a decompressing reader selected by the
// (case-insensitive) extension of fileRef: ".xz" -> xz, ".zst" -> zstd,
// anything else -> gzip.
func getReader(fileRef string, readCloser io.ReadCloser) (io.Reader, error) {
	lowerRef := strings.ToLower(fileRef)

	if strings.HasSuffix(lowerRef, ".xz") {
		xzReader, err := xz.NewReader(readCloser)
		if err != nil {
			return nil, fmt.Errorf("failed to create xz reader: %w", err)
		}
		return xzReader, nil
	}

	if strings.HasSuffix(lowerRef, ".zst") {
		zstReader, err := zstd.NewReader(readCloser)
		if err != nil {
			return nil, fmt.Errorf("failed to create zstd reader: %w", err)
		}
		return zstReader, nil
	}

	// Default: repodata files are conventionally gzip compressed.
	gzipReader, err := gzip.NewReader(readCloser)
	if err != nil {
		return nil, fmt.Errorf("failed to create gzip reader: %w", err)
	}
	return gzipReader, nil
}
// buildOther regenerates the other.xml.gz metadata (changelog data) for a
// registry. Local packages (pds) are encoded first; packages parsed from each
// source in repoDataList are then merged in, de-duplicated by pkgid (sha256).
// The gzip'd XML is streamed through an io.Pipe into a temp upload, which is
// then moved to its content-addressed repodata path.
func (l *rpmHelper) buildOther(
	ctx context.Context,
	pds []*rpmtypes.PackageInfo,
	registryID int64,
	rootParentID int64,
	rootIdentifier string,
	repoDataList []registryData,
	principalID int64,
) (*rpmtypes.RepoData, error) {
	// Producer goroutine writes gzip'd XML to pw while UploadTempFile below
	// consumes pr; errors are propagated via pw.CloseWithError.
	pr, pw := io.Pipe()
	go func() {
		defer pw.Close()
		// set tracks emitted pkgids so upstream duplicates are skipped.
		set := make(map[string]struct{})
		gzw := gzip.NewWriter(pw)
		defer gzw.Close()
		encoder := xml.NewEncoder(gzw)
		if _, err := gzw.Write([]byte(xml.Header)); err != nil {
			pw.CloseWithError(fmt.Errorf("failed to write XML header: %w", err))
			return
		}
		if err := encoder.EncodeToken(xml.StartElement{Name: xml.Name{Local: "otherdata"},
			Attr: []xml.Attr{
				{Name: xml.Name{Local: "xmlns"}, Value: "http://linux.duke.edu/metadata/other"},
				{Name: xml.Name{Local: "packages"}, Value: fmt.Sprintf("%d", 0)}, // TODO fix size
			}}); err != nil {
			pw.CloseWithError(fmt.Errorf("failed to encode metadata start: %w", err))
			return
		}
		// Local packages first: they take precedence over upstream copies.
		for _, pd := range pds {
			op := getOtherPackage(pd)
			if err := encoder.Encode(op); err != nil {
				pw.CloseWithError(fmt.Errorf("failed to encode package: %w", err))
				return
			}
			set[op.Pkgid] = struct{}{}
		}
		for _, rd := range repoDataList {
			readCloser, err := rd.getReader(ctx)
			if err != nil {
				pw.CloseWithError(fmt.Errorf("failed to get other.xml.gz: %w", err))
				return
			}
			defer readCloser.Close()
			reader, err := getReader(rd.getFileRef(), readCloser)
			if err != nil {
				pw.CloseWithError(fmt.Errorf("failed to create reader: %w", err))
				return
			}
			if closer, ok := reader.(io.Closer); ok {
				defer closer.Close()
			}
			otherDecoder := xml.NewDecoder(reader)
			err = l.validateRootElement(otherDecoder, "otherdata")
			if err != nil {
				pw.CloseWithError(fmt.Errorf("failed to validate other.xml root element: %w", err))
				return
			}
			for {
				token, err := otherDecoder.Token()
				if errors.Is(err, io.EOF) {
					break
				}
				if err != nil {
					pw.CloseWithError(fmt.Errorf("error reading other.xml: %w", err))
					// FIX: previously fell through after CloseWithError; the
					// decoder keeps returning the same error, causing an
					// infinite loop. Bail out like the other builders do.
					return
				}
				packageStartElement, ok := token.(xml.StartElement)
				if !ok || packageStartElement.Name.Local != packageStartElements {
					continue
				}
				var pkg rpmtypes.OtherPackage
				if err := otherDecoder.DecodeElement(&pkg, &packageStartElement); err != nil {
					pw.CloseWithError(fmt.Errorf("failed to decode other package: %w", err))
					// FIX: previously continued with a zero-value pkg after a
					// decode failure; stop instead.
					return
				}
				if _, exists := set[pkg.Pkgid]; !exists {
					if err := encoder.Encode(pkg); err != nil {
						pw.CloseWithError(fmt.Errorf("failed to encode package: %w", err))
						return
					}
					set[pkg.Pkgid] = struct{}{}
				}
			}
		}
		if err := encoder.EncodeToken(xml.EndElement{Name: xml.Name{Local: "otherdata"}}); err != nil {
			pw.CloseWithError(fmt.Errorf("failed to encode metadata end: %w", err))
			return
		}
		if err := encoder.Flush(); err != nil {
			pw.CloseWithError(fmt.Errorf("failed to flush encoder: %w", err))
			return
		}
	}()
	// Upload the streamed content, then move it to its sha256-addressed path.
	info, tempFileName, err := l.fileManager.UploadTempFile(ctx, rootIdentifier, nil, OtherFile, pr)
	if err != nil {
		return nil, fmt.Errorf("failed to upload file: %w", err)
	}
	filePath := RepoDataPrefix + info.Sha256 + "-" + OtherFile
	err = l.fileManager.MoveTempFile(ctx, filePath, registryID,
		rootParentID, rootIdentifier, info, tempFileName, principalID)
	if err != nil {
		return nil, fmt.Errorf("failed to move temp file [%s] to [%s]: %w", tempFileName, filePath, err)
	}
	return getRepoData(info, filePath, "other"), nil
}
// buildFileLists regenerates the filelists.xml.gz metadata for a registry.
// Local packages (pis) are encoded first; packages parsed from each source in
// repoDataList are then merged in, de-duplicated by pkgid (sha256). The
// gzip'd XML is streamed through an io.Pipe into a temp upload, which is then
// moved to its content-addressed repodata path.
func (l *rpmHelper) buildFileLists(
	ctx context.Context,
	pis []*rpmtypes.PackageInfo,
	registryID int64,
	rootParentID int64,
	rootIdentifier string,
	repoDataList []registryData,
	principalID int64,
) (*rpmtypes.RepoData, error) { //nolint:dupl
	// Producer goroutine writes gzip'd XML to pw while UploadTempFile below
	// consumes pr; errors are propagated via pw.CloseWithError.
	pr, pw := io.Pipe()
	go func() {
		defer pw.Close()
		// set tracks emitted pkgids so upstream duplicates are skipped.
		set := make(map[string]struct{})
		gzw := gzip.NewWriter(pw)
		defer gzw.Close()
		encoder := xml.NewEncoder(gzw)
		if _, err := gzw.Write([]byte(xml.Header)); err != nil {
			pw.CloseWithError(fmt.Errorf("failed to write XML header: %w", err))
			return
		}
		if err := encoder.EncodeToken(xml.StartElement{Name: xml.Name{Local: "filelists"},
			Attr: []xml.Attr{
				{Name: xml.Name{Local: "xmlns"}, Value: "http://linux.duke.edu/metadata/filelists"},
				{Name: xml.Name{Local: "packages"}, Value: fmt.Sprintf("%d", 0)}, // TODO fix size
			}}); err != nil {
			pw.CloseWithError(fmt.Errorf("failed to encode metadata start: %w", err))
			return
		}
		// Local packages first: they take precedence over upstream copies.
		for _, pi := range pis {
			fp := getFileListsPackage(pi)
			if err := encoder.Encode(fp); err != nil {
				pw.CloseWithError(fmt.Errorf("failed to encode package: %w", err))
				return
			}
			set[fp.Pkgid] = struct{}{}
		}
		for _, rd := range repoDataList {
			readCloser, err := rd.getReader(ctx)
			if err != nil {
				pw.CloseWithError(fmt.Errorf("failed to get filelists.xml.gz: %w", err))
				return
			}
			// NOTE(review): defer inside this loop keeps every source open
			// until the goroutine exits — fine for small lists.
			defer readCloser.Close()
			reader, err := getReader(rd.getFileRef(), readCloser)
			if err != nil {
				pw.CloseWithError(fmt.Errorf("failed to create reader: %w", err))
				return
			}
			if closer, ok := reader.(io.Closer); ok {
				defer closer.Close()
			}
			fileListsDecoder := xml.NewDecoder(reader)
			err = l.validateRootElement(fileListsDecoder, "filelists")
			if err != nil {
				pw.CloseWithError(fmt.Errorf("failed to validate filelists.xml root element: %w", err))
				return
			}
			// Stream <package> elements one by one to bound memory use.
			for {
				token, err := fileListsDecoder.Token()
				if errors.Is(err, io.EOF) {
					break
				}
				if err != nil {
					pw.CloseWithError(fmt.Errorf("error reading filelists.xml: %w", err))
					return
				}
				packageStartElement, ok := token.(xml.StartElement)
				if !ok || packageStartElement.Name.Local != packageStartElements {
					continue
				}
				var pkg rpmtypes.FileListPackage
				if err := fileListsDecoder.DecodeElement(&pkg, &packageStartElement); err != nil {
					pw.CloseWithError(fmt.Errorf("failed to decode filelists package: %w", err))
					return
				}
				if _, exists := set[pkg.Pkgid]; !exists {
					if err := encoder.Encode(pkg); err != nil {
						pw.CloseWithError(fmt.Errorf("failed to encode package: %w", err))
						return
					}
					set[pkg.Pkgid] = struct{}{}
				}
			}
		}
		if err := encoder.EncodeToken(xml.EndElement{Name: xml.Name{Local: "filelists"}}); err != nil {
			pw.CloseWithError(fmt.Errorf("failed to encode metadata end: %w", err))
			return
		}
		if err := encoder.Flush(); err != nil {
			pw.CloseWithError(fmt.Errorf("failed to flush encoder: %w", err))
			return
		}
	}()
	// Upload the streamed content, then move it to its sha256-addressed path.
	info, tempFileName, err := l.fileManager.UploadTempFile(ctx, rootIdentifier, nil, FileListsFile, pr)
	if err != nil {
		return nil, fmt.Errorf("failed to upload file: %w", err)
	}
	filePath := RepoDataPrefix + info.Sha256 + "-" + FileListsFile
	err = l.fileManager.MoveTempFile(ctx, filePath, registryID,
		rootParentID, rootIdentifier, info, tempFileName, principalID)
	if err != nil {
		return nil, fmt.Errorf("failed to move temp file [%s] to [%s]: %w", tempFileName, filePath, err)
	}
	return getRepoData(info, filePath, "filelists"), nil
}
// getRepoData assembles the repomd.xml <data> entry describing one generated
// metadata file: sha256 checksum (used for both compressed and open values),
// its location href, sizes and the current timestamp.
func getRepoData(
	info types.FileInfo,
	filePath string,
	dataType string,
) *rpmtypes.RepoData {
	checksum := rpmtypes.RepoChecksum{
		Type:  "sha256",
		Value: info.Sha256,
	}
	return &rpmtypes.RepoData{
		Type:         dataType,
		Checksum:     checksum,
		OpenChecksum: checksum,
		Location:     rpmtypes.RepoLocation{Href: filePath},
		Timestamp:    time.Now().Unix(),
		Size:         info.Size,
		OpenSize:     info.Size,
	}
}
// getFileListsPackage maps a PackageInfo onto its filelists.xml package
// entry (identity, version triple and file list).
func getFileListsPackage(pi *rpmtypes.PackageInfo) *rpmtypes.FileListPackage {
	meta := pi.FileMetadata
	return &rpmtypes.FileListPackage{
		Pkgid:        pi.Sha256,
		Name:         pi.Name,
		Architecture: meta.Architecture,
		Version: rpmtypes.FileListVersion{
			Epoch:   meta.Epoch,
			Version: meta.Version,
			Release: meta.Release,
		},
		Files: meta.Files,
	}
}
// getOtherPackage maps a PackageInfo onto its other.xml package entry
// (identity, version triple and changelog records).
func getOtherPackage(pd *rpmtypes.PackageInfo) *rpmtypes.OtherPackage {
	meta := pd.FileMetadata
	return &rpmtypes.OtherPackage{
		Pkgid:        pd.Sha256,
		Name:         pd.Name,
		Architecture: meta.Architecture,
		Version: rpmtypes.OtherVersion{
			Epoch:   meta.Epoch,
			Version: meta.Version,
			Release: meta.Release,
		},
		Changelogs: meta.Changelogs,
	}
}
// buildRepomd renders repodata/repomd.xml from the given metadata entries and
// uploads it to the registry's repodata location, overwriting any previous
// version.
func (l *rpmHelper) buildRepomd(
	ctx context.Context,
	data []*rpmtypes.RepoData,
	registryID int64,
	rootParentID int64,
	rootIdentifier string,
	principalID int64,
) error {
	var buf bytes.Buffer
	buf.WriteString(xml.Header)
	if err := xml.NewEncoder(&buf).Encode(&rpmtypes.Repomd{
		Xmlns:    "http://linux.duke.edu/metadata/repo",
		XmlnsRpm: "http://linux.duke.edu/metadata/rpm",
		Data:     data,
	}); err != nil {
		return err
	}
	// FIX: this error was previously discarded with `_`, which could let an
	// incompletely-hashed buffer be uploaded without any signal.
	repomdContent, err := rpmtypes.CreateHashedBufferFromReader(&buf)
	if err != nil {
		return fmt.Errorf("failed to buffer repomd.xml content: %w", err)
	}
	defer repomdContent.Close()
	_, err = l.fileManager.UploadFile(ctx, RepoDataPrefix+RepoMdFile, registryID,
		rootParentID, rootIdentifier, repomdContent, repomdContent, RepoMdFile, principalID)
	if err != nil {
		return err
	}
	return nil
}
// getPrimaryPackage maps a PackageInfo onto its primary.xml package entry.
// It returns the de-duplication key ("name:version-release:arch") and the
// entry itself. rootPackagePath is the relative prefix under which the
// package download href is built.
func getPrimaryPackage(pi *rpmtypes.PackageInfo, rootPackagePath string) (string, *rpmtypes.PrimaryPackage) {
	// primary.xml only lists executable files; the full list lives in
	// filelists.xml.
	files := make([]*rpmmetadata.File, 0, 3)
	for _, f := range pi.FileMetadata.Files {
		if f.IsExecutable {
			files = append(files, f)
		}
	}
	packageVersion := fmt.Sprintf("%s-%s", pi.FileMetadata.Version, pi.FileMetadata.Release)
	// The download path includes the epoch prefix ("E:V-R") only when the
	// epoch is set and non-zero; the filename itself never includes it.
	pathVersion := packageVersion
	if pi.FileMetadata.Epoch != "" && pi.FileMetadata.Epoch != "0" {
		pathVersion = pi.FileMetadata.Epoch + ":" + pathVersion
	}
	key := fmt.Sprintf("%s:%s:%s", pi.Name, packageVersion, pi.FileMetadata.Architecture)
	// Each path segment is escaped individually so names containing reserved
	// characters stay valid in the href.
	location := fmt.Sprintf("%s/%s/%s/%s/%s",
		rootPackagePath,
		url.PathEscape(pi.Name),
		url.PathEscape(pathVersion),
		url.PathEscape(pi.FileMetadata.Architecture),
		url.PathEscape(fmt.Sprintf("%s-%s.%s.rpm", pi.Name, packageVersion, pi.FileMetadata.Architecture)))
	pp := &rpmtypes.PrimaryPackage{
		Type:         "rpm",
		Name:         pi.Name,
		Architecture: pi.FileMetadata.Architecture,
		Version: rpmtypes.PrimaryVersion{
			Epoch:   pi.FileMetadata.Epoch,
			Version: pi.FileMetadata.Version,
			Release: pi.FileMetadata.Release,
		},
		// Pkgid "YES" marks the sha256 checksum as the package identifier.
		Checksum: rpmtypes.PrimaryChecksum{
			Type:     "sha256",
			Checksum: pi.Sha256,
			Pkgid:    "YES",
		},
		Summary:     pi.VersionMetadata.Summary,
		Description: pi.VersionMetadata.Description,
		Packager:    pi.FileMetadata.Packager,
		URL:         pi.VersionMetadata.ProjectURL,
		Time: rpmtypes.PrimaryTimes{
			File:  pi.FileMetadata.FileTime,
			Build: pi.FileMetadata.BuildTime,
		},
		Size: rpmtypes.PrimarySizes{
			Package:   pi.Size,
			Installed: pi.FileMetadata.InstalledSize,
			Archive:   pi.FileMetadata.ArchiveSize,
		},
		Location: rpmtypes.PrimaryLocation{
			Href: location,
		},
		Format: rpmtypes.PrimaryFormat{
			License:   pi.VersionMetadata.License,
			Vendor:    pi.FileMetadata.Vendor,
			Group:     pi.FileMetadata.Group,
			Buildhost: pi.FileMetadata.BuildHost,
			Sourcerpm: pi.FileMetadata.SourceRpm,
			Provides: rpmtypes.PrimaryEntryList{
				Entries: pi.FileMetadata.Provides,
			},
			Requires: rpmtypes.PrimaryEntryList{
				Entries: pi.FileMetadata.Requires,
			},
			Conflicts: rpmtypes.PrimaryEntryList{
				Entries: pi.FileMetadata.Conflicts,
			},
			Obsoletes: rpmtypes.PrimaryEntryList{
				Entries: pi.FileMetadata.Obsoletes,
			},
			Files: files,
		},
	}
	return key, pp
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/services/webhook/wire.go | registry/services/webhook/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhook
import (
"context"
"encoding/gob"
"github.com/harness/gitness/app/services/refcache"
gitnesswebhook "github.com/harness/gitness/app/services/webhook"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/app/url"
"github.com/harness/gitness/encrypt"
"github.com/harness/gitness/events"
"github.com/harness/gitness/registry/app/events/artifact"
registrystore "github.com/harness/gitness/registry/app/store"
"github.com/harness/gitness/secret"
"github.com/harness/gitness/store/database/dbtx"
"github.com/google/wire"
)
// WireSet provides a wire set for this package.
// It exposes ProvideService so dependents can inject the webhook Service.
var WireSet = wire.NewSet(
	ProvideService,
)
// ProvideService is the wire provider for the registry webhook Service.
// It registers the artifact payload types with gob (required because webhook
// payloads are gob-encoded somewhere downstream — TODO confirm where) and
// delegates construction to NewService.
func ProvideService(
	ctx context.Context,
	config gitnesswebhook.Config,
	tx dbtx.Transactor,
	artifactsReaderFactory *events.ReaderFactory[*artifact.Reader],
	webhookStore registrystore.WebhooksRepository,
	webhookExecutionStore registrystore.WebhooksExecutionRepository,
	spaceStore store.SpaceStore,
	urlProvider url.Provider,
	principalStore store.PrincipalStore,
	webhookURLProvider gitnesswebhook.URLProvider,
	spacePathStore store.SpacePathStore,
	secretService secret.Service,
	registryRepository registrystore.RegistryRepository,
	encrypter encrypt.Encrypter,
	spaceFinder refcache.SpaceFinder,
) (*Service, error) {
	// Register concrete artifact payload types so gob can encode/decode the
	// Artifact interface field.
	gob.Register(&artifact.DockerArtifact{})
	gob.Register(&artifact.HelmArtifact{})
	return NewService(
		ctx,
		config,
		tx,
		artifactsReaderFactory,
		webhookStore,
		webhookExecutionStore,
		spaceStore,
		urlProvider,
		principalStore,
		webhookURLProvider,
		spacePathStore,
		secretService,
		registryRepository,
		encrypter,
		spaceFinder,
	)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/services/webhook/helper.go | registry/services/webhook/helper.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhook
import (
"context"
"fmt"
"net/url"
urlprovider "github.com/harness/gitness/app/url"
"github.com/harness/gitness/registry/app/api/openapi/contracts/artifact"
registryevents "github.com/harness/gitness/registry/app/events/artifact"
"github.com/harness/gitness/registry/app/pkg"
"github.com/rs/zerolog/log"
)
// ociPrefix is prepended to Helm artifact URLs, which are addressed as OCI
// references.
const ociPrefix = "oci://"
func GetArtifactCreatedPayload(
ctx context.Context,
info pkg.RegistryInfo,
principalID int64,
registryID int64,
regIdentifier string,
tag string,
digest string,
urlProvider urlprovider.Provider,
) registryevents.ArtifactCreatedPayload {
payload := registryevents.ArtifactCreatedPayload{
RegistryID: registryID,
PrincipalID: principalID,
ArtifactType: info.PackageType,
}
artifactURL := urlProvider.RegistryURL(ctx, info.RootIdentifier, regIdentifier) + "/" + info.Image + ":" + tag
urlWithoutProtocol := GetRepoURLWithoutProtocol(ctx, artifactURL)
baseArtifact := registryevents.BaseArtifact{
Name: info.Image,
Ref: fmt.Sprintf("%s:%s", info.Image, tag),
}
//nolint:exhaustive
switch info.PackageType {
case artifact.PackageTypeDOCKER:
payload.Artifact = ®istryevents.DockerArtifact{
BaseArtifact: baseArtifact,
Tag: tag,
URL: urlWithoutProtocol,
Digest: digest,
}
case artifact.PackageTypeHELM:
payload.Artifact = ®istryevents.HelmArtifact{
BaseArtifact: baseArtifact,
Tag: tag,
URL: ociPrefix + urlWithoutProtocol,
Digest: digest,
}
}
return payload
}
func GetArtifactDeletedPayload(
ctx context.Context,
principalID int64,
registryID int64,
regIdentifier string,
version string,
digest string,
rootIdentifier string,
packageType artifact.PackageType,
image string,
urlProvider urlprovider.Provider,
isUntaggedImagesEnabled bool,
) registryevents.ArtifactDeletedPayload {
payload := registryevents.ArtifactDeletedPayload{
RegistryID: registryID,
PrincipalID: principalID,
ArtifactType: packageType,
}
var versionSeparator string
var tag string
if isUntaggedImagesEnabled {
versionSeparator = "@"
} else {
versionSeparator = ":"
tag = version
}
artifactURL := urlProvider.RegistryURL(ctx, rootIdentifier, regIdentifier) +
"/" + image + versionSeparator + version
urlWithoutProtocol := GetRepoURLWithoutProtocol(ctx, artifactURL)
baseArtifact := registryevents.BaseArtifact{
Name: image,
Ref: fmt.Sprintf("%s%s%s", image, versionSeparator, version),
}
//nolint: exhaustive
switch packageType {
case artifact.PackageTypeDOCKER:
payload.Artifact = ®istryevents.DockerArtifact{
BaseArtifact: baseArtifact,
Tag: tag,
Digest: digest,
URL: urlWithoutProtocol,
}
case artifact.PackageTypeHELM:
payload.Artifact = ®istryevents.HelmArtifact{
BaseArtifact: baseArtifact,
Tag: tag,
Digest: digest,
URL: ociPrefix + urlWithoutProtocol,
}
}
return payload
}
// GetArtifactCreatedPayloadForCommonArtifacts builds the creation event
// payload for non-OCI (generic) package types, using a CommonArtifact body
// with a "name:version" ref.
// Note: the `artifact` parameter shadows the imported `artifact` package
// inside this function body.
func GetArtifactCreatedPayloadForCommonArtifacts(
	principalID int64,
	registryID int64,
	packageType artifact.PackageType,
	artifact string,
	version string,
) registryevents.ArtifactCreatedPayload {
	return registryevents.ArtifactCreatedPayload{
		RegistryID:   registryID,
		PrincipalID:  principalID,
		ArtifactType: packageType,
		Artifact: &registryevents.CommonArtifact{
			BaseArtifact: registryevents.BaseArtifact{
				Name: artifact,
				Ref:  fmt.Sprintf("%s:%s", artifact, version),
			},
			Version: version,
			Type:    packageType,
		},
	}
}
// GetArtifactDeletedPayloadForCommonArtifacts builds the deletion event
// payload for non-OCI (generic) package types, using a CommonArtifact body
// with a "name:version" ref.
// Note: the `artifact` parameter shadows the imported `artifact` package
// inside this function body.
func GetArtifactDeletedPayloadForCommonArtifacts(
	principalID int64,
	registryID int64,
	packageType artifact.PackageType,
	artifact string,
	version string,
) registryevents.ArtifactDeletedPayload {
	return registryevents.ArtifactDeletedPayload{
		RegistryID:   registryID,
		PrincipalID:  principalID,
		ArtifactType: packageType,
		Artifact: &registryevents.CommonArtifact{
			BaseArtifact: registryevents.BaseArtifact{
				Name: artifact,
				Ref:  fmt.Sprintf("%s:%s", artifact, version),
			},
			Version: version,
			Type:    packageType,
		},
	}
}
// GetRepoURLWithoutProtocol strips the scheme from registryURL and returns
// host+path. On a parse failure it logs the error and returns "".
func GetRepoURLWithoutProtocol(ctx context.Context, registryURL string) string {
	parsed, err := url.Parse(registryURL)
	if err != nil {
		log.Ctx(ctx).Error().Stack().Err(err).Msg("Error parsing URL: ")
		return ""
	}
	return parsed.Host + parsed.Path
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/services/webhook/service.go | registry/services/webhook/service.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhook
import (
"context"
"fmt"
"time"
"github.com/harness/gitness/app/services/refcache"
gitnesswebhook "github.com/harness/gitness/app/services/webhook"
"github.com/harness/gitness/app/store"
"github.com/harness/gitness/app/url"
"github.com/harness/gitness/encrypt"
"github.com/harness/gitness/events"
events2 "github.com/harness/gitness/registry/app/events/artifact"
registrystore "github.com/harness/gitness/registry/app/store"
"github.com/harness/gitness/secret"
"github.com/harness/gitness/store/database/dbtx"
"github.com/harness/gitness/stream"
)
const (
	// eventsReaderGroupName is the consumer group name used when launching
	// the artifact events reader.
	eventsReaderGroupName = "gitness:webhook"
)
// Verify Service implements ServiceInterface.
var _ ServiceInterface = (*Service)(nil)

// Service is responsible for processing webhook events.
// It bridges registry artifact events into the generic gitness webhook
// executor.
type Service struct {
	// WebhookExecutor performs webhook triggering and (re)delivery.
	WebhookExecutor    *gitnesswebhook.WebhookExecutor
	tx                 dbtx.Transactor
	urlProvider        url.Provider
	spaceStore         store.SpaceStore
	principalStore     store.PrincipalStore
	config             gitnesswebhook.Config
	spacePathStore     store.SpacePathStore
	registryRepository registrystore.RegistryRepository
	spaceFinder        refcache.SpaceFinder
}
// NewService constructs the registry webhook Service: it validates the
// config, wires a gitness WebhookExecutor over registry-specific webhook
// stores, and launches an artifact events reader that dispatches
// created/deleted events to the service handlers. It returns an error if the
// config is invalid or the reader cannot be launched.
func NewService(
	ctx context.Context,
	config gitnesswebhook.Config,
	tx dbtx.Transactor,
	artifactsReaderFactory *events.ReaderFactory[*events2.Reader],
	webhookStore registrystore.WebhooksRepository,
	webhookExecutionStore registrystore.WebhooksExecutionRepository,
	spaceStore store.SpaceStore,
	urlProvider url.Provider,
	principalStore store.PrincipalStore,
	webhookURLProvider gitnesswebhook.URLProvider,
	spacePathStore store.SpacePathStore,
	secretService secret.Service,
	registryRepository registrystore.RegistryRepository,
	encrypter encrypt.Encrypter,
	spaceFinder refcache.SpaceFinder,
) (*Service, error) {
	if err := config.Prepare(); err != nil {
		return nil, fmt.Errorf("provided webhook service config is invalid: %w", err)
	}
	// Adapt the registry webhook stores to the generic executor's store
	// interface.
	webhookExecutorStore := &RegistryWebhookExecutorStore{
		webhookStore:          webhookStore,
		webhookExecutionStore: webhookExecutionStore,
	}
	executor := gitnesswebhook.NewWebhookExecutor(config, webhookURLProvider, encrypter, spacePathStore,
		secretService, principalStore, webhookExecutorStore, gitnesswebhook.ArtifactRegistryTrigger)
	service := &Service{
		WebhookExecutor:    executor,
		tx:                 tx,
		spaceStore:         spaceStore,
		urlProvider:        urlProvider,
		principalStore:     principalStore,
		config:             config,
		spacePathStore:     spacePathStore,
		registryRepository: registryRepository,
		spaceFinder:        spaceFinder,
	}
	// Launch the event reader; handlers are registered inside the launch
	// callback so they are bound before consumption starts.
	_, err := artifactsReaderFactory.Launch(ctx, eventsReaderGroupName, config.EventReaderName,
		func(r *events2.Reader) error {
			const idleTimeout = 1 * time.Minute
			r.Configure(
				stream.WithConcurrency(config.Concurrency),
				stream.WithHandlerOptions(
					stream.WithIdleTimeout(idleTimeout),
					stream.WithMaxRetries(config.MaxRetries),
				))

			// register events
			_ = r.RegisterArtifactCreated(service.handleEventArtifactCreated)
			_ = r.RegisterArtifactDeleted(service.handleEventArtifactDeleted)
			return nil
		})
	if err != nil {
		return nil, fmt.Errorf("failed to launch registry event reader for webhooks: %w", err)
	}
	return service, nil
}
// ReTriggerWebhookExecution re-runs a previously recorded webhook execution,
// delegating to the underlying WebhookExecutor.
func (s *Service) ReTriggerWebhookExecution(
	ctx context.Context,
	webhookExecutionID int64,
) (*gitnesswebhook.TriggerResult, error) {
	return s.WebhookExecutor.RetriggerWebhookExecution(ctx, webhookExecutionID)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/services/webhook/interface.go | registry/services/webhook/interface.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhook
import (
"context"
gitnesswebhook "github.com/harness/gitness/app/services/webhook"
)
// ServiceInterface interface for webhook operations.
type ServiceInterface interface {
	// ReTriggerWebhookExecution re-runs the webhook execution identified by
	// webhookExecutionID and returns the trigger result.
	ReTriggerWebhookExecution(ctx context.Context, webhookExecutionID int64) (*gitnesswebhook.TriggerResult, error)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/services/webhook/repository.go | registry/services/webhook/repository.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhook
import (
"context"
registrystore "github.com/harness/gitness/registry/app/store"
"github.com/harness/gitness/types"
)
// RegistryWebhookExecutorStore adapts the registry-specific webhook stores to
// the store interface expected by the generic gitness webhook executor.
type RegistryWebhookExecutorStore struct {
	webhookStore          registrystore.WebhooksRepository
	webhookExecutionStore registrystore.WebhooksExecutionRepository
}
// Find returns the webhook execution with the given id.
func (s *RegistryWebhookExecutorStore) Find(ctx context.Context, id int64) (*types.WebhookExecutionCore, error) {
	return s.webhookExecutionStore.Find(ctx, id)
}
// ListWebhooks returns all webhooks registered for the given registry
// parents.
func (s *RegistryWebhookExecutorStore) ListWebhooks(
	ctx context.Context,
	parents []types.WebhookParentInfo,
) ([]*types.WebhookCore, error) {
	return s.webhookStore.ListAllByRegistry(ctx, parents)
}
// ListForTrigger returns all webhook executions recorded for the given
// trigger id.
func (s *RegistryWebhookExecutorStore) ListForTrigger(
	ctx context.Context,
	triggerID string,
) ([]*types.WebhookExecutionCore, error) {
	return s.webhookExecutionStore.ListForTrigger(ctx, triggerID)
}
// CreateWebhookExecution persists a new webhook execution record.
func (s *RegistryWebhookExecutorStore) CreateWebhookExecution(
	ctx context.Context,
	hook *types.WebhookExecutionCore,
) error {
	return s.webhookExecutionStore.Create(ctx, hook)
}
// UpdateOptLock records the latest execution result on the webhook under
// optimistic locking and returns the updated webhook.
func (s *RegistryWebhookExecutorStore) UpdateOptLock(
	ctx context.Context, hook *types.WebhookCore,
	execution *types.WebhookExecutionCore,
) (*types.WebhookCore, error) {
	// Mutation applied by the store inside its optimistic-lock retry loop.
	applyResult := func(w *types.WebhookCore) error {
		w.LatestExecutionResult = &execution.Result
		return nil
	}
	return s.webhookStore.UpdateOptLock(ctx, hook, applyResult)
}
// FindWebhook returns the webhook with the given id.
func (s *RegistryWebhookExecutorStore) FindWebhook(
	ctx context.Context,
	id int64,
) (*types.WebhookCore, error) {
	return s.webhookStore.Find(ctx, id)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/services/webhook/handler_artifact.go | registry/services/webhook/handler_artifact.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhook
import (
"context"
"fmt"
gitnesswebhook "github.com/harness/gitness/app/services/webhook"
"github.com/harness/gitness/events"
"github.com/harness/gitness/registry/app/api/openapi/contracts/artifact"
registryevents "github.com/harness/gitness/registry/app/events/artifact"
registrytypes "github.com/harness/gitness/registry/types"
"github.com/harness/gitness/types"
"github.com/harness/gitness/types/enum"
)
// ArtifactEventPayload describes the payload of Artifact related webhook triggers.
type ArtifactEventPayload struct {
	// Trigger identifies which webhook trigger produced this payload.
	Trigger enum.WebhookTrigger `json:"trigger"`
	// Registry describes the registry the artifact belongs to.
	Registry RegistryInfo `json:"registry"`
	// Principal is the user that caused the event.
	Principal gitnesswebhook.PrincipalInfo `json:"principal"`
	// ArtifactInfo carries the artifact-specific details.
	ArtifactInfo *registryevents.ArtifactInfo `json:"artifact_info"`
}

// RegistryInfo is the registry summary embedded in webhook payloads.
type RegistryInfo struct {
	ID          int64  `json:"id"`
	Name        string `json:"name"`
	Description string `json:"description"`
	// URL is the UI URL of the registry.
	URL string `json:"url"`
}
// handleEventArtifactCreated handles branch created events
// and triggers branch created webhooks for the source repo.
func (s *Service) handleEventArtifactCreated(
ctx context.Context,
event *events.Event[*registryevents.ArtifactCreatedPayload],
) error {
return s.triggerForEventWithArtifact(ctx, enum.WebhookTriggerArtifactCreated,
event.ID, event.Payload.PrincipalID, event.Payload.RegistryID,
func(
principal *types.Principal,
registry *registrytypes.Registry,
) (any, error) {
space, err := s.spaceFinder.FindByID(ctx, registry.ParentID)
if err != nil {
return nil, err
}
return &ArtifactEventPayload{
Trigger: enum.WebhookTriggerArtifactCreated,
Registry: RegistryInfo{
ID: registry.ID,
Name: registry.Name,
Description: registry.Description,
URL: s.urlProvider.GenerateUIRegistryURL(ctx, space.Path, registry.Name),
},
Principal: gitnesswebhook.PrincipalInfo{
ID: principal.ID,
UID: principal.UID,
DisplayName: principal.DisplayName,
Email: principal.Email,
Type: principal.Type,
Created: principal.Created,
Updated: principal.Updated,
},
ArtifactInfo: getArtifactInfo(event.Payload.Artifact),
}, nil
})
}
// handleEventArtifactDeleted handles branch deleted events
// and triggers branch deleted webhooks for the source repo.
func (s *Service) handleEventArtifactDeleted(
ctx context.Context,
event *events.Event[*registryevents.ArtifactDeletedPayload],
) error {
return s.triggerForEventWithArtifact(ctx, enum.WebhookTriggerArtifactDeleted,
event.ID, event.Payload.PrincipalID, event.Payload.RegistryID,
func(
principal *types.Principal,
registry *registrytypes.Registry,
) (any, error) {
space, err := s.spaceFinder.FindByID(ctx, registry.ParentID)
if err != nil {
return nil, err
}
return &ArtifactEventPayload{
Trigger: enum.WebhookTriggerArtifactDeleted,
Registry: RegistryInfo{
ID: registry.ID,
Name: registry.Name,
Description: registry.Description,
URL: s.urlProvider.GenerateUIRegistryURL(ctx, space.Path, registry.Name),
},
Principal: gitnesswebhook.PrincipalInfo{
ID: principal.ID,
UID: principal.UID,
DisplayName: principal.DisplayName,
Email: principal.Email,
Type: principal.Type,
Created: principal.Created,
Updated: principal.Updated,
},
ArtifactInfo: getArtifactInfo(event.Payload.Artifact),
}, nil
})
}
func getArtifactInfo(eventArtifact registryevents.Artifact) *registryevents.ArtifactInfo {
artifactInfo := registryevents.ArtifactInfo{}
if dockerArtifact, ok := eventArtifact.(*registryevents.DockerArtifact); ok {
artifactInfo.Type = artifact.PackageTypeDOCKER
artifactInfo.Name = dockerArtifact.Name
artifactInfo.Version = dockerArtifact.Tag
artifactInfo.Artifact = &dockerArtifact
} else if helmArtifact, ok := eventArtifact.(*registryevents.HelmArtifact); ok {
artifactInfo.Type = artifact.PackageTypeHELM
artifactInfo.Name = helmArtifact.Name
artifactInfo.Version = helmArtifact.Tag
artifactInfo.Artifact = &helmArtifact
} else if commonArtifact, ok := eventArtifact.(*registryevents.CommonArtifact); ok {
artifactInfo.Type = commonArtifact.Type
artifactInfo.Name = commonArtifact.Name
artifactInfo.Version = commonArtifact.Version
artifactInfo.Artifact = &commonArtifact
}
return &artifactInfo
}
// triggerForEventWithArtifact triggers all webhooks for the given registry and triggerType
// using the eventID to generate a deterministic triggerID and using the output of bodyFn as payload.
// The method tries to find the registry and principal and provides both to the bodyFn to generate the body.
// NOTE: technically we could avoid this call if we send the data via the event (though then events will get big).
func (s *Service) triggerForEventWithArtifact(
ctx context.Context,
triggerType enum.WebhookTrigger,
eventID string,
principalID int64,
registryID int64,
createBodyFn func(*types.Principal, *registrytypes.Registry) (any, error),
) error {
principal, err := s.WebhookExecutor.FindPrincipalForEvent(ctx, principalID)
if err != nil {
return err
}
registry, err := s.registryRepository.Get(ctx, registryID)
if err != nil {
return err
}
body, err := createBodyFn(principal, registry)
if err != nil {
return fmt.Errorf("body creation function failed: %w", err)
}
parents, err := s.getParentInfoRegistry(ctx, registry.ID, true)
if err != nil {
return fmt.Errorf("failed to get webhook parent info: %w", err)
}
return s.WebhookExecutor.TriggerForEvent(ctx, eventID, parents, triggerType, body)
}
func (s *Service) getParentInfoRegistry(
ctx context.Context,
registryID int64,
inherited bool,
) ([]types.WebhookParentInfo, error) {
var parents []types.WebhookParentInfo
parents = append(parents, types.WebhookParentInfo{
ID: registryID,
Type: enum.WebhookParentRegistry,
})
if inherited {
registry, err := s.registryRepository.Get(ctx, registryID)
if err != nil {
return nil, fmt.Errorf("failed to get registry: %w", err)
}
ids, err := s.spaceStore.GetAncestorIDs(ctx, registry.ParentID)
if err != nil {
return nil, fmt.Errorf("failed to get parent space ids: %w", err)
}
for _, id := range ids {
parents = append(parents, types.WebhookParentInfo{
Type: enum.WebhookParentSpace,
ID: id,
})
}
}
return parents, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/storage/middleware.go | registry/app/storage/middleware.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
var (
registryoptions []Option
)
// GetRegistryOptions returns list of StorageOption.
func GetRegistryOptions() []Option {
return registryoptions
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/storage/blobStore.go | registry/app/storage/blobStore.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"fmt"
"io"
"mime/multipart"
"net/http"
"github.com/harness/gitness/registry/app/dist_temp/dcontext"
"github.com/harness/gitness/registry/app/driver"
"github.com/harness/gitness/registry/types"
"github.com/rs/zerolog/log"
)
const (
HeaderContentDigest = "Content-Digest"
)
type genericBlobStore struct {
driver driver.StorageDriver
rootParentRef string
redirect bool
}
func (bs *genericBlobStore) Get(
ctx context.Context,
filePath string, size int64, filename string,
) (*FileReader, string, error) {
dcontext.GetLogger(ctx, log.Ctx(ctx).Debug()).Msg("(*genericBlobStore).Get")
if bs.redirect {
redirectURL, err := bs.driver.RedirectURL(ctx, http.MethodGet, filePath, filename)
if err != nil {
return nil, "", err
}
if redirectURL != "" {
// Redirect to storage URL.
// http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect)
return nil, redirectURL, nil
}
// Fallback to serving the content directly.
}
br, err := NewFileReader(ctx, bs.driver, filePath, size)
if err != nil {
return nil, "", err
}
return br, "", nil
}
func (bs *genericBlobStore) GetWithNoRedirect(ctx context.Context, filePath string, size int64) (*FileReader, error) {
dcontext.GetLogger(ctx, log.Ctx(ctx).Debug()).Msg("(*genericBlobStore).Get")
br, err := NewFileReader(ctx, bs.driver, filePath, size)
if err != nil {
return nil, err
}
return br, nil
}
var _ GenericBlobStore = &genericBlobStore{}
// Create begins a blob write session, returning a handle.
func (bs *genericBlobStore) Create(ctx context.Context, filePath string) (driver.FileWriter, error) {
dcontext.GetLogger(ctx, log.Ctx(ctx).Debug()).Msg("(*genericBlobStore).Create")
path, err := pathFor(
uploadFilePathSpec{
path: filePath,
},
)
if err != nil {
return nil, err
}
return bs.newBlobUpload(ctx, path, false)
}
func (bs *genericBlobStore) newBlobUpload(
ctx context.Context,
path string, a bool,
) (driver.FileWriter, error) {
fw, err := bs.driver.Writer(ctx, path, a)
if err != nil {
return nil, err
}
return fw, nil
}
// Write takes a file writer and a multipart form file or file reader,
// streams the file to the writer, and calculates hashes.
func (bs *genericBlobStore) Write(
ctx context.Context, w driver.FileWriter, file multipart.File,
fileReader io.Reader,
) (types.FileInfo, error) {
// Create new hash.Hash instances for SHA256 and SHA512
sha1Hasher := sha1.New()
sha256Hasher := sha256.New()
sha512Hasher := sha512.New()
md5Hasher := md5.New()
// Create a MultiWriter to write to both hashers simultaneously
mw := io.MultiWriter(sha1Hasher, sha256Hasher, sha512Hasher, md5Hasher, w)
// Copy the data from S3 object stream to the MultiWriter
var err error
var totalBytesWritten int64
if fileReader != nil {
totalBytesWritten, err = io.Copy(mw, fileReader)
} else {
totalBytesWritten, err = io.Copy(mw, file)
}
if err != nil {
return types.FileInfo{}, fmt.Errorf("failed to copy file to s3: %w", err)
}
err = w.Commit(ctx)
if err != nil {
return types.FileInfo{}, err
}
return types.FileInfo{
Sha1: fmt.Sprintf("%x", sha1Hasher.Sum(nil)),
Sha256: fmt.Sprintf("%x", sha256Hasher.Sum(nil)),
Sha512: fmt.Sprintf("%x", sha512Hasher.Sum(nil)),
MD5: fmt.Sprintf("%x", md5Hasher.Sum(nil)),
Size: totalBytesWritten,
}, nil
}
func (bs *genericBlobStore) Move(ctx context.Context, srcPath string, dstPath string) error {
dcontext.GetLogger(ctx, log.Ctx(ctx).Debug()).Msg("(*genericBlobStore).Move")
err := bs.driver.Move(ctx, srcPath, dstPath)
if err != nil {
return err
}
return nil
}
func (bs *genericBlobStore) Delete(ctx context.Context, filePath string) error {
dcontext.GetLogger(ctx, log.Ctx(ctx).Debug()).Msg("(*genericBlobStore).Delete")
err := bs.driver.Delete(ctx, filePath)
if err != nil {
return err
}
return nil
}
func (bs *genericBlobStore) Stat(ctx context.Context, filePath string) (int64, error) {
dcontext.GetLogger(ctx, log.Ctx(ctx).Debug()).Msg("(*genericBlobStore).Stat")
fileInfo, err := bs.driver.Stat(ctx, filePath)
if err != nil {
return -1, err
}
return fileInfo.Size(), nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/storage/filereader.go | registry/app/storage/filereader.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"bufio"
"bytes"
"context"
"errors"
"fmt"
"io"
"github.com/harness/gitness/registry/app/driver"
)
const fileReaderBufferSize = 4 * 1024 * 1024
// remoteFileReader provides a read seeker interface to files stored in
// storagedriver. Used to implement part of layer interface and will be used
// to implement read side of LayerUpload.
type FileReader struct {
driver driver.StorageDriver
ctx context.Context
// identifying fields
path string
size int64 // size is the total size, must be set.
// mutable fields
rc io.ReadCloser // remote read closer
brd *bufio.Reader // internal buffered io
offset int64 // offset is the current read offset
err error // terminal error, if set, reader is closed
}
// NewFileReader initializes a file reader for the remote file. The reader
// takes on the size and path that must be determined externally with a stat
// call. The reader operates optimistically, assuming that the file is already
// there.
func NewFileReader(ctx context.Context, driver driver.StorageDriver, path string, size int64) (*FileReader, error) {
return &FileReader{
ctx: ctx,
driver: driver,
path: path,
size: size,
}, nil
}
func (fr *FileReader) Read(p []byte) (n int, err error) {
if fr.err != nil {
return 0, fr.err
}
rd, err := fr.reader()
if err != nil {
return 0, err
}
n, err = rd.Read(p)
fr.offset += int64(n)
// Simulate io.EOR error if we reach filesize.
if err == nil && fr.offset >= fr.size {
err = io.EOF
}
return n, err
}
func (fr *FileReader) Seek(offset int64, whence int) (int64, error) {
if fr.err != nil {
return 0, fr.err
}
var err error
newOffset := fr.offset
switch whence {
case io.SeekCurrent:
newOffset += offset
case io.SeekEnd:
newOffset = fr.size + offset
case io.SeekStart:
newOffset = offset
}
if newOffset < 0 {
err = fmt.Errorf("cannot seek to negative position")
} else {
if fr.offset != newOffset {
fr.reset()
}
// No problems, set the offset.
fr.offset = newOffset
}
return fr.offset, err
}
func (fr *FileReader) Close() error {
return fr.closeWithErr(fmt.Errorf("FileReader: closed"))
}
// reader prepares the current reader at the lrs offset, ensuring its buffered
// and ready to go.
func (fr *FileReader) reader() (io.Reader, error) {
if fr.err != nil {
return nil, fr.err
}
if fr.rc != nil {
return fr.brd, nil
}
// If we don't have a reader, open one up.
rc, err := fr.driver.Reader(fr.ctx, fr.path, fr.offset)
if err != nil {
if errors.As(err, &driver.PathNotFoundError{}) {
return io.NopCloser(bytes.NewReader([]byte{})), nil
}
return nil, err
}
fr.rc = rc
if fr.brd == nil {
fr.brd = bufio.NewReaderSize(fr.rc, fileReaderBufferSize)
} else {
fr.brd.Reset(fr.rc)
}
return fr.brd, nil
}
// resetReader resets the reader, forcing the read method to open up a new
// connection and rebuild the buffered reader. This should be called when the
// offset and the reader will become out of sync, such as during a seek
// operation.
func (fr *FileReader) reset() {
if fr.err != nil {
return
}
if fr.rc != nil {
fr.rc.Close()
fr.rc = nil
}
}
func (fr *FileReader) closeWithErr(err error) error {
if fr.err != nil {
return fr.err
}
fr.err = err
// close and release reader chain
if fr.rc != nil {
fr.rc.Close()
}
fr.rc = nil
fr.brd = nil
return fr.err
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/storage/errors.go | registry/app/storage/errors.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"errors"
"fmt"
"strings"
"github.com/opencontainers/go-digest"
)
// ErrAccessDenied is returned when an access to a requested resource is
// denied.
var ErrAccessDenied = errors.New("access denied")
// ErrUnsupported is returned when an unimplemented or unsupported action is
// performed.
var ErrUnsupported = errors.New("operation unsupported")
// TagUnknownError is returned if the given tag is not known by the tag service.
type TagUnknownError struct {
Tag string
}
func (err TagUnknownError) Error() string {
return fmt.Sprintf("unknown tag=%s", err.Tag)
}
// RegistryUnknownError is returned if the named repository is not known by
// the StorageService.
type RegistryUnknownError struct {
Name string
}
func (err RegistryUnknownError) Error() string {
return fmt.Sprintf("unknown registry name=%s", err.Name)
}
// RegistryNameInvalidError should be used to denote an invalid repository
// name. Reason may set, indicating the cause of invalidity.
type RegistryNameInvalidError struct {
Name string
Reason error
}
func (err RegistryNameInvalidError) Error() string {
return fmt.Sprintf("registry name %q invalid: %v", err.Name, err.Reason)
}
// ManifestUnknownError is returned if the manifest is not known by the
// StorageService.
type ManifestUnknownError struct {
Name string
Tag string
}
func (err ManifestUnknownError) Error() string {
return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag)
}
// ManifestUnknownRevisionError is returned when a manifest cannot be found by
// revision within a repository.
type ManifestUnknownRevisionError struct {
Name string
Revision digest.Digest
}
func (err ManifestUnknownRevisionError) Error() string {
return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision)
}
// ManifestUnverifiedError is returned when the StorageService is unable to verify
// the manifest.
type ManifestUnverifiedError struct{}
func (ManifestUnverifiedError) Error() string {
return "unverified manifest"
}
// ManifestReferencesExceedLimitError is returned when a manifest has too many references.
type ManifestReferencesExceedLimitError struct {
References int
Limit int
}
func (err ManifestReferencesExceedLimitError) Error() string {
return fmt.Sprintf("%d manifest references exceed reference limit of %d", err.References, err.Limit)
}
// ManifestPayloadSizeExceedsLimitError is returned when a manifest is bigger than the configured payload
// size limit.
type ManifestPayloadSizeExceedsLimitError struct {
PayloadSize int
Limit int
}
// Error implements the error interface for ManifestPayloadSizeExceedsLimitError.
func (err ManifestPayloadSizeExceedsLimitError) Error() string {
return fmt.Sprintf("manifest payload size of %d exceeds limit of %d", err.PayloadSize, err.Limit)
}
// ManifestVerificationErrors provides a type to collect errors encountered
// during manifest verification. Currently, it accepts errors of all types,
// but it may be narrowed to those involving manifest verification.
type ManifestVerificationErrors []error
func (errs ManifestVerificationErrors) Error() string {
parts := make([]string, 0, len(errs))
for _, err := range errs {
parts = append(parts, err.Error())
}
return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ","))
}
// ManifestBlobUnknownError returned when a referenced blob cannot be found.
type ManifestBlobUnknownError struct {
Digest digest.Digest
}
func (err ManifestBlobUnknownError) Error() string {
return fmt.Sprintf("unknown blob %v on manifest", err.Digest)
}
// ManifestNameInvalidError should be used to denote an invalid manifest
// name. Reason may set, indicating the cause of invalidity.
type ManifestNameInvalidError struct {
Name string
Reason error
}
func (err ManifestNameInvalidError) Error() string {
return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/storage/blobs.go | registry/app/storage/blobs.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"errors"
"fmt"
"io"
"mime/multipart"
"github.com/harness/gitness/registry/app/driver"
"github.com/harness/gitness/registry/app/manifest"
"github.com/harness/gitness/registry/types"
"github.com/distribution/reference"
"github.com/opencontainers/go-digest"
)
var (
// ErrBlobExists returned when blob already exists.
ErrBlobExists = errors.New("blob exists")
// ErrBlobDigestUnsupported when blob digest is an unsupported version.
ErrBlobDigestUnsupported = errors.New("unsupported blob digest")
// ErrBlobUnknown when blob is not found.
ErrBlobUnknown = errors.New("unknown blob")
// ErrBlobUploadUnknown returned when upload is not found.
ErrBlobUploadUnknown = errors.New("blob upload unknown")
// ErrBlobInvalidLength returned when the blob has an expected length on
// commit, meaning mismatched with the descriptor or an invalid value.
ErrBlobInvalidLength = errors.New("blob invalid length")
)
// BlobInvalidDigestError returned when digest check fails.
type BlobInvalidDigestError struct {
Digest digest.Digest
Reason error
}
func (err BlobInvalidDigestError) Error() string {
return fmt.Sprintf(
"invalid digest for referenced layer: %v, %v",
err.Digest, err.Reason,
)
}
// BlobMountedError returned when a blob is mounted from another repository
// instead of initiating an upload session.
type BlobMountedError struct {
From reference.Canonical
Descriptor manifest.Descriptor
}
func (err BlobMountedError) Error() string {
return fmt.Sprintf(
"blob mounted from: %v to: %v",
err.From, err.Descriptor,
)
}
// BlobWriter provides a handle for inserting data into a blob store.
// Instances should be obtained from BlobWriteService.Writer and
// BlobWriteService.Resume. If supported by the store, a writer can be
// recovered with the id.
type BlobWriter interface {
io.WriteCloser
// Size returns the number of bytes written to this blob.
Size() int64
// ID returns the identifier for this writer. The ID can be used with the
// Blob service to later resume the write.
ID() string
// Commit completes the blob writer process. The content is verified
// against the provided provisional descriptor, which may result in an
// error. Depending on the implementation, written data may be validated
// against the provisional descriptor fields. If MediaType is not present,
// the implementation may reject the commit or assign "application/octet-
// stream" to the blob. The returned descriptor may have a different
// digest depending on the blob store, referred to as the canonical
// descriptor.
Commit(ctx context.Context, pathPrefix string, provisional manifest.Descriptor) (
canonical manifest.Descriptor, err error,
)
// Cancel ends the blob write without storing any data and frees any
// associated resources. Any data written thus far will be lost. Cancel
// implementations should allow multiple calls even after a commit that
// result in a no-op. This allows use of Cancel in a defer statement,
// increasing the assurance that it is correctly called.
Cancel(ctx context.Context) error
}
// OciBlobStore represent the entire suite of blob related operations. Such an
// implementation can access, read, write, delete and serve blobs.
type OciBlobStore interface {
// ServeBlobInternal attempts to serve the blob, identified by dgst, via http. The
// service may decide to redirect the client elsewhere or serve the data
// directly.
//
// This handler only issues successful responses, such as 2xx or 3xx,
// meaning it serves data or issues a redirect. If the blob is not
// available, an error will be returned and the caller may still issue a
// response.
//
// The implementation may serve the same blob from a different digest
// domain. The appropriate headers will be set for the blob, unless they
// have already been set by the caller.
ServeBlobInternal(
ctx context.Context,
pathPrefix string,
dgst digest.Digest,
headers map[string]string,
method string,
) (*FileReader, string, int64, error)
GetBlobInternal(
ctx context.Context,
pathPrefix string,
dgst digest.Digest,
) (*FileReader, int64, error)
Delete(ctx context.Context, pathPrefix string, dgst digest.Digest) error
// Stat provides metadata about a blob identified by the digest. If the
// blob is unknown to the describer, ErrBlobUnknown will be returned.
Stat(ctx context.Context, pathPrefix string, dgst digest.Digest) (manifest.Descriptor, error)
// Get returns the entire blob identified by digest along with the descriptor.
Get(ctx context.Context, pathPrefix string, dgst digest.Digest) ([]byte, error)
// Open provides an [io.ReadSeekCloser] to the blob identified by the provided
// descriptor. If the blob is not known to the service, an error is returned.
Open(ctx context.Context, pathPrefix string, dgst digest.Digest) (io.ReadSeekCloser, error)
// Put inserts the content p into the blob service, returning a descriptor
// or an error.
Put(ctx context.Context, pathPrefix string, p []byte) (manifest.Descriptor, error)
// Create allocates a new blob writer to add a blob to this service. The
// returned handle can be written to and later resumed using an opaque
// identifier. With this approach, one can Close and Resume a BlobWriter
// multiple times until the BlobWriter is committed or cancelled.
Create(ctx context.Context) (BlobWriter, error)
// Resume attempts to resume a write to a blob, identified by an id.
Resume(ctx context.Context, id string) (BlobWriter, error)
Path() string
}
// GenericBlobStore represent the entire suite of Generic blob related operations. Such an
// implementation can access, read, write, delete and serve blobs.
type GenericBlobStore interface {
// Create allocates a new blob writer to add a blob to this service. The
// returned handle can be written to and later resumed using an opaque
// identifier. With this approach, one can Close and Resume a BlobWriter
// multiple times until the BlobWriter is committed or cancelled.
Create(ctx context.Context, filePath string) (driver.FileWriter, error)
// Write writes the file to the blob store. There are two ways to write the file and fileReader takes the precedence.
Write(ctx context.Context, w driver.FileWriter, file multipart.File, fileReader io.Reader) (types.FileInfo, error)
Move(ctx context.Context, srcPath string, dstPath string) error
Delete(ctx context.Context, filePath string) error
Get(ctx context.Context, filePath string, size int64, filename string) (*FileReader, string, error)
GetWithNoRedirect(ctx context.Context, filePath string, size int64) (*FileReader, error)
Stat(ctx context.Context, filePath string) (int64, error)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/storage/io.go | registry/app/storage/io.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"errors"
"io"
"github.com/harness/gitness/registry/app/driver"
)
const (
maxBlobGetSize = 4 * 1024 * 1024
)
func getContent(ctx context.Context, driver driver.StorageDriver, p string) ([]byte, error) {
r, err := driver.Reader(ctx, p, 0)
if err != nil {
return nil, err
}
defer r.Close()
return readAllLimited(r, maxBlobGetSize)
}
func readAllLimited(r io.Reader, limit int64) ([]byte, error) {
r = limitReader(r, limit)
return io.ReadAll(r)
}
// limitReader returns a new reader limited to n bytes. Unlike io.LimitReader,
// this returns an error when the limit reached.
func limitReader(r io.Reader, n int64) io.Reader {
return &limitedReader{r: r, n: n}
}
// limitedReader implements a reader that errors when the limit is reached.
//
// Partially cribbed from net/http.MaxBytesReader.
type limitedReader struct {
r io.Reader // underlying reader
n int64 // max bytes remaining
err error // sticky error
}
func (l *limitedReader) Read(p []byte) (n int, err error) {
if l.err != nil {
return 0, l.err
}
if len(p) == 0 {
return 0, nil
}
// If they asked for a 32KB byte read but only 5 bytes are
// remaining, no need to read 32KB. 6 bytes will answer the
// question of the whether we hit the limit or go past it.
if int64(len(p)) > l.n+1 {
p = p[:l.n+1]
}
n, err = l.r.Read(p)
if int64(n) <= l.n {
l.n -= int64(n)
l.err = err
return n, err
}
n = int(l.n)
l.n = 0
l.err = errors.New("storage: read exceeds limit")
return n, l.err
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/storage/storageservice.go | registry/app/storage/storageservice.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"github.com/harness/gitness/registry/app/driver"
"github.com/opencontainers/go-digest"
)
// Service is the storage-backed registry service. Its flags control
// blob deletion, resumable digest uploads, and whether blob downloads
// are redirected to backend-provided URLs.
type Service struct {
	deleteEnabled          bool
	resumableDigestEnabled bool
	redirect               bool
	driver                 driver.StorageDriver
}

// Option is the type used for functional options for NewRegistry.
type Option func(*Service) error

// EnableRedirect is a functional option for NewRegistry. It causes the backend
// blob server to attempt using (StorageDriver).RedirectURL to serve all blobs.
func EnableRedirect(registry *Service) error {
	registry.redirect = true
	return nil
}

// EnableDelete is a functional option for NewRegistry. It enables deletion on
// the registry.
func EnableDelete(registry *Service) error {
	registry.deleteEnabled = true
	return nil
}
// NewStorageService builds a Service backed by the given storage driver
// and applies the supplied functional options in order. Resumable digests
// are enabled by default; the first failing option aborts construction.
func NewStorageService(driver driver.StorageDriver, options ...Option) (*Service, error) {
	svc := &Service{
		resumableDigestEnabled: true,
		driver:                 driver,
	}
	for _, opt := range options {
		if err := opt(svc); err != nil {
			return nil, err
		}
	}
	return svc, nil
}
// OciBlobsStore returns an OCI blob store scoped to the given repository
// key and root parent reference, inheriting the service's driver and
// feature flags (redirect, delete, resumable digests).
func (storage *Service) OciBlobsStore(ctx context.Context, repoKey string, rootParentRef string) OciBlobStore {
	return &ociBlobStore{
		repoKey:                repoKey,
		ctx:                    ctx,
		driver:                 storage.driver,
		pathFn:                 PathFn,
		redirect:               storage.redirect,
		deleteEnabled:          storage.deleteEnabled,
		resumableDigestEnabled: storage.resumableDigestEnabled,
		rootParentRef:          rootParentRef,
	}
}
// GenericBlobsStore returns a non-OCI (generic file) blob store rooted at
// rootParentRef, sharing the service's driver and redirect setting.
func (storage *Service) GenericBlobsStore(rootParentRef string) GenericBlobStore {
	return &genericBlobStore{
		driver:        storage.driver,
		redirect:      storage.redirect,
		rootParentRef: rootParentRef,
	}
}
// PathFn returns the canonical storage path for the blob identified by
// dgst under pathPrefix. The blob may or may not exist at that path.
func PathFn(pathPrefix string, dgst digest.Digest) (string, error) {
	return pathFor(
		blobDataPathSpec{
			digest: dgst,
			path:   pathPrefix,
		},
	)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/storage/paths.go | registry/app/storage/paths.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"fmt"
"path"
"strings"
a "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact"
"github.com/opencontainers/go-digest"
)
// Storage layout path segments.
const (
	// storagePathRoot is the root prefix for all storage paths.
	storagePathRoot = "/"
	// docker is the directory segment for docker/OCI content.
	docker = "docker"
	// blobs is the directory segment holding blob data.
	blobs = "blobs"
)

// PackageType constants using iota.
const (
	PackageTypeDOCKER = iota
)
// pathFor maps a pathSpec to its canonical location in the storage
// backend. All paths are rooted at storagePathRoot. An error is returned
// for invalid digests or unknown spec types.
//
// Fix: the blobDataPathSpec case previously used the string literal
// "blobs" instead of the blobs constant, and "_uploads" was repeated as a
// bare literal in two cases; both are now used consistently (same values,
// identical paths produced).
func pathFor(spec pathSpec) (string, error) {
	// uploads is the directory segment for in-progress upload state.
	const uploads = "_uploads"

	rootPrefix := []string{storagePathRoot}
	switch v := spec.(type) {
	case blobsPathSpec:
		// Root of the global blobs directory.
		blobsPathPrefix := rootPrefix
		blobsPathPrefix = append(blobsPathPrefix, blobs)
		return path.Join(blobsPathPrefix...), nil
	case blobPathSpec:
		components, err := digestPathComponents(v.digest, true)
		if err != nil {
			return "", err
		}
		blobPathPrefix := rootPrefix
		blobPathPrefix = append(blobPathPrefix, v.path, docker, blobs)
		return path.Join(append(blobPathPrefix, components...)...), nil
	case blobDataPathSpec:
		components, err := digestPathComponents(v.digest, true)
		if err != nil {
			return "", err
		}
		// The blob's content lives in a "data" file under its digest dir.
		components = append(components, "data")
		blobPathPrefix := rootPrefix
		blobPathPrefix = append(blobPathPrefix, v.path, docker, blobs)
		return path.Join(append(blobPathPrefix, components...)...), nil
	case uploadDataPathSpec:
		return path.Join(append(rootPrefix, v.path, docker, uploads, v.repoName, v.id, "data")...), nil
	case uploadHashStatePathSpec:
		offset := fmt.Sprintf("%d", v.offset)
		if v.list {
			offset = "" // Limit to the prefix for listing offsets.
		}
		return path.Join(
			append(
				rootPrefix, v.path, docker, uploads, v.repoName, v.id, "hashstates",
				string(v.alg), offset,
			)...,
		), nil
	case repositoriesRootPathSpec:
		return path.Join(rootPrefix...), nil
	case uploadFilePathSpec:
		return path.Join(append(rootPrefix, v.path)...), nil
	default:
		return "", fmt.Errorf("unknown path spec: %#v", v)
	}
}
// pathSpec is a type to mark structs as path specs. There is no
// implementation because we'd like to keep the specs and the mappers
// decoupled.
type pathSpec interface {
	pathSpec()
}

// blobAlgorithmReplacer does some very simple path sanitization for user
// input. Paths should be "safe" before getting this far due to strict digest
// requirements but we can add further path conversion here, if needed.
var blobAlgorithmReplacer = strings.NewReplacer(
	"+", "/",
	".", "/",
	";", "/",
)

// blobsPathSpec contains the path for the blobs directory.
type blobsPathSpec struct{}

func (blobsPathSpec) pathSpec() {}

// blobPathSpec contains the path for the registry global blob store.
type blobPathSpec struct {
	// digest identifies the blob; path is the root prefix.
	digest digest.Digest
	path   string
}

func (blobPathSpec) pathSpec() {}

// blobDataPathSpec contains the path for the StorageService global blob store. For
// now, this contains layer data, exclusively.
type blobDataPathSpec struct {
	digest digest.Digest
	path   string
}

func (blobDataPathSpec) pathSpec() {}

// uploadDataPathSpec defines the path parameters of the data file for
// uploads.
type uploadDataPathSpec struct {
	path     string
	repoName string
	id       string
}

func (uploadDataPathSpec) pathSpec() {}

// uploadFilePathSpec defines the path parameters for an arbitrary upload
// file located directly under the root prefix.
type uploadFilePathSpec struct {
	path string
}

func (uploadFilePathSpec) pathSpec() {}

// uploadHashStatePathSpec defines the path parameters for the file that stores
// the hash function state of an upload at a specific byte offset. If `list` is
// set, then the path mapper will generate a list prefix for all hash state
// offsets for the upload identified by the name, id, and alg.
type uploadHashStatePathSpec struct {
	path     string
	repoName string
	id       string
	alg      digest.Algorithm
	offset   int64
	list     bool
}

func (uploadHashStatePathSpec) pathSpec() {}

// repositoriesRootPathSpec returns the root of repositories.
type repositoriesRootPathSpec struct{}

func (repositoriesRootPathSpec) pathSpec() {}
// digestPathComponents breaks a digest into path components:
//
//	<algorithm>/<hex digest>
//
// With multilevel set, a two-character fan-out directory is inserted:
//
//	<algorithm>/<first two bytes of digest>/<full digest>
//
// An error is returned if the digest fails validation.
func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) {
	if err := dgst.Validate(); err != nil {
		return nil, err
	}

	hex := dgst.Encoded()
	components := []string{blobAlgorithmReplacer.Replace(string(dgst.Algorithm()))}
	if multilevel {
		components = append(components, hex[:2])
	}
	return append(components, hex), nil
}
// BlobPath returns the storage path for a blob based on the package type.
//
// The sha256 argument may carry the "sha256:" prefix, e.g.
// "sha256:50f564aff30aeb53eb88b0eb2c2ba59878e9854681989faa5ff7396bdfaf509b".
//
// Fix: the original indexed sha256[:2] unconditionally and panicked on a
// digest shorter than two characters; that case now returns an error.
func BlobPath(acctID string, packageType string, sha256 string) (string, error) {
	sha256 = strings.TrimPrefix(sha256, "sha256:")
	if len(sha256) < 2 {
		return "", fmt.Errorf("invalid sha256 digest %q: too short", sha256)
	}
	sha256Prefix := sha256[:2]
	switch packageType {
	case string(a.PackageTypeDOCKER):
		acctID = strings.ToLower(acctID) // lowercase for OCI compliance
		// format: /accountId(lowercase)/docker/blobs/sha256/(2 character prefix of sha)/sha/data
		return fmt.Sprintf("/%s/docker/blobs/sha256/%s/%s/data", acctID, sha256Prefix, sha256), nil
	default:
		return fmt.Sprintf("/%s/files/%s", acctID, sha256), nil
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/storage/blobwriter_resumable.go | registry/app/storage/blobwriter_resumable.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !noresumabledigest
// +build !noresumabledigest
package storage
import (
"context"
"encoding"
"errors"
"fmt"
"hash"
"path"
"strconv"
storagedriver "github.com/harness/gitness/registry/app/driver"
"github.com/rs/zerolog/log"
)
// resumeDigest attempts to restore the state of the internal hash function
// by loading the most recent saved hash state equal to the current size of the blob.
// Returns errResumableDigestNotAvailable when the feature is disabled, the
// hash cannot be deserialized, or no state covers the current offset.
func (bw *blobWriter) resumeDigest(ctx context.Context) error {
	if !bw.resumableDigestEnabled {
		return errResumableDigestNotAvailable
	}

	// The hash must support binary deserialization to be resumable.
	h, ok := bw.digester.Hash().(encoding.BinaryUnmarshaler)
	if !ok {
		return errResumableDigestNotAvailable
	}

	offset := bw.fileWriter.Size()
	if offset == bw.written {
		// State of digester is already at the requested offset.
		return nil
	}

	// List hash states from storage backend.
	var hashStateMatch hashStateEntry
	hashStates, err := bw.getStoredHashStates(ctx)
	if err != nil {
		return fmt.Errorf("unable to get stored hash states with offset %d: %w", offset, err)
	}

	// Find the highest stored hashState with offset equal to
	// the requested offset.
	for _, hashState := range hashStates {
		if hashState.offset == offset {
			hashStateMatch = hashState
			break // Found an exact offset match.
		}
	}

	if hashStateMatch.offset == 0 {
		// No need to load any state, just reset the hasher.
		h.(hash.Hash).Reset() //nolint:errcheck
	} else {
		storedState, err := bw.driver.GetContent(ctx, hashStateMatch.path)
		if err != nil {
			return err
		}

		if err = h.UnmarshalBinary(storedState); err != nil {
			return err
		}
		bw.written = hashStateMatch.offset
	}

	// Mind the gap: if on-disk data is ahead of the restored digest state,
	// the digest cannot be resumed without a full re-hash.
	if gapLen := offset - bw.written; gapLen > 0 {
		return errResumableDigestNotAvailable
	}

	return nil
}
// hashStateEntry records the byte offset and backend path of one saved
// hash state for an in-progress upload.
type hashStateEntry struct {
	offset int64
	path   string
}

// getStoredHashStates returns a slice of hashStateEntries for this upload.
// A missing hash-state directory is treated as having no entries. Entries
// whose offset suffix cannot be parsed are logged and still appended with
// a zero offset.
func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) {
	uploadHashStatePathPrefix, err := pathFor(
		uploadHashStatePathSpec{
			path:     bw.blobStore.rootParentRef,
			repoName: bw.blobStore.repoKey,
			id:       bw.id,
			alg:      bw.digester.Digest().Algorithm(),
			list:     true,
		},
	)
	if err != nil {
		return nil, err
	}

	paths, err := bw.blobStore.driver.List(ctx, uploadHashStatePathPrefix)
	if err != nil {
		if ok := errors.As(err, &storagedriver.PathNotFoundError{}); !ok {
			return nil, err
		}
		// Treat PathNotFoundError as no entries.
		paths = nil
	}

	hashStateEntries := make([]hashStateEntry, 0, len(paths))
	for _, p := range paths {
		pathSuffix := path.Base(p)
		// The suffix should be the offset.
		offset, err := strconv.ParseInt(pathSuffix, 0, 64)
		if err != nil {
			log.Ctx(ctx).Error().Msgf("unable to parse offset from upload state path %q: %s", p, err)
		}
		hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p})
	}

	return hashStateEntries, nil
}
// storeHashState persists the internal hash state at the current written
// offset so a later session can resume digest computation without
// re-reading the data. Returns errResumableDigestNotAvailable when the
// feature is disabled or the hash does not support serialization.
func (bw *blobWriter) storeHashState(ctx context.Context) error {
	if !bw.resumableDigestEnabled {
		return errResumableDigestNotAvailable
	}

	// The hash must support binary serialization to be resumable.
	h, ok := bw.digester.Hash().(encoding.BinaryMarshaler)
	if !ok {
		return errResumableDigestNotAvailable
	}

	state, err := h.MarshalBinary()
	if err != nil {
		return err
	}

	uploadHashStatePath, err := pathFor(
		uploadHashStatePathSpec{
			path:     bw.blobStore.rootParentRef,
			repoName: bw.blobStore.repoKey,
			id:       bw.id,
			alg:      bw.digester.Digest().Algorithm(),
			offset:   bw.written,
		},
	)
	if err != nil {
		return err
	}

	return bw.driver.PutContent(ctx, uploadHashStatePath, state)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/storage/gcstoragelient.go | registry/app/storage/gcstoragelient.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"github.com/harness/gitness/registry/app/driver"
"github.com/opencontainers/go-digest"
"github.com/rs/zerolog/log"
)
// GcStorageClient removes blob data from backend storage during garbage
// collection.
type GcStorageClient struct {
	StorageDeleter driver.StorageDeleter
}

// NewGcStorageClient returns a GcStorageClient backed by the given deleter.
func NewGcStorageClient(storageDeleter driver.StorageDeleter) *GcStorageClient {
	return &GcStorageClient{
		StorageDeleter: storageDeleter,
	}
}
// RemoveBlob removes a blob from the filesystem: it resolves the blob's
// storage path under rootParentRef and asks the deleter to remove it.
func (sc *GcStorageClient) RemoveBlob(ctx context.Context, dgst digest.Digest, rootParentRef string) error {
	blobPath, err := pathFor(blobPathSpec{digest: dgst, path: rootParentRef})
	if err != nil {
		return err
	}

	log.Ctx(ctx).Info().Msgf("deleting blob from storage, digest: %s , path: %s", dgst.String(), rootParentRef)
	return sc.StorageDeleter.Delete(ctx, blobPath)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/storage/blobwriter.go | registry/app/storage/blobwriter.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"errors"
"fmt"
"io"
"path"
"time"
"github.com/harness/gitness/registry/app/dist_temp/dcontext"
"github.com/harness/gitness/registry/app/driver"
"github.com/harness/gitness/registry/app/manifest"
"github.com/opencontainers/go-digest"
"github.com/rs/zerolog/log"
)
// errResumableDigestNotAvailable signals that the upload's hash state
// cannot be saved or restored; callers treat it as "fall back to full hash".
var errResumableDigestNotAvailable = errors.New("resumable digest not available")

const (
	// digestSha256Empty is the canonical sha256 digest of empty data.
	digestSha256Empty = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
)
// blobWriter is used to control the various aspects of resumable
// blob upload.
type blobWriter struct {
	ctx       context.Context
	blobStore *ociBlobStore

	// id is the upload session identifier.
	id string

	// digester hashes every byte fed through Write; written counts those
	// bytes so the hash state can be checkpointed and resumed.
	digester digest.Digester
	written  int64 // track the write to digester

	fileWriter driver.FileWriter
	driver     driver.StorageDriver
	path       string

	resumableDigestEnabled bool
	committed              bool
}

// Compile-time check that blobWriter satisfies BlobWriter.
var _ BlobWriter = &blobWriter{}
// ID returns the identifier for this upload.
func (bw *blobWriter) ID() string {
	return bw.id
}

// Commit marks the upload as completed, returning a valid descriptor. The
// final size and digest are checked against the first descriptor provided.
func (bw *blobWriter) Commit(ctx context.Context, pathPrefix string, desc manifest.Descriptor) (
	manifest.Descriptor, error,
) {
	dcontext.GetLogger(ctx, log.Debug()).Msg("(*blobWriter).Commit")

	if err := bw.fileWriter.Commit(ctx); err != nil {
		return manifest.Descriptor{}, err
	}

	// NOTE(review): the Close error is discarded here; the file writer has
	// already committed — confirm this is intentional.
	bw.Close()
	desc.Size = bw.Size()

	// Verify the written content against the caller-provided descriptor.
	canonical, err := bw.validateBlob(ctx, desc)
	if err != nil {
		return manifest.Descriptor{}, err
	}

	// Move the data into its content-addressed destination.
	if err := bw.moveBlob(ctx, pathPrefix, canonical); err != nil {
		return manifest.Descriptor{}, err
	}

	// Clean up upload-session files.
	if err := bw.removeResources(ctx); err != nil {
		return manifest.Descriptor{}, err
	}

	bw.committed = true
	return canonical, nil
}
// Cancel the blob upload process, releasing any resources associated with
// the writer and canceling the operation.
func (bw *blobWriter) Cancel(ctx context.Context) error {
	dcontext.GetLogger(ctx, log.Debug()).Msg("(*blobWriter).Cancel")
	if err := bw.fileWriter.Cancel(ctx); err != nil {
		return err
	}

	// A Close failure is logged but does not abort resource cleanup.
	if err := bw.Close(); err != nil {
		dcontext.GetLogger(ctx, log.Error()).Msgf("error closing blobwriter: %s", err)
	}

	return bw.removeResources(ctx)
}
// Size returns the number of bytes persisted by the underlying file writer.
func (bw *blobWriter) Size() int64 {
	return bw.fileWriter.Size()
}

// Write appends p to both the backing file and the running digest.
func (bw *blobWriter) Write(p []byte) (int, error) {
	// Ensure that the current write offset matches how many bytes have been
	// written to the digester. If not, we need to update the digest state to
	// match the current write position.
	if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && !errors.Is(err, errResumableDigestNotAvailable) {
		return 0, err
	}

	_, err := bw.fileWriter.Write(p)
	if err != nil {
		return 0, err
	}

	// Hash the same bytes; the hash's count drives bw.written so the digest
	// state can be checkpointed later.
	n, err := bw.digester.Hash().Write(p)
	bw.written += int64(n)

	return n, err
}

// Close checkpoints the hash state (when resumable) and closes the
// underlying file writer. Closing after Commit is an error.
func (bw *blobWriter) Close() error {
	if bw.committed {
		return errors.New("blobwriter close after commit")
	}

	if err := bw.storeHashState(bw.blobStore.ctx); err != nil && !errors.Is(err, errResumableDigestNotAvailable) {
		return err
	}

	return bw.fileWriter.Close()
}
// validateBlob checks the data against the digest, returning an error if it
// does not match. The canonical descriptor is returned.
//
// Verification prefers the resumable digest when its algorithm matches the
// caller's; otherwise the stored content is re-read and re-hashed.
//
// Fix: the failure log message previously read "canonical digest does
// match provided digest" on the mismatch path; corrected to "does not
// match".
func (bw *blobWriter) validateBlob(ctx context.Context, desc manifest.Descriptor) (manifest.Descriptor, error) {
	var (
		verified, fullHash bool
		canonical          digest.Digest
	)

	if desc.Digest == "" {
		// if no descriptors are provided, we have nothing to validate
		// against. We don't really want to support this for the registry.
		return manifest.Descriptor{}, BlobInvalidDigestError{
			Reason: fmt.Errorf("cannot validate against empty digest"),
		}
	}

	var size int64

	// Stat the on disk file
	if fi, err := bw.driver.Stat(ctx, bw.path); err != nil {
		if errors.As(err, &driver.PathNotFoundError{}) {
			// A missing data file means a zero-length upload.
			desc.Size = 0
		} else {
			// Any other error we want propagated up the stack.
			return manifest.Descriptor{}, err
		}
	} else {
		if fi.IsDir() {
			return manifest.Descriptor{}, fmt.Errorf("unexpected directory at upload location %q", bw.path)
		}

		size = fi.Size()
	}

	if desc.Size > 0 {
		if desc.Size != size {
			return manifest.Descriptor{}, ErrBlobInvalidLength
		}
	} else {
		// if provided 0 or negative length, we can assume caller doesn't know or
		// care about length.
		desc.Size = size
	}

	if err := bw.resumeDigest(ctx); err == nil {
		canonical = bw.digester.Digest()

		if canonical.Algorithm() == desc.Digest.Algorithm() {
			// Common case: client and server prefer the same canonical digest
			// algorithm - currently SHA256.
			verified = desc.Digest == canonical
		} else {
			// The client wants to use a different digest algorithm. They'll just
			// have to be patient and wait for us to download and re-hash the
			// uploaded content using that digest algorithm.
			fullHash = true
		}
	} else if errors.Is(err, errResumableDigestNotAvailable) {
		// Not using resumable digests, so we need to hash the entire layer.
		fullHash = true
	} else {
		return manifest.Descriptor{}, err
	}

	if fullHash && bw.written == size && digest.Canonical == desc.Digest.Algorithm() {
		// a fantastic optimization: if the written data and the size are
		// the same, we don't need to read the data from the backend. This is
		// because we've written the entire file in the lifecycle of the
		// current instance.
		canonical = bw.digester.Digest()
		verified = desc.Digest == canonical
	}

	if fullHash && !verified {
		// If the check based on size fails, we fall back to the slowest of
		// paths. We may be able to make the size-based check a stronger
		// guarantee, so this may be defensive.
		digester := digest.Canonical.Digester()
		verifier := desc.Digest.Verifier()

		// Read the file from the backend driver and validate it.
		fr, err := NewFileReader(ctx, bw.driver, bw.path, desc.Size)
		if err != nil {
			return manifest.Descriptor{}, err
		}
		defer fr.Close()

		tr := io.TeeReader(fr, digester.Hash())

		if _, err := io.Copy(verifier, tr); err != nil {
			return manifest.Descriptor{}, err
		}

		canonical = digester.Digest()
		verified = verifier.Verified()
	}

	if !verified {
		dcontext.GetLoggerWithFields(
			ctx, log.Ctx(ctx).Error(),
			map[any]any{
				"canonical": canonical,
				"provided":  desc.Digest,
			}, "canonical", "provided",
		).
			Msg("canonical digest does not match provided digest")
		return manifest.Descriptor{}, BlobInvalidDigestError{
			Digest: desc.Digest,
			Reason: fmt.Errorf("content does not match digest"),
		}
	}

	// update desc with canonical hash
	desc.Digest = canonical

	if desc.MediaType == "" {
		desc.MediaType = "application/octet-stream"
	}

	return desc, nil
}
// moveBlob moves the data into its final, hash-qualified destination,
// identified by dgst. The layer should be validated before commencing the
// move. If the destination already exists the move is skipped, since the
// blob storage is content-addressable.
func (bw *blobWriter) moveBlob(ctx context.Context, pathPrefix string, desc manifest.Descriptor) error {
	blobPath, err := pathFor(
		blobDataPathSpec{
			digest: desc.Digest,
			path:   pathPrefix,
		},
	)
	if err != nil {
		return err
	}

	// Check for existence
	if _, err := bw.blobStore.driver.Stat(ctx, blobPath); err != nil {
		log.Ctx(ctx).Info().Msgf("Error type: %T, value: %v\n", err, err)
		if !errors.As(err, &driver.PathNotFoundError{}) {
			return err
		}
	} else {
		// If the path exists, we can assume that the content has already
		// been uploaded, since the blob storage is content-addressable.
		// While it may be corrupted, detection of such corruption belongs
		// elsewhere.
		return nil
	}

	// If no data was received, we may not actually have a file on disk. Check
	// the size here and write a zero-length file to blobPath if this is the
	// case. For the most part, this should only ever happen with zero-length
	// blobs.
	if _, err := bw.blobStore.driver.Stat(ctx, bw.path); err != nil {
		if errors.As(err, &driver.PathNotFoundError{}) {
			// Only the empty-content digest is valid with no data file;
			// write the empty blob directly.
			if desc.Digest == digestSha256Empty {
				return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{})
			}

			// We let this fail during the move below.
			log.Ctx(ctx).Warn().
				Interface("upload.id", bw.ID()).
				Interface("digest", desc.Digest).
				Msg("attempted to move zero-length content with non-zero digest")
		} else {
			return err // unrelated error
		}
	}

	return bw.blobStore.driver.Move(ctx, bw.path, blobPath)
}
// removeResources should clean up all resources associated with the upload
// instance. An error will be returned if the clean up cannot proceed. If the
// resources are already not present, no error will be returned.
func (bw *blobWriter) removeResources(ctx context.Context) error {
	dataPath, err := pathFor(
		uploadDataPathSpec{
			path:     bw.blobStore.rootParentRef,
			repoName: bw.blobStore.repoKey,
			id:       bw.id,
		},
	)
	if err != nil {
		return err
	}

	// Resolve and delete the containing directory, which should include any
	// upload related files.
	dirPath := path.Dir(dataPath)
	if err := bw.blobStore.driver.Delete(ctx, dirPath); err != nil {
		// PathNotFoundError means the resources are already gone; only
		// other failures are surfaced.
		if !errors.As(err, &driver.PathNotFoundError{}) {
			// This should be uncommon enough such that returning an error
			// should be okay. At this point, the upload should be mostly
			// complete, but perhaps the backend became unaccessible.
			dcontext.GetLogger(ctx, log.Error()).Msgf("unable to delete layer upload resources %q: %v", dirPath, err)
			return err
		}
	}

	return nil
}
// Reader returns a ReadCloser over the upload's data file. It polls the
// backend up to five times (sleeping one second between attempts) while
// the file is not yet visible, then opens a reader from offset zero.
func (bw *blobWriter) Reader() (io.ReadCloser, error) {
	for attempt := 1; attempt <= 5; attempt++ {
		_, err := bw.driver.Stat(bw.ctx, bw.path)
		if err == nil {
			break
		}
		if !errors.As(err, &driver.PathNotFoundError{}) {
			return nil, err
		}
		dcontext.GetLogger(bw.ctx, log.Debug()).Msgf("Nothing found on try %d, sleeping...", attempt)
		time.Sleep(1 * time.Second)
	}

	return bw.driver.Reader(bw.ctx, bw.path, 0)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/storage/ociblobstore.go | registry/app/storage/ociblobstore.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"errors"
"fmt"
"io"
"time"
"github.com/harness/gitness/registry/app/dist_temp/dcontext"
"github.com/harness/gitness/registry/app/driver"
"github.com/harness/gitness/registry/app/manifest"
"github.com/google/uuid"
"github.com/opencontainers/go-digest"
"github.com/rs/zerolog/log"
)
// Common HTTP header names used when serving registry blobs.
const (
	HeaderAccept              = "Accept"
	HeaderAuthorization       = "Authorization"
	HeaderCacheControl        = "Cache-Control"
	HeaderContentLength       = "Content-Length"
	HeaderContentRange        = "Content-Range"
	HeaderContentType         = "Content-Type"
	HeaderDockerContentDigest = "Docker-Content-Digest"
	HeaderDockerUploadUUID    = "Docker-Upload-UUID"
	HeaderEtag                = "Etag"
	HeaderIfNoneMatch         = "If-None-Match"
	HeaderLink                = "Link"
	HeaderLocation            = "Location"
	HeaderOCIFiltersApplied   = "OCI-Filters-Applied"
	HeaderOCISubject          = "OCI-Subject"
	HeaderRange               = "Range"
)

// blobCacheControlMaxAge is the Cache-Control max-age served for blobs
// (one year); blobs are content-addressed and therefore immutable.
const blobCacheControlMaxAge = 365 * 24 * time.Hour
// ociBlobStore serves OCI blobs for a single repository from the backing
// storage driver.
type ociBlobStore struct {
	repoKey string
	driver  driver.StorageDriver

	// only to be used where context can't come through method args
	ctx context.Context

	deleteEnabled          bool
	resumableDigestEnabled bool

	// pathFn maps a (prefix, digest) pair to its storage path.
	pathFn func(pathPrefix string, dgst digest.Digest) (string, error)

	redirect bool // allows disabling RedirectURL redirects

	// rootParentRef is the root path prefix for this store.
	rootParentRef string
}

// Compile-time check that ociBlobStore satisfies OciBlobStore.
var _ OciBlobStore = &ociBlobStore{}

// Path returns the store's root parent reference.
func (bs *ociBlobStore) Path() string {
	return bs.rootParentRef
}
// Create begins a blob write session, returning a handle.
//
// Fix: the local variable holding the new upload id was named "uuid",
// shadowing the imported github.com/google/uuid package; renamed to id.
func (bs *ociBlobStore) Create(ctx context.Context) (BlobWriter, error) {
	dcontext.GetLogger(ctx, log.Ctx(ctx).Debug()).Msg("(*ociBlobStore).Create")

	id := uuid.NewString()
	path, err := pathFor(
		uploadDataPathSpec{
			path:     bs.rootParentRef,
			repoName: bs.repoKey,
			id:       id,
		},
	)
	if err != nil {
		return nil, err
	}

	return bs.newBlobUpload(ctx, id, path, false)
}
// Resume reopens the in-progress blob upload identified by id, returning
// a writer positioned to append to the existing upload data.
func (bs *ociBlobStore) Resume(ctx context.Context, id string) (BlobWriter, error) {
	dcontext.GetLogger(ctx, log.Ctx(ctx).Debug()).Msg("(*ociBlobStore).Resume")

	spec := uploadDataPathSpec{
		path:     bs.rootParentRef,
		repoName: bs.repoKey,
		id:       id,
	}
	uploadPath, err := pathFor(spec)
	if err != nil {
		return nil, err
	}
	return bs.newBlobUpload(ctx, id, uploadPath, true)
}
// Delete is not supported on this store and always returns ErrUnsupported;
// blob removal is handled elsewhere (see GcStorageClient.RemoveBlob).
func (bs *ociBlobStore) Delete(_ context.Context, _ string, _ digest.Digest) error {
	return ErrUnsupported
}
// ServeBlobInternal resolves the blob for dgst and either returns a
// redirect URL (when redirects are enabled and the driver provides one)
// or a FileReader over the content. The headers map is populated with
// content type, length, digest, ETag, and cache-control entries; exactly
// one of the reader or the redirect URL is non-zero on success.
func (bs *ociBlobStore) ServeBlobInternal(
	ctx context.Context,
	pathPrefix string,
	dgst digest.Digest,
	headers map[string]string,
	method string,
) (*FileReader, string, int64, error) {
	desc, err := bs.Stat(ctx, pathPrefix, dgst)
	if err != nil {
		return nil, "", 0, err
	}
	if desc.MediaType != "" {
		// Set the repository local content type.
		headers[HeaderContentType] = desc.MediaType
	}

	size := desc.Size
	path, err := bs.pathFn(pathPrefix, desc.Digest)
	if err != nil {
		return nil, "", size, err
	}

	if bs.redirect {
		redirectURL, err := bs.driver.RedirectURL(ctx, method, path, "")
		if err != nil {
			return nil, "", size, err
		}
		if redirectURL != "" {
			// Redirect to storage URL.
			// http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect)
			return nil, redirectURL, size, nil
		}
		// Fallback to serving the content directly.
	}

	br, err := NewFileReader(ctx, bs.driver, path, desc.Size)
	if err != nil {
		if br != nil {
			br.Close()
		}
		return nil, "", size, err
	}

	headers[HeaderEtag] = fmt.Sprintf(`"%s"`, desc.Digest)
	// If-None-Match handled by ServeContent
	headers[HeaderCacheControl] = fmt.Sprintf(
		"max-age=%.f",
		blobCacheControlMaxAge.Seconds(),
	)

	if headers[HeaderDockerContentDigest] == "" {
		headers[HeaderDockerContentDigest] = desc.Digest.String()
	}
	if headers[HeaderContentType] == "" {
		// Set the content type if not already set.
		// NOTE(review): only reachable when desc.MediaType was empty above.
		headers[HeaderContentType] = desc.MediaType
	}
	if headers[HeaderContentLength] == "" {
		// Set the content length if not already set.
		headers[HeaderContentLength] = fmt.Sprint(desc.Size)
	}
	return br, "", size, err
}
// GetBlobInternal stats the blob identified by dgst and opens a reader
// over its content, returning the reader together with the blob size.
func (bs *ociBlobStore) GetBlobInternal(
	ctx context.Context,
	pathPrefix string,
	dgst digest.Digest,
) (*FileReader, int64, error) {
	desc, err := bs.Stat(ctx, pathPrefix, dgst)
	if err != nil {
		return nil, 0, err
	}

	blobPath, err := bs.pathFn(pathPrefix, desc.Digest)
	if err != nil {
		return nil, desc.Size, err
	}

	reader, err := NewFileReader(ctx, bs.driver, blobPath, desc.Size)
	if err != nil {
		// Release any partially-constructed reader before reporting.
		if reader != nil {
			reader.Close()
		}
		return nil, desc.Size, err
	}
	return reader, desc.Size, err
}
// Get reads the entire content of the blob identified by dgst into memory.
// ErrBlobUnknown is returned when the blob's path does not exist.
func (bs *ociBlobStore) Get(
	ctx context.Context, pathPrefix string,
	dgst digest.Digest,
) ([]byte, error) {
	canonical, err := bs.Stat(ctx, pathPrefix, dgst)
	if err != nil {
		return nil, err
	}

	blobPath, err := bs.pathFn(pathPrefix, canonical.Digest)
	if err != nil {
		return nil, err
	}

	content, err := getContent(ctx, bs.driver, blobPath)
	if err == nil {
		return content, nil
	}
	if errors.As(err, &driver.PathNotFoundError{}) {
		return nil, ErrBlobUnknown
	}
	return nil, err
}
// Open returns a seekable reader over the blob identified by dgst.
func (bs *ociBlobStore) Open(
	ctx context.Context, pathPrefix string,
	dgst digest.Digest,
) (io.ReadSeekCloser, error) {
	desc, err := bs.Stat(ctx, pathPrefix, dgst)
	if err != nil {
		return nil, err
	}

	blobPath, err := bs.pathFn(pathPrefix, desc.Digest)
	if err != nil {
		return nil, err
	}
	return NewFileReader(ctx, bs.driver, blobPath, desc.Size)
}
// Put stores the content p in the blob store, calculating the digest.
// If the content is already present, only the digest will be returned.
// This should only be used for small objects, such as manifests.
// This is implemented as a convenience for other Put implementations.
func (bs *ociBlobStore) Put(
	ctx context.Context, pathPrefix string,
	p []byte,
) (manifest.Descriptor, error) {
	dgst := digest.FromBytes(p)
	desc, err := bs.Stat(ctx, pathPrefix, dgst)
	if err == nil {
		// content already present
		return desc, nil
	} else if !errors.Is(err, ErrBlobUnknown) {
		dcontext.GetLogger(
			ctx, log.Error(),
		).Msgf(
			"ociBlobStore: error stating content (%v): %v", dgst, err,
		)
		// real error, return it
		return manifest.Descriptor{}, err
	}

	bp, err := bs.pathFn(pathPrefix, dgst)
	if err != nil {
		return manifest.Descriptor{}, err
	}

	// The descriptor is returned alongside PutContent's error; callers must
	// check the error before trusting the descriptor.
	return manifest.Descriptor{
		Size:      int64(len(p)),
		MediaType: "application/octet-stream",
		Digest:    dgst,
	}, bs.driver.PutContent(ctx, bp, p)
}
// Stat returns the descriptor for the blob
// in the main blob store. If this method returns successfully, there is
// strong guarantee that the blob exists and is available.
// ErrBlobUnknown is returned when the path is missing or (defensively)
// resolves to a directory.
func (bs *ociBlobStore) Stat(
	ctx context.Context, pathPrefix string,
	dgst digest.Digest,
) (manifest.Descriptor, error) {
	path, err := pathFor(
		blobDataPathSpec{
			digest: dgst,
			path:   pathPrefix,
		},
	)
	if err != nil {
		return manifest.Descriptor{}, err
	}

	fi, err := bs.driver.Stat(ctx, path)
	if err != nil {
		if errors.As(err, &driver.PathNotFoundError{}) {
			return manifest.Descriptor{}, ErrBlobUnknown
		}
		return manifest.Descriptor{}, err
	}

	if fi.IsDir() {
		dcontext.GetLogger(
			ctx, log.Warn(),
		).Msgf("blob path should not be a directory: %q", path)
		return manifest.Descriptor{}, ErrBlobUnknown
	}

	// The real media type is unknown at this layer; a generic type is used.
	return manifest.Descriptor{
		Size:      fi.Size(),
		MediaType: "application/octet-stream",
		Digest:    dgst,
	}, nil
}
// newBlobUpload allocates a new upload controller for the given upload id
// and path. When appendMode is true the underlying writer is opened in
// append mode so an existing upload can be resumed.
//
// Fix: the boolean parameter was named "a", which hid its meaning; renamed
// to appendMode (unexported method, callers are positional — no API change).
func (bs *ociBlobStore) newBlobUpload(
	ctx context.Context, uuid,
	path string, appendMode bool,
) (BlobWriter, error) {
	fw, err := bs.driver.Writer(ctx, path, appendMode)
	if err != nil {
		return nil, err
	}

	bw := &blobWriter{
		ctx:                    ctx,
		blobStore:              bs,
		id:                     uuid,
		digester:               digest.Canonical.Digester(),
		fileWriter:             fw,
		driver:                 bs.driver,
		path:                   path,
		resumableDigestEnabled: bs.resumableDigestEnabled,
	}
	return bw, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/pkg/wire.go | registry/app/pkg/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pkg
import (
"github.com/harness/gitness/registry/app/pkg/quarantine"
"github.com/harness/gitness/registry/app/store"
"github.com/google/wire"
)
// CoreControllerProvider constructs a CoreController from its registry
// store and quarantine-finder dependencies, for wire injection.
func CoreControllerProvider(
	registryDao store.RegistryRepository,
	quarantineFinder quarantine.Finder,
) *CoreController {
	return NewCoreController(registryDao, quarantineFinder)
}

// WireSet exposes this package's providers to google/wire.
var WireSet = wire.NewSet(CoreControllerProvider)
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.