repo stringlengths 6 47 | file_url stringlengths 77 269 | file_path stringlengths 5 186 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-07 08:35:43 2026-01-07 08:55:24 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/npm/delete_package.go | registry/app/api/handler/npm/delete_package.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package npm
import (
"net/http"
"github.com/harness/gitness/registry/app/dist_temp/errcode"
"github.com/harness/gitness/registry/app/pkg/commons"
"github.com/harness/gitness/registry/app/pkg/types/npm"
"github.com/harness/gitness/registry/request"
)
// DeletePackage removes an entire npm package from the registry.
// It expects an *npm.ArtifactInfo to have been placed in the request
// context by upstream middleware; anything else is an invalid request.
// On success it replies 204 No Content with an empty body.
func (h *handler) DeletePackage(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	artInfo, valid := request.ArtifactInfoFrom(ctx).(*npm.ArtifactInfo)
	if !valid {
		h.HandleErrors2(ctx, errcode.ErrCodeInvalidRequest.WithMessage("failed to fetch info from context"), w)
		return
	}
	result := h.controller.DeletePackage(ctx, artInfo)
	if !commons.IsEmpty(result.GetError()) {
		http.Error(w, result.GetError().Error(), http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/npm/delete_version.go | registry/app/api/handler/npm/delete_version.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package npm
import (
"net/http"
"github.com/harness/gitness/registry/app/dist_temp/errcode"
"github.com/harness/gitness/registry/app/pkg/commons"
"github.com/harness/gitness/registry/app/pkg/types/npm"
"github.com/harness/gitness/registry/request"
)
// DeleteVersion removes a single version of an npm package.
// The *npm.ArtifactInfo (including the target version) must already be
// in the request context; on success it replies 204 No Content.
func (h *handler) DeleteVersion(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	artInfo, valid := request.ArtifactInfoFrom(ctx).(*npm.ArtifactInfo)
	if !valid {
		h.HandleErrors2(ctx, errcode.ErrCodeInvalidRequest.WithMessage("failed to fetch info from context"), w)
		return
	}
	result := h.controller.DeleteVersion(ctx, artInfo)
	if !commons.IsEmpty(result.GetError()) {
		http.Error(w, result.GetError().Error(), http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/npm/download_file.go | registry/app/api/handler/npm/download_file.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package npm
import (
"fmt"
"net/http"
npm2 "github.com/harness/gitness/registry/app/pkg/types/npm"
"github.com/harness/gitness/registry/request"
"github.com/rs/zerolog/log"
)
// DownloadPackageFileByName serves a package file download addressed by
// file name: it either redirects the client to an upstream URL supplied
// by the controller or streams the file body directly.
func (h *handler) DownloadPackageFileByName(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	info, ok := request.ArtifactInfoFrom(ctx).(*npm2.ArtifactInfo)
	if !ok {
		log.Ctx(ctx).Error().Msg("Failed to get npm artifact info from context")
		h.HandleErrors(r.Context(), []error{fmt.Errorf("failed to fetch npm artifact info from context")}, w)
		return
	}
	response := h.controller.DownloadPackageFile(ctx, info)
	// Close the backing reader on every exit path (error, redirect, or
	// after serving); close failures are logged, not surfaced.
	defer func() {
		if response.Body != nil {
			err := response.Body.Close()
			if err != nil {
				log.Ctx(r.Context()).Error().Msgf("Failed to close body: %v", err)
			}
		}
	}()
	if response.GetError() != nil {
		h.HandleError(r.Context(), w, response.GetError())
		return
	}
	w.Header().Set("Content-Disposition", "attachment; filename="+info.Filename)
	if response.RedirectURL != "" {
		http.Redirect(w, r, response.RedirectURL, http.StatusFound)
		return
	}
	h.ServeContent(w, r, response.Body, info.Filename)
	// NOTE(review): ServeContent has already written the status line and
	// body here, and net/http ignores header changes after WriteHeader —
	// confirm whether this call should run before ServeContent instead.
	response.ResponseHeaders.WriteToResponse(w)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/npm/upload.go | registry/app/api/handler/npm/upload.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package npm
import (
"fmt"
"net/http"
"github.com/harness/gitness/app/api/usererror"
"github.com/harness/gitness/registry/app/dist_temp/errcode"
"github.com/harness/gitness/registry/app/pkg/types/npm"
"github.com/harness/gitness/registry/request"
)
// UploadPackage handles an npm publish: it pulls the artifact info from
// the request context, treats the request body as the package payload,
// and hands both to the controller. On success it writes the
// controller-provided headers plus a confirmation with the SHA256.
func (h *handler) UploadPackage(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	artInfo, valid := request.ArtifactInfoFrom(ctx).(*npm.ArtifactInfo)
	if !valid {
		h.HandleErrors2(ctx, errcode.ErrCodeInvalidRequest.WithMessage("failed to fetch info from context"), w)
		return
	}
	payload, err := GetNPMFile(r)
	if err != nil {
		h.HandleError(ctx, w, usererror.BadRequest("File Data is empty in the request"))
		return
	}
	defer payload.Close()

	result := h.controller.UploadPackageFile(ctx, artInfo, payload)
	if result.GetError() != nil {
		h.HandleError(ctx, w, result.GetError())
		return
	}
	result.ResponseHeaders.WriteToResponse(w)
	if _, err = fmt.Fprintf(w, "Pushed.\nSha256: %s", result.Sha256); err != nil {
		h.HandleError(ctx, w, errcode.ErrCodeUnknown.WithDetail(err))
		return
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/npm/head_file.go | registry/app/api/handler/npm/head_file.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package npm
import (
"encoding/json"
"fmt"
"net/http"
npm2 "github.com/harness/gitness/registry/app/pkg/types/npm"
"github.com/harness/gitness/registry/request"
"github.com/rs/zerolog/log"
)
// HeadPackageFileByName answers HEAD probes for a package file. The
// controller result is serialized as JSON so the client can inspect
// file metadata without downloading the content.
func (h *handler) HeadPackageFileByName(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	artInfo, valid := request.ArtifactInfoFrom(ctx).(*npm2.ArtifactInfo)
	if !valid {
		log.Ctx(ctx).Error().Msg("Failed to get npm artifact info from context")
		h.HandleErrors(ctx, []error{fmt.Errorf("failed to fetch npm artifact info from context")}, w)
		return
	}
	result := h.controller.HeadPackageFileByName(ctx, artInfo)
	w.Header().Set("Content-Type", "application/json")
	if encodeErr := json.NewEncoder(w).Encode(result); encodeErr != nil {
		http.Error(w, encodeErr.Error(), http.StatusInternalServerError)
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/npm/delete_tag.go | registry/app/api/handler/npm/delete_tag.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package npm
import (
"encoding/json"
"net/http"
"github.com/harness/gitness/registry/app/dist_temp/errcode"
"github.com/harness/gitness/registry/app/pkg/commons"
"github.com/harness/gitness/registry/app/pkg/types/npm"
"github.com/harness/gitness/registry/request"
)
// DeletePackageTag removes an npm dist-tag from a package. The tag to
// delete is carried in the *npm.ArtifactInfo placed in the request
// context; the controller result is echoed back to the client as JSON.
func (h *handler) DeletePackageTag(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	artInfo, valid := request.ArtifactInfoFrom(ctx).(*npm.ArtifactInfo)
	if !valid {
		h.HandleErrors2(ctx, errcode.ErrCodeInvalidRequest.WithMessage("failed to fetch info from context"), w)
		return
	}
	result := h.controller.DeleteTag(ctx, artInfo)
	if !commons.IsEmpty(result.GetError()) {
		http.Error(w, result.GetError().Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	if encodeErr := json.NewEncoder(w).Encode(result); encodeErr != nil {
		http.Error(w, encodeErr.Error(), http.StatusInternalServerError)
		return
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/npm/handler.go | registry/app/api/handler/npm/handler.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package npm
import (
"errors"
"fmt"
"io"
"net/http"
"regexp"
"strings"
npm3 "github.com/harness/gitness/registry/app/api/controller/pkg/npm"
"github.com/harness/gitness/registry/app/api/handler/packages"
"github.com/harness/gitness/registry/app/pkg"
"github.com/harness/gitness/registry/app/pkg/commons"
"github.com/harness/gitness/registry/app/pkg/types/npm"
"github.com/rs/zerolog/log"
)
var (
	// ErrInvalidPackageVersion signals a package version that fails validation.
	ErrInvalidPackageVersion = errors.New("package version is invalid")
	// ErrInvalidAttachment signals a package attachment that fails validation.
	ErrInvalidAttachment = errors.New("package attachment is invalid")
	// packageNameRegex accepts plain ("pkg") and scoped ("@scope/pkg") npm
	// names built from word characters, dots, and hyphens.
	packageNameRegex = regexp.MustCompile(`^(?:@[\w.-]+\/)?[\w.-]+$`)
	// versionRegex matches semver-style versions: MAJOR.MINOR.PATCH with
	// optional "-prerelease" and "+build" suffixes.
	versionRegex = regexp.MustCompile(`^(\d+)\.(\d+)\.(\d+)(?:-([\w.-]+))?(?:\+([\w.-]+))?$`)
)
// Handler is the set of HTTP endpoints exposed for the npm registry
// protocol, layered on top of the generic artifact-info provider.
type Handler interface {
	pkg.ArtifactInfoProvider
	// Package content endpoints.
	UploadPackage(writer http.ResponseWriter, request *http.Request)
	DownloadPackageFile(http.ResponseWriter, *http.Request)
	GetPackageMetadata(http.ResponseWriter, *http.Request)
	DownloadPackageFileByName(http.ResponseWriter, *http.Request)
	HeadPackageFileByName(http.ResponseWriter, *http.Request)
	// Dist-tag management endpoints.
	ListPackageTag(http.ResponseWriter, *http.Request)
	AddPackageTag(http.ResponseWriter, *http.Request)
	DeletePackageTag(http.ResponseWriter, *http.Request)
	// Deletion endpoints.
	DeletePackage(w http.ResponseWriter, r *http.Request)
	DeleteVersion(w http.ResponseWriter, r *http.Request)
	DeletePreview(w http.ResponseWriter, r *http.Request)
	// Search endpoint.
	SearchPackage(w http.ResponseWriter, r *http.Request)
}
// handler implements Handler by combining the shared packages.Handler
// (registry resolution, auth, error rendering) with the npm controller.
type handler struct {
	packages.Handler
	controller npm3.Controller
}
// NewHandler wires an npm controller together with the generic package
// handler and returns the npm route Handler.
func NewHandler(
	controller npm3.Controller,
	packageHandler packages.Handler,
) Handler {
	return &handler{
		Handler:    packageHandler,
		controller: controller,
	}
}
// Compile-time check that *handler satisfies Handler.
var _ Handler = (*handler)(nil)
// GetPackageArtifactInfo builds the npm-specific artifact info for the
// incoming request: base registry info plus package name, version, file
// name, and dist-tags extracted from the URL. For dist-tag add requests
// the tag's target version is read from the request body.
//
// Returns an error when the base info cannot be resolved, when the
// name/version fail validation, or when the body cannot be read.
func (h *handler) GetPackageArtifactInfo(r *http.Request) (pkg.PackageArtifactInfo, error) {
	info, e := h.GetArtifactInfo(r)
	if !commons.IsEmpty(e) {
		return npm.ArtifactInfo{}, e
	}
	info.Image = PackageNameFromParams(r)
	version := GetVersionFromParams(r)
	fileName := GetFileName(r)
	if info.Image != "" && version != "" && !isValidNameAndVersion(info.Image, version) {
		log.Info().Msgf("Invalid image name/version: %s/%s", info.Image, version)
		return nil, fmt.Errorf("invalid name or version")
	}
	distTags := r.PathValue("tag")
	npmInfo := &npm.ArtifactInfo{
		ArtifactInfo:        info,
		Filename:            fileName,
		Version:             version,
		ParentRegIdentifier: info.RegIdentifier,
		DistTags:            []string{distTags},
	}
	// Requests without a payload (GET/HEAD/DELETE, etc.) carry everything
	// in the URL; nothing more to parse.
	if r.Body == nil || r.ContentLength == 0 {
		return npmInfo, nil
	}
	// "-rev" (revision) endpoints have a body we do not need to inspect.
	if strings.Contains(r.URL.Path, "/-rev/") {
		return npmInfo, nil
	}
	if strings.Contains(r.URL.Path, "/-/package/") && strings.Contains(r.URL.Path, "/dist-tags/") {
		// Add-tag requests send the target version as a JSON string in the
		// body, e.g. "1.2.3" — strip the surrounding quotes. (The original
		// repeated the empty-body check here, but that path already
		// returned above, so the duplicate was unreachable and is removed;
		// likewise the re-assignment of DistTags to the same value.)
		body, err := io.ReadAll(r.Body)
		if err != nil {
			return npm.ArtifactInfo{}, err
		}
		npmInfo.Version = strings.Trim(string(body), "\"")
		// err is nil here; return it explicitly as nil rather than reusing
		// the variable.
		return npmInfo, nil
	}
	// Any other request with a body (e.g. publish): version/filename are
	// resolved later from the payload, so return only the base info.
	return &npm.ArtifactInfo{
		ArtifactInfo: info,
	}, nil
}
func GetNPMFile(r *http.Request) (io.ReadCloser, error) {
return r.Body, nil
}
// isValidNameAndVersion reports whether both the package name and the
// version satisfy the package-level npm name and semver patterns.
func isValidNameAndVersion(image, version string) bool {
	if !packageNameRegex.MatchString(image) {
		return false
	}
	return versionRegex.MatchString(version)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/npm/download.go | registry/app/api/handler/npm/download.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package npm
import (
"fmt"
"net/http"
"github.com/harness/gitness/registry/app/pkg/commons"
npm2 "github.com/harness/gitness/registry/app/pkg/types/npm"
"github.com/harness/gitness/registry/request"
"github.com/rs/zerolog/log"
)
// DownloadPackageFile serves an npm package file download: it redirects
// to an upstream URL when the controller provides one, otherwise it
// streams the file content to the client.
func (h *handler) DownloadPackageFile(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	info, ok := request.ArtifactInfoFrom(ctx).(*npm2.ArtifactInfo)
	if !ok {
		log.Ctx(ctx).Error().Msg("Failed to get npm artifact info from context")
		h.HandleErrors(r.Context(), []error{fmt.Errorf("failed to fetch npm artifact info from context")}, w)
		return
	}
	response := h.controller.DownloadPackageFile(ctx, info)
	// Close both possible readers on every exit path (error, redirect, or
	// after serving); close failures are logged, not surfaced.
	defer func() {
		if response.Body != nil {
			err := response.Body.Close()
			if err != nil {
				log.Ctx(r.Context()).Error().Msgf("Failed to close body: %v", err)
			}
		}
		if response.ReadCloser != nil {
			err := response.ReadCloser.Close()
			if err != nil {
				log.Ctx(ctx).Error().Msgf("Failed to close read closer: %v", err)
			}
		}
	}()
	if response.GetError() != nil {
		h.HandleError(r.Context(), w, response.GetError())
		return
	}
	w.Header().Set("Content-Disposition", "attachment; filename="+info.Filename)
	if response.RedirectURL != "" {
		http.Redirect(w, r, response.RedirectURL, http.StatusFound)
		return
	}
	err := commons.ServeContent(w, r, response.Body, info.Filename, response.ReadCloser)
	if err != nil {
		log.Ctx(ctx).Error().Msgf("Failed to serve content: %v", err)
		h.HandleError(ctx, w, err)
		return
	}
	// NOTE(review): by this point ServeContent has already written the
	// response, and net/http ignores header changes after WriteHeader —
	// confirm whether this call should precede ServeContent.
	response.ResponseHeaders.WriteToResponse(w)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/npm/delete_preview.go | registry/app/api/handler/npm/delete_preview.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package npm
import (
"net/http"
"github.com/harness/gitness/registry/app/dist_temp/errcode"
"github.com/harness/gitness/registry/app/pkg/types/npm"
"github.com/harness/gitness/registry/request"
)
// DeletePreview acknowledges an npm delete-preview request. No deletion
// is performed here; it only validates that npm artifact info is present
// in the request context and replies 200 OK.
func (h *handler) DeletePreview(w http.ResponseWriter, r *http.Request) {
	if _, valid := request.ArtifactInfoFrom(r.Context()).(*npm.ArtifactInfo); !valid {
		h.HandleErrors2(r.Context(), errcode.ErrCodeInvalidRequest.WithMessage("failed to fetch info from context"), w)
		return
	}
	w.WriteHeader(http.StatusOK)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/packages/download_file.go | registry/app/api/handler/packages/download_file.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package packages
import (
"fmt"
"net/http"
"strings"
commons2 "github.com/harness/gitness/registry/app/pkg/types/commons"
"github.com/harness/gitness/registry/request"
"github.com/rs/zerolog/log"
)
// DownloadFile redirects a generic file download to the package URL for
// the addressed registry. The target file is identified by the required
// "path" form/query parameter.
func (h *handler) DownloadFile(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	artInfo, valid := request.ArtifactInfoFrom(ctx).(commons2.ArtifactInfo)
	if !valid {
		log.Ctx(ctx).Error().Msg("Failed to get common artifact info from context")
		h.HandleErrors(ctx, []error{fmt.Errorf("failed to fetch common artifact info from context")}, w)
		return
	}
	filePath := r.FormValue("path")
	if filePath == "" {
		h.HandleErrors(ctx, []error{fmt.Errorf("path parameter is required")}, w)
		return
	}
	spaceRef := strings.Join([]string{artInfo.RootIdentifier, artInfo.RegIdentifier}, "/")
	target := h.URLProvider.PackageURL(ctx, spaceRef, "", "files", filePath)
	http.Redirect(w, r, target, http.StatusPermanentRedirect)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/packages/handler.go | registry/app/api/handler/packages/handler.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package packages
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"strings"
"time"
usercontroller "github.com/harness/gitness/app/api/controller/user"
"github.com/harness/gitness/app/api/render"
"github.com/harness/gitness/app/api/usererror"
"github.com/harness/gitness/app/auth/authn"
"github.com/harness/gitness/app/auth/authz"
"github.com/harness/gitness/app/services/refcache"
corestore "github.com/harness/gitness/app/store"
urlprovider "github.com/harness/gitness/app/url"
"github.com/harness/gitness/registry/app/api/interfaces"
"github.com/harness/gitness/registry/app/api/openapi/contracts/artifact"
"github.com/harness/gitness/registry/app/dist_temp/errcode"
"github.com/harness/gitness/registry/app/pkg"
"github.com/harness/gitness/registry/app/pkg/commons"
"github.com/harness/gitness/registry/app/pkg/filemanager"
"github.com/harness/gitness/registry/app/pkg/quarantine"
commons2 "github.com/harness/gitness/registry/app/pkg/types/commons"
refcache2 "github.com/harness/gitness/registry/app/services/refcache"
"github.com/harness/gitness/registry/app/storage"
"github.com/harness/gitness/registry/app/store"
"github.com/harness/gitness/registry/request"
"github.com/harness/gitness/types/enum"
"github.com/rs/zerolog/log"
)
// NewHandler builds the shared package handler used by every
// protocol-specific handler (npm, cargo, ...). It captures the injected
// stores, auth components, and helpers without further initialization.
func NewHandler(
	registryDao store.RegistryRepository,
	downloadStatDao store.DownloadStatRepository,
	bandwidthStatDao store.BandwidthStatRepository,
	spaceStore corestore.SpaceStore, tokenStore corestore.TokenStore,
	userCtrl *usercontroller.Controller, authenticator authn.Authenticator,
	urlProvider urlprovider.Provider, authorizer authz.Authorizer, spaceFinder refcache.SpaceFinder,
	regFinder refcache2.RegistryFinder,
	fileManager filemanager.FileManager, quarantineFinder quarantine.Finder,
	packageWrapper interfaces.PackageWrapper,
) Handler {
	return &handler{
		RegistryDao:      registryDao,
		DownloadStatDao:  downloadStatDao,
		BandwidthStatDao: bandwidthStatDao,
		SpaceStore:       spaceStore,
		TokenStore:       tokenStore,
		UserCtrl:         userCtrl,
		Authenticator:    authenticator,
		URLProvider:      urlProvider,
		Authorizer:       authorizer,
		SpaceFinder:      spaceFinder,
		RegFinder:        regFinder,
		fileManager:      fileManager,
		quarantineFinder: quarantineFinder,
		PackageWrapper:   packageWrapper,
	}
}
// handler is the concrete shared package handler; its fields mirror the
// dependencies injected via NewHandler.
type handler struct {
	RegistryDao      store.RegistryRepository
	DownloadStatDao  store.DownloadStatRepository
	BandwidthStatDao store.BandwidthStatRepository
	SpaceStore       corestore.SpaceStore
	TokenStore       corestore.TokenStore
	UserCtrl         *usercontroller.Controller
	Authenticator    authn.Authenticator
	URLProvider      urlprovider.Provider
	Authorizer       authz.Authorizer
	SpaceFinder      refcache.SpaceFinder
	RegFinder        refcache2.RegistryFinder
	fileManager      filemanager.FileManager
	quarantineFinder quarantine.Finder
	PackageWrapper   interfaces.PackageWrapper
}
// Handler is the protocol-agnostic contract shared by all package
// handlers: registry access checks, artifact-info resolution, download
// bookkeeping, quarantine checks, and error rendering.
type Handler interface {
	// GetRegistryCheckAccess verifies the caller holds the given
	// permissions on the registry addressed by the request.
	GetRegistryCheckAccess(
		ctx context.Context,
		r *http.Request,
		reqPermissions ...enum.Permission,
	) error
	// GetArtifactInfo resolves base registry/space info from the URL.
	GetArtifactInfo(r *http.Request) (pkg.ArtifactInfo, error)
	// DownloadFile redirects a generic file download to the package URL.
	DownloadFile(w http.ResponseWriter, r *http.Request)
	// TrackDownloadStats records a download event for statistics.
	TrackDownloadStats(
		ctx context.Context,
		r *http.Request,
	) error
	GetPackageArtifactInfo(r *http.Request) (pkg.PackageArtifactInfo, error)
	// CheckQuarantineStatus rejects quarantined artifacts.
	CheckQuarantineStatus(
		ctx context.Context,
	) error
	GetAuthenticator() authn.Authenticator
	// Error-rendering helpers shared by all package handlers.
	HandleErrors2(ctx context.Context, errors errcode.Error, w http.ResponseWriter)
	HandleErrors(ctx context.Context, errors errcode.Errors, w http.ResponseWriter)
	HandleError(ctx context.Context, w http.ResponseWriter, err error)
	// ServeContent streams a stored file to the client.
	ServeContent(
		w http.ResponseWriter, r *http.Request, fileReader *storage.FileReader, filename string,
	)
}
// PathPackageType is the package-type segment as it appears in request
// paths (e.g. "npm", "cargo").
type PathPackageType string
// GetAuthenticator returns the authenticator used for package requests.
func (h *handler) GetAuthenticator() authn.Authenticator {
	return h.Authenticator
}
// GetRegistryCheckAccess verifies the caller holds the requested
// permissions on the registry addressed by the request. Artifact info
// already resolved into the context is preferred; otherwise it is
// derived from the request path.
func (h *handler) GetRegistryCheckAccess(
	ctx context.Context,
	r *http.Request,
	reqPermissions ...enum.Permission,
) error {
	var info pkg.ArtifactInfo
	if cached := request.ArtifactInfoFrom(ctx); cached != nil {
		info = cached.BaseArtifactInfo()
	} else {
		resolved, err := h.GetArtifactInfo(r)
		if err != nil {
			return err
		}
		info = resolved
	}
	return pkg.GetRegistryCheckAccess(ctx, h.Authorizer, h.SpaceFinder,
		info.ParentID, info, reqPermissions...)
}
// TrackDownloadStats records a download event for the artifact carried
// in the request context. Persistence failures are logged and mapped to
// a generic internal error for the caller.
func (h *handler) TrackDownloadStats(
	ctx context.Context,
	r *http.Request,
) error {
	artInfo := request.ArtifactInfoFrom(r.Context()) //nolint:contextcheck
	base := artInfo.BaseArtifactInfo()
	err := h.DownloadStatDao.CreateByRegistryIDImageAndArtifactName(ctx,
		base.RegistryID, base.Image, artInfo.GetVersion())
	if err != nil {
		log.Ctx(ctx).Error().Msgf("failed to create download stat: %v", err.Error())
		return usererror.ErrInternal
	}
	return nil
}
// CheckQuarantineStatus blocks access to quarantined artifacts. A
// confirmed quarantine error is propagated; any other failure of the
// check is logged but deliberately not returned, so a broken quarantine
// lookup does not block artifact access.
func (h *handler) CheckQuarantineStatus(
	ctx context.Context,
) error {
	artInfo := request.ArtifactInfoFrom(ctx)
	base := artInfo.BaseArtifactInfo()
	err := h.quarantineFinder.CheckArtifactQuarantineStatus(
		ctx,
		base.RegistryID,
		base.Image,
		artInfo.GetVersion(),
		nil,
	)
	if err == nil {
		return nil
	}
	if errors.Is(err, usererror.ErrQuarantinedArtifact) {
		log.Ctx(ctx).Error().Msgf("Requested artifact: [%s] with "+
			"version: [%s] and filename: [%s] with registryID: [%d] is quarantined or check failed: %v",
			base.Image, artInfo.GetVersion(), artInfo.GetFileName(),
			base.RegistryID, err)
		return err
	}
	log.Ctx(ctx).Error().Msgf("Failed to check quarantine status for artifact: [%s] with "+
		"version: [%s] and filename: [%s] with registryID: [%d] with error: %v",
		base.Image, artInfo.GetVersion(), artInfo.GetFileName(),
		base.RegistryID, err)
	return nil
}
// GetArtifactInfo resolves the base artifact info for a package request
// addressed as /pkg/:rootSpace/:registry/:pathPackageType/... It looks
// up the root space and registry, and verifies that the registry's
// package type matches the type segment in the URL. Lookup failures are
// reported as user-facing 404s.
func (h *handler) GetArtifactInfo(r *http.Request) (pkg.ArtifactInfo, error) {
	ctx := r.Context()
	rootIdentifier, registryIdentifier, pathPackageType, err := extractPathVars(r)
	if err != nil {
		return pkg.ArtifactInfo{}, errcode.ErrCodeInvalidRequest.WithDetail(err)
	}
	// Convert path package type to package type.
	packageType, err := h.PackageWrapper.GetPackageTypeFromPathPackageType(string(pathPackageType))
	if err != nil {
		return pkg.ArtifactInfo{}, errcode.ErrCodeInvalidRequest.WithDetail(err)
	}
	// Root space lookup is case-insensitive.
	rootSpaceID, err := h.SpaceStore.FindByRefCaseInsensitive(ctx, rootIdentifier)
	if err != nil {
		log.Ctx(ctx).Error().Msgf("Root spaceID not found: %s", rootIdentifier)
		return pkg.ArtifactInfo{}, usererror.NotFoundf("Root not found: %s", rootIdentifier)
	}
	rootSpace, err := h.SpaceFinder.FindByID(ctx, rootSpaceID)
	if err != nil {
		log.Ctx(ctx).Error().Msgf("Root space not found: %d", rootSpaceID)
		return pkg.ArtifactInfo{}, usererror.NotFoundf("Root not found: %s", rootIdentifier)
	}
	registry, err := h.RegFinder.FindByRootParentID(ctx, rootSpaceID, registryIdentifier)
	if err != nil {
		log.Ctx(ctx).Error().Msgf(
			"registry %s not found for root: %s. Reason: %s", registryIdentifier, rootSpace.Identifier, err,
		)
		return pkg.ArtifactInfo{}, usererror.NotFoundf("Registry not found: %s", registryIdentifier)
	}
	// A registry of the wrong package type must not serve this protocol.
	if registry.PackageType != artifact.PackageType(packageType) {
		return pkg.ArtifactInfo{}, usererror.NotFoundf(
			"Registry package type mismatch: %s != %s", registry.PackageType, pathPackageType,
		)
	}
	// Only the parent space's existence matters; the value is unused.
	_, err = h.SpaceFinder.FindByID(r.Context(), registry.ParentID)
	if err != nil {
		log.Ctx(ctx).Error().Msgf("Parent space not found: %d", registry.ParentID)
		return pkg.ArtifactInfo{}, usererror.NotFoundf("Parent not found for registry: %s", registryIdentifier)
	}
	return pkg.ArtifactInfo{
		BaseInfo: &pkg.BaseInfo{
			RootIdentifier:  rootIdentifier,
			RootParentID:    rootSpace.ID,
			ParentID:        registry.ParentID,
			PathPackageType: artifact.PackageType(packageType),
		},
		RegIdentifier: registryIdentifier,
		RegistryID:    registry.ID,
		Registry:      *registry,
		// Image is filled in later by protocol-specific handlers.
		Image: "",
	}, nil
}
// GetUtilityMethodArtifactInfo : /pkg/{rootIdentifier}/{registryIdentifier}/{utilityMethod}...
// minPathComponents is the minimum number of "/"-separated segments a
// package path must have (the leading empty segment included).
const minPathComponents = 5
// GetUtilityMethodArtifactInfo resolves base artifact info for utility
// endpoints addressed as /pkg/{rootIdentifier}/{registryIdentifier}/...,
// without the package-type validation that GetArtifactInfo performs.
//
// NOTE(review): this largely duplicates GetArtifactInfo minus the
// package-type check — consider extracting a shared helper.
func (h *handler) GetUtilityMethodArtifactInfo(r *http.Request) (pkg.ArtifactInfo, error) {
	ctx := r.Context()
	path := r.URL.Path
	parts := strings.Split(path, "/")
	if len(parts) < minPathComponents {
		return pkg.ArtifactInfo{}, errcode.ErrCodeInvalidRequest.WithMessage(fmt.Sprintf("invalid path: %s", path))
	}
	// parts[0] is empty (leading slash), parts[1] is "pkg".
	rootIdentifier := parts[2]
	registryIdentifier := parts[3]
	rootSpaceID, err := h.SpaceStore.FindByRefCaseInsensitive(ctx, rootIdentifier)
	if err != nil {
		log.Ctx(ctx).Error().Msgf("Root spaceID not found: %s", rootIdentifier)
		return pkg.ArtifactInfo{}, usererror.NotFoundf("Root not found: %s", rootIdentifier)
	}
	rootSpace, err := h.SpaceFinder.FindByID(ctx, rootSpaceID)
	if err != nil {
		log.Ctx(ctx).Error().Msgf("Root space not found: %d", rootSpaceID)
		return pkg.ArtifactInfo{}, usererror.NotFoundf("Root not found: %s", rootIdentifier)
	}
	registry, err := h.RegFinder.FindByRootParentID(ctx, rootSpaceID, registryIdentifier)
	if err != nil {
		log.Ctx(ctx).Error().Msgf(
			"registry %s not found for root: %s. Reason: %s", registryIdentifier, rootSpace.Identifier, err,
		)
		return pkg.ArtifactInfo{}, usererror.NotFoundf("Registry not found: %s", registryIdentifier)
	}
	// Only the parent space's existence matters; the value is unused.
	_, err = h.SpaceFinder.FindByID(r.Context(), registry.ParentID)
	if err != nil {
		log.Ctx(ctx).Error().Msgf("Parent space not found: %d", registry.ParentID)
		return pkg.ArtifactInfo{}, usererror.NotFoundf("Parent not found for registry: %s", registryIdentifier)
	}
	return pkg.ArtifactInfo{
		BaseInfo: &pkg.BaseInfo{
			RootIdentifier: rootIdentifier,
			RootParentID:   rootSpace.ID,
			ParentID:       registry.ParentID,
		},
		RegIdentifier: registryIdentifier,
		RegistryID:    registry.ID,
		Registry:      *registry,
	}, nil
}
// HandleErrors2 renders a single errcode.Error as a JSON response using
// the status code from the error's descriptor. Empty errors are a no-op.
func (h *handler) HandleErrors2(ctx context.Context, err errcode.Error, w http.ResponseWriter) {
	if commons.IsEmptyError(err) {
		return
	}
	w.WriteHeader(err.Code.Descriptor().HTTPStatusCode)
	_ = errcode.ServeJSON(w, err)
	log.Ctx(ctx).Error().Msgf("Error occurred while performing artifact action: %s", err.Message)
}
// HandleErrors TODO: Improve Error Handling
// HandleErrors handles errors and writes the appropriate response to the
// client: the status comes from the first error when it is a
// *commons.Error (500 otherwise), and the full error list is encoded as
// a JSON body. Empty error lists are a no-op.
func (h *handler) HandleErrors(ctx context.Context, errs errcode.Errors, w http.ResponseWriter) {
	if !commons.IsEmpty(errs) {
		LogError(errs)
		log.Ctx(ctx).Error().Errs("errs occurred during artifact operation: ", errs).Msgf("Error occurred")
		err := errs[0]
		// Headers must be set before WriteHeader: net/http silently
		// ignores header-map changes made after the status line is
		// written. The original set Content-Type after WriteHeader, so
		// responses went out without it.
		w.Header().Set("Content-Type", "application/json")
		var e *commons.Error
		if errors.As(err, &e) {
			w.WriteHeader(e.Status)
		} else {
			w.WriteHeader(http.StatusInternalServerError)
		}
		err = json.NewEncoder(w).Encode(errs)
		if err != nil {
			log.Ctx(ctx).Error().Err(err).Msgf("Error occurred during artifact error encoding")
		}
	}
}
// HandleError logs err and renders it as a translated user-facing
// response. A nil error is a no-op.
func (h *handler) HandleError(ctx context.Context, w http.ResponseWriter, err error) {
	if err == nil {
		return
	}
	log.Error().Err(err).Ctx(ctx).Msgf("error: %v", err)
	render.TranslatedUserError(ctx, w, err)
}
// LogError emits one error-level log entry per error in errList.
func LogError(errList errcode.Errors) {
	for i := range errList {
		log.Error().Err(errList[i]).Msgf("error: %v", errList[i])
	}
}
// extractPathVars extracts rootSpace, registryId, pathPackageType from the path
// Path format: /pkg/:rootSpace/:registry/:pathPackageType/...
func extractPathVars(r *http.Request) (
	rootIdentifier string,
	registry string,
	pathPackageType PathPackageType,
	err error,
) {
	// Splitting "/pkg/root/reg/type/..." yields at least:
	// "", "pkg", root, registry, packageType — i.e. five segments.
	const minSegments = 5
	segments := strings.Split(r.URL.Path, "/")
	if len(segments) < minSegments {
		return "", "", "", fmt.Errorf("invalid path: %s", r.URL.Path)
	}
	return segments[2], segments[3], PathPackageType(segments[4]), nil
}
// ServeContent streams fileReader to the client via http.ServeContent under
// the given filename. A nil fileReader is silently ignored; the zero time
// disables Last-Modified / If-Modified-Since handling.
func (h *handler) ServeContent(
	w http.ResponseWriter, r *http.Request, fileReader *storage.FileReader, filename string,
) {
	if fileReader == nil {
		return
	}
	http.ServeContent(w, r, filename, time.Time{}, fileReader)
}
// GetPackageArtifactInfo resolves the generic artifact info for the request and
// wraps it in the package-specific ArtifactInfo type expected by callers.
func (h *handler) GetPackageArtifactInfo(r *http.Request) (pkg.PackageArtifactInfo, error) {
	baseInfo, err := h.GetUtilityMethodArtifactInfo(r)
	if err != nil {
		return nil, err
	}
	return commons2.ArtifactInfo{ArtifactInfo: baseInfo}, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/cargo/download_package_index_test.go | registry/app/api/handler/cargo/download_package_index_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cargo_test
import (
"bytes"
"context"
"errors"
"io"
"net/http"
"net/http/httptest"
"testing"
cargo "github.com/harness/gitness/registry/app/api/controller/pkg/cargo"
cargopkg "github.com/harness/gitness/registry/app/api/handler/cargo"
"github.com/harness/gitness/registry/app/pkg/commons"
cargotype "github.com/harness/gitness/registry/app/pkg/types/cargo"
"github.com/harness/gitness/registry/request"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
// Shared expected-error message used by the error-path assertions below; it
// must match the message produced by the handler when no artifact info is
// present in the request context.
const (
	failedToFetchInfoFromContext = "failed to fetch info from context"
)
// --- Tests ---.

// TestDownloadPackageIndex_ServeContent verifies that when the controller
// returns an in-process reader (no redirect URL), the handler forwards the
// controller-provided headers and streams the reader's bytes to the client.
func TestDownloadPackageIndex_ServeContent(t *testing.T) {
	// Arrange
	body := []byte(`{"name": "test-package", "vers": "1.0.0"}`)
	resp := &cargo.GetPackageIndexResponse{
		DownloadFileResponse: cargo.DownloadFileResponse{
			BaseResponse: cargo.BaseResponse{
				Error: nil,
				ResponseHeaders: &commons.ResponseHeaders{
					Headers: map[string]string{
						"Content-Type": "application/json; charset=utf-8",
					},
					Code: http.StatusOK,
				},
			},
			RedirectURL: "",
			Body:        nil,
			// ReadCloser (not Body) is what the handler streams from here.
			ReadCloser: io.NopCloser(bytes.NewReader(body)),
		},
	}
	mockCtrl := new(mockController)
	handler := cargopkg.NewHandler(mockCtrl, &fakePackagesHandler{})
	info := &cargotype.ArtifactInfo{FileName: "index"}
	ctx := request.WithArtifactInfo(context.Background(), info)
	mockCtrl.On("DownloadPackageIndex", ctx, info, "test/path").Return(resp)
	req := httptest.NewRequest(http.MethodGet, "/cargo/index/test/path", nil).WithContext(ctx)
	// Mock PathValue method
	req.SetPathValue("*", "test/path")
	w := httptest.NewRecorder()
	// Act
	handler.DownloadPackageIndex(w, req)
	// Assert
	result := w.Result()
	defer func() { _ = result.Body.Close() }()
	require.Equal(t, http.StatusOK, result.StatusCode)
	require.Equal(t, "application/json; charset=utf-8", result.Header.Get("Content-Type"))
	data, _ := io.ReadAll(result.Body)
	require.Equal(t, body, data)
	mockCtrl.AssertExpectations(t)
}
// TestDownloadPackageIndex_Redirect verifies that a controller response
// carrying only a RedirectURL makes the handler issue a 307 to that URL.
func TestDownloadPackageIndex_Redirect(t *testing.T) {
	// Arrange: redirect-only response — no headers, no body, no reader.
	redirectResp := &cargo.GetPackageIndexResponse{
		DownloadFileResponse: cargo.DownloadFileResponse{
			BaseResponse: cargo.BaseResponse{
				Error:           nil,
				ResponseHeaders: nil,
			},
			RedirectURL: "https://example.com/index/test/path",
			Body:        nil,
			ReadCloser:  nil,
		},
	}
	ctrl := new(mockController)
	h := cargopkg.NewHandler(ctrl, &fakePackagesHandler{})
	artInfo := &cargotype.ArtifactInfo{FileName: "index"}
	ctx := request.WithArtifactInfo(context.Background(), artInfo)
	ctrl.On("DownloadPackageIndex", ctx, artInfo, "test/path").Return(redirectResp)

	req := httptest.NewRequest(http.MethodGet, "/cargo/index/test/path", nil).WithContext(ctx)
	req.SetPathValue("*", "test/path")
	rec := httptest.NewRecorder()

	// Act
	h.DownloadPackageIndex(rec, req)

	// Assert: temporary redirect pointing at the controller-supplied URL.
	res := rec.Result()
	defer func() { _ = res.Body.Close() }()
	require.Equal(t, http.StatusTemporaryRedirect, res.StatusCode)
	require.Equal(t, "https://example.com/index/test/path", res.Header.Get("Location"))
	ctrl.AssertExpectations(t)
}
// TestDownloadPackageIndex_ErrorFromController verifies that an error embedded
// in the controller response is routed to the packages handler's HandleError
// and that the client receives a 500.
func TestDownloadPackageIndex_ErrorFromController(t *testing.T) {
	// Arrange
	resp := &cargo.GetPackageIndexResponse{
		DownloadFileResponse: cargo.DownloadFileResponse{
			BaseResponse: cargo.BaseResponse{
				Error:           errors.New("index not found"),
				ResponseHeaders: nil,
			},
			RedirectURL: "",
			Body:        nil,
			ReadCloser:  nil,
		},
	}
	mockCtrl := new(mockController)
	mockPkgHandler := &fakePackagesHandler{}
	handler := cargopkg.NewHandler(mockCtrl, mockPkgHandler)
	info := &cargotype.ArtifactInfo{FileName: "index"}
	ctx := request.WithArtifactInfo(context.Background(), info)
	mockCtrl.On("DownloadPackageIndex", ctx, info, "test/path").Return(resp)
	// The handler is expected to delegate the plain error to HandleError.
	mockPkgHandler.On("HandleError", ctx, mock.Anything, mock.AnythingOfType("*errors.errorString"))
	req := httptest.NewRequest(http.MethodGet, "/cargo/index/test/path", nil).WithContext(ctx)
	req.SetPathValue("*", "test/path")
	w := httptest.NewRecorder()
	// Act
	handler.DownloadPackageIndex(w, req)
	// Assert
	result := w.Result()
	defer func() { _ = result.Body.Close() }()
	require.Equal(t, http.StatusInternalServerError, result.StatusCode)
	mockCtrl.AssertExpectations(t)
	mockPkgHandler.AssertExpectations(t)
}
// TestDownloadPackageIndex_InvalidArtifactInfo verifies that a request whose
// context carries no cargo artifact info is rejected via HandleErrors with 500.
func TestDownloadPackageIndex_InvalidArtifactInfo(t *testing.T) {
	ctrl := new(mockController)
	pkgHandler := &fakePackagesHandler{}
	h := cargopkg.NewHandler(ctrl, pkgHandler)

	// handleCargoPackageAPIError delegates to HandleErrors internally.
	pkgHandler.On("HandleErrors", mock.Anything, mock.MatchedBy(func(errs []error) bool {
		return len(errs) == 1 && errs[0].Error() == failedToFetchInfoFromContext
	}), mock.Anything)

	// No artifact info is attached to the request context on purpose.
	req := httptest.NewRequest(http.MethodGet, "/cargo/index/test/path", nil)
	req.SetPathValue("*", "test/path")
	rec := httptest.NewRecorder()

	h.DownloadPackageIndex(rec, req)

	res := rec.Result()
	defer func() { _ = res.Body.Close() }()
	require.Equal(t, http.StatusInternalServerError, res.StatusCode)
	pkgHandler.AssertExpectations(t)
}
// TestDownloadPackageIndex_ControllerReturnsNil verifies that a nil controller
// response is surfaced through HandleErrors with a descriptive message and 500.
func TestDownloadPackageIndex_ControllerReturnsNil(t *testing.T) {
	// Arrange
	mockCtrl := new(mockController)
	mockPkgHandler := &fakePackagesHandler{}
	handler := cargopkg.NewHandler(mockCtrl, mockPkgHandler)
	info := &cargotype.ArtifactInfo{FileName: "index"}
	ctx := request.WithArtifactInfo(context.Background(), info)
	// Setup expectations
	mockCtrl.On("DownloadPackageIndex", ctx, info, "test/path").Return(nil)
	mockPkgHandler.On("HandleErrors", ctx, mock.MatchedBy(func(errs []error) bool {
		return len(errs) == 1 && errs[0].Error() == "failed to get response from controller"
	}), mock.Anything)
	req := httptest.NewRequest(http.MethodGet, "/cargo/index/test/path", nil).WithContext(ctx)
	req.SetPathValue("*", "test/path")
	w := httptest.NewRecorder()
	// Act
	handler.DownloadPackageIndex(w, req)
	// Assert
	result := w.Result()
	defer func() { _ = result.Body.Close() }()
	require.Equal(t, http.StatusInternalServerError, result.StatusCode)
	mockCtrl.AssertExpectations(t)
	mockPkgHandler.AssertExpectations(t)
}
// TestDownloadPackageIndex_ServeContentError verifies the late-failure path:
// headers (200) are already written when the nil ReadCloser makes serving the
// body fail, so the error goes to HandleError but the status stays 200.
func TestDownloadPackageIndex_ServeContentError(t *testing.T) {
	// Arrange
	resp := &cargo.GetPackageIndexResponse{
		DownloadFileResponse: cargo.DownloadFileResponse{
			BaseResponse: cargo.BaseResponse{
				Error: nil,
				ResponseHeaders: &commons.ResponseHeaders{
					Headers: map[string]string{},
					Code:    http.StatusOK,
				},
			},
			RedirectURL: "",
			Body:        nil,
			ReadCloser:  nil, // This will cause ServeContent to fail
		},
	}
	mockCtrl := new(mockController)
	mockPkgHandler := &fakePackagesHandler{}
	handler := cargopkg.NewHandler(mockCtrl, mockPkgHandler)
	info := &cargotype.ArtifactInfo{FileName: "index"}
	ctx := request.WithArtifactInfo(context.Background(), info)
	mockCtrl.On("DownloadPackageIndex", ctx, info, "test/path").Return(resp)
	mockPkgHandler.On("HandleError", ctx, mock.Anything, mock.AnythingOfType("*errors.errorString"))
	req := httptest.NewRequest(http.MethodGet, "/cargo/index/test/path", nil).WithContext(ctx)
	req.SetPathValue("*", "test/path")
	w := httptest.NewRecorder()
	// Act
	handler.DownloadPackageIndex(w, req)
	// Assert
	result := w.Result()
	defer func() { _ = result.Body.Close() }()
	// Status code is 200 because headers are written before ServeContent fails
	require.Equal(t, http.StatusOK, result.StatusCode)
	mockCtrl.AssertExpectations(t)
	mockPkgHandler.AssertExpectations(t)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/cargo/handler_test.go | registry/app/api/handler/cargo/handler_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cargo_test
import (
"context"
"errors"
"net/http"
"net/http/httptest"
"testing"
cargopkg "github.com/harness/gitness/registry/app/api/handler/cargo"
"github.com/harness/gitness/registry/app/pkg"
cargotype "github.com/harness/gitness/registry/app/pkg/types/cargo"
"github.com/harness/gitness/registry/types"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
// isExpectedError checks if the error matches the expected test error.
func isExpectedError(err error, testError error) bool {
return errors.Is(err, testError)
}
func TestNewHandler(t *testing.T) {
// Arrange
mockCtrl := new(mockController)
mockPkgHandler := &fakePackagesHandler{}
// Act
handler := cargopkg.NewHandler(mockCtrl, mockPkgHandler)
// Assert
require.NotNil(t, handler)
}
// TestHandler_GetPackageArtifactInfo_Success verifies that the handler wraps
// the base artifact info into a cargo ArtifactInfo, copying the registry and
// taking the image/version from the request's path values.
func TestHandler_GetPackageArtifactInfo_Success(t *testing.T) {
	// Arrange
	mockCtrl := new(mockController)
	mockPkgHandler := &fakePackagesHandler{}
	handler := cargopkg.NewHandler(mockCtrl, mockPkgHandler)
	baseInfo := pkg.ArtifactInfo{
		Registry: types.Registry{
			Name: "test-registry",
		},
		Image: "test-image",
	}
	mockPkgHandler.On("GetArtifactInfo", mock.Anything).Return(baseInfo, nil)
	req := httptest.NewRequest(http.MethodGet, "/cargo/test", nil)
	req.SetPathValue("name", "test-package")
	req.SetPathValue("version", "1.0.0")
	// Act
	result, err := handler.GetPackageArtifactInfo(req)
	// Assert
	require.NoError(t, err)
	require.NotNil(t, result)
	cargoInfo, ok := result.(*cargotype.ArtifactInfo)
	require.True(t, ok)
	// The path "name" overrides the base Image ("test-image" -> "test-package").
	require.Equal(t, "test-package", cargoInfo.Image)
	require.Equal(t, "1.0.0", cargoInfo.Version)
	require.Equal(t, "test-registry", cargoInfo.Registry.Name)
	mockPkgHandler.AssertExpectations(t)
}
// TestHandler_GetPackageArtifactInfo_Error verifies that a failure from the
// delegate packages handler propagates unchanged and yields a nil result.
func TestHandler_GetPackageArtifactInfo_Error(t *testing.T) {
	ctrl := new(mockController)
	pkgHandler := &fakePackagesHandler{}
	h := cargopkg.NewHandler(ctrl, pkgHandler)

	wantErr := errors.New("failed to get artifact info")
	pkgHandler.On("GetArtifactInfo", mock.Anything).Return(nil, wantErr)

	req := httptest.NewRequest(http.MethodGet, "/cargo/test", nil)
	req.SetPathValue("name", "test-package")
	req.SetPathValue("version", "1.0.0")

	got, err := h.GetPackageArtifactInfo(req)

	require.Error(t, err)
	require.Nil(t, got)
	require.True(t, isExpectedError(err, wantErr))
	pkgHandler.AssertExpectations(t)
}
// TestHandler_HandleCargoPackageAPIError exercises the HandleErrors contract
// on the fake packages handler directly (the cargo handler delegates to it),
// checking the single-error list match and the resulting 500.
func TestHandler_HandleCargoPackageAPIError(t *testing.T) {
	// Arrange
	mockPkgHandler := &fakePackagesHandler{}
	testError := errors.New("test error")
	ctx := context.Background()
	mockPkgHandler.On("HandleErrors", ctx, mock.MatchedBy(func(errs []error) bool {
		return len(errs) == 1 && errors.Is(errs[0], testError)
	}), mock.Anything)
	w := httptest.NewRecorder()
	// Act - call HandleErrors directly on the mock since the handler delegates to it
	mockPkgHandler.HandleErrors(ctx, []error{testError}, w)
	// Assert
	result := w.Result()
	defer func() { _ = result.Body.Close() }()
	// NOTE(review): the 500 here comes from the fake's HandleErrors
	// implementation (defined elsewhere) — confirm against fakePackagesHandler.
	require.Equal(t, http.StatusInternalServerError, result.StatusCode)
	mockPkgHandler.AssertExpectations(t)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/cargo/regenerate_package_index_test.go | registry/app/api/handler/cargo/regenerate_package_index_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cargo_test
import (
"context"
"encoding/json"
"errors"
"net/http"
"net/http/httptest"
"testing"
cargo "github.com/harness/gitness/registry/app/api/controller/pkg/cargo"
cargopkg "github.com/harness/gitness/registry/app/api/handler/cargo"
"github.com/harness/gitness/registry/app/pkg/commons"
cargotype "github.com/harness/gitness/registry/app/pkg/types/cargo"
"github.com/harness/gitness/registry/request"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
// TestRegeneratePackageIndex_Success verifies the happy path: controller
// headers are forwarded and the response body decodes with Ok set to true.
func TestRegeneratePackageIndex_Success(t *testing.T) {
	// Arrange
	resp := &cargo.RegeneratePackageIndexResponse{
		BaseResponse: cargo.BaseResponse{
			Error: nil,
			ResponseHeaders: &commons.ResponseHeaders{
				Headers: map[string]string{
					"Content-Type": "application/json",
				},
				Code: http.StatusOK,
			},
		},
		Ok: true,
	}
	mockCtrl := new(mockController)
	handler := cargopkg.NewHandler(mockCtrl, &fakePackagesHandler{})
	info := &cargotype.ArtifactInfo{FileName: "test.txt"}
	ctx := request.WithArtifactInfo(context.Background(), info)
	mockCtrl.On("RegeneratePackageIndex", ctx, info).Return(resp, nil)
	req := httptest.NewRequest(http.MethodPost, "/cargo/regenerate", nil).WithContext(ctx)
	w := httptest.NewRecorder()
	// Act
	handler.RegeneratePackageIndex(w, req)
	// Assert
	result := w.Result()
	defer func() { _ = result.Body.Close() }()
	require.Equal(t, http.StatusOK, result.StatusCode)
	require.Equal(t, "application/json", result.Header.Get("Content-Type"))
	var response cargo.RegeneratePackageIndexResponse
	err := json.NewDecoder(result.Body).Decode(&response)
	require.NoError(t, err)
	require.True(t, response.Ok)
	mockCtrl.AssertExpectations(t)
}
// TestRegeneratePackageIndex_InvalidArtifactInfo verifies that a context
// without cargo artifact info is rejected via HandleErrors with a 500.
func TestRegeneratePackageIndex_InvalidArtifactInfo(t *testing.T) {
	ctrl := new(mockController)
	pkgHandler := &fakePackagesHandler{}
	h := cargopkg.NewHandler(ctrl, pkgHandler)

	// Deliberately plain context: no artifact info attached.
	plainCtx := context.Background()
	pkgHandler.On("HandleErrors", plainCtx, mock.MatchedBy(func(errs []error) bool {
		return len(errs) == 1 && errs[0].Error() == failedToFetchInfoFromContext
	}), mock.Anything)

	req := httptest.NewRequest(http.MethodPost, "/cargo/regenerate", nil).WithContext(plainCtx)
	rec := httptest.NewRecorder()

	h.RegeneratePackageIndex(rec, req)

	res := rec.Result()
	defer func() { _ = res.Body.Close() }()
	require.Equal(t, http.StatusInternalServerError, res.StatusCode)
	pkgHandler.AssertExpectations(t)
}
// TestRegeneratePackageIndex_ControllerError verifies that a controller error
// is reported through HandleErrors with a generic message and a 500 status.
func TestRegeneratePackageIndex_ControllerError(t *testing.T) {
	// Arrange
	mockCtrl := new(mockController)
	mockPkgHandler := &fakePackagesHandler{}
	handler := cargopkg.NewHandler(mockCtrl, mockPkgHandler)
	info := &cargotype.ArtifactInfo{FileName: "test.txt"}
	ctx := request.WithArtifactInfo(context.Background(), info)
	expectedError := errors.New("controller error")
	mockCtrl.On("RegeneratePackageIndex", ctx, info).Return(nil, expectedError)
	mockPkgHandler.On("HandleErrors", ctx, mock.MatchedBy(func(errs []error) bool {
		return len(errs) == 1 && errs[0].Error() == "failed to get response from controller"
	}), mock.Anything)
	req := httptest.NewRequest(http.MethodPost, "/cargo/regenerate", nil).WithContext(ctx)
	w := httptest.NewRecorder()
	// Act
	handler.RegeneratePackageIndex(w, req)
	// Assert
	result := w.Result()
	defer func() { _ = result.Body.Close() }()
	require.Equal(t, http.StatusInternalServerError, result.StatusCode)
	mockCtrl.AssertExpectations(t)
	mockPkgHandler.AssertExpectations(t)
}
// TestRegeneratePackageIndex_JSONEncodingError verifies the late-failure path:
// the writer rejects the body, the encoding error goes to HandleErrors, but the
// 200 status was already committed before encoding began.
func TestRegeneratePackageIndex_JSONEncodingError(t *testing.T) {
	// Arrange
	resp := &cargo.RegeneratePackageIndexResponse{
		BaseResponse: cargo.BaseResponse{
			Error: nil,
			ResponseHeaders: &commons.ResponseHeaders{
				Headers: map[string]string{},
				Code:    http.StatusOK,
			},
		},
		Ok: true,
	}
	mockCtrl := new(mockController)
	mockPkgHandler := &fakePackagesHandler{}
	handler := cargopkg.NewHandler(mockCtrl, mockPkgHandler)
	info := &cargotype.ArtifactInfo{FileName: "test.txt"}
	ctx := request.WithArtifactInfo(context.Background(), info)
	mockCtrl.On("RegeneratePackageIndex", ctx, info).Return(resp, nil)
	mockPkgHandler.On("HandleErrors", ctx, mock.MatchedBy(func(errs []error) bool {
		return len(errs) == 1
	}), mock.Anything)
	req := httptest.NewRequest(http.MethodPost, "/cargo/regenerate", nil).WithContext(ctx)
	// Create a ResponseWriter that fails on Write to simulate JSON encoding error
	w := &failingResponseWriter{
		ResponseRecorder: httptest.NewRecorder(),
		shouldFail:       true,
	}
	// Act
	handler.RegeneratePackageIndex(w, req)
	// Assert - JSON encoding error is handled but status code is already written
	require.Equal(t, http.StatusOK, w.Code)
	mockCtrl.AssertExpectations(t)
	mockPkgHandler.AssertExpectations(t)
}
// failingResponseWriter simulates a ResponseWriter that fails on Write.
type failingResponseWriter struct {
	*httptest.ResponseRecorder // delegate; headers and status are still recorded
	shouldFail bool // when true, every Write returns an error
}
// Write returns an error on every call while shouldFail is set, simulating a
// broken transport; otherwise it delegates to the embedded recorder.
func (f *failingResponseWriter) Write(data []byte) (int, error) {
	if !f.shouldFail {
		return f.ResponseRecorder.Write(data)
	}
	return 0, errors.New("write failed")
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/cargo/search_package.go | registry/app/api/handler/cargo/search_package.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cargo
import (
"encoding/json"
"fmt"
"net/http"
cargotype "github.com/harness/gitness/registry/app/pkg/types/cargo"
"github.com/harness/gitness/registry/request"
"github.com/oapi-codegen/runtime"
)
// SearchPackage handles the cargo registry search API: it binds the query
// parameters, asks the controller for matches, and writes the result as JSON.
func (h *handler) SearchPackage(
	w http.ResponseWriter, r *http.Request,
) {
	ctx := r.Context()
	// The route middleware is expected to have stored cargo artifact info in
	// the request context; anything else is treated as an internal error.
	info, ok := request.ArtifactInfoFrom(ctx).(*cargotype.ArtifactInfo)
	if !ok {
		h.handleCargoPackageAPIError(w, r, fmt.Errorf("failed to fetch info from context"))
		return
	}
	requestInfo, err := h.getSearchPackageParams(r)
	if err != nil {
		h.handleCargoPackageAPIError(w, r, fmt.Errorf("failed to get search package params: %w", err))
		return
	}
	response, err := h.controller.SearchPackage(ctx, info, requestInfo)
	if err != nil {
		h.handleCargoPackageAPIError(w, r, fmt.Errorf("failed to get response from controller: %w", err))
		return
	}
	// NOTE(review): Content-Type is set after WriteHeadersToResponse — if that
	// helper also writes the status line, this Set is a no-op (net/http ignores
	// header changes after the status is written). Confirm the helper's behavior.
	response.ResponseHeaders.WriteHeadersToResponse(w)
	w.Header().Set("Content-Type", "text/json; charset=utf-8")
	err = json.NewEncoder(w).Encode(response)
	if err != nil {
		h.handleCargoPackageAPIError(w, r,
			fmt.Errorf("error occurred during sending response for search cargo package: %w", err),
		)
	}
}
func (h *handler) getSearchPackageParams(r *http.Request) (*cargotype.SearchPackageRequestParams, error) {
var params cargotype.SearchPackageRequestParams
err := runtime.BindQueryParameter("form", true, false, "q", r.URL.Query(), ¶ms.SearchTerm)
if err != nil {
return nil, fmt.Errorf("invalid format for parameter %s: %w", "q", err)
}
err = runtime.BindQueryParameter("form", true, false, "per_page", r.URL.Query(), ¶ms.Size)
if err != nil {
return nil, fmt.Errorf("invalid format for parameter %s: %w", "per_page", err)
}
return ¶ms, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/cargo/un_yank_version.go | registry/app/api/handler/cargo/un_yank_version.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cargo
import (
"encoding/json"
"fmt"
"net/http"
cargotype "github.com/harness/gitness/registry/app/pkg/types/cargo"
"github.com/harness/gitness/registry/request"
)
// UnYankVersion handles the cargo "unyank" API: it marks the version from the
// context's artifact info as not-yanked (UpdateYank with yank=false) and
// returns the controller response as JSON.
func (h *handler) UnYankVersion(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	info, ok := request.ArtifactInfoFrom(ctx).(*cargotype.ArtifactInfo)
	if !ok {
		h.handleCargoPackageAPIError(w, r, fmt.Errorf("failed to fetch info from context"))
		return
	}
	// false => clear the yank flag (the Yank handler passes true).
	response, err := h.controller.UpdateYank(ctx, info, false)
	if err != nil {
		h.handleCargoPackageAPIError(w, r, fmt.Errorf("failed to unyank version: %w", err))
		return
	}
	// Final response
	// NOTE(review): assumes response.ResponseHeaders and its Headers map are
	// always non-nil on success (a nil map write would panic) — confirm the
	// controller guarantees this.
	response.ResponseHeaders.Headers["Content-Type"] = "application/json"
	response.ResponseHeaders.WriteToResponse(w)
	err = json.NewEncoder(w).Encode(response)
	if err != nil {
		h.handleCargoPackageAPIError(w, r,
			fmt.Errorf("error occurred during sending response for unyank version for cargo package: %w", err),
		)
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/cargo/upload_test.go | registry/app/api/handler/cargo/upload_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cargo_test
import (
	"bytes"
	"context"
	"encoding/binary"
	"encoding/json"
	"errors"
	"net/http"
	"net/http/httptest"
	"testing"

	cargo "github.com/harness/gitness/registry/app/api/controller/pkg/cargo"
	cargopkg "github.com/harness/gitness/registry/app/api/handler/cargo"
	cargometadata "github.com/harness/gitness/registry/app/metadata/cargo"
	"github.com/harness/gitness/registry/app/pkg/commons"
	cargotype "github.com/harness/gitness/registry/app/pkg/types/cargo"
	"github.com/harness/gitness/registry/request"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)
// TestUploadPackage_Success verifies the happy upload path: a well-formed
// cargo publish payload is parsed, handed to the controller, and the
// controller's headers and JSON body are forwarded to the client.
func TestUploadPackage_Success(t *testing.T) {
	// Arrange
	resp := &cargo.UploadArtifactResponse{
		BaseResponse: cargo.BaseResponse{
			Error: nil,
			ResponseHeaders: &commons.ResponseHeaders{
				Headers: map[string]string{
					"Content-Type": "application/json",
				},
				Code: http.StatusOK,
			},
		},
		Warnings: &cargo.UploadArtifactWarnings{},
	}
	mockCtrl := new(mockController)
	handler := cargopkg.NewHandler(mockCtrl, &fakePackagesHandler{})
	info := &cargotype.ArtifactInfo{FileName: "test-crate-1.0.0.crate"}
	ctx := request.WithArtifactInfo(context.Background(), info)
	// Create a simple cargo package payload with metadata and file
	metadata := &cargometadata.VersionMetadata{
		Name:    "test-crate",
		Version: "1.0.0",
	}
	// Mock the parseDataFromPayload method by creating a simple payload
	payload := createMockCargoPayload(t, metadata, []byte("mock crate file content"))
	mockCtrl.On("UploadPackage", ctx, info, mock.AnythingOfType("*cargo.VersionMetadata"), mock.Anything).Return(resp, nil)
	req := httptest.NewRequest(http.MethodPut, "/cargo/upload", bytes.NewReader(payload)).WithContext(ctx)
	req.Header.Set("Content-Type", "application/octet-stream")
	w := httptest.NewRecorder()
	// Act
	handler.UploadPackage(w, req)
	// Assert
	result := w.Result()
	defer func() { _ = result.Body.Close() }()
	require.Equal(t, http.StatusOK, result.StatusCode)
	require.Equal(t, "application/json", result.Header.Get("Content-Type"))
	var response cargo.UploadArtifactResponse
	err := json.NewDecoder(result.Body).Decode(&response)
	require.NoError(t, err)
	mockCtrl.AssertExpectations(t)
}
// TestUploadPackage_InvalidArtifactInfo verifies that an upload request whose
// context carries no cargo artifact info is rejected through HandleErrors.
func TestUploadPackage_InvalidArtifactInfo(t *testing.T) {
	ctrl := new(mockController)
	pkgHandler := &fakePackagesHandler{}
	h := cargopkg.NewHandler(ctrl, pkgHandler)

	// Deliberately plain context: no artifact info attached.
	plainCtx := context.Background()
	pkgHandler.On("HandleErrors", plainCtx, mock.MatchedBy(func(errs []error) bool {
		return len(errs) == 1 && errs[0].Error() == failedToFetchInfoFromContext
	}), mock.Anything)

	body := bytes.NewReader([]byte("test"))
	req := httptest.NewRequest(http.MethodPut, "/cargo/upload", body).WithContext(plainCtx)
	rec := httptest.NewRecorder()

	h.UploadPackage(rec, req)

	res := rec.Result()
	defer func() { _ = res.Body.Close() }()
	require.Equal(t, http.StatusInternalServerError, res.StatusCode)
	pkgHandler.AssertExpectations(t)
}
// TestUploadPackage_InvalidPayload verifies that a body too short to contain
// the cargo publish framing is rejected with an error before reaching the
// controller.
func TestUploadPackage_InvalidPayload(t *testing.T) {
	// Arrange
	mockCtrl := new(mockController)
	mockPkgHandler := &fakePackagesHandler{}
	handler := cargopkg.NewHandler(mockCtrl, mockPkgHandler)
	info := &cargotype.ArtifactInfo{FileName: "test-crate-1.0.0.crate"}
	ctx := request.WithArtifactInfo(context.Background(), info)
	mockPkgHandler.On("HandleErrors", ctx, mock.MatchedBy(func(errs []error) bool {
		return len(errs) == 1
	}), mock.Anything)
	// Invalid payload (too short to contain proper cargo format)
	req := httptest.NewRequest(http.MethodPut, "/cargo/upload", bytes.NewReader([]byte("invalid"))).WithContext(ctx)
	w := httptest.NewRecorder()
	// Act
	handler.UploadPackage(w, req)
	// Assert
	result := w.Result()
	defer func() { _ = result.Body.Close() }()
	require.Equal(t, http.StatusInternalServerError, result.StatusCode)
	mockPkgHandler.AssertExpectations(t)
}
// TestUploadPackage_ControllerError verifies that a controller failure during
// upload is wrapped ("failed to upload package: ...") and routed through
// HandleErrors, producing a 500.
func TestUploadPackage_ControllerError(t *testing.T) {
	// Arrange
	mockCtrl := new(mockController)
	mockPkgHandler := &fakePackagesHandler{}
	handler := cargopkg.NewHandler(mockCtrl, mockPkgHandler)
	info := &cargotype.ArtifactInfo{FileName: "test-crate-1.0.0.crate"}
	ctx := request.WithArtifactInfo(context.Background(), info)
	metadata := &cargometadata.VersionMetadata{
		Name:    "test-crate",
		Version: "1.0.0",
	}
	payload := createMockCargoPayload(t, metadata, []byte("mock crate file content"))
	expectedError := errors.New("controller error")
	mockCtrl.On("UploadPackage", ctx, info,
		mock.AnythingOfType("*cargo.VersionMetadata"), mock.Anything).Return(nil, expectedError)
	mockPkgHandler.On("HandleErrors", ctx, mock.MatchedBy(func(errs []error) bool {
		return len(errs) == 1 && errs[0].Error() == "failed to upload package: controller error"
	}), mock.Anything)
	req := httptest.NewRequest(http.MethodPut, "/cargo/upload", bytes.NewReader(payload)).WithContext(ctx)
	w := httptest.NewRecorder()
	// Act
	handler.UploadPackage(w, req)
	// Assert
	result := w.Result()
	defer func() { _ = result.Body.Close() }()
	require.Equal(t, http.StatusInternalServerError, result.StatusCode)
	mockCtrl.AssertExpectations(t)
	mockPkgHandler.AssertExpectations(t)
}
// TestUploadPackage_JSONEncodingError verifies the late-failure path: the
// response writer fails on Write, the encoding error goes to HandleErrors, but
// the 200 status was already committed by WriteToResponse.
func TestUploadPackage_JSONEncodingError(t *testing.T) {
	// Arrange
	resp := &cargo.UploadArtifactResponse{
		BaseResponse: cargo.BaseResponse{
			Error: nil,
			ResponseHeaders: &commons.ResponseHeaders{
				Headers: map[string]string{},
				Code:    http.StatusOK,
			},
		},
		Warnings: &cargo.UploadArtifactWarnings{},
	}
	mockCtrl := new(mockController)
	mockPkgHandler := &fakePackagesHandler{}
	handler := cargopkg.NewHandler(mockCtrl, mockPkgHandler)
	info := &cargotype.ArtifactInfo{FileName: "test-crate-1.0.0.crate"}
	ctx := request.WithArtifactInfo(context.Background(), info)
	metadata := &cargometadata.VersionMetadata{
		Name:    "test-crate",
		Version: "1.0.0",
	}
	payload := createMockCargoPayload(t, metadata, []byte("mock crate file content"))
	mockCtrl.On("UploadPackage", ctx, info, mock.AnythingOfType("*cargo.VersionMetadata"), mock.Anything).Return(resp, nil)
	mockPkgHandler.On("HandleErrors", ctx, mock.MatchedBy(func(errs []error) bool {
		return len(errs) == 1
	}), mock.Anything)
	req := httptest.NewRequest(http.MethodPut, "/cargo/upload", bytes.NewReader(payload)).WithContext(ctx)
	// Create a ResponseWriter that fails on Write to simulate JSON encoding error
	w := &failingResponseWriter{
		ResponseRecorder: httptest.NewRecorder(),
		shouldFail:       true,
	}
	// Act
	handler.UploadPackage(w, req)
	// Assert - JSON encoding error occurs but status was already written as 200 by WriteToResponse
	require.Equal(t, http.StatusOK, w.Code)
	mockCtrl.AssertExpectations(t)
	mockPkgHandler.AssertExpectations(t)
}
// Helper function to create a mock cargo payload.
//
// Wire format (cargo publish): two length-prefixed sections back to back —
// [metadata_len u32 LE][metadata JSON][crate_len u32 LE][crate bytes].
// Uses encoding/binary instead of hand-rolled byte shifting, and one shared
// helper for both sections to keep the framing in a single place.
func createMockCargoPayload(t *testing.T, metadata *cargometadata.VersionMetadata, crateData []byte) []byte {
	metadataJSON, err := json.Marshal(metadata)
	require.NoError(t, err)

	var buf bytes.Buffer
	// writeSection appends a little-endian uint32 length prefix followed by
	// the section data; label is only used for the failure message.
	writeSection := func(label string, data []byte) {
		if len(data) > 0xFFFFFFFF {
			t.Fatalf("%s too large: %d bytes", label, len(data))
		}
		var lenBuf [4]byte
		binary.LittleEndian.PutUint32(lenBuf[:], uint32(len(data)))
		buf.Write(lenBuf[:])
		buf.Write(data)
	}

	writeSection("metadata", metadataJSON)
	writeSection("crate data", crateData)
	return buf.Bytes()
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/cargo/registry_config.go | registry/app/api/handler/cargo/registry_config.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cargo
import (
"encoding/json"
"fmt"
"net/http"
cargotype "github.com/harness/gitness/registry/app/pkg/types/cargo"
"github.com/harness/gitness/registry/request"
)
// GetRegistryConfig writes the cargo registry configuration (config.json)
// for the registry resolved from the request context as JSON.
func (h *handler) GetRegistryConfig(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	info, ok := request.ArtifactInfoFrom(ctx).(*cargotype.ArtifactInfo)
	if !ok {
		h.handleCargoPackageAPIError(w, r, fmt.Errorf("failed to fetch info from context"))
		return
	}
	response, err := h.controller.GetRegistryConfig(ctx, info)
	if err != nil {
		// Fix: add the conventional ": " separator before the wrapped error,
		// matching every other %w wrap in this package.
		h.handleCargoPackageAPIError(w, r, fmt.Errorf("failed to fetch registry config: %w", err))
		return
	}
	// NOTE(review): Content-Type is set after WriteHeadersToResponse; this is
	// assumed to only copy headers (not write the status code) — confirm,
	// since headers set after a status write are silently dropped.
	response.ResponseHeaders.WriteHeadersToResponse(w)
	w.Header().Set("Content-Type", "text/json; charset=utf-8")
	err = json.NewEncoder(w).Encode(response.Config)
	if err != nil {
		h.handleCargoPackageAPIError(w, r,
			fmt.Errorf("error occurred during sending response for get registry config for cargo package: %w", err),
		)
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/cargo/download_package_test.go | registry/app/api/handler/cargo/download_package_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cargo_test
import (
"bytes"
"context"
"errors"
"io"
"net/http"
"net/http/httptest"
"testing"
"github.com/harness/gitness/app/auth/authn"
cargo "github.com/harness/gitness/registry/app/api/controller/pkg/cargo"
cargopkg "github.com/harness/gitness/registry/app/api/handler/cargo"
"github.com/harness/gitness/registry/app/dist_temp/errcode"
cargometadata "github.com/harness/gitness/registry/app/metadata/cargo"
"github.com/harness/gitness/registry/app/pkg"
"github.com/harness/gitness/registry/app/pkg/commons"
cargotype "github.com/harness/gitness/registry/app/pkg/types/cargo"
"github.com/harness/gitness/registry/app/storage"
"github.com/harness/gitness/registry/request"
"github.com/harness/gitness/types/enum"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
// --- Mock Controller ---.

// mockController is a testify mock for the cargo controller interface the
// handler depends on. Each method records its arguments via Called and
// returns whatever the test configured with On(...).Return(...), degrading
// to nil when the configured value is nil or of an unexpected type.
type mockController struct {
	mock.Mock
}

// DownloadPackage returns the configured *cargo.GetPackageResponse, or nil.
func (m *mockController) DownloadPackage(ctx context.Context, info *cargotype.ArtifactInfo) *cargo.GetPackageResponse {
	args := m.Called(ctx, info)
	if resp := args.Get(0); resp != nil {
		if typedResp, ok := resp.(*cargo.GetPackageResponse); ok {
			return typedResp
		}
	}
	return nil
}

// DownloadPackageIndex returns the configured *cargo.GetPackageIndexResponse, or nil.
func (m *mockController) DownloadPackageIndex(ctx context.Context,
	info *cargotype.ArtifactInfo, path string) *cargo.GetPackageIndexResponse {
	args := m.Called(ctx, info, path)
	if resp := args.Get(0); resp != nil {
		if typedResp, ok := resp.(*cargo.GetPackageIndexResponse); ok {
			return typedResp
		}
	}
	return nil
}

// GetRegistryConfig returns the configured response/error pair.
func (m *mockController) GetRegistryConfig(ctx context.Context,
	info *cargotype.ArtifactInfo) (*cargo.GetRegistryConfigResponse, error) {
	args := m.Called(ctx, info)
	if args.Error(1) != nil {
		return nil, args.Error(1)
	}
	if args.Get(0) == nil {
		return nil, args.Error(1)
	}
	if resp, ok := args.Get(0).(*cargo.GetRegistryConfigResponse); ok {
		return resp, args.Error(1)
	}
	return nil, args.Error(1)
}

// SearchPackage returns the configured response/error pair.
func (m *mockController) SearchPackage(ctx context.Context, info *cargotype.ArtifactInfo,
	requestInfo *cargotype.SearchPackageRequestParams) (*cargo.SearchPackageResponse, error) {
	args := m.Called(ctx, info, requestInfo)
	if args.Error(1) != nil {
		return nil, args.Error(1)
	}
	if args.Get(0) == nil {
		return nil, args.Error(1)
	}
	if resp, ok := args.Get(0).(*cargo.SearchPackageResponse); ok {
		return resp, args.Error(1)
	}
	return nil, args.Error(1)
}

// RegeneratePackageIndex returns the configured response/error pair.
func (m *mockController) RegeneratePackageIndex(ctx context.Context,
	info *cargotype.ArtifactInfo) (*cargo.RegeneratePackageIndexResponse, error) {
	args := m.Called(ctx, info)
	if args.Error(1) != nil {
		return nil, args.Error(1)
	}
	if args.Get(0) == nil {
		return nil, args.Error(1)
	}
	if resp, ok := args.Get(0).(*cargo.RegeneratePackageIndexResponse); ok {
		return resp, args.Error(1)
	}
	return nil, args.Error(1)
}

// UploadPackage returns the configured response/error pair.
func (m *mockController) UploadPackage(ctx context.Context, info *cargotype.ArtifactInfo,
	metadata *cargometadata.VersionMetadata, fileReader io.ReadCloser) (*cargo.UploadArtifactResponse, error) {
	args := m.Called(ctx, info, metadata, fileReader)
	if args.Error(1) != nil {
		return nil, args.Error(1)
	}
	if args.Get(0) == nil {
		return nil, args.Error(1)
	}
	if resp, ok := args.Get(0).(*cargo.UploadArtifactResponse); ok {
		return resp, args.Error(1)
	}
	return nil, args.Error(1)
}

// UpdateYank returns the configured response/error pair.
func (m *mockController) UpdateYank(ctx context.Context, info *cargotype.ArtifactInfo,
	yank bool) (*cargo.UpdateYankResponse, error) {
	args := m.Called(ctx, info, yank)
	if args.Error(1) != nil {
		return nil, args.Error(1)
	}
	if args.Get(0) == nil {
		return nil, args.Error(1)
	}
	if resp, ok := args.Get(0).(*cargo.UpdateYankResponse); ok {
		return resp, args.Error(1)
	}
	return nil, args.Error(1)
}
// --- Fake packages.Handler ---.

// fakePackagesHandler is a testify stand-in for the generic packages handler
// dependency. The HandleError* helpers additionally write a 500 status so
// tests can assert on the recorded response code.
type fakePackagesHandler struct {
	mock.Mock
}

// GetRegistryCheckAccess records the call and returns the configured error.
func (f *fakePackagesHandler) GetRegistryCheckAccess(ctx context.Context, r *http.Request,
	reqPermissions ...enum.Permission) error {
	args := f.Called(ctx, r, reqPermissions)
	return args.Error(0)
}

// GetArtifactInfo returns the configured pkg.ArtifactInfo or configured error.
// Fix: the error check was duplicated verbatim; one check is sufficient.
func (f *fakePackagesHandler) GetArtifactInfo(r *http.Request) (pkg.ArtifactInfo, error) {
	args := f.Called(r)
	if args.Error(1) != nil {
		return pkg.ArtifactInfo{}, args.Error(1)
	}
	if info, ok := args.Get(0).(pkg.ArtifactInfo); ok {
		return info, args.Error(1)
	}
	return pkg.ArtifactInfo{}, args.Error(1)
}

// DownloadFile records the call; no behavior.
func (f *fakePackagesHandler) DownloadFile(w http.ResponseWriter, r *http.Request) {
	f.Called(w, r)
}

// TrackDownloadStats records the call and returns the configured error.
func (f *fakePackagesHandler) TrackDownloadStats(ctx context.Context, r *http.Request) error {
	args := f.Called(ctx, r)
	return args.Error(0)
}

// GetPackageArtifactInfo returns the configured info or configured error.
func (f *fakePackagesHandler) GetPackageArtifactInfo(r *http.Request) (pkg.PackageArtifactInfo, error) {
	args := f.Called(r)
	if args.Error(1) != nil {
		return nil, args.Error(1)
	}
	if info, ok := args.Get(0).(pkg.PackageArtifactInfo); ok {
		return info, args.Error(1)
	}
	return nil, args.Error(1)
}

// CheckQuarantineStatus records the call and returns the configured error.
func (f *fakePackagesHandler) CheckQuarantineStatus(ctx context.Context) error {
	args := f.Called(ctx)
	return args.Error(0)
}

// GetAuthenticator returns the configured authenticator, or nil.
func (f *fakePackagesHandler) GetAuthenticator() authn.Authenticator {
	args := f.Called()
	if resp := args.Get(0); resp != nil {
		if auth, ok := resp.(authn.Authenticator); ok {
			return auth
		}
	}
	return nil
}

// HandleErrors2 records the call; it does not write to the response.
func (f *fakePackagesHandler) HandleErrors2(ctx context.Context, errors errcode.Error, w http.ResponseWriter) {
	f.Called(ctx, errors, w)
}

// HandleErrors records the call and writes a 500 status.
func (f *fakePackagesHandler) HandleErrors(ctx context.Context, errors errcode.Errors, w http.ResponseWriter) {
	f.Called(ctx, errors, w)
	// Actually write an error status code to simulate real error handling
	w.WriteHeader(http.StatusInternalServerError)
}

// HandleError records the call and writes a 500 status.
func (f *fakePackagesHandler) HandleError(ctx context.Context, w http.ResponseWriter, err error) {
	f.Called(ctx, w, err)
	// Actually write an error status code to simulate real error handling
	w.WriteHeader(http.StatusInternalServerError)
}

// HandleCargoPackageAPIError records the call and writes a 500 status.
func (f *fakePackagesHandler) HandleCargoPackageAPIError(w http.ResponseWriter, r *http.Request, err error) {
	f.Called(w, r, err)
	// Actually write an error status code to simulate real error handling
	w.WriteHeader(http.StatusInternalServerError)
}

// ServeContent records the call; no behavior.
func (f *fakePackagesHandler) ServeContent(w http.ResponseWriter, r *http.Request,
	fileReader *storage.FileReader, filename string) {
	f.Called(w, r, fileReader, filename)
}
// --- Tests ---.

// TestDownloadPackage_ServeContent verifies that a controller response with a
// ReadCloser body is streamed to the client along with its response headers.
func TestDownloadPackage_ServeContent(t *testing.T) {
	// Arrange
	body := []byte("hello package")
	resp := &cargo.GetPackageResponse{
		DownloadFileResponse: cargo.DownloadFileResponse{
			BaseResponse: cargo.BaseResponse{
				Error: nil,
				ResponseHeaders: &commons.ResponseHeaders{
					Headers: map[string]string{
						"Content-Type": "text/plain; charset=utf-8",
					},
					Code: http.StatusOK,
				},
			},
			RedirectURL: "",
			Body:        nil,
			ReadCloser:  io.NopCloser(bytes.NewReader(body)),
		},
	}
	mockCtrl := new(mockController)
	handler := cargopkg.NewHandler(mockCtrl, &fakePackagesHandler{})
	info := &cargotype.ArtifactInfo{FileName: "test.txt"}
	ctx := request.WithArtifactInfo(context.Background(), info)
	mockCtrl.On("DownloadPackage", ctx, info).Return(resp)
	req := httptest.NewRequest(http.MethodGet, "/cargo/download", nil).WithContext(ctx)
	w := httptest.NewRecorder()
	// Act
	handler.DownloadPackage(w, req)
	// Assert: status, headers, and the full body must round-trip.
	result := w.Result()
	defer func() { _ = result.Body.Close() }()
	require.Equal(t, http.StatusOK, result.StatusCode)
	require.Equal(t, "text/plain; charset=utf-8", result.Header.Get("Content-Type"))
	data, _ := io.ReadAll(result.Body)
	require.Equal(t, body, data)
	mockCtrl.AssertExpectations(t)
}
func TestDownloadPackage_Redirect(t *testing.T) {
// Arrange
resp := &cargo.GetPackageResponse{
DownloadFileResponse: cargo.DownloadFileResponse{
BaseResponse: cargo.BaseResponse{
Error: nil,
ResponseHeaders: nil,
},
RedirectURL: "https://example.com/pkg",
Body: nil,
ReadCloser: nil,
},
}
mockCtrl := new(mockController)
handler := cargopkg.NewHandler(mockCtrl, &fakePackagesHandler{})
info := &cargotype.ArtifactInfo{FileName: "test.txt"}
ctx := request.WithArtifactInfo(context.Background(), info)
mockCtrl.On("DownloadPackage", ctx, info).Return(resp)
req := httptest.NewRequest(http.MethodGet, "/cargo/download", nil).WithContext(ctx)
w := httptest.NewRecorder()
// Act
handler.DownloadPackage(w, req)
// Assert
result := w.Result()
defer func() { _ = result.Body.Close() }()
require.Equal(t, http.StatusTemporaryRedirect, result.StatusCode)
require.Equal(t, "https://example.com/pkg", result.Header.Get("Location"))
mockCtrl.AssertExpectations(t)
}
// TestDownloadPackage_ErrorFromController verifies that a controller response
// carrying an error is routed to HandleError (which the fake maps to a 500).
func TestDownloadPackage_ErrorFromController(t *testing.T) {
	// Arrange
	resp := &cargo.GetPackageResponse{
		DownloadFileResponse: cargo.DownloadFileResponse{
			BaseResponse: cargo.BaseResponse{
				Error:           errors.New("something went wrong"),
				ResponseHeaders: nil,
			},
			RedirectURL: "",
			Body:        nil,
			ReadCloser:  nil,
		},
	}
	mockCtrl := new(mockController)
	mockPkgHandler := &fakePackagesHandler{}
	handler := cargopkg.NewHandler(mockCtrl, mockPkgHandler)
	info := &cargotype.ArtifactInfo{FileName: "test.txt"}
	ctx := request.WithArtifactInfo(context.Background(), info)
	mockCtrl.On("DownloadPackage", ctx, info).Return(resp)
	// Expect the error to reach the packages handler's HandleError.
	mockPkgHandler.On("HandleError", ctx, mock.Anything, mock.AnythingOfType("*errors.errorString"))
	req := httptest.NewRequest(http.MethodGet, "/cargo/download", nil).WithContext(ctx)
	w := httptest.NewRecorder()
	// Act
	handler.DownloadPackage(w, req)
	// Assert: fake error handler writes a 500.
	result := w.Result()
	defer func() { _ = result.Body.Close() }()
	require.Equal(t, http.StatusInternalServerError, result.StatusCode)
	mockCtrl.AssertExpectations(t)
	mockPkgHandler.AssertExpectations(t)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/cargo/yank_version_test.go | registry/app/api/handler/cargo/yank_version_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cargo_test
import (
"context"
"encoding/json"
"errors"
"net/http"
"net/http/httptest"
"testing"
cargo "github.com/harness/gitness/registry/app/api/controller/pkg/cargo"
cargopkg "github.com/harness/gitness/registry/app/api/handler/cargo"
"github.com/harness/gitness/registry/app/pkg/commons"
cargotype "github.com/harness/gitness/registry/app/pkg/types/cargo"
"github.com/harness/gitness/registry/request"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
// TestYankVersion_Success verifies the happy path: the controller reports a
// successful yank and the handler writes the JSON body with HTTP 200.
func TestYankVersion_Success(t *testing.T) {
	// Arrange
	resp := &cargo.UpdateYankResponse{
		BaseResponse: cargo.BaseResponse{
			Error: nil,
			ResponseHeaders: &commons.ResponseHeaders{
				Headers: map[string]string{},
				Code:    http.StatusOK,
			},
		},
		Ok: true,
	}
	mockCtrl := new(mockController)
	handler := cargopkg.NewHandler(mockCtrl, &fakePackagesHandler{})
	info := &cargotype.ArtifactInfo{
		FileName: "test-crate",
		Version:  "1.0.0",
	}
	ctx := request.WithArtifactInfo(context.Background(), info)
	// The DELETE route is expected to call UpdateYank with yank=true.
	mockCtrl.On("UpdateYank", ctx, info, true).Return(resp, nil)
	req := httptest.NewRequest(http.MethodDelete, "/cargo/yank", nil).WithContext(ctx)
	w := httptest.NewRecorder()
	// Act
	handler.YankVersion(w, req)
	// Assert
	result := w.Result()
	defer func() { _ = result.Body.Close() }()
	require.Equal(t, http.StatusOK, result.StatusCode)
	require.Equal(t, "application/json", result.Header.Get("Content-Type"))
	var response cargo.UpdateYankResponse
	err := json.NewDecoder(result.Body).Decode(&response)
	require.NoError(t, err)
	require.True(t, response.Ok)
	mockCtrl.AssertExpectations(t)
}
// TestYankVersion_InvalidArtifactInfo verifies that a request whose context
// lacks cargo artifact info is rejected via the packages error handler.
func TestYankVersion_InvalidArtifactInfo(t *testing.T) {
	// Arrange: context deliberately carries no artifact info.
	ctrl := new(mockController)
	pkgHandler := &fakePackagesHandler{}
	h := cargopkg.NewHandler(ctrl, pkgHandler)

	ctx := context.Background()
	pkgHandler.On("HandleErrors", ctx, mock.MatchedBy(func(errs []error) bool {
		return len(errs) == 1 && errs[0].Error() == failedToFetchInfoFromContext
	}), mock.Anything)

	rec := httptest.NewRecorder()

	// Act
	h.YankVersion(rec, httptest.NewRequest(http.MethodDelete, "/cargo/yank", nil).WithContext(ctx))

	// Assert: the fake error handler writes a 500.
	res := rec.Result()
	defer func() { _ = res.Body.Close() }()
	require.Equal(t, http.StatusInternalServerError, res.StatusCode)
	pkgHandler.AssertExpectations(t)
}
// TestYankVersion_ControllerError verifies that a controller failure is
// wrapped as "failed to yank version: ..." and routed to HandleErrors.
func TestYankVersion_ControllerError(t *testing.T) {
	// Arrange
	mockCtrl := new(mockController)
	mockPkgHandler := &fakePackagesHandler{}
	handler := cargopkg.NewHandler(mockCtrl, mockPkgHandler)
	info := &cargotype.ArtifactInfo{
		FileName: "test-crate",
		Version:  "1.0.0",
	}
	ctx := request.WithArtifactInfo(context.Background(), info)
	expectedError := errors.New("controller error")
	mockCtrl.On("UpdateYank", ctx, info, true).Return(nil, expectedError)
	// The handler is expected to wrap the controller error with context.
	mockPkgHandler.On("HandleErrors", ctx, mock.MatchedBy(func(errs []error) bool {
		return len(errs) == 1 && errs[0].Error() == "failed to yank version: controller error"
	}), mock.Anything)
	req := httptest.NewRequest(http.MethodDelete, "/cargo/yank", nil).WithContext(ctx)
	w := httptest.NewRecorder()
	// Act
	handler.YankVersion(w, req)
	// Assert: fake error handler writes a 500.
	result := w.Result()
	defer func() { _ = result.Body.Close() }()
	require.Equal(t, http.StatusInternalServerError, result.StatusCode)
	mockCtrl.AssertExpectations(t)
	mockPkgHandler.AssertExpectations(t)
}
// TestYankVersion_JSONEncodingError verifies that when the response body
// write fails, the error is reported — but the 200 status stands because
// headers were already flushed before encoding started.
func TestYankVersion_JSONEncodingError(t *testing.T) {
	// Arrange
	resp := &cargo.UpdateYankResponse{
		BaseResponse: cargo.BaseResponse{
			Error: nil,
			ResponseHeaders: &commons.ResponseHeaders{
				Headers: map[string]string{},
				Code:    http.StatusOK,
			},
		},
		Ok: true,
	}
	mockCtrl := new(mockController)
	mockPkgHandler := &fakePackagesHandler{}
	handler := cargopkg.NewHandler(mockCtrl, mockPkgHandler)
	info := &cargotype.ArtifactInfo{
		FileName: "test-crate",
		Version:  "1.0.0",
	}
	ctx := request.WithArtifactInfo(context.Background(), info)
	mockCtrl.On("UpdateYank", ctx, info, true).Return(resp, nil)
	mockPkgHandler.On("HandleErrors", ctx, mock.MatchedBy(func(errs []error) bool {
		return len(errs) == 1
	}), mock.Anything)
	req := httptest.NewRequest(http.MethodDelete, "/cargo/yank", nil).WithContext(ctx)
	// Create a ResponseWriter that fails on Write to simulate JSON encoding error
	w := &failingResponseWriter{
		ResponseRecorder: httptest.NewRecorder(),
		shouldFail:       true,
	}
	// Act
	handler.YankVersion(w, req)
	// Assert - JSON encoding error is handled but status code is already written
	require.Equal(t, http.StatusOK, w.Code)
	mockCtrl.AssertExpectations(t)
	mockPkgHandler.AssertExpectations(t)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/cargo/download_package_index.go | registry/app/api/handler/cargo/download_package_index.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cargo
import (
"fmt"
"net/http"
"github.com/harness/gitness/registry/app/pkg/commons"
cargotype "github.com/harness/gitness/registry/app/pkg/types/cargo"
"github.com/harness/gitness/registry/request"
"github.com/rs/zerolog/log"
)
// DownloadPackageIndex serves a package index file — identified by the "*"
// wildcard path segment — for the registry resolved from the request
// context. It either redirects to an upstream URL or streams the contents.
func (h *handler) DownloadPackageIndex(
	w http.ResponseWriter, r *http.Request,
) {
	ctx := r.Context()
	info, ok := request.ArtifactInfoFrom(ctx).(*cargotype.ArtifactInfo)
	if !ok {
		h.handleCargoPackageAPIError(w, r, fmt.Errorf("failed to fetch info from context"))
		return
	}
	path := r.PathValue("*")
	response := h.controller.DownloadPackageIndex(ctx, info, path)
	if response == nil {
		// NOTE(review): this branch uses HandleErrors while the type-assert
		// failure above uses handleCargoPackageAPIError — confirm both produce
		// a cargo-compatible error body.
		h.HandleErrors(ctx, []error{fmt.Errorf("failed to get response from controller")}, w)
		return
	}
	// Ensure both possible body readers are released once the handler exits.
	defer func() {
		if response.Body != nil {
			err := response.Body.Close()
			if err != nil {
				log.Ctx(ctx).Error().Msgf("Failed to close body: %v", err)
			}
		}
		if response.ReadCloser != nil {
			err := response.ReadCloser.Close()
			if err != nil {
				log.Ctx(ctx).Error().Msgf("Failed to close read closer: %v", err)
			}
		}
	}()
	if response.GetError() != nil {
		h.HandleError(ctx, w, response.GetError())
		return
	}
	// The controller may hand back a redirect URL instead of file content.
	if response.RedirectURL != "" {
		http.Redirect(w, r, response.RedirectURL, http.StatusTemporaryRedirect)
		return
	}
	response.ResponseHeaders.WriteToResponse(w)
	err := commons.ServeContent(w, r, response.Body, info.FileName, response.ReadCloser)
	if err != nil {
		log.Ctx(ctx).Error().Msgf("Failed to serve content: %v", err)
		h.HandleError(ctx, w, err)
		return
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/cargo/search_package_test.go | registry/app/api/handler/cargo/search_package_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cargo_test
import (
"context"
"encoding/json"
"errors"
"net/http"
"net/http/httptest"
"testing"
cargo "github.com/harness/gitness/registry/app/api/controller/pkg/cargo"
cargopkg "github.com/harness/gitness/registry/app/api/handler/cargo"
"github.com/harness/gitness/registry/app/pkg/commons"
cargotype "github.com/harness/gitness/registry/app/pkg/types/cargo"
"github.com/harness/gitness/registry/request"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
const (
	// testSearchTerm is the query string shared by the search tests below.
	testSearchTerm = "test"
)
// TestSearchPackage_Success verifies the happy path: the q and per_page
// query parameters are parsed into SearchPackageRequestParams and the
// controller's result is written as JSON with HTTP 200.
func TestSearchPackage_Success(t *testing.T) {
	// Arrange
	searchResult := &cargo.SearchPackageResponse{
		BaseResponse: cargo.BaseResponse{
			Error: nil,
			ResponseHeaders: &commons.ResponseHeaders{
				Headers: map[string]string{},
				Code:    http.StatusOK,
			},
		},
		Crates: []cargo.SearchPackageResponseCrate{
			{
				Name:        "test-crate",
				MaxVersion:  "1.0.0",
				Description: "A test crate",
			},
		},
		Metadata: cargo.SearchPackageResponseMetadata{
			Total: 1,
		},
	}
	mockCtrl := new(mockController)
	handler := cargopkg.NewHandler(mockCtrl, &fakePackagesHandler{})
	info := &cargotype.ArtifactInfo{FileName: "search"}
	ctx := request.WithArtifactInfo(context.Background(), info)
	searchTerm := testSearchTerm
	size := int32(10)
	expectedParams := &cargotype.SearchPackageRequestParams{
		SearchTerm: &searchTerm,
		Size:       &size,
	}
	mockCtrl.On("SearchPackage", ctx, info, expectedParams).Return(searchResult, nil)
	// Create request with query parameters
	req := httptest.NewRequest(http.MethodGet, "/cargo/search?q=test&per_page=10", nil).WithContext(ctx)
	w := httptest.NewRecorder()
	// Act
	handler.SearchPackage(w, req)
	// Assert
	result := w.Result()
	defer func() { _ = result.Body.Close() }()
	require.Equal(t, http.StatusOK, result.StatusCode)
	require.Equal(t, "text/json; charset=utf-8", result.Header.Get("Content-Type"))
	var response cargo.SearchPackageResponse
	err := json.NewDecoder(result.Body).Decode(&response)
	require.NoError(t, err)
	require.Len(t, response.Crates, 1)
	require.Equal(t, "test-crate", response.Crates[0].Name)
	require.Equal(t, int64(1), response.Metadata.Total)
	mockCtrl.AssertExpectations(t)
}
// TestSearchPackage_InvalidArtifactInfo verifies that a request whose context
// lacks cargo artifact info is rejected via the packages error handler.
func TestSearchPackage_InvalidArtifactInfo(t *testing.T) {
	// Arrange: context deliberately carries no artifact info.
	ctrl := new(mockController)
	pkgHandler := &fakePackagesHandler{}
	h := cargopkg.NewHandler(ctrl, pkgHandler)

	ctx := context.Background()
	pkgHandler.On("HandleErrors", ctx, mock.MatchedBy(func(errs []error) bool {
		return len(errs) == 1 && errs[0].Error() == failedToFetchInfoFromContext
	}), mock.Anything)

	rec := httptest.NewRecorder()

	// Act
	h.SearchPackage(rec, httptest.NewRequest(http.MethodGet, "/cargo/search?q=test", nil).WithContext(ctx))

	// Assert: the fake error handler writes a 500.
	res := rec.Result()
	defer func() { _ = res.Body.Close() }()
	require.Equal(t, http.StatusInternalServerError, res.StatusCode)
	pkgHandler.AssertExpectations(t)
}
// TestSearchPackage_InvalidQueryParams verifies that an unparsable per_page
// value is reported through the packages error handler.
func TestSearchPackage_InvalidQueryParams(t *testing.T) {
	// Arrange
	mockCtrl := new(mockController)
	mockPkgHandler := &fakePackagesHandler{}
	handler := cargopkg.NewHandler(mockCtrl, mockPkgHandler)
	info := &cargotype.ArtifactInfo{FileName: "search"}
	ctx := request.WithArtifactInfo(context.Background(), info)
	mockPkgHandler.On("HandleErrors", ctx, mock.MatchedBy(func(errs []error) bool {
		return len(errs) == 1
	}), mock.Anything)
	// Create request with invalid per_page parameter
	req := httptest.NewRequest(http.MethodGet, "/cargo/search?q=test&per_page=invalid", nil).WithContext(ctx)
	w := httptest.NewRecorder()
	// Act
	handler.SearchPackage(w, req)
	// Assert: fake error handler writes a 500; the controller is never called.
	result := w.Result()
	defer func() { _ = result.Body.Close() }()
	require.Equal(t, http.StatusInternalServerError, result.StatusCode)
	mockPkgHandler.AssertExpectations(t)
}
// TestSearchPackage_ControllerError verifies that a controller failure is
// wrapped as "failed to get response from controller: ..." and routed to
// HandleErrors.
func TestSearchPackage_ControllerError(t *testing.T) {
	// Arrange
	mockCtrl := new(mockController)
	mockPkgHandler := &fakePackagesHandler{}
	handler := cargopkg.NewHandler(mockCtrl, mockPkgHandler)
	info := &cargotype.ArtifactInfo{FileName: "search"}
	ctx := request.WithArtifactInfo(context.Background(), info)
	searchTerm := testSearchTerm
	size := int32(10)
	expectedParams := &cargotype.SearchPackageRequestParams{
		SearchTerm: &searchTerm,
		Size:       &size,
	}
	expectedError := errors.New("controller error")
	mockCtrl.On("SearchPackage", ctx, info, expectedParams).Return(nil, expectedError)
	// The handler is expected to wrap the controller error with context.
	mockPkgHandler.On("HandleErrors", ctx, mock.MatchedBy(func(errs []error) bool {
		return len(errs) == 1 && errs[0].Error() == "failed to get response from controller: controller error"
	}), mock.Anything)
	req := httptest.NewRequest(http.MethodGet, "/cargo/search?q=test&per_page=10", nil).WithContext(ctx)
	w := httptest.NewRecorder()
	// Act
	handler.SearchPackage(w, req)
	// Assert: fake error handler writes a 500.
	result := w.Result()
	defer func() { _ = result.Body.Close() }()
	require.Equal(t, http.StatusInternalServerError, result.StatusCode)
	mockCtrl.AssertExpectations(t)
	mockPkgHandler.AssertExpectations(t)
}
// TestSearchPackage_JSONEncodingError verifies that a body-write failure
// during JSON encoding is routed to HandleErrors, which the fake maps to a
// 500 response.
func TestSearchPackage_JSONEncodingError(t *testing.T) {
	// Arrange
	searchResult := &cargo.SearchPackageResponse{
		BaseResponse: cargo.BaseResponse{
			Error: nil,
			ResponseHeaders: &commons.ResponseHeaders{
				Headers: map[string]string{},
				Code:    http.StatusOK,
			},
		},
		Crates: []cargo.SearchPackageResponseCrate{
			{
				Name:       "test-crate",
				MaxVersion: "1.0.0",
			},
		},
	}
	mockCtrl := new(mockController)
	mockPkgHandler := &fakePackagesHandler{}
	handler := cargopkg.NewHandler(mockCtrl, mockPkgHandler)
	info := &cargotype.ArtifactInfo{FileName: "search"}
	ctx := request.WithArtifactInfo(context.Background(), info)
	searchTerm := testSearchTerm
	size := int32(10)
	expectedParams := &cargotype.SearchPackageRequestParams{
		SearchTerm: &searchTerm,
		Size:       &size,
	}
	mockCtrl.On("SearchPackage", ctx, info, expectedParams).Return(searchResult, nil)
	mockPkgHandler.On("HandleErrors", ctx, mock.MatchedBy(func(errs []error) bool {
		return len(errs) == 1
	}), mock.Anything)
	req := httptest.NewRequest(http.MethodGet, "/cargo/search?q=test&per_page=10", nil).WithContext(ctx)
	// Create a ResponseWriter that fails on Write to simulate JSON encoding error
	w := &failingResponseWriter{
		ResponseRecorder: httptest.NewRecorder(),
		shouldFail:       true,
	}
	// Act
	handler.SearchPackage(w, req)
	// Assert - JSON encoding error causes 500 status since headers were already written
	require.Equal(t, http.StatusInternalServerError, w.Code)
	mockCtrl.AssertExpectations(t)
	mockPkgHandler.AssertExpectations(t)
}
// TestSearchPackage_EmptyQueryParams verifies that omitted q and per_page
// query parameters are passed to the controller as nil pointers and an empty
// result set is returned cleanly.
func TestSearchPackage_EmptyQueryParams(t *testing.T) {
	// Arrange
	searchResult := &cargo.SearchPackageResponse{
		BaseResponse: cargo.BaseResponse{
			Error: nil,
			ResponseHeaders: &commons.ResponseHeaders{
				Headers: map[string]string{},
				Code:    http.StatusOK,
			},
		},
		Crates: []cargo.SearchPackageResponseCrate{},
		Metadata: cargo.SearchPackageResponseMetadata{
			Total: 0,
		},
	}
	mockCtrl := new(mockController)
	handler := cargopkg.NewHandler(mockCtrl, &fakePackagesHandler{})
	info := &cargotype.ArtifactInfo{FileName: "search"}
	ctx := request.WithArtifactInfo(context.Background(), info)
	// Missing query parameters should surface as nil pointers.
	expectedParams := &cargotype.SearchPackageRequestParams{
		SearchTerm: nil,
		Size:       nil,
	}
	mockCtrl.On("SearchPackage", ctx, info, expectedParams).Return(searchResult, nil)
	req := httptest.NewRequest(http.MethodGet, "/cargo/search", nil).WithContext(ctx)
	w := httptest.NewRecorder()
	// Act
	handler.SearchPackage(w, req)
	// Assert
	result := w.Result()
	defer func() { _ = result.Body.Close() }()
	require.Equal(t, http.StatusOK, result.StatusCode)
	var response cargo.SearchPackageResponse
	err := json.NewDecoder(result.Body).Decode(&response)
	require.NoError(t, err)
	require.Len(t, response.Crates, 0)
	require.Equal(t, int64(0), response.Metadata.Total)
	mockCtrl.AssertExpectations(t)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/cargo/upload.go | registry/app/api/handler/cargo/upload.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cargo
import (
"encoding/binary"
"encoding/json"
"fmt"
"io"
"net/http"
cargometadata "github.com/harness/gitness/registry/app/metadata/cargo"
cargotype "github.com/harness/gitness/registry/app/pkg/types/cargo"
"github.com/harness/gitness/registry/request"
)
// UploadPackage handles a cargo publish request: it splits the request body
// into version metadata and crate file, forwards both to the controller,
// and writes the controller response as JSON.
func (h *handler) UploadPackage(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	info, ok := request.ArtifactInfoFrom(ctx).(*cargotype.ArtifactInfo)
	if !ok {
		h.handleCargoPackageAPIError(w, r, fmt.Errorf("failed to fetch info from context"))
		return
	}
	// parse metadata, and crate file from payload
	metadata, fileReader, err := h.parseDataFromPayload(r.Body)
	if err != nil {
		h.handleCargoPackageAPIError(w, r, fmt.Errorf("failed to parse data from payload: %w", err))
		return
	}
	// NOTE(review): a metadata section consisting of the literal JSON "null"
	// can leave metadata nil here, which would panic on the deref below —
	// confirm parseDataFromPayload rejects that case.
	info.Image = metadata.Name
	info.Version = metadata.Version
	response, err := h.controller.UploadPackage(ctx, info, metadata, fileReader)
	if err != nil {
		h.handleCargoPackageAPIError(w, r, fmt.Errorf("failed to upload package: %w", err))
		return
	}
	// Final response
	response.ResponseHeaders.WriteToResponse(w)
	err = json.NewEncoder(w).Encode(response)
	if err != nil {
		h.handleCargoPackageAPIError(w, r,
			fmt.Errorf("error occurred during sending response for upload package for cargo package: %w", err),
		)
	}
}
// parseDataFromPayload splits a cargo publish payload into its JSON version
// metadata and a reader over the raw .crate file bytes.
//
// Wire format (little endian): json_len (4 bytes) + json + crate_len
// (4 bytes) + crate bytes. The returned ReadCloser yields at most crate_len
// bytes of the remaining stream.
func (h *handler) parseDataFromPayload(
	fileReader io.ReadCloser,
) (*cargometadata.VersionMetadata, io.ReadCloser, error) {
	// Step 1: Read first 4 bytes to get JSON length
	header := make([]byte, 4)
	if _, err := io.ReadFull(fileReader, header); err != nil {
		return nil, nil, fmt.Errorf("failed to read JSON length: %w", err)
	}
	jsonLen := binary.LittleEndian.Uint32(header)
	// Step 2: Read the JSON metadata
	// NOTE(review): jsonLen is client-controlled and allocated in full (up to
	// 4 GiB); consider enforcing an upper bound for untrusted callers.
	jsonBuf := make([]byte, jsonLen)
	if _, err := io.ReadFull(fileReader, jsonBuf); err != nil {
		return nil, nil, fmt.Errorf("failed to read JSON metadata: %w", err)
	}
	var metadata *cargometadata.VersionMetadata
	if err := json.Unmarshal(jsonBuf, &metadata); err != nil {
		return nil, nil, fmt.Errorf("invalid JSON: %w", err)
	}
	// Fix: a metadata section of the literal JSON "null" unmarshals without
	// error but leaves metadata nil; reject it instead of panicking on the
	// dereference below (and in the caller).
	if metadata == nil {
		return nil, nil, fmt.Errorf("invalid JSON: metadata is missing")
	}
	// 3. Read 4 bytes: crate length
	var crateLenBuf [4]byte
	if _, err := io.ReadFull(fileReader, crateLenBuf[:]); err != nil {
		return nil, nil, fmt.Errorf("failed to read crate length: %w", err)
	}
	crateLen := binary.LittleEndian.Uint32(crateLenBuf[:])
	crateReader := io.LimitReader(fileReader, int64(crateLen))
	metadata.Yanked = false // Ensure Yanked is false for new uploads
	return metadata, io.NopCloser(crateReader), nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/cargo/regenerate_package_index.go | registry/app/api/handler/cargo/regenerate_package_index.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cargo
import (
"encoding/json"
"fmt"
"net/http"
cargotype "github.com/harness/gitness/registry/app/pkg/types/cargo"
"github.com/harness/gitness/registry/request"
)
// RegeneratePackageIndex triggers a rebuild of the registry index for the
// cargo package identified by the artifact info in the request context, then
// writes the controller response as JSON.
func (h *handler) RegeneratePackageIndex(
	w http.ResponseWriter, r *http.Request,
) {
	ctx := r.Context()
	info, ok := request.ArtifactInfoFrom(ctx).(*cargotype.ArtifactInfo)
	if !ok {
		h.handleCargoPackageAPIError(w, r, fmt.Errorf("failed to fetch info from context"))
		return
	}
	response, err := h.controller.RegeneratePackageIndex(ctx, info)
	if err != nil {
		// Wrap the underlying error so the real cause is surfaced instead of
		// a generic message; matches the style of the sibling yank handlers.
		h.handleCargoPackageAPIError(w, r, fmt.Errorf("failed to regenerate package index: %w", err))
		return
	}
	response.ResponseHeaders.WriteToResponse(w)
	err = json.NewEncoder(w).Encode(response)
	if err != nil {
		h.handleCargoPackageAPIError(w, r,
			fmt.Errorf("error occurred during sending response for regenerate package index for cargo package: %w", err),
		)
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/cargo/handler.go | registry/app/api/handler/cargo/handler.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cargo
import (
"net/http"
cargo "github.com/harness/gitness/registry/app/api/controller/pkg/cargo"
"github.com/harness/gitness/registry/app/api/handler/packages"
"github.com/harness/gitness/registry/app/pkg"
cargotype "github.com/harness/gitness/registry/app/pkg/types/cargo"
)
// Handler exposes the cargo registry HTTP API: registry configuration,
// index download/regeneration, package download/upload, search, and
// yank/unyank version state changes. Request routing metadata is provided
// through the embedded pkg.ArtifactInfoProvider.
type Handler interface {
	pkg.ArtifactInfoProvider
	GetRegistryConfig(writer http.ResponseWriter, request *http.Request)
	DownloadPackageIndex(writer http.ResponseWriter, request *http.Request)
	RegeneratePackageIndex(writer http.ResponseWriter, request *http.Request)
	DownloadPackage(writer http.ResponseWriter, request *http.Request)
	SearchPackage(writer http.ResponseWriter, request *http.Request)
	UploadPackage(writer http.ResponseWriter, request *http.Request)
	YankVersion(writer http.ResponseWriter, request *http.Request)
	UnYankVersion(writer http.ResponseWriter, request *http.Request)
}
// handler implements Handler by delegating generic package behavior
// (error handling, artifact-info extraction) to the embedded
// packages.Handler and cargo-specific operations to the controller.
type handler struct {
	packages.Handler
	controller cargo.Controller
}
// NewHandler wires a cargo controller together with the shared packages
// handler and returns the cargo-specific HTTP handler.
func NewHandler(
	controller cargo.Controller,
	packageHandler packages.Handler,
) Handler {
	h := &handler{
		Handler:    packageHandler,
		controller: controller,
	}
	return h
}
// Compile-time assertion that *handler satisfies the Handler interface.
var _ Handler = (*handler)(nil)
// GetPackageArtifactInfo builds the cargo artifact info for a request,
// filling the image name and version from the URL path values
// ("name" and "version").
func (h *handler) GetPackageArtifactInfo(r *http.Request) (pkg.PackageArtifactInfo, error) {
	base, err := h.Handler.GetArtifactInfo(r)
	if err != nil {
		return nil, err
	}
	base.Image = r.PathValue("name")
	result := &cargotype.ArtifactInfo{
		ArtifactInfo: base,
		Version:      r.PathValue("version"),
	}
	return result, nil
}
// handleCargoPackageAPIError funnels a single error through the shared
// package error-handling pipeline for cargo API endpoints.
func (h *handler) handleCargoPackageAPIError(writer http.ResponseWriter, request *http.Request, err error) {
	h.HandleErrors(request.Context(), []error{err}, writer)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/cargo/un_yank_version_test.go | registry/app/api/handler/cargo/un_yank_version_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cargo_test
import (
"context"
"encoding/json"
"errors"
"net/http"
"net/http/httptest"
"testing"
cargo "github.com/harness/gitness/registry/app/api/controller/pkg/cargo"
cargopkg "github.com/harness/gitness/registry/app/api/handler/cargo"
"github.com/harness/gitness/registry/app/pkg/commons"
cargotype "github.com/harness/gitness/registry/app/pkg/types/cargo"
"github.com/harness/gitness/registry/request"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
// TestUnYankVersion_Success verifies the happy path: the handler resolves
// artifact info from the request context, calls UpdateYank with yanked=false,
// and writes the controller response as JSON with HTTP 200.
func TestUnYankVersion_Success(t *testing.T) {
	// Arrange
	resp := &cargo.UpdateYankResponse{
		BaseResponse: cargo.BaseResponse{
			Error: nil,
			ResponseHeaders: &commons.ResponseHeaders{
				Headers: map[string]string{},
				Code:    http.StatusOK,
			},
		},
		Ok: true,
	}
	mockCtrl := new(mockController)
	handler := cargopkg.NewHandler(mockCtrl, &fakePackagesHandler{})
	info := &cargotype.ArtifactInfo{
		FileName: "test-crate",
		Version:  "1.0.0",
	}
	ctx := request.WithArtifactInfo(context.Background(), info)
	// UnYank must call UpdateYank with yanked=false.
	mockCtrl.On("UpdateYank", ctx, info, false).Return(resp, nil)
	req := httptest.NewRequest(http.MethodPut, "/cargo/unyank", nil).WithContext(ctx)
	w := httptest.NewRecorder()
	// Act
	handler.UnYankVersion(w, req)
	// Assert
	result := w.Result()
	defer func() { _ = result.Body.Close() }()
	require.Equal(t, http.StatusOK, result.StatusCode)
	require.Equal(t, "application/json", result.Header.Get("Content-Type"))
	var response cargo.UpdateYankResponse
	err := json.NewDecoder(result.Body).Decode(&response)
	require.NoError(t, err)
	require.True(t, response.Ok)
	mockCtrl.AssertExpectations(t)
}
// TestUnYankVersion_InvalidArtifactInfo verifies that a context without a
// cargo ArtifactInfo results in the "failed to fetch info from context"
// error being routed through HandleErrors (observed as HTTP 500).
func TestUnYankVersion_InvalidArtifactInfo(t *testing.T) {
	// Arrange
	mockCtrl := new(mockController)
	mockPkgHandler := &fakePackagesHandler{}
	handler := cargopkg.NewHandler(mockCtrl, mockPkgHandler)
	// Context without proper artifact info
	ctx := context.Background()
	mockPkgHandler.On("HandleErrors", ctx, mock.MatchedBy(func(errs []error) bool {
		return len(errs) == 1 && errs[0].Error() == failedToFetchInfoFromContext
	}), mock.Anything)
	req := httptest.NewRequest(http.MethodPut, "/cargo/unyank", nil).WithContext(ctx)
	w := httptest.NewRecorder()
	// Act
	handler.UnYankVersion(w, req)
	// Assert
	result := w.Result()
	defer func() { _ = result.Body.Close() }()
	require.Equal(t, http.StatusInternalServerError, result.StatusCode)
	mockPkgHandler.AssertExpectations(t)
}
// TestUnYankVersion_ControllerError verifies that an error returned by the
// controller's UpdateYank is wrapped as "failed to unyank version: ..." and
// routed through HandleErrors (observed as HTTP 500).
func TestUnYankVersion_ControllerError(t *testing.T) {
	// Arrange
	mockCtrl := new(mockController)
	mockPkgHandler := &fakePackagesHandler{}
	handler := cargopkg.NewHandler(mockCtrl, mockPkgHandler)
	info := &cargotype.ArtifactInfo{
		FileName: "test-crate",
		Version:  "1.0.0",
	}
	ctx := request.WithArtifactInfo(context.Background(), info)
	expectedError := errors.New("controller error")
	mockCtrl.On("UpdateYank", ctx, info, false).Return(nil, expectedError)
	// The handler is expected to wrap the controller error before reporting.
	mockPkgHandler.On("HandleErrors", ctx, mock.MatchedBy(func(errs []error) bool {
		return len(errs) == 1 && errs[0].Error() == "failed to unyank version: controller error"
	}), mock.Anything)
	req := httptest.NewRequest(http.MethodPut, "/cargo/unyank", nil).WithContext(ctx)
	w := httptest.NewRecorder()
	// Act
	handler.UnYankVersion(w, req)
	// Assert
	result := w.Result()
	defer func() { _ = result.Body.Close() }()
	require.Equal(t, http.StatusInternalServerError, result.StatusCode)
	mockCtrl.AssertExpectations(t)
	mockPkgHandler.AssertExpectations(t)
}
// TestUnYankVersion_JSONEncodingError verifies that when writing the JSON
// body fails, the error is reported via HandleErrors, while the already
// committed 200 status is left untouched.
func TestUnYankVersion_JSONEncodingError(t *testing.T) {
	// Arrange
	resp := &cargo.UpdateYankResponse{
		BaseResponse: cargo.BaseResponse{
			Error: nil,
			ResponseHeaders: &commons.ResponseHeaders{
				Headers: map[string]string{},
				Code:    http.StatusOK,
			},
		},
		Ok: true,
	}
	mockCtrl := new(mockController)
	mockPkgHandler := &fakePackagesHandler{}
	handler := cargopkg.NewHandler(mockCtrl, mockPkgHandler)
	info := &cargotype.ArtifactInfo{
		FileName: "test-crate",
		Version:  "1.0.0",
	}
	ctx := request.WithArtifactInfo(context.Background(), info)
	mockCtrl.On("UpdateYank", ctx, info, false).Return(resp, nil)
	// Only the error count is asserted; the exact encode error text is not
	// part of the handler's contract.
	mockPkgHandler.On("HandleErrors", ctx, mock.MatchedBy(func(errs []error) bool {
		return len(errs) == 1
	}), mock.Anything)
	req := httptest.NewRequest(http.MethodPut, "/cargo/unyank", nil).WithContext(ctx)
	// Create a ResponseWriter that fails on Write to simulate JSON encoding error
	w := &failingResponseWriter{
		ResponseRecorder: httptest.NewRecorder(),
		shouldFail:       true,
	}
	// Act
	handler.UnYankVersion(w, req)
	// Assert - JSON encoding error is handled but status code is already written
	require.Equal(t, http.StatusOK, w.Code)
	mockCtrl.AssertExpectations(t)
	mockPkgHandler.AssertExpectations(t)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/cargo/yank_version.go | registry/app/api/handler/cargo/yank_version.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cargo
import (
"encoding/json"
"fmt"
"net/http"
cargotype "github.com/harness/gitness/registry/app/pkg/types/cargo"
"github.com/harness/gitness/registry/request"
)
// YankVersion marks the crate version identified by the request context as
// yanked via the controller and returns the controller's result as JSON.
func (h *handler) YankVersion(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	info, ok := request.ArtifactInfoFrom(ctx).(*cargotype.ArtifactInfo)
	if !ok {
		h.handleCargoPackageAPIError(w, r, fmt.Errorf("failed to fetch info from context"))
		return
	}
	response, err := h.controller.UpdateYank(ctx, info, true)
	if err != nil {
		h.handleCargoPackageAPIError(w, r, fmt.Errorf("failed to yank version: %w", err))
		return
	}
	// Final response. Guard against a nil header map from the controller:
	// assigning into a nil map would panic.
	if response.ResponseHeaders.Headers == nil {
		response.ResponseHeaders.Headers = map[string]string{}
	}
	response.ResponseHeaders.Headers["Content-Type"] = "application/json"
	response.ResponseHeaders.WriteToResponse(w)
	err = json.NewEncoder(w).Encode(response)
	if err != nil {
		h.handleCargoPackageAPIError(w, r,
			fmt.Errorf("error occurred during sending response for yank version for cargo package: %w", err),
		)
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/cargo/download_package.go | registry/app/api/handler/cargo/download_package.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cargo
import (
"fmt"
"net/http"
"github.com/harness/gitness/registry/app/pkg/commons"
cargotype "github.com/harness/gitness/registry/app/pkg/types/cargo"
"github.com/harness/gitness/registry/request"
"github.com/rs/zerolog/log"
)
// DownloadPackage serves the crate file for the artifact identified in the
// request context.
//
// Response shapes handled, in order:
//   - controller returned nil     -> generic error via HandleErrors
//   - response carries an error   -> delegated to HandleError
//   - non-empty RedirectURL       -> 307 redirect
//   - otherwise                   -> content streamed via commons.ServeContent
func (h *handler) DownloadPackage(
	w http.ResponseWriter, r *http.Request,
) {
	ctx := r.Context()
	info, ok := request.ArtifactInfoFrom(ctx).(*cargotype.ArtifactInfo)
	if !ok {
		h.handleCargoPackageAPIError(w, r, fmt.Errorf("failed to fetch info from context"))
		return
	}
	response := h.controller.DownloadPackage(ctx, info)
	if response == nil {
		h.HandleErrors(ctx, []error{fmt.Errorf("failed to get response from controller")}, w)
		return
	}
	// Always release both possible content streams, whichever path below is
	// taken; close failures are logged but not surfaced to the client.
	defer func() {
		if response.Body != nil {
			err := response.Body.Close()
			if err != nil {
				log.Ctx(ctx).Error().Msgf("Failed to close body: %v", err)
			}
		}
		if response.ReadCloser != nil {
			err := response.ReadCloser.Close()
			if err != nil {
				log.Ctx(ctx).Error().Msgf("Failed to close read closer: %v", err)
			}
		}
	}()
	if response.GetError() != nil {
		h.HandleError(ctx, w, response.GetError())
		return
	}
	if response.RedirectURL != "" {
		http.Redirect(w, r, response.RedirectURL, http.StatusTemporaryRedirect)
		return
	}
	err := commons.ServeContent(w, r, response.Body, info.FileName, response.ReadCloser)
	if err != nil {
		log.Ctx(ctx).Error().Msgf("Failed to serve content: %v", err)
		h.HandleError(ctx, w, err)
		return
	}
	// NOTE(review): headers are written after ServeContent has already begun
	// the response, so net/http likely ignores them here — confirm intent.
	response.ResponseHeaders.WriteToResponse(w)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/handler/cargo/registry_config_test.go | registry/app/api/handler/cargo/registry_config_test.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cargo_test
import (
"context"
"encoding/json"
"errors"
"net/http"
"net/http/httptest"
"testing"
cargo "github.com/harness/gitness/registry/app/api/controller/pkg/cargo"
cargopkg "github.com/harness/gitness/registry/app/api/handler/cargo"
cargometadata "github.com/harness/gitness/registry/app/metadata/cargo"
"github.com/harness/gitness/registry/app/pkg/commons"
cargotype "github.com/harness/gitness/registry/app/pkg/types/cargo"
"github.com/harness/gitness/registry/request"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
// TestGetRegistryConfig_Success verifies the happy path: the handler fetches
// the registry config from the controller and writes it as a JSON body with
// HTTP 200 and a "text/json; charset=utf-8" content type.
func TestGetRegistryConfig_Success(t *testing.T) {
	// Arrange
	config := &cargometadata.RegistryConfig{
		DownloadURL:  "https://example.com/api/v1/crates",
		APIURL:       "https://example.com/",
		AuthRequired: true,
	}
	resp := &cargo.GetRegistryConfigResponse{
		BaseResponse: cargo.BaseResponse{
			Error: nil,
			ResponseHeaders: &commons.ResponseHeaders{
				Headers: map[string]string{},
				Code:    http.StatusOK,
			},
		},
		Config: config,
	}
	mockCtrl := new(mockController)
	handler := cargopkg.NewHandler(mockCtrl, &fakePackagesHandler{})
	info := &cargotype.ArtifactInfo{FileName: "config.json"}
	ctx := request.WithArtifactInfo(context.Background(), info)
	mockCtrl.On("GetRegistryConfig", ctx, info).Return(resp, nil)
	req := httptest.NewRequest(http.MethodGet, "/cargo/config.json", nil).WithContext(ctx)
	w := httptest.NewRecorder()
	// Act
	handler.GetRegistryConfig(w, req)
	// Assert
	result := w.Result()
	defer func() { _ = result.Body.Close() }()
	require.Equal(t, http.StatusOK, result.StatusCode)
	require.Equal(t, "text/json; charset=utf-8", result.Header.Get("Content-Type"))
	// The body is the bare RegistryConfig, not the wrapping response struct.
	var responseConfig cargometadata.RegistryConfig
	err := json.NewDecoder(result.Body).Decode(&responseConfig)
	require.NoError(t, err)
	require.Equal(t, config.DownloadURL, responseConfig.DownloadURL)
	require.Equal(t, config.APIURL, responseConfig.APIURL)
	require.Equal(t, config.AuthRequired, responseConfig.AuthRequired)
	mockCtrl.AssertExpectations(t)
}
// TestGetRegistryConfig_InvalidArtifactInfo verifies that a context without a
// cargo ArtifactInfo results in the "failed to fetch info from context" error
// being routed through HandleErrors (observed as HTTP 500).
func TestGetRegistryConfig_InvalidArtifactInfo(t *testing.T) {
	// Arrange
	mockCtrl := new(mockController)
	mockPkgHandler := &fakePackagesHandler{}
	handler := cargopkg.NewHandler(mockCtrl, mockPkgHandler)
	// Context without proper artifact info
	ctx := context.Background()
	mockPkgHandler.On("HandleErrors", ctx, mock.MatchedBy(func(errs []error) bool {
		return len(errs) == 1 && errs[0].Error() == failedToFetchInfoFromContext
	}), mock.Anything)
	req := httptest.NewRequest(http.MethodGet, "/cargo/config.json", nil).WithContext(ctx)
	w := httptest.NewRecorder()
	// Act
	handler.GetRegistryConfig(w, req)
	// Assert
	result := w.Result()
	defer func() { _ = result.Body.Close() }()
	require.Equal(t, http.StatusInternalServerError, result.StatusCode)
	mockPkgHandler.AssertExpectations(t)
}
// TestGetRegistryConfig_ControllerError verifies that an error returned by
// the controller's GetRegistryConfig is reported as
// "failed to fetch registry config ..." via HandleErrors (HTTP 500).
func TestGetRegistryConfig_ControllerError(t *testing.T) {
	// Arrange
	mockCtrl := new(mockController)
	mockPkgHandler := &fakePackagesHandler{}
	handler := cargopkg.NewHandler(mockCtrl, mockPkgHandler)
	info := &cargotype.ArtifactInfo{FileName: "config.json"}
	ctx := request.WithArtifactInfo(context.Background(), info)
	expectedError := errors.New("controller error")
	mockCtrl.On("GetRegistryConfig", ctx, info).Return(nil, expectedError)
	// NOTE(review): the expected message has no separator between the prefix
	// and the wrapped error ("... config controller error") — matches the
	// handler's current formatting; confirm this is intentional.
	mockPkgHandler.On("HandleErrors", ctx, mock.MatchedBy(func(errs []error) bool {
		return len(errs) == 1 && errs[0].Error() == "failed to fetch registry config controller error"
	}), mock.Anything)
	req := httptest.NewRequest(http.MethodGet, "/cargo/config.json", nil).WithContext(ctx)
	w := httptest.NewRecorder()
	// Act
	handler.GetRegistryConfig(w, req)
	// Assert
	result := w.Result()
	defer func() { _ = result.Body.Close() }()
	require.Equal(t, http.StatusInternalServerError, result.StatusCode)
	mockCtrl.AssertExpectations(t)
	mockPkgHandler.AssertExpectations(t)
}
// TestGetRegistryConfig_JSONEncodingError verifies that a failure while
// writing the JSON body is reported via HandleErrors and surfaces as
// HTTP 500 (unlike the unyank handler, headers are not committed first).
func TestGetRegistryConfig_JSONEncodingError(t *testing.T) {
	// Arrange
	config := &cargometadata.RegistryConfig{
		DownloadURL:  "https://example.com/api/v1/crates",
		APIURL:       "https://example.com/",
		AuthRequired: true,
	}
	resp := &cargo.GetRegistryConfigResponse{
		BaseResponse: cargo.BaseResponse{
			Error: nil,
			ResponseHeaders: &commons.ResponseHeaders{
				Headers: map[string]string{},
				Code:    http.StatusOK,
			},
		},
		Config: config,
	}
	mockCtrl := new(mockController)
	mockPkgHandler := &fakePackagesHandler{}
	handler := cargopkg.NewHandler(mockCtrl, mockPkgHandler)
	info := &cargotype.ArtifactInfo{FileName: "config.json"}
	ctx := request.WithArtifactInfo(context.Background(), info)
	mockCtrl.On("GetRegistryConfig", ctx, info).Return(resp, nil)
	// Only the error count is asserted; the exact encode error text is not
	// part of the handler's contract.
	mockPkgHandler.On("HandleErrors", ctx, mock.MatchedBy(func(errs []error) bool {
		return len(errs) == 1
	}), mock.Anything)
	req := httptest.NewRequest(http.MethodGet, "/cargo/config.json", nil).WithContext(ctx)
	// Create a ResponseWriter that fails on Write to simulate JSON encoding error
	w := &failingResponseWriter{
		ResponseRecorder: httptest.NewRecorder(),
		shouldFail:       true,
	}
	// Act
	handler.GetRegistryConfig(w, req)
	// Assert - JSON encoding error causes 500 status since headers were already written
	require.Equal(t, http.StatusInternalServerError, w.Code)
	mockCtrl.AssertExpectations(t)
	mockPkgHandler.AssertExpectations(t)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/openapi/openapiutil.go | registry/app/api/openapi/openapiutil.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package openapi
import "embed"
// GitnessRegistryAPIYaml embeds the registry OpenAPI specification
// (api.yaml) into the binary so it can be served without filesystem access.
//
//go:embed api.yaml
var GitnessRegistryAPIYaml embed.FS
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/openapi/contracts/artifact/types.gen.go | registry/app/api/openapi/contracts/artifact/types.gen.go | // Package artifact provides primitives to interact with the openapi HTTP API.
//
// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.5.0 DO NOT EDIT.
package artifact
import (
"encoding/json"
"errors"
"fmt"
"github.com/oapi-codegen/runtime"
)
// Defines values for ArtifactType.
const (
ArtifactTypeDataset ArtifactType = "dataset"
ArtifactTypeModel ArtifactType = "model"
)
// Defines values for AuthType.
const (
AuthTypeAccessKeySecretKey AuthType = "AccessKeySecretKey"
AuthTypeAnonymous AuthType = "Anonymous"
AuthTypeUserPassword AuthType = "UserPassword"
)
// Defines values for ClientSetupStepType.
const (
ClientSetupStepTypeGenerateToken ClientSetupStepType = "GenerateToken"
ClientSetupStepTypeStatic ClientSetupStepType = "Static"
)
// Defines values for PackageType.
const (
PackageTypeCARGO PackageType = "CARGO"
PackageTypeDOCKER PackageType = "DOCKER"
PackageTypeGENERIC PackageType = "GENERIC"
PackageTypeGO PackageType = "GO"
PackageTypeHELM PackageType = "HELM"
PackageTypeHUGGINGFACE PackageType = "HUGGINGFACE"
PackageTypeMAVEN PackageType = "MAVEN"
PackageTypeNPM PackageType = "NPM"
PackageTypeNUGET PackageType = "NUGET"
PackageTypePYTHON PackageType = "PYTHON"
PackageTypeRPM PackageType = "RPM"
)
// Defines values for RegistryType.
const (
RegistryTypeUPSTREAM RegistryType = "UPSTREAM"
RegistryTypeVIRTUAL RegistryType = "VIRTUAL"
)
// Defines values for ReplicationRuleDestinationType.
const (
ReplicationRuleDestinationTypeGCP ReplicationRuleDestinationType = "GCP"
ReplicationRuleDestinationTypeJfrog ReplicationRuleDestinationType = "Jfrog"
ReplicationRuleDestinationTypeLocal ReplicationRuleDestinationType = "Local"
)
// Defines values for ReplicationRuleSourceType.
const (
ReplicationRuleSourceTypeGCP ReplicationRuleSourceType = "GCP"
ReplicationRuleSourceTypeJfrog ReplicationRuleSourceType = "Jfrog"
ReplicationRuleSourceTypeLocal ReplicationRuleSourceType = "Local"
)
// Defines values for ReplicationRuleRequestDestinationType.
const (
ReplicationRuleRequestDestinationTypeGCP ReplicationRuleRequestDestinationType = "GCP"
ReplicationRuleRequestDestinationTypeJfrog ReplicationRuleRequestDestinationType = "Jfrog"
ReplicationRuleRequestDestinationTypeLocal ReplicationRuleRequestDestinationType = "Local"
)
// Defines values for ReplicationRuleRequestSourceType.
const (
ReplicationRuleRequestSourceTypeGCP ReplicationRuleRequestSourceType = "GCP"
ReplicationRuleRequestSourceTypeJfrog ReplicationRuleRequestSourceType = "Jfrog"
ReplicationRuleRequestSourceTypeLocal ReplicationRuleRequestSourceType = "Local"
)
// Defines values for SectionType.
const (
SectionTypeINLINE SectionType = "INLINE"
SectionTypeTABS SectionType = "TABS"
)
// Defines values for Status.
const (
StatusERROR Status = "ERROR"
StatusFAILURE Status = "FAILURE"
StatusSUCCESS Status = "SUCCESS"
)
// Defines values for Trigger.
const (
TriggerARTIFACTCREATION Trigger = "ARTIFACT_CREATION"
TriggerARTIFACTDELETION Trigger = "ARTIFACT_DELETION"
)
// Defines values for UpstreamConfigSource.
const (
UpstreamConfigSourceAwsEcr UpstreamConfigSource = "AwsEcr"
UpstreamConfigSourceCrates UpstreamConfigSource = "Crates"
UpstreamConfigSourceCustom UpstreamConfigSource = "Custom"
UpstreamConfigSourceDockerhub UpstreamConfigSource = "Dockerhub"
UpstreamConfigSourceGoProxy UpstreamConfigSource = "GoProxy"
UpstreamConfigSourceHuggingFace UpstreamConfigSource = "HuggingFace"
UpstreamConfigSourceMavenCentral UpstreamConfigSource = "MavenCentral"
UpstreamConfigSourceNpmJs UpstreamConfigSource = "NpmJs"
UpstreamConfigSourceNugetOrg UpstreamConfigSource = "NugetOrg"
UpstreamConfigSourcePyPi UpstreamConfigSource = "PyPi"
)
// Defines values for WebhookExecResult.
const (
WebhookExecResultFATALERROR WebhookExecResult = "FATAL_ERROR"
WebhookExecResultRETRIABLEERROR WebhookExecResult = "RETRIABLE_ERROR"
WebhookExecResultSUCCESS WebhookExecResult = "SUCCESS"
)
// Defines values for RegistryTypeParam.
const (
UPSTREAM RegistryTypeParam = "UPSTREAM"
VIRTUAL RegistryTypeParam = "VIRTUAL"
)
// Defines values for ArtifactTypeParam.
const (
Dataset ArtifactTypeParam = "dataset"
Model ArtifactTypeParam = "model"
)
// Defines values for ScopeParam.
const (
Ancestors ScopeParam = "ancestors"
Descendants ScopeParam = "descendants"
None ScopeParam = "none"
)
// Defines values for VersionTypeParam.
const (
DIGEST VersionTypeParam = "DIGEST"
TAG VersionTypeParam = "TAG"
)
// Defines values for DeleteArtifactParamsArtifactType.
const (
DeleteArtifactParamsArtifactTypeDataset DeleteArtifactParamsArtifactType = "dataset"
DeleteArtifactParamsArtifactTypeModel DeleteArtifactParamsArtifactType = "model"
)
// Defines values for UpdateArtifactLabelsParamsArtifactType.
const (
UpdateArtifactLabelsParamsArtifactTypeDataset UpdateArtifactLabelsParamsArtifactType = "dataset"
UpdateArtifactLabelsParamsArtifactTypeModel UpdateArtifactLabelsParamsArtifactType = "model"
)
// Defines values for GetArtifactSummaryParamsArtifactType.
const (
GetArtifactSummaryParamsArtifactTypeDataset GetArtifactSummaryParamsArtifactType = "dataset"
GetArtifactSummaryParamsArtifactTypeModel GetArtifactSummaryParamsArtifactType = "model"
)
// Defines values for DeleteArtifactVersionParamsArtifactType.
const (
DeleteArtifactVersionParamsArtifactTypeDataset DeleteArtifactVersionParamsArtifactType = "dataset"
DeleteArtifactVersionParamsArtifactTypeModel DeleteArtifactVersionParamsArtifactType = "model"
)
// Defines values for GetArtifactDetailsParamsArtifactType.
const (
GetArtifactDetailsParamsArtifactTypeDataset GetArtifactDetailsParamsArtifactType = "dataset"
GetArtifactDetailsParamsArtifactTypeModel GetArtifactDetailsParamsArtifactType = "model"
)
// Defines values for GetDockerArtifactDetailsParamsVersionType.
const (
GetDockerArtifactDetailsParamsVersionTypeDIGEST GetDockerArtifactDetailsParamsVersionType = "DIGEST"
GetDockerArtifactDetailsParamsVersionTypeTAG GetDockerArtifactDetailsParamsVersionType = "TAG"
)
// Defines values for GetDockerArtifactManifestsParamsVersionType.
const (
GetDockerArtifactManifestsParamsVersionTypeDIGEST GetDockerArtifactManifestsParamsVersionType = "DIGEST"
GetDockerArtifactManifestsParamsVersionTypeTAG GetDockerArtifactManifestsParamsVersionType = "TAG"
)
// Defines values for GetArtifactFileParamsArtifactType.
const (
GetArtifactFileParamsArtifactTypeDataset GetArtifactFileParamsArtifactType = "dataset"
GetArtifactFileParamsArtifactTypeModel GetArtifactFileParamsArtifactType = "model"
)
// Defines values for GetArtifactFilesParamsArtifactType.
const (
GetArtifactFilesParamsArtifactTypeDataset GetArtifactFilesParamsArtifactType = "dataset"
GetArtifactFilesParamsArtifactTypeModel GetArtifactFilesParamsArtifactType = "model"
)
// Defines values for GetHelmArtifactDetailsParamsVersionType.
const (
GetHelmArtifactDetailsParamsVersionTypeDIGEST GetHelmArtifactDetailsParamsVersionType = "DIGEST"
GetHelmArtifactDetailsParamsVersionTypeTAG GetHelmArtifactDetailsParamsVersionType = "TAG"
)
// Defines values for GetArtifactVersionSummaryParamsArtifactType.
const (
GetArtifactVersionSummaryParamsArtifactTypeDataset GetArtifactVersionSummaryParamsArtifactType = "dataset"
GetArtifactVersionSummaryParamsArtifactTypeModel GetArtifactVersionSummaryParamsArtifactType = "model"
)
// Defines values for GetAllArtifactVersionsParamsArtifactType.
const (
GetAllArtifactVersionsParamsArtifactTypeDataset GetAllArtifactVersionsParamsArtifactType = "dataset"
GetAllArtifactVersionsParamsArtifactTypeModel GetAllArtifactVersionsParamsArtifactType = "model"
)
// Defines values for GetAllArtifactsByRegistryParamsArtifactType.
const (
GetAllArtifactsByRegistryParamsArtifactTypeDataset GetAllArtifactsByRegistryParamsArtifactType = "dataset"
GetAllArtifactsByRegistryParamsArtifactTypeModel GetAllArtifactsByRegistryParamsArtifactType = "model"
)
// Defines values for DeleteQuarantineFilePathParamsArtifactType.
const (
DeleteQuarantineFilePathParamsArtifactTypeDataset DeleteQuarantineFilePathParamsArtifactType = "dataset"
DeleteQuarantineFilePathParamsArtifactTypeModel DeleteQuarantineFilePathParamsArtifactType = "model"
)
// Defines values for GetAllRegistriesParamsType.
const (
GetAllRegistriesParamsTypeUPSTREAM GetAllRegistriesParamsType = "UPSTREAM"
GetAllRegistriesParamsTypeVIRTUAL GetAllRegistriesParamsType = "VIRTUAL"
)
// Defines values for GetAllRegistriesParamsScope.
const (
GetAllRegistriesParamsScopeAncestors GetAllRegistriesParamsScope = "ancestors"
GetAllRegistriesParamsScopeDescendants GetAllRegistriesParamsScope = "descendants"
GetAllRegistriesParamsScopeNone GetAllRegistriesParamsScope = "none"
)
// AccessKeySecretKey defines model for AccessKeySecretKey.
type AccessKeySecretKey struct {
AccessKey *string `json:"accessKey,omitempty"`
AccessKeySecretIdentifier *string `json:"accessKeySecretIdentifier,omitempty"`
AccessKeySecretSpaceId *int64 `json:"accessKeySecretSpaceId,omitempty"`
AccessKeySecretSpacePath *string `json:"accessKeySecretSpacePath,omitempty"`
SecretKeyIdentifier string `json:"secretKeyIdentifier"`
SecretKeySpaceId *int64 `json:"secretKeySpaceId,omitempty"`
SecretKeySpacePath *string `json:"secretKeySpacePath,omitempty"`
}
// Anonymous defines model for Anonymous.
type Anonymous interface{}
// ArtifactDetail Artifact Detail
type ArtifactDetail struct {
// ArtifactType refers to artifact type
ArtifactType *ArtifactType `json:"artifactType,omitempty"`
CreatedAt *string `json:"createdAt,omitempty"`
CreatedBy *string `json:"createdBy,omitempty"`
DownloadCount *int64 `json:"downloadCount,omitempty"`
IsQuarantined *bool `json:"isQuarantined,omitempty"`
ModifiedAt *string `json:"modifiedAt,omitempty"`
Name *string `json:"name,omitempty"`
// PackageType refers to package
PackageType PackageType `json:"packageType"`
QuarantineReason *string `json:"quarantineReason,omitempty"`
Size *string `json:"size,omitempty"`
Version string `json:"version"`
union json.RawMessage
}
// ArtifactEntityMetadata Artifact Entity Metadata
type ArtifactEntityMetadata map[string]interface{}
// ArtifactLabelRequest defines model for ArtifactLabelRequest.
type ArtifactLabelRequest struct {
Labels []string `json:"labels"`
}
// ArtifactMetadata Artifact Metadata
type ArtifactMetadata struct {
// ArtifactType refers to artifact type
ArtifactType *ArtifactType `json:"artifactType,omitempty"`
DownloadsCount *int64 `json:"downloadsCount,omitempty"`
IsQuarantined *bool `json:"isQuarantined,omitempty"`
Labels *[]string `json:"labels,omitempty"`
LastModified *string `json:"lastModified,omitempty"`
// Metadata Artifact Entity Metadata
Metadata *ArtifactEntityMetadata `json:"metadata,omitempty"`
Name string `json:"name"`
// PackageType refers to package
PackageType PackageType `json:"packageType"`
PullCommand *string `json:"pullCommand,omitempty"`
QuarantineReason *string `json:"quarantineReason,omitempty"`
RegistryIdentifier string `json:"registryIdentifier"`
RegistryPath string `json:"registryPath"`
RegistryUUID string `json:"registryUUID"`
Uuid string `json:"uuid"`
Version *string `json:"version,omitempty"`
}
// ArtifactStats Harness Artifact Stats
type ArtifactStats struct {
DownloadCount *int64 `json:"downloadCount,omitempty"`
DownloadSize *int64 `json:"downloadSize,omitempty"`
TotalStorageSize *int64 `json:"totalStorageSize,omitempty"`
UploadSize *int64 `json:"uploadSize,omitempty"`
}
// ArtifactSummary Harness Artifact Summary
type ArtifactSummary struct {
// ArtifactType refers to artifact type
ArtifactType *ArtifactType `json:"artifactType,omitempty"`
CreatedAt *string `json:"createdAt,omitempty"`
DownloadsCount *int64 `json:"downloadsCount,omitempty"`
ImageName string `json:"imageName"`
Labels *[]string `json:"labels,omitempty"`
ModifiedAt *string `json:"modifiedAt,omitempty"`
// PackageType refers to package
PackageType PackageType `json:"packageType"`
RegistryUUID string `json:"registryUUID"`
Uuid string `json:"uuid"`
}
// ArtifactType refers to artifact type
type ArtifactType string
// ArtifactVersionMetadata Artifact Version Metadata
type ArtifactVersionMetadata struct {
// ArtifactType refers to artifact type
ArtifactType *ArtifactType `json:"artifactType,omitempty"`
DigestCount *int `json:"digestCount,omitempty"`
DownloadsCount *int64 `json:"downloadsCount,omitempty"`
FileCount *int64 `json:"fileCount,omitempty"`
IsQuarantined *bool `json:"isQuarantined,omitempty"`
LastModified *string `json:"lastModified,omitempty"`
// Metadata Artifact Entity Metadata
Metadata *ArtifactEntityMetadata `json:"metadata,omitempty"`
Name string `json:"name"`
// PackageType refers to package
PackageType PackageType `json:"packageType"`
PullCommand *string `json:"pullCommand,omitempty"`
QuarantineReason *string `json:"quarantineReason,omitempty"`
RegistryIdentifier string `json:"registryIdentifier"`
RegistryPath string `json:"registryPath"`
RegistryUUID string `json:"registryUUID"`
Size *string `json:"size,omitempty"`
Uuid string `json:"uuid"`
}
// ArtifactVersionSummary Docker Artifact Version Summary
type ArtifactVersionSummary struct {
// ArtifactType refers to artifact type
ArtifactType *ArtifactType `json:"artifactType,omitempty"`
ImageName string `json:"imageName"`
IsQuarantined *bool `json:"isQuarantined,omitempty"`
// PackageType refers to package
PackageType PackageType `json:"packageType"`
QuarantineReason *string `json:"quarantineReason,omitempty"`
RegistryUUID string `json:"registryUUID"`
Uuid string `json:"uuid"`
Version string `json:"version"`
}
// AuthType Authentication type
type AuthType string
// CargoArtifactDetailConfig Config for Cargo artifact details
type CargoArtifactDetailConfig struct {
Metadata *map[string]interface{} `json:"metadata,omitempty"`
}
// CleanupPolicy Cleanup Policy for Harness Artifact Registries
type CleanupPolicy struct {
ExpireDays *int `json:"expireDays,omitempty"`
Name *string `json:"name,omitempty"`
PackagePrefix *[]string `json:"packagePrefix,omitempty"`
VersionPrefix *[]string `json:"versionPrefix,omitempty"`
}
// ClientSetupDetails Client Setup Details
type ClientSetupDetails struct {
MainHeader string `json:"mainHeader"`
SecHeader string `json:"secHeader"`
Sections []ClientSetupSection `json:"sections"`
}
// ClientSetupSection Client Setup Section
type ClientSetupSection struct {
Header *string `json:"header,omitempty"`
SecHeader *string `json:"secHeader,omitempty"`
// Type refers to client setup section type
Type SectionType `json:"type"`
union json.RawMessage
}
// ClientSetupStep Client Setup Step
type ClientSetupStep struct {
Commands *[]ClientSetupStepCommand `json:"commands,omitempty"`
Header *string `json:"header,omitempty"`
// Type ClientSetupStepType type
Type *ClientSetupStepType `json:"type,omitempty"`
}
// ClientSetupStepCommand Client Setup Step Command
type ClientSetupStepCommand struct {
Label *string `json:"label,omitempty"`
Value *string `json:"value,omitempty"`
}
// ClientSetupStepConfig Client Setup Step
type ClientSetupStepConfig struct {
Steps *[]ClientSetupStep `json:"steps,omitempty"`
}
// ClientSetupStepType ClientSetupStepType type
type ClientSetupStepType string
// DockerArtifactDetail Docker Artifact Detail
type DockerArtifactDetail struct {
CreatedAt *string `json:"createdAt,omitempty"`
DownloadsCount *int64 `json:"downloadsCount,omitempty"`
ImageName string `json:"imageName"`
IsQuarantined *bool `json:"isQuarantined,omitempty"`
// Metadata Artifact Entity Metadata
Metadata *ArtifactEntityMetadata `json:"metadata,omitempty"`
ModifiedAt *string `json:"modifiedAt,omitempty"`
// PackageType refers to package
PackageType PackageType `json:"packageType"`
PullCommand *string `json:"pullCommand,omitempty"`
PullCommandByDigest *string `json:"pullCommandByDigest,omitempty"`
QuarantineReason *string `json:"quarantineReason,omitempty"`
RegistryPath string `json:"registryPath"`
Size *string `json:"size,omitempty"`
Url string `json:"url"`
Version string `json:"version"`
}
// DockerArtifactDetailConfig Config for docker artifact details
type DockerArtifactDetailConfig struct {
PullCommand *string `json:"pullCommand,omitempty"`
}
// DockerArtifactManifest Docker Artifact Manifest
type DockerArtifactManifest struct {
Manifest string `json:"manifest"`
}
// DockerLayerEntry Harness Artifact Layers
type DockerLayerEntry struct {
Command string `json:"command"`
Size *string `json:"size,omitempty"`
}
// DockerLayersSummary Harness Layers Summary
type DockerLayersSummary struct {
Digest string `json:"digest"`
Layers *[]DockerLayerEntry `json:"layers,omitempty"`
OsArch *string `json:"osArch,omitempty"`
}
// DockerManifestDetails Harness Artifact Layers
type DockerManifestDetails struct {
CreatedAt *string `json:"createdAt,omitempty"`
Digest string `json:"digest"`
DownloadsCount *int64 `json:"downloadsCount,omitempty"`
IsQuarantined *bool `json:"isQuarantined,omitempty"`
OsArch string `json:"osArch"`
QuarantineReason *string `json:"quarantineReason,omitempty"`
Size *string `json:"size,omitempty"`
}
// DockerManifests Harness Manifests
type DockerManifests struct {
ImageName string `json:"imageName"`
Manifests *[]DockerManifestDetails `json:"manifests,omitempty"`
Version string `json:"version"`
}
// Error defines model for Error.
type Error struct {
// Code The http error code
Code string `json:"code"`
// Details Additional details about the error
Details *map[string]interface{} `json:"details,omitempty"`
// Message The reason the request failed
Message string `json:"message"`
}
// ExtraHeader Webhook Extra Header
type ExtraHeader struct {
Key string `json:"key"`
Masked *bool `json:"masked,omitempty"`
Value string `json:"value"`
}
// FileDetail File Detail
type FileDetail struct {
Checksums []string `json:"checksums"`
CreatedAt string `json:"createdAt"`
DownloadCommand string `json:"downloadCommand"`
Name string `json:"name"`
Path string `json:"path"`
Size string `json:"size"`
}
// GenericArtifactDetailConfig Config for generic artifact details
type GenericArtifactDetailConfig struct {
Description *string `json:"description,omitempty"`
}
// GoArtifactDetailConfig Config for Go artifact details
type GoArtifactDetailConfig struct {
Metadata *map[string]interface{} `json:"metadata,omitempty"`
}
// HelmArtifactDetail Helm Artifact Detail
type HelmArtifactDetail struct {
Artifact *string `json:"artifact,omitempty"`
CreatedAt *string `json:"createdAt,omitempty"`
DownloadsCount *int64 `json:"downloadsCount,omitempty"`
// Metadata Artifact Entity Metadata
Metadata *ArtifactEntityMetadata `json:"metadata,omitempty"`
ModifiedAt *string `json:"modifiedAt,omitempty"`
// PackageType refers to package
PackageType PackageType `json:"packageType"`
PullCommand *string `json:"pullCommand,omitempty"`
PullCommandByDigest *string `json:"pullCommandByDigest,omitempty"`
RegistryPath string `json:"registryPath"`
Size *string `json:"size,omitempty"`
Url string `json:"url"`
Version string `json:"version"`
}
// HelmArtifactDetailConfig Config for helm artifact details
type HelmArtifactDetailConfig struct {
PullCommand *string `json:"pullCommand,omitempty"`
}
// HelmArtifactManifest Helm Artifact Manifest
type HelmArtifactManifest struct {
Manifest string `json:"manifest"`
}
// HuggingFaceArtifactDetailConfig Config for huggingface artifact details
type HuggingFaceArtifactDetailConfig struct {
Metadata *map[string]interface{} `json:"metadata,omitempty"`
}
// JfrogReplicationRegistry defines model for JfrogReplicationRegistry.
type JfrogReplicationRegistry struct {
Namespace string `json:"namespace"`
PasswordSecretId *string `json:"passwordSecretId,omitempty"`
PasswordSecretSpaceId *string `json:"passwordSecretSpaceId,omitempty"`
Url string `json:"url"`
Username *string `json:"username,omitempty"`
}
// ListArtifact A list of Artifacts
type ListArtifact struct {
// Artifacts A list of Artifact
Artifacts []ArtifactMetadata `json:"artifacts"`
// ItemCount The total number of items
ItemCount *int64 `json:"itemCount,omitempty"`
// PageCount The total number of pages
PageCount *int64 `json:"pageCount,omitempty"`
// PageIndex The current page
PageIndex *int64 `json:"pageIndex,omitempty"`
// PageSize The number of items per page
PageSize *int `json:"pageSize,omitempty"`
}
// ListArtifactLabel A list of Harness Artifact Labels
type ListArtifactLabel struct {
// ItemCount The total number of items
ItemCount *int64 `json:"itemCount,omitempty"`
Labels []string `json:"labels"`
// PageCount The total number of pages
PageCount *int64 `json:"pageCount,omitempty"`
// PageIndex The current page
PageIndex *int64 `json:"pageIndex,omitempty"`
// PageSize The number of items per page
PageSize *int `json:"pageSize,omitempty"`
}
// ListArtifactVersion A list of Artifact versions
type ListArtifactVersion struct {
// ArtifactVersions A list of Artifact versions
ArtifactVersions *[]ArtifactVersionMetadata `json:"artifactVersions,omitempty"`
// ItemCount The total number of items
ItemCount *int64 `json:"itemCount,omitempty"`
// PageCount The total number of pages
PageCount *int64 `json:"pageCount,omitempty"`
// PageIndex The current page
PageIndex *int64 `json:"pageIndex,omitempty"`
// PageSize The number of items per page
PageSize *int `json:"pageSize,omitempty"`
}
// ListFileDetail A list of Harness Artifact Files
type ListFileDetail struct {
// Files A list of Harness Artifact Files
Files []FileDetail `json:"files"`
// ItemCount The total number of items
ItemCount *int64 `json:"itemCount,omitempty"`
// PageCount The total number of pages
PageCount *int64 `json:"pageCount,omitempty"`
// PageIndex The current page
PageIndex *int64 `json:"pageIndex,omitempty"`
// PageSize The number of items per page
PageSize *int `json:"pageSize,omitempty"`
}
// ListMigrationImage A list of migration images
type ListMigrationImage struct {
// Images A list of Artifact versions
Images []MigrationImage `json:"images"`
// ItemCount The total number of items
ItemCount int64 `json:"itemCount"`
// PageCount The total number of pages
PageCount int64 `json:"pageCount"`
// PageIndex The current page
PageIndex int64 `json:"pageIndex"`
// PageSize The number of items per page
PageSize int `json:"pageSize"`
}
// ListOciArtifactTags A list of Artifact versions
type ListOciArtifactTags struct {
// ItemCount The total number of items
ItemCount *int64 `json:"itemCount,omitempty"`
// OciArtifactTags A list of OCI Artifact tags
OciArtifactTags []OciArtifactTag `json:"ociArtifactTags"`
// PageCount The total number of pages
PageCount *int64 `json:"pageCount,omitempty"`
// PageIndex The current page
PageIndex *int64 `json:"pageIndex,omitempty"`
// PageSize The number of items per page
PageSize *int `json:"pageSize,omitempty"`
}
// ListRegistry A list of Harness Artifact Registries
type ListRegistry struct {
// ItemCount The total number of items
ItemCount *int64 `json:"itemCount,omitempty"`
// PageCount The total number of pages
PageCount *int64 `json:"pageCount,omitempty"`
// PageIndex The current page
PageIndex *int64 `json:"pageIndex,omitempty"`
// PageSize The number of items per page
PageSize *int `json:"pageSize,omitempty"`
// Registries A list of Harness Artifact Registries
Registries []RegistryMetadata `json:"registries"`
}
// ListRegistryArtifact A list of Artifacts
type ListRegistryArtifact struct {
// Artifacts A list of Artifact
Artifacts []RegistryArtifactMetadata `json:"artifacts"`
// ItemCount The total number of items
ItemCount *int64 `json:"itemCount,omitempty"`
// PageCount The total number of pages
PageCount *int64 `json:"pageCount,omitempty"`
// PageIndex The current page
PageIndex *int64 `json:"pageIndex,omitempty"`
// PageSize The number of items per page
PageSize *int `json:"pageSize,omitempty"`
}
// ListReplicationRule A list of replication rules
type ListReplicationRule struct {
// ItemCount The total number of items
ItemCount int64 `json:"itemCount"`
// PageCount The total number of pages
PageCount int64 `json:"pageCount"`
// PageIndex The current page
PageIndex int64 `json:"pageIndex"`
// PageSize The number of items per page
PageSize int `json:"pageSize"`
// Rules A list of Replication Rules
Rules []ReplicationRule `json:"rules"`
}
// ListWebhooks A list of Harness Registries webhooks
type ListWebhooks struct {
// ItemCount The total number of items
ItemCount *int64 `json:"itemCount,omitempty"`
// PageCount The total number of pages
PageCount *int64 `json:"pageCount,omitempty"`
// PageIndex The current page
PageIndex *int64 `json:"pageIndex,omitempty"`
// PageSize The number of items per page
PageSize *int `json:"pageSize,omitempty"`
// Webhooks A list of Registries webhooks
Webhooks []Webhook `json:"webhooks"`
}
// ListWebhooksExecutions A list of Harness Registries webhooks executions
type ListWebhooksExecutions struct {
// Executions A list of Registries webhooks executions
Executions []WebhookExecution `json:"executions"`
// ItemCount The total number of items
ItemCount *int64 `json:"itemCount,omitempty"`
// PageCount The total number of pages
PageCount *int64 `json:"pageCount,omitempty"`
// PageIndex The current page
PageIndex *int64 `json:"pageIndex,omitempty"`
// PageSize The number of items per page
PageSize *int `json:"pageSize,omitempty"`
}
// LocalReplicationRegistry defines model for LocalReplicationRegistry.
type LocalReplicationRegistry struct {
RegistryIdentifier string `json:"registryIdentifier"`
}
// MavenArtifactDetailConfig Config for maven artifact details
type MavenArtifactDetailConfig struct {
ArtifactId *string `json:"artifactId,omitempty"`
GroupId *string `json:"groupId,omitempty"`
}
// MigrationImage defines model for MigrationImage.
type MigrationImage struct {
ImageId *string `json:"imageId,omitempty"`
ImageTag *string `json:"imageTag,omitempty"`
Progress *int `json:"progress,omitempty"`
Status *string `json:"status,omitempty"`
}
// NpmArtifactDetailConfig Config for npm artifact details
type NpmArtifactDetailConfig struct {
Metadata *map[string]interface{} `json:"metadata,omitempty"`
}
// NugetArtifactDetailConfig Config for nuget artifact details
type NugetArtifactDetailConfig struct {
Metadata *map[string]interface{} `json:"metadata,omitempty"`
}
// OciArtifactTag OCI Artifact Tag info
type OciArtifactTag struct {
Digest string `json:"digest"`
Name string `json:"name"`
}
// PackageType refers to package
type PackageType string
// PythonArtifactDetailConfig Config for python artifact details
type PythonArtifactDetailConfig struct {
Metadata *map[string]interface{} `json:"metadata,omitempty"`
}
// Registry Harness Artifact Registry
type Registry struct {
AllowedPattern *[]string `json:"allowedPattern,omitempty"`
BlockedPattern *[]string `json:"blockedPattern,omitempty"`
CleanupPolicy *[]CleanupPolicy `json:"cleanupPolicy,omitempty"`
// Config SubConfig specific for Virtual or Upstream Registry
Config *RegistryConfig `json:"config,omitempty"`
CreatedAt *string `json:"createdAt,omitempty"`
Description *string `json:"description,omitempty"`
Identifier string `json:"identifier"`
IsPublic bool `json:"isPublic"`
Labels *[]string `json:"labels,omitempty"`
ModifiedAt *string `json:"modifiedAt,omitempty"`
// PackageType refers to package
PackageType PackageType `json:"packageType"`
Url string `json:"url"`
Uuid string `json:"uuid"`
}
// RegistryArtifactMetadata Artifact Metadata
type RegistryArtifactMetadata struct {
// ArtifactType refers to artifact type
ArtifactType *ArtifactType `json:"artifactType,omitempty"`
DownloadsCount *int64 `json:"downloadsCount,omitempty"`
IsPublic bool `json:"isPublic"`
IsQuarantined *bool `json:"isQuarantined,omitempty"`
Labels *[]string `json:"labels,omitempty"`
LastModified *string `json:"lastModified,omitempty"`
LatestVersion string `json:"latestVersion"`
Name string `json:"name"`
// PackageType refers to package
PackageType PackageType `json:"packageType"`
RegistryIdentifier string `json:"registryIdentifier"`
RegistryPath string `json:"registryPath"`
RegistryUUID string `json:"registryUUID"`
Uuid string `json:"uuid"`
}
// RegistryConfig SubConfig specific for Virtual or Upstream Registry
type RegistryConfig struct {
// Type refers to type of registry i.e virtual or upstream
Type RegistryType `json:"type"`
union json.RawMessage
}
// RegistryMetadata Harness Artifact Registry Metadata
type RegistryMetadata struct {
ArtifactsCount *int64 `json:"artifactsCount,omitempty"`
Description *string `json:"description,omitempty"`
DownloadsCount *int64 `json:"downloadsCount,omitempty"`
Identifier string `json:"identifier"`
IsPublic bool `json:"isPublic"`
Labels *[]string `json:"labels,omitempty"`
LastModified *string `json:"lastModified,omitempty"`
// PackageType refers to package
PackageType PackageType `json:"packageType"`
Path *string `json:"path,omitempty"`
RegistrySize *string `json:"registrySize,omitempty"`
// Type refers to type of registry i.e virtual or upstream
Type RegistryType `json:"type"`
Url string `json:"url"`
Uuid string `json:"uuid"`
}
// RegistryRequest defines model for RegistryRequest.
type RegistryRequest struct {
AllowedPattern *[]string `json:"allowedPattern,omitempty"`
BlockedPattern *[]string `json:"blockedPattern,omitempty"`
CleanupPolicy *[]CleanupPolicy `json:"cleanupPolicy,omitempty"`
// Config SubConfig specific for Virtual or Upstream Registry
Config *RegistryConfig `json:"config,omitempty"`
Description *string `json:"description,omitempty"`
Identifier string `json:"identifier"`
IsPublic bool `json:"isPublic"`
Labels *[]string `json:"labels,omitempty"`
// PackageType refers to package
PackageType PackageType `json:"packageType"`
ParentRef string `json:"parentRef"`
}
// RegistryType refers to type of registry i.e virtual or upstream
type RegistryType string
// ReplicationRegistry defines model for ReplicationRegistry.
type ReplicationRegistry struct {
union json.RawMessage
}
// ReplicationRule defines model for ReplicationRule.
type ReplicationRule struct {
AllowedPatterns []string `json:"allowedPatterns"`
BlockedPatterns []string `json:"blockedPatterns"`
CreatedAt string `json:"createdAt"`
Destination ReplicationRegistry `json:"destination"`
DestinationType ReplicationRuleDestinationType `json:"destinationType"`
Identifier string `json:"identifier"`
ModifiedAt string `json:"modifiedAt"`
ParentRef string `json:"parentRef"`
Source ReplicationRegistry `json:"source"`
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | true |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/api/openapi/contracts/artifact/services.gen.go | registry/app/api/openapi/contracts/artifact/services.gen.go | // Package artifact provides primitives to interact with the openapi HTTP API.
//
// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.5.0 DO NOT EDIT.
package artifact
import (
"bytes"
"compress/gzip"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"path"
"strings"
"github.com/getkin/kin-openapi/openapi3"
"github.com/go-chi/chi/v5"
"github.com/oapi-codegen/runtime"
strictnethttp "github.com/oapi-codegen/runtime/strictmiddleware/nethttp"
)
// ServerInterface represents all server handlers.
type ServerInterface interface {
// Create Registry.
// (POST /registry)
CreateRegistry(w http.ResponseWriter, r *http.Request, params CreateRegistryParams)
// Delete a Registry
// (DELETE /registry/{registry_ref})
DeleteRegistry(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam)
// Returns Registry Details
// (GET /registry/{registry_ref})
GetRegistry(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam)
// Updates a Registry
// (PUT /registry/{registry_ref})
ModifyRegistry(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam)
// List Artifact Labels
// (GET /registry/{registry_ref}/artifact/labels)
ListArtifactLabels(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, params ListArtifactLabelsParams)
// Get Artifact Stats
// (GET /registry/{registry_ref}/artifact/stats)
GetArtifactStatsForRegistry(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, params GetArtifactStatsForRegistryParams)
// Delete Artifact
// (DELETE /registry/{registry_ref}/artifact/{artifact})
DeleteArtifact(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, params DeleteArtifactParams)
// Update Artifact Labels
// (PUT /registry/{registry_ref}/artifact/{artifact}/labels)
UpdateArtifactLabels(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, params UpdateArtifactLabelsParams)
// Get Artifact Stats
// (GET /registry/{registry_ref}/artifact/{artifact}/stats)
GetArtifactStats(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, params GetArtifactStatsParams)
// Get Artifact Summary
// (GET /registry/{registry_ref}/artifact/{artifact}/summary)
GetArtifactSummary(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, params GetArtifactSummaryParams)
// List OCI Artifact tags
// (GET /registry/{registry_ref}/artifact/{artifact}/tags)
GetOciArtifactTags(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, params GetOciArtifactTagsParams)
// Delete an Artifact Version
// (DELETE /registry/{registry_ref}/artifact/{artifact}/version/{version})
DeleteArtifactVersion(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, params DeleteArtifactVersionParams)
// Describe Artifact Details
// (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/details)
GetArtifactDetails(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, params GetArtifactDetailsParams)
// Describe Docker Artifact Detail
// (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/docker/details)
GetDockerArtifactDetails(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, params GetDockerArtifactDetailsParams)
// Describe Docker Artifact Layers
// (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/docker/layers)
GetDockerArtifactLayers(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, params GetDockerArtifactLayersParams)
// Describe Docker Artifact Manifest
// (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/docker/manifest)
GetDockerArtifactManifest(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, params GetDockerArtifactManifestParams)
// Describe Docker Artifact Manifests
// (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/docker/manifests)
GetDockerArtifactManifests(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, params GetDockerArtifactManifestsParams)
// Get Artifact file
// (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/file/{file_name})
GetArtifactFile(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, fileName FileNamePathParam, params GetArtifactFileParams)
// Describe Artifact files
// (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/files)
GetArtifactFiles(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, params GetArtifactFilesParams)
// Describe Helm Artifact Detail
// (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/helm/details)
GetHelmArtifactDetails(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, params GetHelmArtifactDetailsParams)
// Describe Helm Artifact Manifest
// (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/helm/manifest)
GetHelmArtifactManifest(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam)
// Get Artifact Version Summary
// (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/summary)
GetArtifactVersionSummary(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, params GetArtifactVersionSummaryParams)
// List Artifact Versions
// (GET /registry/{registry_ref}/artifact/{artifact}/versions)
GetAllArtifactVersions(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, params GetAllArtifactVersionsParams)
// List Artifacts for Registry
// (GET /registry/{registry_ref}/artifacts)
GetAllArtifactsByRegistry(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, params GetAllArtifactsByRegistryParams)
// Returns CLI Client Setup Details
// (GET /registry/{registry_ref}/client-setup-details)
GetClientSetupDetails(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, params GetClientSetupDetailsParams)
// deleteQuarantineFilePath
// (DELETE /registry/{registry_ref}/quarantine)
DeleteQuarantineFilePath(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, params DeleteQuarantineFilePathParams)
// quarantineFilePath
// (PUT /registry/{registry_ref}/quarantine)
QuarantineFilePath(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam)
// ListWebhooks
// (GET /registry/{registry_ref}/webhooks)
ListWebhooks(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, params ListWebhooksParams)
// CreateWebhook
// (POST /registry/{registry_ref}/webhooks)
CreateWebhook(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam)
// DeleteWebhook
// (DELETE /registry/{registry_ref}/webhooks/{webhook_identifier})
DeleteWebhook(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, webhookIdentifier WebhookIdentifierPathParam)
// GetWebhook
// (GET /registry/{registry_ref}/webhooks/{webhook_identifier})
GetWebhook(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, webhookIdentifier WebhookIdentifierPathParam)
// UpdateWebhook
// (PUT /registry/{registry_ref}/webhooks/{webhook_identifier})
UpdateWebhook(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, webhookIdentifier WebhookIdentifierPathParam)
// ListWebhookExecutions
// (GET /registry/{registry_ref}/webhooks/{webhook_identifier}/executions)
ListWebhookExecutions(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, webhookIdentifier WebhookIdentifierPathParam, params ListWebhookExecutionsParams)
// GetWebhookExecution
// (GET /registry/{registry_ref}/webhooks/{webhook_identifier}/executions/{webhook_execution_id})
GetWebhookExecution(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, webhookIdentifier WebhookIdentifierPathParam, webhookExecutionId WebhookExecutionIdPathParam)
// ReTriggerWebhookExecution
// (GET /registry/{registry_ref}/webhooks/{webhook_identifier}/executions/{webhook_execution_id}/retrigger)
ReTriggerWebhookExecution(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, webhookIdentifier WebhookIdentifierPathParam, webhookExecutionId WebhookExecutionIdPathParam)
// List replication rules
// (GET /replication/rules)
ListReplicationRules(w http.ResponseWriter, r *http.Request, params ListReplicationRulesParams)
// Create a replication rule
// (POST /replication/rules)
CreateReplicationRule(w http.ResponseWriter, r *http.Request, params CreateReplicationRuleParams)
// Delete a replication rule
// (DELETE /replication/rules/{id})
DeleteReplicationRule(w http.ResponseWriter, r *http.Request, id string)
// Get a replication rule
// (GET /replication/rules/{id})
GetReplicationRule(w http.ResponseWriter, r *http.Request, id string)
// Update a replication rule
// (PUT /replication/rules/{id})
UpdateReplicationRule(w http.ResponseWriter, r *http.Request, id string)
// List migration images
// (GET /replication/rules/{id}/migration/images)
ListMigrationImages(w http.ResponseWriter, r *http.Request, id string, params ListMigrationImagesParams)
// Get migration logs for an image
// (GET /replication/rules/{id}/migration/images/{image_id}/logs)
GetMigrationLogsForImage(w http.ResponseWriter, r *http.Request, id string, imageId string)
// Start migration
// (POST /replication/rules/{id}/migration/start)
StartMigration(w http.ResponseWriter, r *http.Request, id string)
// Stop migration
// (POST /replication/rules/{id}/migration/stop)
StopMigration(w http.ResponseWriter, r *http.Request, id string)
// Get artifact stats
// (GET /spaces/{space_ref}/artifact/stats)
GetArtifactStatsForSpace(w http.ResponseWriter, r *http.Request, spaceRef SpaceRefPathParam, params GetArtifactStatsForSpaceParams)
// List artifacts
// (GET /spaces/{space_ref}/artifacts)
GetAllArtifacts(w http.ResponseWriter, r *http.Request, spaceRef SpaceRefPathParam, params GetAllArtifactsParams)
// List registries
// (GET /spaces/{space_ref}/registries)
GetAllRegistries(w http.ResponseWriter, r *http.Request, spaceRef SpaceRefPathParam, params GetAllRegistriesParams)
}
// Unimplemented server implementation that returns http.StatusNotImplemented for each endpoint.
type Unimplemented struct{}
// Create Registry.
// (POST /registry)
func (_ Unimplemented) CreateRegistry(w http.ResponseWriter, r *http.Request, params CreateRegistryParams) {
w.WriteHeader(http.StatusNotImplemented)
}
// Delete a Registry
// (DELETE /registry/{registry_ref})
func (_ Unimplemented) DeleteRegistry(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam) {
	w.WriteHeader(http.StatusNotImplemented) // generated stub: every Unimplemented method answers 501 until overridden
}

// Returns Registry Details
// (GET /registry/{registry_ref})
func (_ Unimplemented) GetRegistry(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam) {
	w.WriteHeader(http.StatusNotImplemented)
}

// Updates a Registry
// (PUT /registry/{registry_ref})
func (_ Unimplemented) ModifyRegistry(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam) {
	w.WriteHeader(http.StatusNotImplemented)
}

// List Artifact Labels
// (GET /registry/{registry_ref}/artifact/labels)
func (_ Unimplemented) ListArtifactLabels(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, params ListArtifactLabelsParams) {
	w.WriteHeader(http.StatusNotImplemented)
}

// Get Artifact Stats
// (GET /registry/{registry_ref}/artifact/stats)
func (_ Unimplemented) GetArtifactStatsForRegistry(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, params GetArtifactStatsForRegistryParams) {
	w.WriteHeader(http.StatusNotImplemented)
}

// Delete Artifact
// (DELETE /registry/{registry_ref}/artifact/{artifact})
func (_ Unimplemented) DeleteArtifact(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, params DeleteArtifactParams) {
	w.WriteHeader(http.StatusNotImplemented)
}

// Update Artifact Labels
// (PUT /registry/{registry_ref}/artifact/{artifact}/labels)
func (_ Unimplemented) UpdateArtifactLabels(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, params UpdateArtifactLabelsParams) {
	w.WriteHeader(http.StatusNotImplemented)
}

// Get Artifact Stats
// (GET /registry/{registry_ref}/artifact/{artifact}/stats)
func (_ Unimplemented) GetArtifactStats(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, params GetArtifactStatsParams) {
	w.WriteHeader(http.StatusNotImplemented)
}

// Get Artifact Summary
// (GET /registry/{registry_ref}/artifact/{artifact}/summary)
func (_ Unimplemented) GetArtifactSummary(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, params GetArtifactSummaryParams) {
	w.WriteHeader(http.StatusNotImplemented)
}

// List OCI Artifact tags
// (GET /registry/{registry_ref}/artifact/{artifact}/tags)
func (_ Unimplemented) GetOciArtifactTags(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, params GetOciArtifactTagsParams) {
	w.WriteHeader(http.StatusNotImplemented)
}

// Delete an Artifact Version
// (DELETE /registry/{registry_ref}/artifact/{artifact}/version/{version})
func (_ Unimplemented) DeleteArtifactVersion(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, params DeleteArtifactVersionParams) {
	w.WriteHeader(http.StatusNotImplemented)
}

// Describe Artifact Details
// (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/details)
func (_ Unimplemented) GetArtifactDetails(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, params GetArtifactDetailsParams) {
	w.WriteHeader(http.StatusNotImplemented)
}

// Describe Docker Artifact Detail
// (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/docker/details)
func (_ Unimplemented) GetDockerArtifactDetails(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, params GetDockerArtifactDetailsParams) {
	w.WriteHeader(http.StatusNotImplemented)
}

// Describe Docker Artifact Layers
// (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/docker/layers)
func (_ Unimplemented) GetDockerArtifactLayers(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, params GetDockerArtifactLayersParams) {
	w.WriteHeader(http.StatusNotImplemented)
}

// Describe Docker Artifact Manifest
// (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/docker/manifest)
func (_ Unimplemented) GetDockerArtifactManifest(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, params GetDockerArtifactManifestParams) {
	w.WriteHeader(http.StatusNotImplemented)
}

// Describe Docker Artifact Manifests
// (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/docker/manifests)
func (_ Unimplemented) GetDockerArtifactManifests(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, params GetDockerArtifactManifestsParams) {
	w.WriteHeader(http.StatusNotImplemented)
}

// Get Artifact file
// (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/file/{file_name})
func (_ Unimplemented) GetArtifactFile(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, fileName FileNamePathParam, params GetArtifactFileParams) {
	w.WriteHeader(http.StatusNotImplemented)
}

// Describe Artifact files
// (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/files)
func (_ Unimplemented) GetArtifactFiles(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, params GetArtifactFilesParams) {
	w.WriteHeader(http.StatusNotImplemented)
}

// Describe Helm Artifact Detail
// (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/helm/details)
func (_ Unimplemented) GetHelmArtifactDetails(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, params GetHelmArtifactDetailsParams) {
	w.WriteHeader(http.StatusNotImplemented)
}

// Describe Helm Artifact Manifest
// (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/helm/manifest)
func (_ Unimplemented) GetHelmArtifactManifest(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam) {
	w.WriteHeader(http.StatusNotImplemented)
}

// Get Artifact Version Summary
// (GET /registry/{registry_ref}/artifact/{artifact}/version/{version}/summary)
func (_ Unimplemented) GetArtifactVersionSummary(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, version VersionPathParam, params GetArtifactVersionSummaryParams) {
	w.WriteHeader(http.StatusNotImplemented)
}

// List Artifact Versions
// (GET /registry/{registry_ref}/artifact/{artifact}/versions)
func (_ Unimplemented) GetAllArtifactVersions(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, artifact ArtifactPathParam, params GetAllArtifactVersionsParams) {
	w.WriteHeader(http.StatusNotImplemented)
}

// List Artifacts for Registry
// (GET /registry/{registry_ref}/artifacts)
func (_ Unimplemented) GetAllArtifactsByRegistry(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, params GetAllArtifactsByRegistryParams) {
	w.WriteHeader(http.StatusNotImplemented)
}

// Returns CLI Client Setup Details
// (GET /registry/{registry_ref}/client-setup-details)
func (_ Unimplemented) GetClientSetupDetails(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, params GetClientSetupDetailsParams) {
	w.WriteHeader(http.StatusNotImplemented)
}

// deleteQuarantineFilePath
// (DELETE /registry/{registry_ref}/quarantine)
func (_ Unimplemented) DeleteQuarantineFilePath(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, params DeleteQuarantineFilePathParams) {
	w.WriteHeader(http.StatusNotImplemented)
}

// quarantineFilePath
// (PUT /registry/{registry_ref}/quarantine)
func (_ Unimplemented) QuarantineFilePath(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam) {
	w.WriteHeader(http.StatusNotImplemented)
}

// ListWebhooks
// (GET /registry/{registry_ref}/webhooks)
func (_ Unimplemented) ListWebhooks(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, params ListWebhooksParams) {
	w.WriteHeader(http.StatusNotImplemented)
}

// CreateWebhook
// (POST /registry/{registry_ref}/webhooks)
func (_ Unimplemented) CreateWebhook(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam) {
	w.WriteHeader(http.StatusNotImplemented)
}

// DeleteWebhook
// (DELETE /registry/{registry_ref}/webhooks/{webhook_identifier})
func (_ Unimplemented) DeleteWebhook(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, webhookIdentifier WebhookIdentifierPathParam) {
	w.WriteHeader(http.StatusNotImplemented)
}

// GetWebhook
// (GET /registry/{registry_ref}/webhooks/{webhook_identifier})
func (_ Unimplemented) GetWebhook(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, webhookIdentifier WebhookIdentifierPathParam) {
	w.WriteHeader(http.StatusNotImplemented)
}

// UpdateWebhook
// (PUT /registry/{registry_ref}/webhooks/{webhook_identifier})
func (_ Unimplemented) UpdateWebhook(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, webhookIdentifier WebhookIdentifierPathParam) {
	w.WriteHeader(http.StatusNotImplemented)
}

// ListWebhookExecutions
// (GET /registry/{registry_ref}/webhooks/{webhook_identifier}/executions)
func (_ Unimplemented) ListWebhookExecutions(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, webhookIdentifier WebhookIdentifierPathParam, params ListWebhookExecutionsParams) {
	w.WriteHeader(http.StatusNotImplemented)
}

// GetWebhookExecution
// (GET /registry/{registry_ref}/webhooks/{webhook_identifier}/executions/{webhook_execution_id})
func (_ Unimplemented) GetWebhookExecution(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, webhookIdentifier WebhookIdentifierPathParam, webhookExecutionId WebhookExecutionIdPathParam) {
	w.WriteHeader(http.StatusNotImplemented)
}

// ReTriggerWebhookExecution
// (GET /registry/{registry_ref}/webhooks/{webhook_identifier}/executions/{webhook_execution_id}/retrigger)
func (_ Unimplemented) ReTriggerWebhookExecution(w http.ResponseWriter, r *http.Request, registryRef RegistryRefPathParam, webhookIdentifier WebhookIdentifierPathParam, webhookExecutionId WebhookExecutionIdPathParam) {
	w.WriteHeader(http.StatusNotImplemented)
}

// List replication rules
// (GET /replication/rules)
func (_ Unimplemented) ListReplicationRules(w http.ResponseWriter, r *http.Request, params ListReplicationRulesParams) {
	w.WriteHeader(http.StatusNotImplemented)
}

// Create a replication rule
// (POST /replication/rules)
func (_ Unimplemented) CreateReplicationRule(w http.ResponseWriter, r *http.Request, params CreateReplicationRuleParams) {
	w.WriteHeader(http.StatusNotImplemented)
}

// Delete a replication rule
// (DELETE /replication/rules/{id})
func (_ Unimplemented) DeleteReplicationRule(w http.ResponseWriter, r *http.Request, id string) {
	w.WriteHeader(http.StatusNotImplemented)
}

// Get a replication rule
// (GET /replication/rules/{id})
func (_ Unimplemented) GetReplicationRule(w http.ResponseWriter, r *http.Request, id string) {
	w.WriteHeader(http.StatusNotImplemented)
}

// Update a replication rule
// (PUT /replication/rules/{id})
func (_ Unimplemented) UpdateReplicationRule(w http.ResponseWriter, r *http.Request, id string) {
	w.WriteHeader(http.StatusNotImplemented)
}

// List migration images
// (GET /replication/rules/{id}/migration/images)
func (_ Unimplemented) ListMigrationImages(w http.ResponseWriter, r *http.Request, id string, params ListMigrationImagesParams) {
	w.WriteHeader(http.StatusNotImplemented)
}

// Get migration logs for an image
// (GET /replication/rules/{id}/migration/images/{image_id}/logs)
func (_ Unimplemented) GetMigrationLogsForImage(w http.ResponseWriter, r *http.Request, id string, imageId string) {
	w.WriteHeader(http.StatusNotImplemented)
}

// Start migration
// (POST /replication/rules/{id}/migration/start)
func (_ Unimplemented) StartMigration(w http.ResponseWriter, r *http.Request, id string) {
	w.WriteHeader(http.StatusNotImplemented)
}

// Stop migration
// (POST /replication/rules/{id}/migration/stop)
func (_ Unimplemented) StopMigration(w http.ResponseWriter, r *http.Request, id string) {
	w.WriteHeader(http.StatusNotImplemented)
}

// Get artifact stats
// (GET /spaces/{space_ref}/artifact/stats)
func (_ Unimplemented) GetArtifactStatsForSpace(w http.ResponseWriter, r *http.Request, spaceRef SpaceRefPathParam, params GetArtifactStatsForSpaceParams) {
	w.WriteHeader(http.StatusNotImplemented)
}

// List artifacts
// (GET /spaces/{space_ref}/artifacts)
func (_ Unimplemented) GetAllArtifacts(w http.ResponseWriter, r *http.Request, spaceRef SpaceRefPathParam, params GetAllArtifactsParams) {
	w.WriteHeader(http.StatusNotImplemented)
}

// List registries
// (GET /spaces/{space_ref}/registries)
func (_ Unimplemented) GetAllRegistries(w http.ResponseWriter, r *http.Request, spaceRef SpaceRefPathParam, params GetAllRegistriesParams) {
	w.WriteHeader(http.StatusNotImplemented)
}
// ServerInterfaceWrapper converts contexts to parameters.
type ServerInterfaceWrapper struct {
	// Handler is the API implementation each operation wrapper delegates to.
	Handler ServerInterface
	// HandlerMiddlewares wrap every operation; later entries in the slice
	// wrap (and therefore run before) earlier ones.
	HandlerMiddlewares []MiddlewareFunc
	// ErrorHandlerFunc is invoked when binding a path/query parameter fails.
	ErrorHandlerFunc func(w http.ResponseWriter, r *http.Request, err error)
}

// MiddlewareFunc wraps an http.Handler with additional behavior.
type MiddlewareFunc func(http.Handler) http.Handler
// CreateRegistry operation middleware
func (siw *ServerInterfaceWrapper) CreateRegistry(w http.ResponseWriter, r *http.Request) {
	var err error

	// Parameter object where we will unmarshal all parameters from the context
	var params CreateRegistryParams

	// ------------- Required query parameter "space_ref" -------------
	// Presence check only (generated pattern): the success branch is empty;
	// the actual decoding into params.SpaceRef happens in BindQueryParameter below.
	if paramValue := r.URL.Query().Get("space_ref"); paramValue != "" {

	} else {
		siw.ErrorHandlerFunc(w, r, &RequiredParamError{ParamName: "space_ref"})
		return
	}

	err = runtime.BindQueryParameter("form", true, true, "space_ref", r.URL.Query(), &params.SpaceRef)
	if err != nil {
		siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "space_ref", Err: err})
		return
	}

	handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		siw.Handler.CreateRegistry(w, r, params)
	}))

	// Later middlewares wrap earlier ones, so the last entry runs first.
	for _, middleware := range siw.HandlerMiddlewares {
		handler = middleware(handler)
	}

	handler.ServeHTTP(w, r)
}

// DeleteRegistry operation middleware
func (siw *ServerInterfaceWrapper) DeleteRegistry(w http.ResponseWriter, r *http.Request) {
	var err error

	// ------------- Path parameter "registry_ref" -------------
	var registryRef RegistryRefPathParam

	err = runtime.BindStyledParameterWithOptions("simple", "registry_ref", chi.URLParam(r, "registry_ref"), &registryRef, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true})
	if err != nil {
		siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "registry_ref", Err: err})
		return
	}

	handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		siw.Handler.DeleteRegistry(w, r, registryRef)
	}))

	for _, middleware := range siw.HandlerMiddlewares {
		handler = middleware(handler)
	}

	handler.ServeHTTP(w, r)
}

// GetRegistry operation middleware
func (siw *ServerInterfaceWrapper) GetRegistry(w http.ResponseWriter, r *http.Request) {
	var err error

	// ------------- Path parameter "registry_ref" -------------
	var registryRef RegistryRefPathParam

	err = runtime.BindStyledParameterWithOptions("simple", "registry_ref", chi.URLParam(r, "registry_ref"), &registryRef, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true})
	if err != nil {
		siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "registry_ref", Err: err})
		return
	}

	handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		siw.Handler.GetRegistry(w, r, registryRef)
	}))

	for _, middleware := range siw.HandlerMiddlewares {
		handler = middleware(handler)
	}

	handler.ServeHTTP(w, r)
}

// ModifyRegistry operation middleware
func (siw *ServerInterfaceWrapper) ModifyRegistry(w http.ResponseWriter, r *http.Request) {
	var err error

	// ------------- Path parameter "registry_ref" -------------
	var registryRef RegistryRefPathParam

	err = runtime.BindStyledParameterWithOptions("simple", "registry_ref", chi.URLParam(r, "registry_ref"), &registryRef, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true})
	if err != nil {
		siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "registry_ref", Err: err})
		return
	}

	handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		siw.Handler.ModifyRegistry(w, r, registryRef)
	}))

	for _, middleware := range siw.HandlerMiddlewares {
		handler = middleware(handler)
	}

	handler.ServeHTTP(w, r)
}

// ListArtifactLabels operation middleware
func (siw *ServerInterfaceWrapper) ListArtifactLabels(w http.ResponseWriter, r *http.Request) {
	var err error

	// ------------- Path parameter "registry_ref" -------------
	var registryRef RegistryRefPathParam

	err = runtime.BindStyledParameterWithOptions("simple", "registry_ref", chi.URLParam(r, "registry_ref"), &registryRef, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true})
	if err != nil {
		siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "registry_ref", Err: err})
		return
	}

	// Parameter object where we will unmarshal all parameters from the context
	var params ListArtifactLabelsParams

	// ------------- Optional query parameter "page" -------------

	err = runtime.BindQueryParameter("form", true, false, "page", r.URL.Query(), &params.Page)
	if err != nil {
		siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "page", Err: err})
		return
	}

	// ------------- Optional query parameter "size" -------------

	err = runtime.BindQueryParameter("form", true, false, "size", r.URL.Query(), &params.Size)
	if err != nil {
		siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "size", Err: err})
		return
	}

	// ------------- Optional query parameter "search_term" -------------

	err = runtime.BindQueryParameter("form", true, false, "search_term", r.URL.Query(), &params.SearchTerm)
	if err != nil {
		siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "search_term", Err: err})
		return
	}

	handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		siw.Handler.ListArtifactLabels(w, r, registryRef, params)
	}))

	for _, middleware := range siw.HandlerMiddlewares {
		handler = middleware(handler)
	}

	handler.ServeHTTP(w, r)
}

// GetArtifactStatsForRegistry operation middleware
func (siw *ServerInterfaceWrapper) GetArtifactStatsForRegistry(w http.ResponseWriter, r *http.Request) {
	var err error

	// ------------- Path parameter "registry_ref" -------------
	var registryRef RegistryRefPathParam

	err = runtime.BindStyledParameterWithOptions("simple", "registry_ref", chi.URLParam(r, "registry_ref"), &registryRef, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true})
	if err != nil {
		siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "registry_ref", Err: err})
		return
	}

	// Parameter object where we will unmarshal all parameters from the context
	var params GetArtifactStatsForRegistryParams

	// ------------- Optional query parameter "from" -------------

	err = runtime.BindQueryParameter("form", true, false, "from", r.URL.Query(), &params.From)
	if err != nil {
		siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "from", Err: err})
		return
	}

	// ------------- Optional query parameter "to" -------------

	err = runtime.BindQueryParameter("form", true, false, "to", r.URL.Query(), &params.To)
	if err != nil {
		siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "to", Err: err})
		return
	}

	handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		siw.Handler.GetArtifactStatsForRegistry(w, r, registryRef, params)
	}))

	for _, middleware := range siw.HandlerMiddlewares {
		handler = middleware(handler)
	}

	handler.ServeHTTP(w, r)
}
// DeleteArtifact operation middleware
func (siw *ServerInterfaceWrapper) DeleteArtifact(w http.ResponseWriter, r *http.Request) {
var err error
// ------------- Path parameter "registry_ref" -------------
var registryRef RegistryRefPathParam
err = runtime.BindStyledParameterWithOptions("simple", "registry_ref", chi.URLParam(r, "registry_ref"), ®istryRef, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true})
if err != nil {
siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "registry_ref", Err: err})
return
}
// ------------- Path parameter "artifact" -------------
var artifact ArtifactPathParam
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | true |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/common/url_utils.go | registry/app/common/url_utils.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package common
import (
"context"
"net/url"
"strings"
"github.com/rs/zerolog/log"
)
// GenerateOciTokenURL returns the OCI token endpoint ("/v2/token") for the
// given registry base URL. A trailing slash on registryURL is tolerated so
// the result never contains a double slash (plain concatenation previously
// produced ".../​/v2/token" for such inputs).
func GenerateOciTokenURL(registryURL string) string {
	return strings.TrimSuffix(registryURL, "/") + "/v2/token"
}
// GenerateSetupClientHostnameAndRegistry splits a registry URL into its host
// and its path (with surrounding slashes removed), which serves as the
// registry reference. Both results are empty when the URL cannot be parsed.
func GenerateSetupClientHostnameAndRegistry(registryURL string) (hostname string, registryRef string) {
	parsed, parseErr := url.Parse(registryURL)
	if parseErr != nil {
		return "", ""
	}
	hostname = parsed.Host
	registryRef = strings.Trim(parsed.Path, "/")
	return hostname, registryRef
}
// GetHost extracts the host (including any port) from urlStr. Inputs without
// a scheme are treated as https URLs. Returns "" (after logging a warning)
// when the URL cannot be parsed.
func GetHost(ctx context.Context, urlStr string) string {
	normalized := urlStr
	if !strings.Contains(normalized, "://") {
		normalized = "https://" + normalized
	}
	parsed, err := url.Parse(normalized)
	if err != nil {
		log.Ctx(ctx).Warn().Msgf("Failed to parse URL: %s", normalized)
		return ""
	}
	return parsed.Host
}
// GetHostName extracts the hostname (without any port) from urlStr. Inputs
// without a scheme are treated as https URLs. Returns "" (after logging a
// warning) when the URL cannot be parsed.
func GetHostName(ctx context.Context, urlStr string) string {
	normalized := urlStr
	if !strings.Contains(normalized, "://") {
		normalized = "https://" + normalized
	}
	parsed, err := url.Parse(normalized)
	if err != nil {
		log.Ctx(ctx).Warn().Msgf("Failed to parse URL: %s", normalized)
		return ""
	}
	return parsed.Hostname()
}
// TrimURLScheme strips the scheme (e.g. "https://") from urlStr. When the
// input cannot be parsed as a URL it is returned unchanged.
func TrimURLScheme(urlStr string) string {
	parsed, parseErr := url.Parse(urlStr)
	if parseErr != nil {
		// Parsing failed: hand back the input untouched.
		return urlStr
	}
	parsed.Scheme = ""
	// With the scheme cleared, String() renders "//host/path"; drop the
	// leading slashes.
	stripped := parsed.String()
	return strings.TrimPrefix(stripped, "//")
}
// ExtractFirstQueryParams flattens url.Values to a map that keeps only the
// first value of each parameter; parameters with no values are omitted.
func ExtractFirstQueryParams(queryParams url.Values) map[string]string {
	firstValues := make(map[string]string, len(queryParams))
	for name, all := range queryParams {
		if len(all) == 0 {
			continue
		}
		firstValues[name] = all[0]
	}
	return firstValues
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/common/lib/authorizer.go | registry/app/common/lib/authorizer.go | // Source: https://github.com/goharbor/harbor
// Copyright 2016 Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lib
import (
"github.com/harness/gitness/registry/app/common/http/modifier"
)
// Authorizer authorizes the request. It is a defined type over
// modifier.Modifier, so the concrete contract is whatever that interface
// specifies — presumably modifying an outgoing request (e.g. attaching
// credentials) before it is sent; confirm against common/http/modifier.
type Authorizer modifier.Modifier
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/common/lib/link.go | registry/app/common/lib/link.go | // Source: https://github.com/goharbor/harbor
// Copyright 2016 Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lib
import (
"fmt"
"strings"
)
// Link models a single entry of an HTTP "Link" header.
type Link struct {
	// URL is the target referenced between the angle brackets.
	URL string
	// Rel holds the value of the "rel" attribute, if any.
	Rel string
	// Attrs holds every attribute other than "rel".
	Attrs map[string]string
}

// String renders the link in HTTP Link-header form, e.g.
// <http://example.com>; rel="next"; title="chapter". Attribute order follows
// Go map iteration and is therefore unspecified.
func (l *Link) String() string {
	var b strings.Builder
	b.WriteString("<")
	b.WriteString(l.URL)
	b.WriteString(">")
	if l.Rel != "" {
		fmt.Fprintf(&b, `; rel="%s"`, l.Rel)
	}
	for name, val := range l.Attrs {
		fmt.Fprintf(&b, `; %s="%s"`, name, val)
	}
	return b.String()
}

// Links is a collection of Link values.
type Links []*Link

// String joins the individual links with " , " as separator.
func (l Links) String() string {
	parts := make([]string, 0, len(l))
	for _, entry := range l {
		parts = append(parts, entry.String())
	}
	return strings.Join(parts, " , ")
}

// ParseLinks parses the link header into Links
// e.g. <http://example.com/TheBook/chapter2>; rel="previous";
// title="previous chapter" , <http://example.com/TheBook/chapter4>; rel="next"; title="next chapter".
// Entries without a URL are dropped.
func ParseLinks(str string) Links {
	var parsed Links
	for _, chunk := range strings.Split(str, ",") {
		entry := &Link{Attrs: map[string]string{}}
		for _, field := range strings.Split(chunk, ";") {
			field = strings.TrimSpace(field)
			if field == "" {
				continue
			}
			// <...> marks the target URL; everything else is key=value.
			if field[0] == '<' && field[len(field)-1] == '>' {
				entry.URL = field[1 : len(field)-1]
				continue
			}
			kv := strings.SplitN(field, "=", 2)
			name := kv[0]
			val := ""
			if len(kv) == 2 {
				val = strings.Trim(kv[1], `"`)
			}
			if name == "rel" {
				entry.Rel = val
			} else {
				entry.Attrs[name] = val
			}
		}
		if entry.URL == "" {
			continue
		}
		parsed = append(parsed, entry)
	}
	return parsed
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/common/lib/errors/errors.go | registry/app/common/lib/errors/errors.go | // Source: https://github.com/goharbor/harbor
// Copyright 2016 Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errors
import (
"encoding/json"
"errors"
"fmt"
"github.com/rs/zerolog/log"
)
var (
	// As is an alias of `errors.As`: it finds the first error in the chain
	// matching the target's type.
	As = errors.As
	// Is is an alias of `errors.Is`: it reports whether the chain contains
	// the target error.
	Is = errors.Is
)
// Error is the registry error type: a human-readable message plus an
// optional machine-readable Code, the wrapped Cause, and the call stack
// captured at creation time.
type Error struct {
	Cause   error  `json:"-"`       // wrapped underlying error (excluded from JSON)
	Code    string `json:"code"`    // machine-readable code, e.g. NOT_FOUND (see const.go)
	Message string `json:"message"` // human-readable description
	Stack   *stack `json:"-"`       // stack captured by newStack (excluded from JSON)
}
// Error returns a human readable error, error.Error() will not
// contains the track information. Needs it? just call error.StackTrace()
// Code will not be in the error output. When a Cause is present the result
// is "<Message>: <Cause>".
func (e *Error) Error() string {
	if e.Cause == nil {
		return e.Message
	}
	return e.Message + ": " + e.Cause.Error()
}
// StackTrace returns the captured stack formatted one frame per line as
// "<File>:<Line>, <Function>".
// NOTE(review): panics if Stack is nil (frames dereferences the pointer) —
// only call on errors built via New/Wrap/Wrapf/Errorf, which set it.
func (e *Error) StackTrace() string {
	return e.Stack.frames().format()
}
// MarshalJSON serializes the error as {"code": ..., "message": ...}, where
// message includes the cause chain (via Error()). Cause and Stack are never
// serialized.
func (e *Error) MarshalJSON() ([]byte, error) {
	payload := struct {
		Code    string `json:"code"`
		Message string `json:"message"`
	}{
		Code:    e.Code,
		Message: e.Error(),
	}
	return json.Marshal(&payload)
}
// WithMessage overwrites the message (printf-style) and returns the receiver
// for chaining. Note: it mutates the receiver rather than copying.
func (e *Error) WithMessage(format string, v ...any) *Error {
	e.Message = fmt.Sprintf(format, v...)
	return e
}

// WithCode sets the machine-readable code and returns the receiver for chaining.
func (e *Error) WithCode(code string) *Error {
	e.Code = code
	return e
}

// WithCause records the wrapped underlying error and returns the receiver for chaining.
func (e *Error) WithCause(err error) *Error {
	e.Cause = err
	return e
}

// Unwrap exposes the cause so errors.Is / errors.As can walk the chain.
func (e *Error) Unwrap() error { return e.Cause }
// Errors aggregates multiple errors into a single error value.
type Errors []error

// Compile-time check that Errors satisfies the error interface.
var _ error = Errors{}
// Error renders the slice as a JSON document of the form {"errors":[...]}.
// Elements that are not (or do not wrap) an *Error are converted via
// UnknownError, and a missing code defaults to GeneralCode. On marshal
// failure the error is logged and "{}" is returned.
func (errs Errors) Error() string {
	var tmpErrs struct {
		Errors []Error `json:"errors,omitempty"`
	}
	for _, e := range errs {
		var err *Error
		ok := errors.As(e, &err)
		if !ok {
			err = UnknownError(e)
		}
		if err.Code == "" {
			// Note: when err was found via errors.As this mutates the
			// original *Error in the chain, not just the local copy.
			err.Code = GeneralCode
		}
		tmpErrs.Errors = append(tmpErrs.Errors, *err)
	}
	msg, err := json.Marshal(tmpErrs)
	if err != nil {
		log.Error().Stack().Err(err).Msg("")
		return "{}"
	}
	return string(msg)
}
// Len returns the current number of errors.
func (errs Errors) Len() int {
	return len(errs)
}

// NewErrs wraps a single error into an Errors slice.
func NewErrs(err error) Errors {
	return Errors{err}
}
// New builds an *Error (with a freshly captured stack) from either an error
// or any other value; non-error values are formatted with %v.
func New(in any) *Error {
	var message string
	if asErr, ok := in.(error); ok {
		message = asErr.Error()
	} else {
		message = fmt.Sprintf("%v", in)
	}
	return &Error{
		Message: message,
		Stack:   newStack(),
	}
}
// Wrap annotates err with a message and a captured stack. Returns nil when
// err is nil, so it can be applied unconditionally on return paths.
func Wrap(err error, message string) *Error {
	if err == nil {
		return nil
	}
	return &Error{
		Cause:   err,
		Message: message,
		Stack:   newStack(),
	}
}
// Wrapf is Wrap with a printf-formatted message. Returns nil when err is nil.
func Wrapf(err error, format string, args ...any) *Error {
	if err == nil {
		return nil
	}
	return &Error{
		Cause:   err,
		Message: fmt.Sprintf(format, args...),
		Stack:   newStack(),
	}
}
// Errorf builds an *Error with a printf-formatted message and a captured
// stack; no cause and no code are set.
func Errorf(format string, args ...any) *Error {
	return &Error{
		Message: fmt.Sprintf(format, args...),
		Stack:   newStack(),
	}
}
// IsErr checks whether the err chain contains an *Error carrying the given
// code. Only the first *Error found via As is inspected.
func IsErr(err error, code string) bool {
	var typed *Error
	if !As(err, &typed) {
		return false
	}
	return typed.Code == code
}
// ErrCode returns the code of err: "" for nil, the first non-empty Code in
// the *Error cause chain, or GeneralCode when no code can be determined.
func ErrCode(err error) string {
	if err == nil {
		return ""
	}
	var typed *Error
	if As(err, &typed) {
		if typed.Code != "" {
			return typed.Code
		}
		if typed.Cause != nil {
			// No code on this level; walk down the cause chain.
			return ErrCode(typed.Cause)
		}
	}
	return GeneralCode
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/common/lib/errors/stack.go | registry/app/common/lib/errors/stack.go | // Source: https://github.com/goharbor/harbor
// Copyright 2016 Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errors
import (
"fmt"
"runtime"
"strings"
)
// maxDepth bounds how many program counters a captured stack may hold.
const maxDepth = 50

// stack stores raw program counters captured at error-creation time.
type stack []uintptr

// frames resolves the captured program counters into runtime frames,
// dropping any frame whose file path contains "runtime/".
func (s *stack) frames() StackFrames {
	resolved := runtime.CallersFrames(*s)
	var out StackFrames
	for {
		fr, more := resolved.Next()
		if !strings.Contains(fr.File, "runtime/") {
			out = append(out, fr)
		}
		if !more {
			break
		}
	}
	return out
}

// newStack captures the current call stack, skipping 3 frames so the
// errors-package constructors themselves are excluded, up to maxDepth entries.
func newStack() *stack {
	var pcs [maxDepth]uintptr
	depth := runtime.Callers(3, pcs[:])
	captured := stack(pcs[:depth])
	return &captured
}

// StackFrames is a resolved, runtime-filtered list of stack frames.
type StackFrames []runtime.Frame

// format renders the frames one per line as "<File>:<Line>, <Function>".
func (frames StackFrames) format() string {
	var b strings.Builder
	for _, fr := range frames {
		fmt.Fprintf(&b, "\n%v:%v, %v", fr.File, fr.Line, fr.Function)
	}
	return b.String()
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/common/lib/errors/const.go | registry/app/common/lib/errors/const.go | // Source: https://github.com/goharbor/harbor
// Copyright 2016 Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errors
const (
	// NotFoundCode is code for the error of no object found.
	NotFoundCode = "NOT_FOUND"
	// ConflictCode marks conflicts with existing state.
	ConflictCode = "CONFLICT"
	// UnAuthorizedCode marks missing or invalid authentication.
	UnAuthorizedCode = "UNAUTHORIZED"
	// BadRequestCode marks malformed requests.
	BadRequestCode = "BAD_REQUEST"
	// ForbiddenCode marks authenticated-but-not-permitted requests.
	ForbiddenCode = "FORBIDDEN"
	// MethodNotAllowedCode marks unsupported HTTP methods.
	MethodNotAllowedCode = "METHOD_NOT_ALLOWED"
	// RateLimitCode marks throttled requests.
	RateLimitCode = "TOO_MANY_REQUEST"
	// PreconditionCode marks failed preconditions.
	PreconditionCode = "PRECONDITION"
	// GeneralCode is the catch-all code when nothing more specific applies.
	GeneralCode = "UNKNOWN"
	// DENIED it's used by middleware(readonly, vul and content trust)
	// and returned to docker client to index the request is denied.
	DENIED = "DENIED"
	// PROJECTPOLICYVIOLATION marks project policy violations.
	PROJECTPOLICYVIOLATION = "PROJECTPOLICYVIOLATION"
	// ViolateForeignKeyConstraintCode is the error code for violating foreign key constraint error.
	ViolateForeignKeyConstraintCode = "VIOLATE_FOREIGN_KEY_CONSTRAINT"
	// DIGESTINVALID marks an invalid content digest.
	DIGESTINVALID = "DIGEST_INVALID"
	// MANIFESTINVALID marks an invalid manifest.
	MANIFESTINVALID = "MANIFEST_INVALID"
	// UNSUPPORTED is for digest UNSUPPORTED error.
	UNSUPPORTED = "UNSUPPORTED"
)
// NotFoundError is error for the case of object not found.
// NotFoundError wraps err as a NOT_FOUND-coded *Error.
func NotFoundError(err error) *Error {
	e := New("resource not found").WithCode(NotFoundCode)
	return e.WithCause(err)
}
// UnknownError ...
// UnknownError wraps err as an UNKNOWN-coded *Error.
func UnknownError(err error) *Error {
	e := New("unknown").WithCode(GeneralCode)
	return e.WithCause(err)
}
// IsNotFoundErr returns true when the error is NotFoundError.
// IsNotFoundErr reports whether err carries the NOT_FOUND code.
func IsNotFoundErr(err error) bool {
	result := IsErr(err, NotFoundCode)
	return result
}
// IsRateLimitError checks whether the err chains contains rate limit error.
// IsRateLimitError reports whether the err chain carries the rate limit code.
func IsRateLimitError(err error) bool {
	result := IsErr(err, RateLimitCode)
	return result
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/common/lib/errors/stack_test.go | registry/app/common/lib/errors/stack_test.go | // Source: https://github.com/goharbor/harbor
// Copyright 2016 Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errors
import (
"testing"
"github.com/rs/zerolog/log"
"github.com/stretchr/testify/suite"
)
// stackTestSuite exercises stack capture and frame formatting.
type stackTestSuite struct {
	suite.Suite
}

// SetupTest is a no-op; defined to satisfy testify's suite lifecycle hooks.
func (c *stackTestSuite) SetupTest() {}
// TestFrame captures a stack here and checks the expected number of frames.
func (c *stackTestSuite) TestFrame() {
	stack := newStack()
	frames := stack.frames()
	// Use Len rather than Equal(len(frames), 4): clearer intent and a better
	// failure message (Equal's expected-value-first convention was inverted).
	c.Len(frames, 4)
	log.Info().Msg(frames.format())
}
// TestFormat verifies the outermost captured frame is the test runner
// (testing.tRunner), i.e. the stack reaches back to the test entry point.
func (c *stackTestSuite) TestFormat() {
	stack := newStack()
	frames := stack.frames()
	c.Contains(frames[len(frames)-1].Function, "testing.tRunner")
}
// TestStackTestSuite runs the suite under `go test`.
func TestStackTestSuite(t *testing.T) {
	suite.Run(t, &stackTestSuite{})
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/common/http/transport_test.go | registry/app/common/http/transport_test.go | // Source: https://github.com/goharbor/harbor
// Copyright 2016 Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package http
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestGetHTTPTransport checks that the option-less call hands out the shared
// secure transport and WithInsecure(true) hands out the shared insecure one
// (identity comparison against the package-level singletons built in init).
func TestGetHTTPTransport(t *testing.T) {
	transport := GetHTTPTransport()
	assert.Equal(t, secureHTTPTransport, transport, "Transport should be secure")
	transport = GetHTTPTransport(WithInsecure(true))
	assert.Equal(t, insecureHTTPTransport, transport, "Transport should be insecure")
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/common/http/transport.go | registry/app/common/http/transport.go | // Source: https://github.com/goharbor/harbor
// Copyright 2016 Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package http
import (
"crypto/tls"
"net"
"net/http"
"time"
)
const (
	// InsecureTransport used to get the insecure http Transport.
	InsecureTransport = iota
	// SecureTransport used to get the external secure http Transport.
	SecureTransport
	// NOTE(review): these selectors are not referenced anywhere in this file —
	// GetHTTPTransport selects via TransportOption instead. Confirm external
	// callers before removing.
)
// Shared transports handed out by GetHTTPTransport; built exactly once in init
// so callers reuse connections instead of allocating per request.
var (
	secureHTTPTransport   http.RoundTripper
	insecureHTTPTransport http.RoundTripper
)
// init builds the two package-level transports: the insecure one always skips
// server-cert verification; the secure one additionally carries the internal
// TLS config when internal TLS is enabled via environment.
func init() {
	insecureHTTPTransport = NewTransport(WithInsecureSkipVerify(true))
	var secureOpts []func(*http.Transport)
	if InternalTLSEnabled() {
		secureOpts = append(secureOpts, WithInternalTLSConfig())
	}
	secureHTTPTransport = NewTransport(secureOpts...)
}
// Use this instead of Default Transport in library because it sets ForceAttemptHTTP2 to true
// And that options introduced in go 1.13 will cause the https requests hang forever in replication environment.
func newDefaultTransport() *http.Transport {
return &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
TLSClientConfig: &tls.Config{
MinVersion: tls.VersionTLS12,
},
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
}
}
// WithInternalTLSConfig returns a TransportOption that configures the transport to use the internal TLS configuration.
// NOTE: the returned option panics if the internal cert/key pair cannot be
// loaded, so it must only be applied when internal TLS is known to be
// configured (see init in this file).
func WithInternalTLSConfig() func(*http.Transport) {
	return func(tr *http.Transport) {
		tlsConfig, err := GetInternalTLSConfig()
		if err != nil {
			// Fail fast at setup time rather than sending misconfigured requests.
			panic(err)
		}
		tr.TLSClientConfig = tlsConfig
	}
}
// WithInsecureSkipVerify returns a TransportOption that configures the
// transport to skip verification of the server's certificate.
func WithInsecureSkipVerify(skipVerify bool) func(*http.Transport) {
return func(tr *http.Transport) {
tr.TLSClientConfig.InsecureSkipVerify = skipVerify
}
}
// WithMaxIdleConns returns a TransportOption that configures the
// transport to use the specified number of idle connections per host.
func WithMaxIdleConns(maxIdleConns int) func(*http.Transport) {
return func(tr *http.Transport) {
tr.MaxIdleConns = maxIdleConns
}
}
// WithIdleconnectionTimeout returns a TransportOption that configures
// the transport to use the specified idle connection timeout.
func WithIdleconnectionTimeout(idleConnectionTimeout time.Duration) func(*http.Transport) {
return func(tr *http.Transport) {
tr.IdleConnTimeout = idleConnectionTimeout
}
}
// NewTransport builds a transport from the package defaults and applies the
// given options in order.
func NewTransport(opts ...func(*http.Transport)) http.RoundTripper {
	transport := newDefaultTransport()
	for _, apply := range opts {
		apply(transport)
	}
	return transport
}
// TransportConfig is the resolved configuration used by GetHTTPTransport to
// pick between the shared secure and insecure transports.
type TransportConfig struct {
	Insecure bool
}

// TransportOption mutates a TransportConfig.
type TransportOption func(*TransportConfig)

// WithInsecure returns an option that requests the transport which skips
// verification of the server's certificate.
func WithInsecure(skipVerify bool) TransportOption {
	insecure := skipVerify
	return func(cfg *TransportConfig) {
		cfg.Insecure = insecure
	}
}
// GetHTTPTransport returns one of the shared package transports, selected by
// the given options (insecure vs. secure).
func GetHTTPTransport(opts ...TransportOption) http.RoundTripper {
	cfg := new(TransportConfig)
	for _, apply := range opts {
		apply(cfg)
	}
	switch {
	case cfg.Insecure:
		return insecureHTTPTransport
	default:
		return secureHTTPTransport
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/common/http/tls.go | registry/app/common/http/tls.go | // Source: https://github.com/goharbor/harbor
// Copyright 2016 Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package http
import (
"crypto/tls"
"fmt"
"os"
"strings"
)
// Environment variable names controlling internal (service-to-service) TLS.
const (
	internalTLSEnable        = "GITNESS_INTERNAL_TLS_ENABLED"
	internalVerifyClientCert = "GITNESS_INTERNAL_VERIFY_CLIENT_CERT"
	internalTLSKeyPath       = "GITNESS_INTERNAL_TLS_KEY_PATH"
	internalTLSCertPath      = "GITNESS_INTERNAL_TLS_CERT_PATH"
)

// InternalTLSEnabled reports whether internal TLS is enabled via environment
// (case-insensitive "true").
func InternalTLSEnabled() bool {
	enabled := strings.ToLower(os.Getenv(internalTLSEnable))
	return enabled == "true"
}

// InternalEnableVerifyClientCert reports whether mutual-TLS client-cert
// verification is enabled via environment (case-insensitive "true").
func InternalEnableVerifyClientCert() bool {
	verify := strings.ToLower(os.Getenv(internalVerifyClientCert))
	return verify == "true"
}
// GetInternalCertPair loads the internal TLS certificate and key from the
// paths named by the corresponding environment variables.
func GetInternalCertPair() (tls.Certificate, error) {
	return tls.LoadX509KeyPair(
		os.Getenv(internalTLSCertPath),
		os.Getenv(internalTLSKeyPath),
	)
}
// GetInternalTLSConfig returns a tls.Config for internal HTTPS communication,
// carrying the service's key pair and enforcing TLS 1.2+.
func GetInternalTLSConfig() (*tls.Config, error) {
	// Load the internal certificate/key pair from the environment paths.
	cert, err := GetInternalCertPair()
	if err != nil {
		// Fix: add the ":" separator before the wrapped cause (the original
		// message ran straight into it) and correct the "genrate" typo above.
		return nil, fmt.Errorf("internal TLS enabled but can't get cert file: %w", err)
	}
	return &tls.Config{
		Certificates: []tls.Certificate{cert},
		MinVersion:   tls.VersionTLS12,
	}, nil
}
// NewServerTLSConfig returns a modern tls config,
// refer to https://blog.cloudflare.com/exposing-go-on-the-internet/
func NewServerTLSConfig() *tls.Config {
return &tls.Config{
PreferServerCipherSuites: true,
CurvePreferences: []tls.CurveID{
tls.CurveP256,
tls.X25519,
},
MinVersion: tls.VersionTLS12,
CipherSuites: []uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
},
}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/common/http/modifier/modifier.go | registry/app/common/http/modifier/modifier.go | // Source: https://github.com/goharbor/harbor
// Copyright 2016 Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package modifier
import (
"net/http"
)
// Modifier modifies request.
// Implementations mutate the given *http.Request in place and return a
// non-nil error when the modification fails.
type Modifier interface {
	Modify(*http.Request) error
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/events/asyncprocessing/wire.go | registry/app/events/asyncprocessing/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package asyncprocessing
import (
"github.com/harness/gitness/events"
"github.com/harness/gitness/registry/app/store"
"github.com/harness/gitness/store/database/dbtx"
)
// ProvideReaderFactory provides the async-processing events reader factory
// for dependency injection.
func ProvideReaderFactory(eventsSystem *events.System) (*events.ReaderFactory[*Reader], error) {
	return NewReaderFactory(eventsSystem)
}
// ProvideAsyncProcessingReporter provides the async-processing event Reporter
// for dependency injection.
func ProvideAsyncProcessingReporter(
	tx dbtx.Transactor,
	eventsSystem *events.System,
	taskRepository store.TaskRepository,
	taskSourceRepository store.TaskSourceRepository,
	taskEventRepository store.TaskEventRepository,
) (*Reporter, error) {
	rep, err := NewReporter(tx, eventsSystem, taskRepository, taskSourceRepository, taskEventRepository)
	if err != nil {
		return nil, err
	}
	return rep, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/events/asyncprocessing/reader.go | registry/app/events/asyncprocessing/reader.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package asyncprocessing
import (
"github.com/harness/gitness/events"
)
// NewReaderFactory creates a typed reader factory for the registry async
// processing event stream.
func NewReaderFactory(eventsSystem *events.System) (*events.ReaderFactory[*Reader], error) {
	factory := func(gr *events.GenericReader) (*Reader, error) {
		return &Reader{innerReader: gr}, nil
	}
	return events.NewReaderFactory(eventsSystem, RegistryAsyncProcessing, factory)
}
// Reader is the event reader for this package.
// It exposes typesafe event registration methods for all events by this package.
// NOTE: Event registration methods are in the event's dedicated file.
type Reader struct {
	// innerReader consumes the untyped event stream on behalf of this Reader.
	innerReader *events.GenericReader
}
// Configure applies the given reader options to the underlying generic reader.
func (r *Reader) Configure(opts ...events.ReaderOption) {
	r.innerReader.Configure(opts...)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/events/asyncprocessing/reporter.go | registry/app/events/asyncprocessing/reporter.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package asyncprocessing
import (
"errors"
"github.com/harness/gitness/events"
"github.com/harness/gitness/registry/app/store"
"github.com/harness/gitness/store/database/dbtx"
)
// RegistryAsyncProcessing is the event stream/category name used by this
// package's reporter and reader.
const RegistryAsyncProcessing = "registry-async-postprocessing"

// Reporter is the event reporter for this package.
// It exposes typesafe send methods for all events of this package.
// NOTE: Event send methods are in the event's dedicated file.
type Reporter struct {
	tx                   dbtx.Transactor
	innerReporter        *events.GenericReporter
	TaskRepository       store.TaskRepository
	TaskSourceRepository store.TaskSourceRepository
	TaskEventRepository  store.TaskEventRepository
}
// NewReporter constructs a Reporter bound to the RegistryAsyncProcessing
// event category, wiring the transactor and task repositories used when
// upserting tasks.
func NewReporter(
	tx dbtx.Transactor,
	eventsSystem *events.System,
	taskRepository store.TaskRepository,
	taskSourceRepository store.TaskSourceRepository,
	taskEventRepository store.TaskEventRepository,
) (*Reporter, error) {
	innerReporter, err := events.NewReporter(eventsSystem, RegistryAsyncProcessing)
	if err != nil {
		// NOTE(review): the underlying err is discarded here; consider
		// wrapping it (fmt.Errorf with %w) so the cause is not lost.
		return nil, errors.New("failed to create new GenericReporter for registry async processing from event system")
	}
	return &Reporter{
		tx:                   tx,
		innerReporter:        innerReporter,
		TaskRepository:       taskRepository,
		TaskSourceRepository: taskSourceRepository,
		TaskEventRepository:  taskEventRepository,
	}, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/events/asyncprocessing/events.go | registry/app/events/asyncprocessing/events.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package asyncprocessing
import (
"context"
"encoding/json"
"fmt"
"github.com/harness/gitness/app/api/request"
"github.com/harness/gitness/events"
"github.com/harness/gitness/registry/types"
"github.com/rs/zerolog/log"
)
// ExecuteAsyncTask is the event type emitted to trigger execution of a queued
// async task.
const ExecuteAsyncTask events.EventType = "execute_async_task"

// ExecuteAsyncTaskPayload carries the key of the task to execute.
type ExecuteAsyncTaskPayload struct {
	TaskKey string `json:"task_key"` //nolint:tagliatelle
}
// BuildRegistryIndex resolves the acting principal from the auth session on
// ctx and enqueues a registry index rebuild.
func (r *Reporter) BuildRegistryIndex(ctx context.Context, registryID int64, sources []types.SourceRef) {
	session, ok := request.AuthSessionFrom(ctx)
	if !ok || session == nil {
		// Fix: the original ignored the second return value and dereferenced
		// the session unconditionally, panicking when no session is present.
		log.Ctx(ctx).Error().Msg("no auth session in context; skipping build registry index event")
		return
	}
	r.BuildRegistryIndexWithPrincipal(ctx, registryID, sources, session.Principal.ID)
}
// BuildRegistryIndexWithPrincipal enqueues an async task that rebuilds the
// index of the given registry on behalf of principalID. The registry itself
// is always appended as a task source.
func (r *Reporter) BuildRegistryIndexWithPrincipal(
	ctx context.Context,
	registryID int64,
	sources []types.SourceRef,
	principalID int64,
) {
	key := fmt.Sprintf("registry_%d", registryID)
	payload, err := json.Marshal(&types.BuildRegistryIndexTaskPayload{
		Key:         key,
		RegistryID:  registryID,
		PrincipalID: principalID,
	})
	if err != nil {
		// Fix: the original logged (with a misleading message) and then
		// continued, upserting a task with a nil payload. Bail out instead.
		log.Ctx(ctx).Err(err).Msgf("failed to marshal build registry index task payload")
		return
	}
	task := &types.Task{
		Key:     key,
		Kind:    types.TaskKindBuildRegistryIndex,
		Payload: payload,
	}
	sources = append(sources, types.SourceRef{Type: types.SourceTypeRegistry, ID: registryID})
	if err := r.UpsertAndSendEvent(ctx, task, sources); err != nil {
		log.Ctx(ctx).Err(err).Msgf("failed to send execute async task event")
	}
}
// BuildPackageIndex resolves the acting principal from the auth session on
// ctx and enqueues an index rebuild for one package (image) in the registry.
func (r *Reporter) BuildPackageIndex(ctx context.Context, registryID int64, image string) {
	session, ok := request.AuthSessionFrom(ctx)
	if !ok || session == nil {
		// Fix: guard against a missing auth session instead of panicking on a
		// nil dereference.
		log.Ctx(ctx).Error().Msg("no auth session in context; skipping build package index event")
		return
	}
	r.BuildPackageIndexWithPrincipal(ctx, registryID, image, session.Principal.ID)
}
// BuildPackageIndexWithPrincipal enqueues an async task that rebuilds the
// index of one package (image) in the registry on behalf of principalID.
func (r *Reporter) BuildPackageIndexWithPrincipal(
	ctx context.Context, registryID int64, image string, principalID int64,
) {
	key := fmt.Sprintf("package_%d_%s", registryID, image)
	payload, err := json.Marshal(&types.BuildPackageIndexTaskPayload{
		Key:         key,
		RegistryID:  registryID,
		Image:       image,
		PrincipalID: principalID,
	})
	if err != nil {
		// Fix: return early; the original continued and upserted a task with
		// a nil payload after logging a misleading "send event" message.
		log.Ctx(ctx).Err(err).Msgf("failed to marshal build package index task payload")
		return
	}
	task := &types.Task{
		Key:     key,
		Kind:    types.TaskKindBuildPackageIndex,
		Payload: payload,
	}
	sources := []types.SourceRef{{Type: types.SourceTypeRegistry, ID: registryID}}
	if err := r.UpsertAndSendEvent(ctx, task, sources); err != nil {
		log.Ctx(ctx).Err(err).Msgf("failed to send execute async task event")
	}
}
// BuildPackageMetadata resolves the acting principal from the auth session on
// ctx and enqueues a metadata rebuild for one package version.
func (r *Reporter) BuildPackageMetadata(
	ctx context.Context, registryID int64, image string, version string,
) {
	session, ok := request.AuthSessionFrom(ctx)
	if !ok || session == nil {
		// Fix: guard against a missing auth session instead of panicking on a
		// nil dereference.
		log.Ctx(ctx).Error().Msg("no auth session in context; skipping build package metadata event")
		return
	}
	r.BuildPackageMetadataWithPrincipal(ctx, registryID, image, version, session.Principal.ID)
}
// BuildPackageMetadataWithPrincipal enqueues an async task that rebuilds the
// metadata of one package version on behalf of principalID.
func (r *Reporter) BuildPackageMetadataWithPrincipal(
	ctx context.Context, registryID int64, image string,
	version string, principalID int64,
) {
	key := fmt.Sprintf("package_%d_%s_%s_metadata", registryID, image, version)
	payload, err := json.Marshal(&types.BuildPackageMetadataTaskPayload{
		Key:         key,
		RegistryID:  registryID,
		Image:       image,
		Version:     version,
		PrincipalID: principalID,
	})
	if err != nil {
		// Fix: return early; the original continued and upserted a task with
		// a nil payload after logging a misleading "send event" message.
		log.Ctx(ctx).Err(err).Msgf("failed to marshal build package metadata task payload")
		return
	}
	task := &types.Task{
		Key:     key,
		Kind:    types.TaskKindBuildPackageMetadata,
		Payload: payload,
	}
	sources := []types.SourceRef{{Type: types.SourceTypeRegistry, ID: registryID}}
	if err := r.UpsertAndSendEvent(ctx, task, sources); err != nil {
		log.Ctx(ctx).Err(err).Msgf("failed to send execute async task event")
	}
}
// UpsertAndSendEvent records the task and its sources, and — only when the
// upsert decided the task should be (re)enqueued — emits an ExecuteAsyncTask
// event carrying the task key.
func (r *Reporter) UpsertAndSendEvent(
	ctx context.Context,
	task *types.Task,
	sources []types.SourceRef,
) error {
	shouldEnqueue, upsertErr := r.upsertTask(ctx, task, sources)
	if upsertErr != nil || !shouldEnqueue {
		return upsertErr
	}
	payload := &ExecuteAsyncTaskPayload{TaskKey: task.Key}
	eventID, sendErr := events.ReporterSendEvent(r.innerReporter, ctx, ExecuteAsyncTask, payload)
	if sendErr != nil {
		log.Ctx(ctx).Err(sendErr).Msgf("failed to send execute async task event")
		return sendErr
	}
	log.Ctx(ctx).Debug().Msgf("reported execute async task event with id '%s'", eventID)
	return nil
}
// upsertTask atomically records the task and its sources and decides whether
// the caller should enqueue an execution event. It returns true only when the
// task transitioned to pending; a task already processing is instead marked
// to run again so a duplicate event is not emitted.
//nolint:nestif
func (r *Reporter) upsertTask(ctx context.Context, task *types.Task, sources []types.SourceRef) (bool, error) {
	shouldEnqueue := false
	err := r.tx.WithTx(
		ctx, func(ctx context.Context) error {
			err := r.TaskRepository.UpsertTask(ctx, task)
			if err != nil {
				return fmt.Errorf("failed to upsert task: %w", err)
			}
			// Lock the task row so concurrent reporters serialize on it.
			status, err := r.TaskRepository.LockForUpdate(ctx, task)
			if err != nil {
				return fmt.Errorf("failed to lock task %s for update: %w", task.Key, err)
			}
			for _, src := range sources {
				err = r.TaskSourceRepository.InsertSource(ctx, task.Key, src)
				if err != nil {
					return fmt.Errorf("failed to insert source %s for task %s: %w", src.Type, task.Key, err)
				}
			}
			if status == types.TaskStatusProcessing {
				// Task is mid-flight: flag it to run again rather than enqueue.
				err = r.TaskRepository.SetRunAgain(ctx, task.Key, true)
				if err != nil {
					return fmt.Errorf("failed to set task %s to run again: %w", task.Key, err)
				}
				err = r.TaskEventRepository.LogTaskEvent(ctx, task.Key, "merged", task.Payload)
				if err != nil {
					// Best-effort audit logging: don't fail the transaction.
					log.Ctx(ctx).Error().Msgf("failed to log task event for task %s: %v", task.Key, err)
				}
			} else {
				err = r.TaskRepository.UpdateStatus(ctx, task.Key, types.TaskStatusPending)
				if err != nil {
					return fmt.Errorf("failed to update task %s status to pending: %w", task.Key, err)
				}
				err = r.TaskEventRepository.LogTaskEvent(ctx, task.Key, "enqueued", task.Payload)
				if err != nil {
					return fmt.Errorf("failed to log task event for task %s: %w", task.Key, err)
				}
				shouldEnqueue = true
			}
			return nil
		})
	return shouldEnqueue, err
}
// RegisterExecuteAsyncTask registers fn as a handler for ExecuteAsyncTask events.
func (r *Reader) RegisterExecuteAsyncTask(
	fn events.HandlerFunc[*ExecuteAsyncTaskPayload],
	opts ...events.HandlerOption,
) error {
	return events.ReaderRegisterEvent(r.innerReader, ExecuteAsyncTask, fn, opts...)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/events/artifact/wire.go | registry/app/events/artifact/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package artifact
import (
"encoding/gob"
"github.com/harness/gitness/events"
"github.com/google/wire"
)
// ProvideReaderFactory provides the artifact events reader factory for
// dependency injection.
func ProvideReaderFactory(eventsSystem *events.System) (*events.ReaderFactory[*Reader], error) {
	return NewReaderFactory(eventsSystem)
}
// ProvideArtifactReporter provides the artifact event Reporter and registers
// the concrete artifact payload types with gob so they survive event
// (de)serialization through the Artifact interface.
func ProvideArtifactReporter(eventsSystem *events.System) (*Reporter, error) {
	reporter, err := NewReporter(eventsSystem)
	if err != nil {
		return nil, err
	}
	for _, impl := range []any{&CommonArtifact{}, &DockerArtifact{}, &HelmArtifact{}} {
		gob.Register(impl)
	}
	return reporter, nil
}
// WireSet exposes this package's providers for wire dependency injection.
var WireSet = wire.NewSet(
	ProvideReaderFactory,
	ProvideArtifactReporter,
)
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/events/artifact/artifacts.go | registry/app/events/artifact/artifacts.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package artifact
//nolint:revive
import (
"context"
"github.com/harness/gitness/events"
"github.com/harness/gitness/registry/app/api/openapi/contracts/artifact"
"github.com/rs/zerolog/log"
)
// ArtifactsCategory is the event stream/category name for artifact events.
const ArtifactsCategory = "artifacts"

// ArtifactCreatedEvent is emitted after an artifact is created.
const ArtifactCreatedEvent events.EventType = "artifact-created"

// ArtifactDeletedEvent is emitted after an artifact is deleted.
const ArtifactDeletedEvent events.EventType = "artifact-deleted"
// ArtifactCreatedPayload is the event payload emitted when an artifact is
// created in a registry.
//nolint:revive
type ArtifactCreatedPayload struct {
	RegistryID   int64                `json:"registry_id"`
	PrincipalID  int64                `json:"principal_id"`
	ArtifactType artifact.PackageType `json:"artifact_type"`
	Artifact     Artifact             `json:"artifact"`
}
// Artifact is the minimal contract every artifact payload variant satisfies.
type Artifact interface {
	// GetInfo returns a short identifying string for the artifact.
	GetInfo() string
}

// BaseArtifact carries the fields common to all artifact payload variants.
type BaseArtifact struct {
	Name string `json:"name"`
	Ref  string `json:"ref"`
}
// DockerArtifact is the payload variant for docker artifacts.
type DockerArtifact struct {
	BaseArtifact
	URL    string `json:"url"`
	Tag    string `json:"tag"`
	Digest string `json:"digest"`
}

// GetInfo returns the artifact reference.
func (a *DockerArtifact) GetInfo() string {
	return a.Ref
}
// HelmArtifact is the payload variant for helm artifacts.
type HelmArtifact struct {
	BaseArtifact
	URL    string `json:"url"`
	Tag    string `json:"tag"`
	Digest string `json:"digest"`
}

// GetInfo returns the artifact reference.
func (a *HelmArtifact) GetInfo() string {
	return a.Ref
}
// CommonArtifact is the payload variant for all other package types.
type CommonArtifact struct {
	BaseArtifact
	Type    artifact.PackageType `json:"package_type"`
	Version string               `json:"version"`
}

// GetInfo returns the artifact reference.
func (a *CommonArtifact) GetInfo() string {
	return a.Ref
}
// ArtifactInfo is a generic descriptor for an artifact: its package type,
// name, version, and a type-specific payload.
//nolint:revive
type ArtifactInfo struct {
	Type     artifact.PackageType `json:"type"`
	Name     string               `json:"name"`
	Version  string               `json:"version"`
	Artifact any                  `json:"artifact"`
}
// ArtifactCreated reports an artifact-created event. Delivery is best-effort:
// failures are logged, not returned.
func (r *Reporter) ArtifactCreated(ctx context.Context, payload *ArtifactCreatedPayload) {
	eventID, err := events.ReporterSendEvent(r.innerReporter, ctx, ArtifactCreatedEvent, payload)
	if err != nil {
		// Fix: the original log message duplicated a word
		// ("artifact-created created event").
		log.Ctx(ctx).Err(err).Msgf("failed to send artifact-created event")
		return
	}
	log.Ctx(ctx).Debug().Msgf("reported artifact-created event with id '%s'", eventID)
}
// RegisterArtifactCreated registers fn as a handler for ArtifactCreatedEvent.
func (r *Reader) RegisterArtifactCreated(
	fn events.HandlerFunc[*ArtifactCreatedPayload],
	opts ...events.HandlerOption,
) error {
	return events.ReaderRegisterEvent(r.innerReader, ArtifactCreatedEvent, fn, opts...)
}
// ArtifactDeletedPayload is the event payload emitted when an artifact is
// deleted from a registry.
//nolint:revive
type ArtifactDeletedPayload struct {
	RegistryID   int64                `json:"registry_id"`
	PrincipalID  int64                `json:"principal_id"`
	ArtifactType artifact.PackageType `json:"artifact_type"`
	Artifact     Artifact             `json:"artifact"`
}
// ArtifactDeleted reports an artifact-deleted event. Delivery is best-effort:
// failures are logged, not returned.
func (r *Reporter) ArtifactDeleted(ctx context.Context, payload *ArtifactDeletedPayload) {
	eventID, sendErr := events.ReporterSendEvent(r.innerReporter, ctx, ArtifactDeletedEvent, payload)
	if sendErr == nil {
		log.Ctx(ctx).Debug().Msgf("reported artifact deleted event with id '%s'", eventID)
		return
	}
	log.Ctx(ctx).Err(sendErr).Msgf("failed to send artifact deleted event")
}
// RegisterArtifactDeleted registers fn as a handler for ArtifactDeletedEvent.
func (r *Reader) RegisterArtifactDeleted(
	fn events.HandlerFunc[*ArtifactDeletedPayload],
	opts ...events.HandlerOption,
) error {
	return events.ReaderRegisterEvent(r.innerReader, ArtifactDeletedEvent, fn, opts...)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/events/artifact/reader.go | registry/app/events/artifact/reader.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package artifact
import (
"github.com/harness/gitness/events"
)
// NewReaderFactory creates a typed reader factory for the artifacts event
// stream.
func NewReaderFactory(eventsSystem *events.System) (*events.ReaderFactory[*Reader], error) {
	factory := func(gr *events.GenericReader) (*Reader, error) {
		return &Reader{innerReader: gr}, nil
	}
	return events.NewReaderFactory(eventsSystem, ArtifactsCategory, factory)
}
// Reader is the event reader for this package.
// It exposes typesafe event registration methods for all events by this package.
// NOTE: Event registration methods are in the event's dedicated file.
type Reader struct {
	// innerReader consumes the untyped event stream on behalf of this Reader.
	innerReader *events.GenericReader
}
// Configure applies the given reader options to the underlying generic reader.
func (r *Reader) Configure(opts ...events.ReaderOption) {
	r.innerReader.Configure(opts...)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/events/artifact/reporter.go | registry/app/events/artifact/reporter.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package artifact
import (
"errors"
"github.com/harness/gitness/events"
)
// Reporter is the event reporter for this package.
// It exposes typesafe send methods for all events of this package.
// NOTE: Event send methods are in the event's dedicated file.
type Reporter struct {
	// innerReporter performs the untyped event publishing.
	innerReporter *events.GenericReporter
}
// NewReporter constructs a Reporter bound to the ArtifactsCategory event stream.
func NewReporter(eventsSystem *events.System) (*Reporter, error) {
	innerReporter, err := events.NewReporter(eventsSystem, ArtifactsCategory)
	if err != nil {
		// NOTE(review): the underlying err is discarded here; consider
		// wrapping it (fmt.Errorf with %w) so the cause is not lost.
		return nil, errors.New("failed to create new GenericReporter from event system")
	}
	return &Reporter{
		innerReporter: innerReporter,
	}, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/events/replication/wire.go | registry/app/events/replication/wire.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package replication
import (
"github.com/harness/gitness/events"
)
// ProvideReplicationReporter wires a replication event Reporter backed
// by the given events system.
func ProvideReplicationReporter(
	eventsSystem *events.System,
) (Reporter, error) {
	// NewReporter already yields the exact (Reporter, error) pair we
	// need, including a nil Reporter on failure.
	return NewReporter(eventsSystem)
}
// ProvideNoOpReplicationReporter wires a Reporter that silently drops
// every event (used when replication reporting is disabled).
func ProvideNoOpReplicationReporter() (Reporter, error) {
	var noop Noop
	return &noop, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/events/replication/replication.go | registry/app/events/replication/replication.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package replication
import "github.com/harness/gitness/events"
// RegistryBlobCreatedEvent is the event type emitted when a registry
// blob becomes available for replication.
const RegistryBlobCreatedEvent events.EventType = "registry-blob-created"
// BlobAction identifies the kind of blob operation being replicated
// (see BlobCreate / BlobDelete below).
type BlobAction string
// Provider identifies a supported cloud storage provider
// (see CLOUDFLARE / GCS below).
type Provider string
// CloudLocation describes one storage bucket (source or destination)
// involved in blob replication.
type CloudLocation struct {
Provider Provider `json:"provider,omitempty"`
Endpoint string `json:"endpoint,omitempty"`
Region string `json:"region,omitempty"`
Bucket string `json:"bucket,omitempty"`
}
// ReplicationDetails represents the ReplicationDetails message from the proto file.
// Exactly one of BlobID / GenericBlobID is expected to be set by the
// reporter (see Reporter.ReportEventAsync).
type ReplicationDetails struct {
AccountID string `json:"account_id,omitempty"`
Action BlobAction `json:"action,omitempty"`
BlobID int64 `json:"blob_id,omitempty"`
GenericBlobID string `json:"generic_blob_id,omitempty"`
Path string `json:"path,omitempty"`
Source CloudLocation `json:"source"`
Destinations []CloudLocation `json:"destinations,omitempty"`
}
// Supported blob actions.
const (
BlobCreate BlobAction = "BlobCreate"
BlobDelete BlobAction = "BlobDelete"
)
// Supported providers.
const (
CLOUDFLARE Provider = "CLOUDFLARE"
GCS Provider = "GCS"
)
// ProviderValue maps the upper-cased provider name (as found in the
// storage configuration) to its Provider constant.
var ProviderValue = map[string]Provider{
"CLOUDFLARE": CLOUDFLARE,
"GCS": GCS,
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/events/replication/reporter.go | registry/app/events/replication/reporter.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package replication
import (
"context"
"errors"
"strings"
"github.com/harness/gitness/events"
a "github.com/harness/gitness/registry/app/api/openapi/contracts/artifact"
s "github.com/harness/gitness/registry/app/storage"
"github.com/harness/gitness/types"
"github.com/rs/zerolog/log"
)
// RegistryBlobsReplication is the event category used for blob
// replication events.
const RegistryBlobsReplication = "registry-blobs-replication"
// Reporter emits blob replication events.
// ReportEventAsync is fire-and-forget: failures are logged, not returned.
type Reporter interface {
ReportEventAsync(
ctx context.Context,
accountID string,
action BlobAction,
blobID int64,
genericBlobID string,
sha256 string,
conf *types.Config,
destinationBuckets []CloudLocation,
)
}
// reporter is the default Reporter implementation backed by the
// generic events reporter.
type reporter struct {
innerReporter *events.GenericReporter
}
// NewReporter creates the blob replication event Reporter for the
// RegistryBlobsReplication category.
func NewReporter(
	eventsSystem *events.System,
) (Reporter, error) {
	innerReporter, err := events.NewReporter(eventsSystem, RegistryBlobsReplication)
	if err != nil {
		// Join the underlying error instead of discarding it so the
		// root cause stays available via errors.Is / errors.As.
		return nil, errors.Join(
			errors.New("failed to create new GenericReporter for registry blobs replication from event system"),
			err,
		)
	}
	return &reporter{
		innerReporter: innerReporter,
	}, nil
}
// ReportEventAsync emits a blob replication event in a background
// goroutine. Exactly one of blobID / genericBlobID must be set: blobID
// selects the docker blob path, genericBlobID the generic one. When no
// destination buckets are configured the call is a no-op. Failures are
// logged, never returned (fire-and-forget).
func (r reporter) ReportEventAsync(
	ctx context.Context,
	accountID string,
	action BlobAction,
	blobID int64,
	genericBlobID string,
	sha256 string,
	conf *types.Config,
	destinationBuckets []CloudLocation,
) {
	// Nothing to replicate to — bail out before doing any work.
	if len(destinationBuckets) == 0 {
		return
	}

	// Resolve the storage path of the blob being replicated.
	var path string
	var err error
	switch {
	case blobID != 0:
		path, err = s.BlobPath(accountID, string(a.PackageTypeDOCKER), sha256)
	case genericBlobID != "":
		path, err = s.BlobPath(accountID, string(a.PackageTypeGENERIC), sha256)
	default:
		err = errors.New("blobID or genericBlobID must be set")
	}
	if err != nil {
		log.Ctx(ctx).Error().
			Err(err).
			Int64("blobID", blobID).
			Str("genericBlobID", genericBlobID).
			Str("action", string(action)).
			Msg("Failed to determine blob path for event reporting")
		return
	}

	// The source is the registry's primary S3-compatible storage as
	// described by the configuration.
	source := CloudLocation{
		Provider: ProviderValue[strings.ToUpper(conf.Registry.Storage.S3Storage.Provider)],
		Endpoint: conf.Registry.Storage.S3Storage.RegionEndpoint,
		Region:   conf.Registry.Storage.S3Storage.Region,
		Bucket:   conf.Registry.Storage.S3Storage.Bucket,
	}

	// Send asynchronously; the outcome is only logged.
	go func() {
		eventID, err := events.ReporterSendEvent(r.innerReporter, ctx, RegistryBlobCreatedEvent, &ReplicationDetails{
			AccountID:     accountID,
			Action:        action,
			BlobID:        blobID,
			GenericBlobID: genericBlobID,
			Path:          path,
			Source:        source,
			Destinations:  destinationBuckets,
		})
		if err != nil {
			log.Ctx(ctx).Err(err).Msgf("failed to send blob replication created event")
			return
		}
		log.Ctx(ctx).Debug().Msgf("reported blob replication event with id '%s'", eventID)
	}()
}
// Noop is a Reporter that discards every event; wired in when blob
// replication reporting is disabled.
type Noop struct {
}
// ReportEventAsync implements Reporter as a no-op.
func (*Noop) ReportEventAsync(
_ context.Context, _ string, _ BlobAction, _ int64, _ string, _ string, _ *types.Config, _ []CloudLocation,
) {
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/dist_temp/errcode/errors.go | registry/app/dist_temp/errcode/errors.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errcode
import (
"encoding/json"
"fmt"
"strings"
)
// ErrorCoder is the base interface for ErrorCode and Error allowing
// users of each to just call ErrorCode to get the real ID of each.
type ErrorCoder interface {
ErrorCode() CodeError
}
// CodeError represents the error type. The errors are serialized via strings
// and the integer format may change and should *never* be exported.
type CodeError int
// Compile-time check that CodeError implements error.
var _ error = CodeError(0)
// ErrorCode just returns itself.
func (ec CodeError) ErrorCode() CodeError {
return ec
}
// Error returns the ID/Value, lower-cased with underscores replaced by
// spaces (e.g. "NAME_UNKNOWN" -> "name unknown").
func (ec CodeError) Error() string {
return strings.ToLower(strings.ReplaceAll(ec.String(), "_", " "))
}
// Descriptor returns the descriptor for the error code, falling back to
// the ErrCodeUnknown descriptor for unregistered codes.
func (ec CodeError) Descriptor() ErrorDescriptor {
d, ok := errorCodeToDescriptors[ec]
if !ok {
return ErrCodeUnknown.Descriptor()
}
return d
}
// String returns the canonical identifier for this error code.
func (ec CodeError) String() string {
return ec.Descriptor().Value
}
// Message returned the human-readable error message for this error code.
func (ec CodeError) Message() string {
return ec.Descriptor().Message
}
// MarshalText encodes the receiver into UTF-8-encoded text and returns the
// result. It serializes the canonical identifier, not the numeric code.
func (ec CodeError) MarshalText() (text []byte, err error) {
return []byte(ec.String()), nil
}
// UnmarshalText decodes the form generated by MarshalText.
// Unknown identifiers decode to ErrCodeUnknown rather than failing.
func (ec *CodeError) UnmarshalText(text []byte) error {
desc, ok := idToDescriptors[string(text)]
if !ok {
desc = ErrCodeUnknown.Descriptor()
}
*ec = desc.Code
return nil
}
// WithMessage creates a new Error struct based on the passed-in info and
// overrides the Message property.
func (ec CodeError) WithMessage(message string) Error {
return Error{
Code: ec,
Message: message,
}
}
// WithDetail creates a new Error struct based on the passed-in info and
// set the Detail property appropriately. The message is taken from the
// code's descriptor.
func (ec CodeError) WithDetail(detail any) Error {
return Error{
Code: ec,
Message: ec.Message(),
}.WithDetail(detail)
}
// WithArgs creates a new Error struct and sets the Args slice.
// The args are substituted into the descriptor message via fmt.Sprintf.
func (ec CodeError) WithArgs(args ...any) Error {
return Error{
Code: ec,
Message: ec.Message(),
}.WithArgs(args...)
}
// Error provides a wrapper around ErrorCode with extra Details provided.
type Error struct {
Code CodeError `json:"code"`
Message string `json:"message"`
Detail any `json:"detail,omitempty"`
}
// Compile-time check that Error implements error.
var _ error = Error{}
// ErrorCode returns the ID/Value of this Error.
func (e Error) ErrorCode() CodeError {
return e.Code
}
// Error returns a human readable representation of the error in the
// form "<code>: <message>".
func (e Error) Error() string {
return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message)
}
// WithDetail will return a new Error, based on the current one, but with
// some Detail info added.
func (e Error) WithDetail(detail any) Error {
return Error{
Code: e.Code,
Message: e.Message,
Detail: detail,
}
}
// WithArgs uses the passed-in list of interface{} as the substitution
// variables in the Error's Message string, but returns a new Error.
// NOTE: the format string comes from the code's descriptor message,
// not from e.Message.
func (e Error) WithArgs(args ...any) Error {
return Error{
Code: e.Code,
Message: fmt.Sprintf(e.Code.Message(), args...),
Detail: e.Detail,
}
}
// ErrorDescriptor provides relevant information about a given error code.
type ErrorDescriptor struct {
// Code is the error code that this descriptor describes.
Code CodeError
// Value provides a unique, string key, often captilized with
// underscores, to identify the error code. This value is used as the
// keyed value when serializing api errors.
Value string
// Message is a short, human readable description of the error condition
// included in API responses.
Message string
// Description provides a complete account of the errors purpose, suitable
// for use in documentation.
Description string
// HTTPStatusCode provides the http status code that is associated with
// this error condition.
HTTPStatusCode int
}
// ParseErrorCode returns the value by the string error code.
// `ErrorCodeUnknown` will be returned if the error is not known.
func ParseErrorCode(value string) CodeError {
ed, ok := idToDescriptors[value]
if ok {
return ed.Code
}
return ErrCodeUnknown
}
// Errors provides the envelope for multiple errors and a few sugar methods
// for use within the application.
type Errors []error

// Compile-time check that Errors implements error.
var _ error = Errors{}

// Error renders the slice: "<nil>" when empty, the single error's text
// for one element, otherwise a newline-terminated list prefixed with
// "errors:".
func (errs Errors) Error() string {
	switch len(errs) {
	case 0:
		return "<nil>"
	case 1:
		return errs[0].Error()
	default:
		// strings.Builder avoids the quadratic cost of string
		// concatenation in a loop.
		var b strings.Builder
		b.WriteString("errors:\n")
		for _, err := range errs {
			b.WriteString(err.Error())
			b.WriteByte('\n')
		}
		return b.String()
	}
}

// Len returns the current number of errors.
func (errs Errors) Len() int {
	return len(errs)
}
// MarshalJSON converts slice of error, ErrorCode or Error into a
// slice of Error - then serializes. The output envelope has the shape
// {"errors": [...]}.
func (errs Errors) MarshalJSON() ([]byte, error) {
var tmpErrs struct {
Errors []Error `json:"errors,omitempty"`
}
for _, daErr := range errs {
var err Error
// Normalize each element to an Error value. A deliberate type
// switch (not errors.As) is used: wrapped errors must NOT be
// unwrapped here, only the element's own type matters.
switch daErr := daErr.(type) {
case CodeError:
err = daErr.WithDetail(nil)
case Error:
err = daErr
default:
err = ErrCodeUnknown.WithDetail(daErr)
}
// If the Error struct was setup and they forgot to set the
// Message field (meaning its "") then grab it from the ErrCode
msg := err.Message
if msg == "" {
msg = err.Code.Message()
}
tmpErr := Error{
Code: err.Code,
Message: msg,
Detail: err.Detail,
}
// if the detail contains error extract the error message
// otherwise json.Marshal will not serialize it at all
// https://github.com/golang/go/issues/10748
if detail, ok := tmpErr.Detail.(error); ok {
tmpErr.Detail = detail.Error()
}
tmpErrs.Errors = append(tmpErrs.Errors, tmpErr)
}
return json.Marshal(tmpErrs)
}
// UnmarshalJSON deserializes []Error and then converts it into slice of
// Error or ErrorCode. This is the inverse of MarshalJSON: elements that
// carry no extra information collapse back to a bare CodeError.
func (errs *Errors) UnmarshalJSON(data []byte) error {
var tmpErrs struct {
Errors []Error
}
if err := json.Unmarshal(data, &tmpErrs); err != nil {
return err
}
var newErrs Errors
for _, daErr := range tmpErrs.Errors {
// If Message is empty or exactly matches the Code's message string
// then just use the Code, no need for a full Error struct
if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) {
// Error's w/o details get converted to ErrorCode
newErrs = append(newErrs, daErr.Code)
} else {
// Error's w/ details are untouched
newErrs = append(
newErrs, Error{
Code: daErr.Code,
Message: daErr.Message,
Detail: daErr.Detail,
},
)
}
}
*errs = newErrs
return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/dist_temp/errcode/register.go | registry/app/dist_temp/errcode/register.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errcode
import (
"context"
"errors"
"fmt"
"net"
"net/http"
"os"
"sort"
"sync"
"syscall"
storagedriver "github.com/harness/gitness/registry/app/driver"
"github.com/jackc/pgerrcode"
"github.com/jackc/pgx/v5/pgconn"
"google.golang.org/api/googleapi"
)
// Package-level registries populated by register(); guarded by
// registerLock for writes, read-only afterwards.
var (
// errorCodeToDescriptors maps numeric code -> descriptor.
errorCodeToDescriptors = map[CodeError]ErrorDescriptor{}
// idToDescriptors maps the string Value key -> descriptor.
idToDescriptors = map[string]ErrorDescriptor{}
// groupToDescriptors maps group name -> its registered descriptors.
groupToDescriptors = map[string][]ErrorDescriptor{}
)
// Generic, protocol-agnostic error codes registered under the
// "errcode" group.
var (
// ErrCodeUnknown is a generic error that can be used as a last
// resort if there is no situation-specific error message that can be used.
ErrCodeUnknown = register(
"errcode", ErrorDescriptor{
Value: "UNKNOWN",
Message: "unknown error",
Description: `Generic error returned when the error does not have an
API classification.`,
HTTPStatusCode: http.StatusInternalServerError,
},
)
// ErrCodeUnsupported is returned when an operation is not supported.
ErrCodeUnsupported = register(
"errcode", ErrorDescriptor{
Value: "UNSUPPORTED",
Message: "The operation is unsupported.",
Description: `The operation was unsupported due to a missing
implementation or invalid set of parameters.`,
HTTPStatusCode: http.StatusMethodNotAllowed,
},
)
// ErrCodeUnauthorized is returned if a request requires
// authentication.
ErrCodeUnauthorized = register(
"errcode", ErrorDescriptor{
Value: "UNAUTHORIZED",
Message: "authentication required",
Description: `The access controller was unable to authenticate
the client. Often this will be accompanied by a
Www-Authenticate HTTP response header indicating how to
authenticate.`,
HTTPStatusCode: http.StatusUnauthorized,
},
)
// ErrCodeDenied is returned if a client does not have sufficient
// permission to perform an action.
ErrCodeDenied = register(
"errcode", ErrorDescriptor{
Value: "DENIED",
Message: "requested access to the resource is denied",
Description: `The access controller denied access for the
operation on a resource.`,
HTTPStatusCode: http.StatusForbidden,
},
)
// ErrCodeUnavailable provides a common error to report unavailability
// of a service or endpoint.
ErrCodeUnavailable = register(
"errcode", ErrorDescriptor{
Value: "UNAVAILABLE",
Message: "service unavailable",
Description: "Returned when a service is not available",
HTTPStatusCode: http.StatusServiceUnavailable,
},
)
// ErrCodeTooManyRequests is returned if a client attempts too many
// times to contact a service endpoint.
ErrCodeTooManyRequests = register(
"errcode", ErrorDescriptor{
Value: "TOOMANYREQUESTS",
Message: "too many requests",
Description: `Returned when a client attempts to contact a
service too many times`,
HTTPStatusCode: http.StatusTooManyRequests,
},
)
// ErrCodeConnectionReset provides an error to report a client dropping the
// connection.
ErrCodeConnectionReset = register(
"errcode", ErrorDescriptor{
Value: "CONNECTIONRESET",
Message: "connection reset by peer",
Description: "Returned when the client closes the connection unexpectedly",
// 400 is the most fitting error code in the HTTP spec, 499 is used by
// nginx (and within this project as well), and is specific to this scenario,
// but it is preferable to stay within the spec.
HTTPStatusCode: http.StatusBadRequest,
},
)
// ErrCodeRequestCanceled provides an error to report a canceled request. This is usually due to a
// context.Canceled error.
ErrCodeRequestCanceled = register(
"errcode", ErrorDescriptor{
Value: "REQUESTCANCELED",
Message: "request canceled",
Description: "Returned when the client cancels the request",
HTTPStatusCode: http.StatusBadRequest,
},
)
// ErrCodeInvalidRequest provides an error when the request is invalid.
ErrCodeInvalidRequest = register(
"errcode", ErrorDescriptor{
Value: "INVALID REQUEST",
Message: "invalid request",
Description: "Returned when the request is invalid",
HTTPStatusCode: http.StatusBadRequest,
},
)
)
// errGroup is the descriptor group for docker registry API v2 errors.
const errGroup = "registry.api.v2"
// Docker registry API v2 error codes.
var (
// ErrCodeDigestInvalid is returned when uploading a blob if the
// provided digest does not match the blob contents.
ErrCodeDigestInvalid = register(
errGroup, ErrorDescriptor{
Value: "DIGEST_INVALID",
Message: "provided digest did not match uploaded content",
Description: `When a blob is uploaded, the registry will check that
the content matches the digest provided by the client. The error may
include a detail structure with the key "digest", including the
invalid digest string. This error may also be returned when a manifest
includes an invalid layer digest.`,
HTTPStatusCode: http.StatusBadRequest,
},
)
// ErrCodeSizeInvalid is returned when uploading a blob if the provided.
ErrCodeSizeInvalid = register(
errGroup, ErrorDescriptor{
Value: "SIZE_INVALID",
Message: "provided length did not match content length",
Description: `When a layer is uploaded, the provided size will be
checked against the uploaded content. If they do not match, this error
will be returned.`,
HTTPStatusCode: http.StatusBadRequest,
},
)
// ErrCodeRangeInvalid is returned when uploading a blob if the provided
// content range is invalid.
ErrCodeRangeInvalid = register(
errGroup, ErrorDescriptor{
Value: "RANGE_INVALID",
Message: "invalid content range",
Description: `When a layer is uploaded, the provided range is checked
against the uploaded chunk. This error is returned if the range is
out of order.`,
HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
},
)
// ErrCodeNameInvalid is returned when the name in the manifest does not
// match the provided name.
ErrCodeNameInvalid = register(
errGroup, ErrorDescriptor{
Value: "NAME_INVALID",
Message: "invalid repository name",
Description: `Invalid repository name encountered either during
manifest validation or any API operation.`,
HTTPStatusCode: http.StatusBadRequest,
},
)
// ErrCodeTagInvalid is returned when the tag in the manifest does not
// match the provided tag.
ErrCodeTagInvalid = register(
errGroup, ErrorDescriptor{
Value: "TAG_INVALID",
Message: "manifest tag did not match URI",
Description: `During a manifest upload, if the tag in the manifest
does not match the uri tag, this error will be returned.`,
HTTPStatusCode: http.StatusBadRequest,
},
)
// ErrCodeNameUnknown when the repository name is not known.
ErrCodeNameUnknown = register(
errGroup, ErrorDescriptor{
Value: "NAME_UNKNOWN",
Message: "repository name not known to registry",
Description: `This is returned if the name used during an operation is
unknown to the registry.`,
HTTPStatusCode: http.StatusNotFound,
},
)
// ErrCodeManifestUnknown returned when image manifest is unknown.
ErrCodeManifestUnknown = register(
errGroup, ErrorDescriptor{
Value: "MANIFEST_UNKNOWN",
Message: "manifest unknown",
Description: `This error is returned when the manifest, identified by
name and tag is unknown to the repository.`,
HTTPStatusCode: http.StatusNotFound,
},
)
// ErrCodeManifestQuarantined returned when image manifest is quarantined.
ErrCodeManifestQuarantined = register(
errGroup, ErrorDescriptor{
Value: "ARTIFACT_QUARANTINED",
Message: "artifact quarantined",
Description: `This error is returned when the manifest, identified by
name or tag is quarantined`,
HTTPStatusCode: http.StatusForbidden,
},
)
// ErrCodeManifestReferencedInList is returned when attempting to delete a manifest that is still referenced by at
// least one manifest list.
ErrCodeManifestReferencedInList = register(
errGroup, ErrorDescriptor{
Value: "MANIFEST_REFERENCED",
Message: "manifest referenced by a manifest list",
Description: `The manifest is still referenced by at least one manifest list and therefore the delete cannot
proceed.`,
HTTPStatusCode: http.StatusConflict,
},
)
// ErrCodeManifestInvalid returned when an image manifest is invalid,
// typically during a PUT operation. This error encompasses all errors
// encountered during manifest validation that aren't signature errors.
ErrCodeManifestInvalid = register(
errGroup, ErrorDescriptor{
Value: "MANIFEST_INVALID",
Message: "manifest invalid",
Description: `During upload, manifests undergo several checks ensuring
validity. If those checks fail, this error may be returned, unless a
more specific error is included. The detail will contain information
the failed validation.`,
HTTPStatusCode: http.StatusBadRequest,
},
)
// ErrCodeManifestUnverified is returned when the manifest fails
// signature verification.
ErrCodeManifestUnverified = register(
errGroup, ErrorDescriptor{
Value: "MANIFEST_UNVERIFIED",
Message: "manifest failed signature verification",
Description: `During manifest upload, if the manifest fails signature
verification, this error will be returned.`,
HTTPStatusCode: http.StatusBadRequest,
},
)
// ErrCodeManifestReferenceLimit is returned when a manifest has more
// references than the configured limit.
ErrCodeManifestReferenceLimit = register(
errGroup, ErrorDescriptor{
Value: "MANIFEST_REFERENCE_LIMIT",
Message: "too many manifest references",
Description: `This error may be returned when a manifest references more than
the configured limit allows.`,
HTTPStatusCode: http.StatusBadRequest,
},
)
// ErrCodeManifestPayloadSizeLimit is returned when a manifest payload is
// bigger than the configured limit.
ErrCodeManifestPayloadSizeLimit = register(
errGroup, ErrorDescriptor{
Value: "MANIFEST_SIZE_LIMIT",
Message: "payload size limit exceeded",
Description: `This error may be returned when a manifest payload size is bigger than
the configured limit allows.`,
HTTPStatusCode: http.StatusBadRequest,
},
)
// ErrCodeManifestBlobUnknown is returned when a manifest blob is
// unknown to the registry.
ErrCodeManifestBlobUnknown = register(
errGroup, ErrorDescriptor{
Value: "MANIFEST_BLOB_UNKNOWN",
Message: "blob unknown to registry",
Description: `This error may be returned when a manifest blob is
unknown to the registry.`,
HTTPStatusCode: http.StatusBadRequest,
},
)
// ErrCodeBlobUnknown is returned when a blob is unknown to the
// registry. This can happen when the manifest references a nonexistent
// layer or the result is not found by a blob fetch.
ErrCodeBlobUnknown = register(
errGroup, ErrorDescriptor{
Value: "BLOB_UNKNOWN",
Message: "blob unknown to registry",
Description: `This error may be returned when a blob is unknown to the
registry in a specified repository. This can be returned with a
standard get or if a manifest references an unknown layer during
upload.`,
HTTPStatusCode: http.StatusNotFound,
},
)
// ErrCodeBlobUploadUnknown is returned when an upload is unknown.
ErrCodeBlobUploadUnknown = register(
errGroup, ErrorDescriptor{
Value: "BLOB_UPLOAD_UNKNOWN",
Message: "blob upload unknown to registry",
Description: `If a blob upload has been cancelled or was never
started, this error code may be returned.`,
HTTPStatusCode: http.StatusNotFound,
},
)
// ErrCodeBlobUploadInvalid is returned when an upload is invalid.
ErrCodeBlobUploadInvalid = register(
errGroup, ErrorDescriptor{
Value: "BLOB_UPLOAD_INVALID",
Message: "blob upload invalid",
Description: `The blob upload encountered an error and can no
longer proceed.`,
HTTPStatusCode: http.StatusNotFound,
},
)
// ErrCodePaginationNumberInvalid is returned when the `n` parameter is
// not an integer, or `n` is negative.
ErrCodePaginationNumberInvalid = register(
errGroup, ErrorDescriptor{
Value: "PAGINATION_NUMBER_INVALID",
Message: "invalid number of results requested",
Description: `Returned when the "n" parameter (number of results
to return) is not an integer, "n" is negative or "n" is bigger than
the maximum allowed.`,
HTTPStatusCode: http.StatusBadRequest,
},
)
)
// gitnessErrGroup is the descriptor group for gitness-specific
// registry API errors.
const gitnessErrGroup = "gitness.api.v1"
// Gitness registry API error codes.
var (
ErrCodeRootNotFound = register(
gitnessErrGroup, ErrorDescriptor{
Value: "ROOT_NOT_FOUND",
Message: "Root not found",
Description: "The root does not exist",
HTTPStatusCode: http.StatusNotFound,
},
)
ErrCodeParentNotFound = register(
gitnessErrGroup, ErrorDescriptor{
Value: "PARENT_NOT_FOUND",
Message: "Parent not found",
Description: "The parent does not exist",
HTTPStatusCode: http.StatusNotFound,
},
)
ErrCodeRegNotFound = register(
gitnessErrGroup, ErrorDescriptor{
Value: "REGISTRY_NOT_FOUND",
Message: "registry not found",
Description: "The registry does not exist",
HTTPStatusCode: http.StatusNotFound,
},
)
)
var (
// nextCode is the next numeric code to hand out; codes start at 1000.
nextCode = 1000
// registerLock serializes registrations (register may be called from
// multiple package init paths).
registerLock sync.Mutex
)
// Register will make the passed-in error known to the environment and
// return a new ErrorCode.
func Register(group string, descriptor ErrorDescriptor) CodeError {
return register(group, descriptor)
}
// register will make the passed-in error known to the environment and
// return a new ErrorCode. It panics on duplicate Value or code — this
// indicates a programmer error at init time, not a runtime condition.
func register(group string, descriptor ErrorDescriptor) CodeError {
registerLock.Lock()
defer registerLock.Unlock()
descriptor.Code = CodeError(nextCode)
if _, ok := idToDescriptors[descriptor.Value]; ok {
panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value))
}
if _, ok := errorCodeToDescriptors[descriptor.Code]; ok {
panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code))
}
groupToDescriptors[group] = append(groupToDescriptors[group], descriptor)
errorCodeToDescriptors[descriptor.Code] = descriptor
idToDescriptors[descriptor.Value] = descriptor
nextCode++
return descriptor.Code
}
// byValue implements sort.Interface, ordering ErrorDescriptors
// lexicographically by their Value key.
type byValue []ErrorDescriptor
func (a byValue) Len() int { return len(a) }
func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value }
// GetGroupNames returns the sorted list of Error group names that are
// registered.
func GetGroupNames() []string {
	// Pre-size to the map's length to avoid repeated slice growth.
	keys := make([]string, 0, len(groupToDescriptors))
	for k := range groupToDescriptors {
		keys = append(keys, k)
	}
	// Map iteration order is random; sort for deterministic output.
	sort.Strings(keys)
	return keys
}
// GetErrorCodeGroup returns the named group of error descriptors,
// sorted by Value. An unknown name yields a nil slice.
func GetErrorCodeGroup(name string) []ErrorDescriptor {
desc := groupToDescriptors[name]
sort.Sort(byValue(desc))
return desc
}
// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are
// registered, irrespective of what group they're in, sorted by Value.
func GetErrorAllDescriptors() []ErrorDescriptor {
result := []ErrorDescriptor{}
for _, group := range GetGroupNames() {
result = append(result, GetErrorCodeGroup(group)...)
}
sort.Sort(byValue(result))
return result
}
// FromUnknownError will try to parse an unknown error and infer the appropriate Error to use.
// It maps well-known error categories (network, cancellation, database,
// storage backend) to the matching registered error code, falling back
// to ErrCodeUnknown (HTTP 500).
func FromUnknownError(err error) Error {
// return if this is an Error already
var e Error
if errors.As(err, &e) {
return e
}
// if this is a storage driver catch-all error (storagedriver.Error), extract the enclosed error
var sdErr storagedriver.Error
if errors.As(err, &sdErr) {
err = sdErr.Detail
}
// use 503 Service Unavailable for network timeout errors
var netError net.Error
if ok := errors.As(err, &netError); ok && netError.Timeout() {
return ErrCodeUnavailable.WithDetail(err)
}
var netOpError *net.OpError
if errors.As(err, &netOpError) {
// use 400 Bad Request if the client drops the connection during the request
var syscallErr *os.SyscallError
if errors.As(err, &syscallErr) && errors.Is(syscallErr.Err, syscall.ECONNRESET) {
return ErrCodeConnectionReset.WithDetail(err)
}
// use 503 Service Unavailable for network connection refused or unknown host errors
return ErrCodeUnavailable.WithDetail(err)
}
// use 400 Bad Request for canceled requests
if errors.Is(err, context.Canceled) {
return ErrCodeRequestCanceled.WithDetail(err)
}
// use 503 Service Unavailable for database connection failures
var pgErr *pgconn.PgError
if errors.As(err, &pgErr) && pgerrcode.IsConnectionException(pgErr.Code) {
return ErrCodeUnavailable.WithDetail(err)
}
// propagate a 503 Service Unavailable status from the storage backends
var gcsErr *googleapi.Error
if errors.As(err, &gcsErr) {
if gcsErr.Code == http.StatusServiceUnavailable {
return ErrCodeUnavailable.WithDetail(gcsErr.Error())
}
}
// otherwise, we're not sure what the error is or how to react, use 500 Internal Server Error
return ErrCodeUnknown.WithDetail(err)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/dist_temp/errcode/handler.go | registry/app/dist_temp/errcode/handler.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errcode
import (
"encoding/json"
"net/http"
)
// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err
// and sets the content-type header to 'application/json'. It will handle
// ErrorCoder and Errors, and if necessary will create an envelope.
// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err
// and sets the content-type header to 'application/json'. It will handle
// ErrorCoder and Errors, and if necessary will create an envelope.
// The HTTP status is taken from the (first) error's descriptor and
// defaults to 500 when none can be determined.
func ServeJSON(w http.ResponseWriter, err error) error {
w.Header().Set("Content-Type", "application/json")
var sc int
// Deliberate type switch (not errors.As): only the value's own type
// decides the envelope shape; wrapped errors are not unwrapped.
switch errs := err.(type) {
case Errors:
if len(errs) < 1 {
break
}
// Status comes from the first error in the envelope, if typed.
if err, ok := errs[0].(ErrorCoder); ok {
sc = err.ErrorCode().Descriptor().HTTPStatusCode
}
case ErrorCoder:
sc = errs.ErrorCode().Descriptor().HTTPStatusCode
err = Errors{err} // create an envelope.
default:
// We just have an unhandled error type, so just place in an envelope
// and move along.
err = Errors{err}
}
if sc == 0 {
sc = http.StatusInternalServerError
}
w.WriteHeader(sc)
return json.NewEncoder(w).Encode(err)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/dist_temp/requestutil/util.go | registry/app/dist_temp/requestutil/util.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package requestutil
import (
"net"
"net/http"
"strings"
"github.com/rs/zerolog/log"
)
// parseIP parses ipStr into a net.IP. A warning is logged (and nil returned)
// when the string is not a valid IP address.
func parseIP(ipStr string) net.IP {
	parsed := net.ParseIP(ipStr)
	if parsed != nil {
		return parsed
	}
	log.Warn().Msgf("invalid remote IP address: %q", ipStr)
	return nil
}
// RemoteAddr extracts the remote address of the request, taking into account
// proxy headers. X-Forwarded-For is preferred, then X-Real-Ip; a header value
// is only used when it parses as a valid IP, otherwise the transport-level
// r.RemoteAddr is returned.
func RemoteAddr(r *http.Request) string {
	forwarded := r.Header.Get("X-Forwarded-For")
	if forwarded != "" {
		// Only the first (client-most) entry of the comma-separated list matters.
		first, _, _ := strings.Cut(forwarded, ",")
		first = strings.Trim(first, " ")
		if parseIP(first) != nil {
			return first
		}
	}

	// X-Real-Ip is less supported, but worth checking in the
	// absence of X-Forwarded-For.
	realIP := r.Header.Get("X-Real-Ip")
	if realIP != "" && parseIP(realIP) != nil {
		return realIP
	}

	return r.RemoteAddr
}
// RemoteIP extracts the remote IP of the request, taking into account proxy
// headers. If the resolved address is in "IP:port" form, only the IP part is
// returned.
func RemoteIP(r *http.Request) string {
	addr := RemoteAddr(r)

	host, _, err := net.SplitHostPort(addr)
	if err != nil {
		// Not in "IP:port" form; return the address unchanged.
		return addr
	}
	return host
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/dist_temp/requestutil/util_test.go | registry/app/dist_temp/requestutil/util_test.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package requestutil
import (
"net/http"
"net/http/httptest"
"net/http/httputil"
"net/url"
"testing"
)
// SingleHostReverseProxy will insert an X-Forwarded-For header, and can be used to test
// RemoteAddr(). A fake RemoteAddr cannot be set on the HTTP request - it is overwritten
// at the transport layer to 127.0.0.1:<port> . However, as the X-Forwarded-For header
// just contains the IP address, it is different enough for testing.
func TestRemoteAddr(t *testing.T) {
	var expectedRemote string
	// The backend asserts, for every request it receives, that RemoteAddr(r)
	// resolves to the currently expected remote and that this differs from
	// the raw transport-level r.RemoteAddr.
	backend := httptest.NewServer(
		http.HandlerFunc(
			func(w http.ResponseWriter, r *http.Request) {
				defer r.Body.Close()

				if r.RemoteAddr == expectedRemote {
					t.Errorf("Unexpected matching remote addresses")
				}

				actualRemote := RemoteAddr(r)
				if expectedRemote != actualRemote {
					t.Errorf("Mismatching remote hosts: %v != %v", expectedRemote, actualRemote)
				}

				w.WriteHeader(http.StatusOK)
			},
		),
	)
	defer backend.Close()
	backendURL, err := url.Parse(backend.URL)
	if err != nil {
		t.Fatal(err)
	}

	proxy := httputil.NewSingleHostReverseProxy(backendURL)
	frontend := httptest.NewServer(proxy)
	defer frontend.Close()

	// X-Forwarded-For set by proxy
	expectedRemote = "127.0.0.1"
	//nolint:noctx
	proxyReq, err := http.NewRequest(http.MethodGet, frontend.URL, nil)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := http.DefaultClient.Do(proxyReq)
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()

	// RemoteAddr in X-Real-Ip
	//nolint:noctx
	getReq, err := http.NewRequest(http.MethodGet, backend.URL, nil)
	if err != nil {
		t.Fatal(err)
	}

	expectedRemote = "1.2.3.4"
	// Non-canonical key set directly on the header map; it is canonicalized
	// in transit, so the server still sees "X-Real-Ip".
	getReq.Header["X-Real-ip"] = []string{expectedRemote}
	resp, err = http.DefaultClient.Do(getReq)
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()

	// Valid X-Real-Ip and invalid X-Forwarded-For
	getReq.Header["X-forwarded-for"] = []string{"1.2.3"}
	resp, err = http.DefaultClient.Do(getReq)
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/dist_temp/dcontext/trace.go | registry/app/dist_temp/dcontext/trace.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dcontext
import (
"context"
"runtime"
"time"
"github.com/google/uuid"
"github.com/rs/zerolog/log"
)
// WithTrace allocates a traced timing span in a new context. This allows a
// caller to track the time between calling WithTrace and the returned done
// function. When the done function is called, a log message is emitted with a
// "trace.duration" field, corresponding to the elapsed time and a
// "trace.func" field, corresponding to the function that called WithTrace.
//
// The logging keys "trace.id" and "trace.parent.id" are provided to implement
// dapper-like tracing. This function should be complemented with a WithSpan
// method that could be used for tracing distributed RPC calls.
//
// The main benefit of this function is to post-process log messages or
// intercept them in a hook to provide timing data. Trace ids and parent ids
// can also be linked to provide call tracing, if so required.
//
// Here is an example of the usage:
//
// func timedOperation(ctx Context) {
// ctx, done := WithTrace(ctx)
// defer done("this will be the log message")
// // ... function body ...
// }
//
// If the function ran for roughly 1s, such a usage would emit a log message
// as follows:
//
// INFO[0001] this will be the log message trace.duration=1.004575763s
//
// trace.func=github.com/distribution/distribution/context.traceOperation trace.id=<id> ...
//
// Notice that the function name is automatically resolved, along with the
// package and a trace id is emitted that can be linked with parent ids.
func WithTrace(ctx context.Context) (context.Context, func(format string, a ...any)) {
	if ctx == nil {
		ctx = Background()
	}

	// Resolve our immediate caller so the trace records the function, file
	// and line that requested the trace (Caller depth 1 = caller of WithTrace).
	pc, file, line, _ := runtime.Caller(1)
	f := runtime.FuncForPC(pc)
	ctx = &traced{
		Context: ctx,
		id:      uuid.NewString(),
		start:   time.Now(),
		parent:  GetStringValue(ctx, "trace.id"), // link to the enclosing trace, if any
		fnname:  f.Name(),
		file:    file,
		line:    line,
	}

	// The returned done function emits a single info-level log line carrying
	// the trace fields. "trace.duration" is computed lazily by traced.Value
	// at log time, so it reflects the elapsed time when done is invoked.
	return ctx, func(format string, a ...any) {
		GetLogger(
			ctx, log.Info(),
			"trace.duration",
			"trace.id",
			"trace.parent.id",
			"trace.func",
			"trace.file",
			"trace.line",
		).
			Msgf(format, a...)
	}
}
// traced represents a context that is traced for function call timing. It
// also provides fast lookup for the various attributes that are available on
// the trace.
type traced struct {
context.Context
id string
parent string
start time.Time
fnname string
file string
line int
}
func (ts *traced) Value(key any) any {
switch key {
case "trace.start":
return ts.start
case "trace.duration":
return time.Since(ts.start)
case "trace.id":
return ts.id
case "trace.parent.id":
if ts.parent == "" {
return nil // must return nil to signal no parent.
}
return ts.parent
case "trace.func":
return ts.fnname
case "trace.file":
return ts.file
case "trace.line":
return ts.line
}
return ts.Context.Value(key)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/dist_temp/dcontext/util.go | registry/app/dist_temp/dcontext/util.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dcontext
import (
"context"
"time"
)
// Since looks up key, which should be a time.Time, and returns the duration
// since that time. If the key is not found, the value returned will be zero.
// This is helpful when inferring metrics related to context execution times.
func Since(ctx context.Context, key any) time.Duration {
if startedAt, ok := ctx.Value(key).(time.Time); ok {
return time.Since(startedAt)
}
return 0
}
// GetStringValue returns a string value from the context. The empty string
// will be returned if not found.
func GetStringValue(ctx context.Context, key any) (value string) {
if valuev, ok := ctx.Value(key).(string); ok {
value = valuev
}
return value
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/dist_temp/dcontext/trace_test.go | registry/app/dist_temp/dcontext/trace_test.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dcontext
import (
"runtime"
"testing"
"time"
)
// TestWithTrace ensures that tracing has the expected values in the context.
// It checks the top-level trace, then a nested trace created inside tracedFn,
// verifying that the nested trace links back to the parent via
// "trace.parent.id". Subtests run in parallel against the shared ctx.
func TestWithTrace(t *testing.T) {
	t.Parallel()
	pc, file, _, _ := runtime.Caller(0) // get current caller.
	f := runtime.FuncForPC(pc)

	// Keys every trace (parent or nested) must expose with a non-empty value.
	base := []valueTestCase{
		{
			key:           "trace.id",
			notnilorempty: true,
		},

		{
			key:           "trace.file",
			expected:      file,
			notnilorempty: true,
		},
		{
			key:           "trace.line",
			notnilorempty: true,
		},
		{
			key:           "trace.start",
			notnilorempty: true,
		},
	}

	ctx, done := WithTrace(Background())
	t.Cleanup(func() { done("this will be emitted at end of test") })

	tests := base
	tests = append(
		tests, valueTestCase{
			key:      "trace.func",
			expected: f.Name(),
		},
	)

	for _, tc := range tests {
		testCase := tc
		t.Run(
			testCase.key, func(t *testing.T) {
				t.Parallel()
				v := ctx.Value(testCase.key)
				if testCase.notnilorempty {
					if v == nil || v == "" {
						t.Fatalf("value was nil or empty: %#v", v)
					}
					return
				}

				if v != testCase.expected {
					t.Fatalf("unexpected value: %v != %v", v, testCase.expected)
				}
			},
		)
	}

	// tracedFn opens a nested trace; its "trace.parent.id" must equal the
	// outer trace's "trace.id" and its "trace.func" must name this closure.
	tracedFn := func() {
		parentID := ctx.Value("trace.id") // ensure the parent trace id is correct.
		pc1, _, _, _ := runtime.Caller(0) // get current caller.
		f1 := runtime.FuncForPC(pc1)
		ctx, done1 := WithTrace(ctx)
		defer done1("this should be subordinate to the other trace")
		time.Sleep(time.Second)

		tests1 := base
		tests1 = append(
			tests1, valueTestCase{
				key:      "trace.func",
				expected: f1.Name(),
			}, valueTestCase{
				key:      "trace.parent.id",
				expected: parentID,
			},
		)
		for _, tc := range tests1 {
			testCase := tc
			t.Run(
				testCase.key, func(t *testing.T) {
					t.Parallel()
					v := ctx.Value(testCase.key)
					if testCase.notnilorempty {
						if v == nil || v == "" {
							t.Fatalf("value was nil or empty: %#v", v)
						}
						return
					}

					if v != testCase.expected {
						t.Fatalf("unexpected value: %v != %v", v, testCase.expected)
					}
				},
			)
		}
	}
	tracedFn()

	time.Sleep(time.Second)
}
// valueTestCase describes a single context-key lookup expectation used by the
// trace tests.
type valueTestCase struct {
	key           string // context key to look up
	expected      any    // expected value when notnilorempty is false
	notnilorempty bool   // just check not empty/not nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/dist_temp/dcontext/version_test.go | registry/app/dist_temp/dcontext/version_test.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dcontext
import "testing"
// TestVersionContext verifies that GetVersion returns "" before a version is
// set, and returns exactly what WithVersion stored afterwards.
func TestVersionContext(t *testing.T) {
	ctx := Background()

	if got := GetVersion(ctx); got != "" {
		t.Fatalf("context should not yet have a version")
	}

	want := "2.1-whatever"
	ctx = WithVersion(ctx, want)
	if got := GetVersion(ctx); got != want {
		t.Fatalf("version was not set: %q != %q", got, want)
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/dist_temp/dcontext/http_test.go | registry/app/dist_temp/dcontext/http_test.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dcontext
import (
"net/http"
"reflect"
"testing"
"time"
)
// TestWithRequest verifies that WithRequest exposes the request and its
// derived attributes under the "http.request.*" keys, and that the generated
// id and start time have the expected types and bounds.
func TestWithRequest(t *testing.T) {
	var req http.Request
	start := time.Now()
	req.Method = http.MethodGet
	req.Host = "example.com"
	req.RequestURI = "/test-test"
	req.Header = make(http.Header)
	req.Header.Set("Referer", "foo.com/referer")
	req.Header.Set("User-Agent", "test/0.1")

	ctx := WithRequest(Background(), &req)
	for _, tc := range []struct {
		key      string
		expected any
	}{
		{
			key:      "http.request",
			expected: &req,
		},
		{
			// Generated value: only type-checked below, no fixed expectation.
			key: "http.request.id",
		},
		{
			key:      "http.request.method",
			expected: req.Method,
		},
		{
			key:      "http.request.host",
			expected: req.Host,
		},
		{
			key:      "http.request.uri",
			expected: req.RequestURI,
		},
		{
			key:      "http.request.referer",
			expected: req.Referer(),
		},
		{
			key:      "http.request.useragent",
			expected: req.UserAgent(),
		},
		{
			key:      "http.request.remoteaddr",
			expected: req.RemoteAddr,
		},
		{
			// Generated value: bounds-checked below, no fixed expectation.
			key: "http.request.startedat",
		},
	} {
		v := ctx.Value(tc.key)

		if v == nil {
			t.Fatalf("value not found for %q", tc.key)
		}

		if tc.expected != nil && v != tc.expected {
			t.Fatalf("%s: %v != %v", tc.key, v, tc.expected)
		}

		// Key specific checks!
		switch tc.key {
		case "http.request.id":
			if _, ok := v.(string); !ok {
				t.Fatalf("request id not a string: %v", v)
			}
		case "http.request.startedat":
			vt, ok := v.(time.Time)
			if !ok {
				t.Fatalf("value not a time: %v", v)
			}

			now := time.Now()
			if vt.After(now) {
				t.Fatalf("time generated too late: %v > %v", vt, now)
			}

			if vt.Before(start) {
				t.Fatalf("time generated too early: %v < %v", vt, start)
			}
		}
	}
}
type testResponseWriter struct {
flushed bool
status int
written int64
header http.Header
}
func (trw *testResponseWriter) Header() http.Header {
if trw.header == nil {
trw.header = make(http.Header)
}
return trw.header
}
func (trw *testResponseWriter) Write(p []byte) (n int, err error) {
if trw.status == 0 {
trw.status = http.StatusOK
}
n = len(p)
trw.written += int64(n)
return
}
func (trw *testResponseWriter) WriteHeader(status int) {
trw.status = status
}
func (trw *testResponseWriter) Flush() {
trw.flushed = true
}
// TestWithResponseWriter verifies that WithResponseWriter exposes the writer
// via "http.response" / GetResponseWriter and that writes and status changes
// are reflected live through the "http.response.*" context keys.
func TestWithResponseWriter(t *testing.T) {
	trw := testResponseWriter{}
	ctx, rw := WithResponseWriter(Background(), &trw)

	if ctx.Value("http.response") != rw {
		t.Fatalf("response not available in context: %v != %v", ctx.Value("http.response"), rw)
	}

	grw, err := GetResponseWriter(ctx)
	if err != nil {
		t.Fatalf("error getting response writer: %v", err)
	}

	if grw != rw {
		t.Fatalf("unexpected response writer returned: %#v != %#v", grw, rw)
	}

	// Before any write, the instrumented status must be the zero int.
	if ctx.Value("http.response.status") != 0 {
		t.Fatalf(
			"response status should always be a number and should be zero here: %v != 0",
			ctx.Value("http.response.status"),
		)
	}

	if n, err := rw.Write(make([]byte, 1024)); err != nil {
		t.Fatalf("unexpected error writing: %v", err)
	} else if n != 1024 {
		t.Fatalf("unexpected number of bytes written: %v != %v", n, 1024)
	}

	// The first write implies a 200 status and updates the byte count.
	if ctx.Value("http.response.status") != http.StatusOK {
		t.Fatalf("unexpected response status in context: %v != %v", ctx.Value("http.response.status"), http.StatusOK)
	}

	if ctx.Value("http.response.written") != int64(1024) {
		t.Fatalf("unexpected number reported bytes written: %v != %v", ctx.Value("http.response.written"), 1024)
	}

	// Make sure flush propagates
	rw.(http.Flusher).Flush() //nolint:errcheck

	if !trw.flushed {
		t.Fatalf("response writer not flushed")
	}

	// Write another status and make sure context is correct. This normally
	// wouldn't work except for in this contrived testcase.
	rw.WriteHeader(http.StatusBadRequest)

	if ctx.Value("http.response.status") != http.StatusBadRequest {
		t.Fatalf(
			"unexpected response status in context: %v != %v",
			ctx.Value("http.response.status"),
			http.StatusBadRequest,
		)
	}
}
// TestWithVars verifies that WithVars exposes the mux vars map under "vars"
// and individual variables under "vars.<name>". It swaps the package-level
// getVarsFromRequest hook to supply a fixed vars map.
func TestWithVars(t *testing.T) {
	var req http.Request
	vars := map[string]string{
		"foo": "asdf",
		"bar": "qwer",
	}

	// Replace the vars extractor so no real mux routing is needed; it also
	// asserts that WithVars forwards the exact request it was given.
	getVarsFromRequest = func(r *http.Request) map[string]string {
		if r != &req {
			t.Fatalf("unexpected request: %v != %v", r, req)
		}

		return vars
	}

	ctx := WithVars(Background(), &req)
	for _, tc := range []struct {
		key      string
		expected any
	}{
		{
			key:      "vars",
			expected: vars,
		},
		{
			key:      "vars.foo",
			expected: "asdf",
		},
		{
			key:      "vars.bar",
			expected: "qwer",
		},
	} {
		v := ctx.Value(tc.key)

		if !reflect.DeepEqual(v, tc.expected) {
			t.Fatalf("%q: %v != %v", tc.key, v, tc.expected)
		}
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/dist_temp/dcontext/http.go | registry/app/dist_temp/dcontext/http.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dcontext
import (
"context"
"errors"
"net/http"
"strings"
"sync"
"time"
"github.com/harness/gitness/registry/app/dist_temp/requestutil"
"github.com/google/uuid"
"github.com/gorilla/mux"
"github.com/rs/zerolog"
)
// Common errors used with this package.
var (
ErrNoRequestContext = errors.New("no http request in context")
ErrNoResponseWriterContext = errors.New("no http response in context")
)
// WithRequest places the request on the context. The context of the request
// is assigned a unique id, available at "http.request.id". The request itself
// is available at "http.request". Other common attributes are available under
// the prefix "http.request.". If a request is already present on the context,
// this method will panic.
func WithRequest(ctx context.Context, r *http.Request) context.Context {
	// Guard against double-wrapping: a context may carry only one request.
	if ctx.Value("http.request") != nil {
		panic("only one request per context")
	}

	reqCtx := &httpRequestContext{
		Context:   ctx,
		startedAt: time.Now(),
		id:        uuid.NewString(),
		r:         r,
	}
	return reqCtx
}
// GetRequestID attempts to resolve the current request id, if possible. An
// empty string is returned if it is not available on the context.
func GetRequestID(ctx context.Context) string {
	return GetStringValue(ctx, "http.request.id")
}
// WithResponseWriter returns a new context and response writer that makes
// interesting response statistics available within the context.
func WithResponseWriter(ctx context.Context, w http.ResponseWriter) (context.Context, http.ResponseWriter) {
	// The instrumented writer doubles as the context: it serves the
	// "http.response.*" keys from its own recorded state.
	instrumented := &instrumentedResponseWriter{
		ResponseWriter: w,
		Context:        ctx,
	}
	return instrumented, instrumented
}
// GetResponseWriter returns the http.ResponseWriter from the provided
// context. If not present, ErrNoResponseWriterContext is returned. The
// returned instance provides instrumentation in the context.
func GetResponseWriter(ctx context.Context) (http.ResponseWriter, error) {
	rw, ok := ctx.Value("http.response").(http.ResponseWriter)
	if !ok || rw == nil {
		return nil, ErrNoResponseWriterContext
	}
	return rw, nil
}
// getVarsFromRequest lets us change the request vars implementation for
// testing and maybe future changes.
var getVarsFromRequest = mux.Vars

// WithVars extracts gorilla/mux vars and makes them available on the returned
// context. Variables are available at keys with the prefix "vars.". For
// example, if looking for the variable "name", it can be accessed as
// "vars.name". Implementations that are accessing values need not know that
// the underlying context is implemented with gorilla/mux vars.
func WithVars(ctx context.Context, r *http.Request) context.Context {
	// Vars are snapshotted once here; later changes to the request's mux
	// vars are not reflected in the returned context.
	return &muxVarsContext{
		Context: ctx,
		vars:    getVarsFromRequest(r),
	}
}
// GetResponseLogger reads the current response stats and builds a logger.
// Because the values are read at call time, pushing a logger returned from
// this function on the context will lead to missing or invalid data. Only
// call this at the end of a request, after the response has been written.
func GetResponseLogger(ctx context.Context, l *zerolog.Event) Logger {
	responseLogger := getZerologLogger(
		ctx, l,
		"http.response.written",
		"http.response.status",
		"http.response.contenttype",
	)

	// Attach the request duration only when the start time is known.
	if duration := Since(ctx, "http.request.startedat"); duration > 0 {
		responseLogger = responseLogger.Str("http.response.duration", duration.String())
	}

	return responseLogger
}
// httpRequestContext makes information about a request available to context.
type httpRequestContext struct {
	context.Context

	startedAt time.Time     // when the request was placed on the context
	id        string        // unique id, served as "http.request.id"
	r         *http.Request // the wrapped request
}

// Value returns a keyed element of the request for use in the context. To get
// the request itself, query "request". For other components, access them as
// "request.<component>". For example, r.RequestURI. An empty referer or
// content type is treated as absent: lookup falls through to the parent
// context rather than returning "".
func (ctx *httpRequestContext) Value(key any) any {
	if keyStr, ok := key.(string); ok {
		switch keyStr {
		case "http.request":
			return ctx.r
		case "http.request.uri":
			return ctx.r.RequestURI
		case "http.request.remoteaddr":
			// Proxy-aware: honors X-Forwarded-For / X-Real-Ip.
			return requestutil.RemoteAddr(ctx.r)
		case "http.request.method":
			return ctx.r.Method
		case "http.request.host":
			return ctx.r.Host
		case "http.request.referer":
			referer := ctx.r.Referer()
			if referer != "" {
				return referer
			}
			// empty referer: fall through to the parent context below
		case "http.request.useragent":
			return ctx.r.UserAgent()
		case "http.request.id":
			return ctx.id
		case "http.request.startedat":
			return ctx.startedAt
		case "http.request.contenttype":
			if ct := ctx.r.Header.Get("Content-Type"); ct != "" {
				return ct
			}
			// empty content type: fall through to the parent context below
		default:
			// no match; fall back to standard behavior below
		}
	}

	return ctx.Context.Value(key)
}
type muxVarsContext struct {
context.Context
vars map[string]string
}
func (ctx *muxVarsContext) Value(key any) any {
if keyStr, ok := key.(string); ok {
if keyStr == "vars" {
return ctx.vars
}
// We need to check if that's intentional (could be a bug).
if v, ok1 := ctx.vars[strings.TrimPrefix(keyStr, "vars.")]; ok1 {
return v
}
}
return ctx.Context.Value(key)
}
// instrumentedResponseWriter provides response writer information in a
// context. This variant is only used in the case where CloseNotifier is not
// implemented by the parent ResponseWriter.
type instrumentedResponseWriter struct {
http.ResponseWriter
context.Context
mu sync.Mutex
status int
written int64
}
func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) {
n, err = irw.ResponseWriter.Write(p)
irw.mu.Lock()
irw.written += int64(n)
// Guess the likely status if not set.
if irw.status == 0 {
irw.status = http.StatusOK
}
irw.mu.Unlock()
return
}
func (irw *instrumentedResponseWriter) WriteHeader(status int) {
irw.ResponseWriter.WriteHeader(status)
irw.mu.Lock()
irw.status = status
irw.mu.Unlock()
}
func (irw *instrumentedResponseWriter) Flush() {
if flusher, ok := irw.ResponseWriter.(http.Flusher); ok {
flusher.Flush()
}
}
func (irw *instrumentedResponseWriter) Value(key any) any {
if keyStr, ok := key.(string); ok {
switch keyStr {
case "http.response":
return irw
case "http.response.written":
irw.mu.Lock()
defer irw.mu.Unlock()
return irw.written
case "http.response.status":
irw.mu.Lock()
defer irw.mu.Unlock()
return irw.status
case "http.response.contenttype":
if ct := irw.Header().Get("Content-Type"); ct != "" {
return ct
}
default:
// no match; fall back to standard behavior below
}
}
return irw.Context.Value(key)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/dist_temp/dcontext/context.go | registry/app/dist_temp/dcontext/context.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dcontext
import (
"context"
"maps"
"sync"
"github.com/google/uuid"
)
// instanceContext is a context that provides only an instance id. It is
// provided as the main background context.
type instanceContext struct {
	context.Context
	id   string    // id of context, logged as "instance.id"
	once sync.Once // once protect generation of the id
}

// Value returns the lazily generated, process-unique id for the key
// "instance.id"; every other key is delegated to the wrapped context.
func (ic *instanceContext) Value(key any) any {
	if key == "instance.id" {
		ic.once.Do(
			func() {
				// We want to lazy initialize the UUID such that we don't
				// call a random generator from the package initialization
				// code. For various reasons random could not be available
				// https://github.com/distribution/distribution/issues/782
				ic.id = uuid.NewString()
			},
		)
		return ic.id
	}

	return ic.Context.Value(key)
}
// background is the process-wide root context; its instance id is generated
// lazily on first lookup (see instanceContext.Value).
var background = &instanceContext{
	Context: context.Background(),
}

// Background returns a non-nil, empty Context. The background context
// provides a single key, "instance.id" that is globally unique to the
// process.
func Background() context.Context {
	return background
}
// stringMapContext is a simple context implementation that checks a map for a
// key, falling back to a parent if not present.
type stringMapContext struct {
context.Context
m map[string]any
}
// WithValues returns a context that proxies lookups through a map. Only
// supports string keys.
func WithValues(ctx context.Context, m map[string]any) context.Context {
mo := make(map[string]any, len(m)) // make our own copy.
maps.Copy(mo, m)
return stringMapContext{
Context: ctx,
m: mo,
}
}
func (smc stringMapContext) Value(key any) any {
if ks, ok := key.(string); ok {
if v, ok1 := smc.m[ks]; ok1 {
return v
}
}
return smc.Context.Value(key)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/dist_temp/dcontext/version.go | registry/app/dist_temp/dcontext/version.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dcontext
import (
"context"
"github.com/rs/zerolog/log"
)
// versionKey is the private context key under which the application version
// is stored.
type versionKey struct{}

func (versionKey) String() string { return "version" }

// WithVersion stores the application version in the context. The new context
// gets a logger to ensure log messages are marked with the application
// version.
func WithVersion(ctx context.Context, version string) context.Context {
	ctx = context.WithValue(ctx, versionKey{}, version)
	// push a new logger onto the stack
	return WithLogger(ctx, GetLogger(ctx, log.Info(), versionKey{}))
}

// GetVersion returns the application version from the context. An empty
// string may be returned if the version was not set on the context.
func GetVersion(ctx context.Context) string {
	return GetStringValue(ctx, versionKey{})
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/dist_temp/dcontext/doc.go | registry/app/dist_temp/dcontext/doc.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dcontext
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/dist_temp/dcontext/logger.go | registry/app/dist_temp/dcontext/logger.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dcontext
import (
"context"
"fmt"
"runtime"
"github.com/rs/zerolog"
)
// Logger provides a leveled-logging interface. It is the minimal
// message-emitting surface needed by callers of GetLogger; in practice the
// values stored on the context are *zerolog.Event (see getZerologLogger).
type Logger interface {
	Msgf(format string, v ...any)
	Msg(msg string)
}

// loggerKey is the private context key under which the Logger is stored.
type loggerKey struct{}

// WithLogger creates a new context with provided logger.
func WithLogger(ctx context.Context, logger Logger) context.Context {
	return context.WithValue(ctx, loggerKey{}, logger)
}
// GetLoggerWithFields returns a logger instance carrying the specified fields
// without affecting the context. Extra specified keys will be resolved from
// the context.
func GetLoggerWithFields(
	ctx context.Context, log *zerolog.Event,
	fields map[any]any, keys ...any,
) Logger {
	evt := getZerologLogger(ctx, log, keys...)
	for name, value := range fields {
		evt.Interface(fmt.Sprint(name), value)
	}
	return evt
}
// GetLogger returns the logger from the current context, if present. If one
// or more keys are provided, they will be resolved on the context and
// included in the logger. While context.Value takes an interface, any key
// argument passed to GetLogger will be passed to fmt.Sprint when expanded as
// a logging key field. If context keys are integer constants, for example,
// it is recommended that a String method is implemented.
func GetLogger(ctx context.Context, l *zerolog.Event, keys ...any) Logger {
	return getZerologLogger(ctx, l, keys...)
}
// getZerologLogger returns the zerolog logger for the context. If one or more
// keys are provided, they will be resolved on the context and included in the
// logger. Only use this function if specific zerolog functionality is
// required.
func getZerologLogger(ctx context.Context, l *zerolog.Event, keys ...any) *zerolog.Event {
	var logger *zerolog.Event
	// Get a logger, if it is present.
	loggerInterface := ctx.Value(loggerKey{})
	if loggerInterface != nil {
		// NOTE(review): this assumes the Logger stored by WithLogger is
		// always a *zerolog.Event; any other Logger implementation stored
		// under loggerKey is silently ignored here.
		if lgr, ok := loggerInterface.(*zerolog.Event); ok {
			logger = lgr
		}
	}
	if logger == nil {
		// No logger on the context: start from the supplied event and tag
		// it with the Go runtime version.
		logger = l.Str("go.version", runtime.Version())
		// Fill in the instance id, if we have it.
		// NOTE(review): a plain string is used as the context key here —
		// confirm it matches the key under which instance.id is stored
		// elsewhere (staticcheck SA1029 discourages basic-type keys).
		instanceID := ctx.Value("instance.id")
		if instanceID != nil {
			logger.Interface("instance.id", instanceID)
		}
	}
	// Resolve each requested context key into a logging field, naming the
	// field via fmt.Sprint (so keys with a String method read well).
	for _, key := range keys {
		v := ctx.Value(key)
		if v != nil {
			logger.Interface(fmt.Sprint(key), v)
		}
	}
	return logger
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/dist_temp/challenge/authchallenge_test.go | registry/app/dist_temp/challenge/authchallenge_test.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package challenge
import (
"fmt"
"net/http"
"net/url"
"strings"
"sync"
"testing"
)
// TestAuthChallengeParse checks that a single Bearer WWW-Authenticate header
// is parsed into one challenge whose scheme is lower-cased and whose quoted,
// bare, and backslash-escaped parameter values are all decoded.
func TestAuthChallengeParse(t *testing.T) {
	header := http.Header{}
	header.Add(
		"WWW-Authenticate",
		`Bearer realm="https://auth.example.com/token",service="registry.example.com",other=fun,slashed="he\"\l\lo"`,
	)
	challenges := parseAuthHeader(header)
	if len(challenges) != 1 {
		t.Fatalf("Unexpected number of auth challenges: %d, expected 1", len(challenges))
	}
	challenge := challenges[0]
	// The scheme is normalized to lower case by parseValueAndParams.
	if expected := "bearer"; challenge.Scheme != expected {
		t.Fatalf("Unexpected scheme: %s, expected: %s", challenge.Scheme, expected)
	}
	if expected := "https://auth.example.com/token"; challenge.Parameters["realm"] != expected {
		t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["realm"], expected)
	}
	if expected := "registry.example.com"; challenge.Parameters["service"] != expected {
		t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["service"], expected)
	}
	// Unquoted token values are accepted as-is.
	if expected := "fun"; challenge.Parameters["other"] != expected {
		t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["other"], expected)
	}
	// Backslash escapes inside quoted strings are removed (\" -> ", \l -> l).
	if expected := "he\"llo"; challenge.Parameters["slashed"] != expected {
		t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["slashed"], expected)
	}
}
// TestAuthChallengeNormalization covers host normalization of challenge keys:
// upper-case hosts, a non-ASCII host, a host with an explicit port, and
// concurrent add/get access.
func TestAuthChallengeNormalization(t *testing.T) {
	testAuthChallengeNormalization(t, "reg.EXAMPLE.com")
	testAuthChallengeNormalization(t, "bɿɒʜɔiɿ-ɿɘƚƨim-ƚol-ɒ-ƨʞnɒʜƚ.com")
	testAuthChallengeNormalization(t, "reg.example.com:80")
	testAuthChallengeConcurrent(t, "reg.EXAMPLE.com")
}
// testAuthChallengeNormalization verifies that a challenge registered for a
// host is still found when the lookup URL uses the lower-cased, canonical
// (port-qualified) form of that host.
//
// Fix: the local variable was named "url", shadowing the imported net/url
// package for the rest of the function; renamed to u.
func testAuthChallengeNormalization(t *testing.T, host string) {
	scm := NewSimpleManager()
	u, err := url.Parse(fmt.Sprintf("http://%s/v2/", host))
	if err != nil {
		t.Fatal(err)
	}
	// A 401 response from the endpoint carrying a Bearer challenge.
	resp := &http.Response{
		Request: &http.Request{
			URL: u,
		},
		Header:     make(http.Header),
		StatusCode: http.StatusUnauthorized,
	}
	resp.Header.Add(
		"WWW-Authenticate",
		fmt.Sprintf("Bearer realm=\"https://%s/token\",service=\"registry.example.com\"", host),
	)
	if err := scm.AddResponse(resp); err != nil {
		t.Fatal(err)
	}
	// Look the challenge up under the normalized form of the same endpoint.
	lowered := *u
	lowered.Host = strings.ToLower(lowered.Host)
	lowered.Host = canonicalAddr(&lowered)
	c, err := scm.GetChallenges(lowered)
	if err != nil {
		t.Fatal(err)
	}
	if len(c) == 0 {
		t.Fatal("Expected challenge for lower-cased-host URL")
	}
}
// testAuthChallengeConcurrent exercises AddResponse and GetChallenges from
// two goroutines to surface data races in the challenge manager (meaningful
// under -race).
//
// Fixes: the local "url" shadowed the imported net/url package, and the
// writer goroutine assigned into the enclosing function's err variable;
// both goroutines now use goroutine-local error variables.
func testAuthChallengeConcurrent(t *testing.T, host string) {
	scm := NewSimpleManager()
	u, err := url.Parse(fmt.Sprintf("http://%s/v2/", host))
	if err != nil {
		t.Fatal(err)
	}
	resp := &http.Response{
		Request: &http.Request{
			URL: u,
		},
		Header:     make(http.Header),
		StatusCode: http.StatusUnauthorized,
	}
	resp.Header.Add(
		"WWW-Authenticate",
		fmt.Sprintf("Bearer realm=\"https://%s/token\",service=\"registry.example.com\"", host),
	)
	var wg sync.WaitGroup
	wg.Add(2)
	// Writer: repeatedly record the challenge response.
	go func() {
		defer wg.Done()
		for range 200 {
			if err := scm.AddResponse(resp); err != nil {
				t.Error(err)
			}
		}
	}()
	// Reader: repeatedly look the challenge up under the lower-cased host.
	go func() {
		defer wg.Done()
		lowered := *u
		lowered.Host = strings.ToLower(lowered.Host)
		for range 200 {
			if _, err := scm.GetChallenges(lowered); err != nil {
				t.Error(err)
			}
		}
	}()
	wg.Wait()
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/dist_temp/challenge/addr.go | registry/app/dist_temp/challenge/addr.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package challenge
import (
"net/url"
"strings"
)
// FROM: https://golang.org/src/net/http/http.go
// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
// return true if the string includes a port.
func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
// FROM: http://golang.org/src/net/http/transport.go
var portMap = map[string]string{
"http": "80",
"https": "443",
}
// canonicalAddr returns url.Host but always with a ":port" suffix
// FROM: http://golang.org/src/net/http/transport.go
func canonicalAddr(url *url.URL) string {
addr := url.Host
if !hasPort(addr) {
return addr + ":" + portMap[url.Scheme]
}
return addr
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/dist_temp/challenge/authchallenge.go | registry/app/dist_temp/challenge/authchallenge.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package challenge
import (
"fmt"
"net/http"
"net/url"
"strings"
"sync"
)
// Octet types from RFC 2616.
type octetType byte
var octetTypes [256]octetType
const (
isToken octetType = 1 << iota
isSpace
)
func init() {
// OCTET = <any 8-bit sequence of data>
// CHAR = <any US-ASCII character (octets 0 - 127)>
// CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
// CR = <US-ASCII CR, carriage return (13)>
// LF = <US-ASCII LF, linefeed (10)>
// SP = <US-ASCII SP, space (32)>
// HT = <US-ASCII HT, horizontal-tab (9)>
// <"> = <US-ASCII double-quote mark (34)>
// CRLF = CR LF
// LWS = [CRLF] 1*( SP | HT )
// TEXT = <any OCTET except CTLs, but including LWS>
// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
// | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
// token = 1*<any CHAR except CTLs or separators>
// qdtext = <any TEXT except <">>
for c := range 256 {
var t octetType
isCtl := c <= 31 || c == 127
isChar := 0 <= c && c <= 127
isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
if strings.ContainsRune(" \t\r\n", rune(c)) {
t |= isSpace
}
if isChar && !isCtl && !isSeparator {
t |= isToken
}
octetTypes[c] = t
}
}
// Challenge carries information from a WWW-Authenticate response header.
// See RFC 2617.
type Challenge struct {
	// Scheme is the auth-scheme according to RFC 2617 (lower-cased by the
	// parser, e.g. "bearer").
	Scheme string
	// Parameters are the auth-params according to RFC 2617 (keys lower-cased,
	// quoted values unescaped).
	Parameters map[string]string
}
// Manager manages the challenges for endpoints.
// The challenges are pulled out of HTTP responses. Only
// responses which expect challenges should be added to
// the manager, since a non-unauthorized request will be
// viewed as not requiring challenges.
type Manager interface {
	// GetChallenges returns the challenges for the given
	// endpoint URL.
	GetChallenges(endpoint url.URL) ([]Challenge, error)
	// AddResponse adds the response to the challenge
	// manager. The challenges will be parsed out of
	// the WWW-Authenticate headers and added to the
	// URL which produced the response. If the
	// response was authorized, any challenges for the
	// endpoint will be cleared.
	AddResponse(resp *http.Response) error
}
// NewSimpleManager returns an instance of
// Manager which only maps endpoints to challenges
// based on the responses which have been added to the
// manager. The simple manager will make no attempt to
// perform requests on the endpoints or cache the responses
// to a backend.
func NewSimpleManager() Manager {
	return &simpleManager{
		Challenges: make(map[string][]Challenge),
	}
}

// simpleManager is the in-memory Manager implementation: a map from
// normalized endpoint URL (see normalizeURL) to its challenges, guarded by
// the embedded RWMutex. Because of the mutex, values of this type must not
// be copied.
type simpleManager struct {
	sync.RWMutex
	Challenges map[string][]Challenge
}
// normalizeURL lower-cases the host and forces an explicit ":port" suffix so
// that equivalent endpoints map to the same challenge key.
func normalizeURL(endpoint *url.URL) {
	endpoint.Host = strings.ToLower(endpoint.Host)
	endpoint.Host = canonicalAddr(endpoint)
}
// GetChallenges returns the challenges recorded for the given endpoint URL,
// after normalizing it the same way AddResponse does.
func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
	normalizeURL(&endpoint)

	m.RLock()
	defer m.RUnlock()
	return m.Challenges[endpoint.String()], nil
}
// AddResponse records the challenges advertised by resp under the normalized
// URL of the request that produced it, replacing any previous entry (an
// authorized response therefore clears the endpoint's challenges).
func (m *simpleManager) AddResponse(resp *http.Response) error {
	if resp.Request == nil {
		return fmt.Errorf("missing request reference")
	}
	challenges := ResponseChallenges(resp)
	// Key the entry on scheme://host/path only, normalized.
	key := url.URL{
		Scheme: resp.Request.URL.Scheme,
		Host:   resp.Request.URL.Host,
		Path:   resp.Request.URL.Path,
	}
	normalizeURL(&key)

	m.Lock()
	defer m.Unlock()
	m.Challenges[key.String()] = challenges
	return nil
}
// ResponseChallenges returns a list of authorization challenges
// for the given http Response. Challenges are only checked if
// the response status code was a 401.
func ResponseChallenges(resp *http.Response) []Challenge {
	if resp.StatusCode != http.StatusUnauthorized {
		return nil
	}
	// Parse the WWW-Authenticate header(s) into challenge structs.
	return parseAuthHeader(resp.Header)
}
// parseAuthHeader extracts one Challenge per WWW-Authenticate header value,
// skipping values whose auth-scheme cannot be parsed. It always returns a
// non-nil slice.
func parseAuthHeader(header http.Header) []Challenge {
	challenges := []Challenge{}
	for _, value := range header.Values("WWW-Authenticate") {
		scheme, params := parseValueAndParams(value)
		if scheme != "" {
			challenges = append(challenges, Challenge{Scheme: scheme, Parameters: params})
		}
	}
	return challenges
}
// parseValueAndParams splits a WWW-Authenticate header value into its
// auth-scheme (lower-cased) and auth-params. On the first malformed segment
// it returns immediately with whatever was collected so far; an unparsable
// scheme yields an empty value.
func parseValueAndParams(header string) (value string, params map[string]string) {
	params = make(map[string]string)
	value, s := expectToken(header)
	if value == "" {
		return
	}
	value = strings.ToLower(value)
	// Prepend a comma so every "key=value" pair, including the first, is
	// consumed uniformly by the loop below.
	s = "," + skipSpace(s)
	for strings.HasPrefix(s, ",") {
		var pkey string
		pkey, s = expectToken(skipSpace(s[1:]))
		if pkey == "" {
			return
		}
		if !strings.HasPrefix(s, "=") {
			return
		}
		var pvalue string
		pvalue, s = expectTokenOrQuoted(s[1:])
		if pvalue == "" {
			return
		}
		// Parameter keys are case-insensitive; store them lower-cased.
		pkey = strings.ToLower(pkey)
		params[pkey] = pvalue
		s = skipSpace(s)
	}
	return
}
// skipSpace returns s with any leading RFC 2616 whitespace (SP, HT, CR, LF)
// removed.
func skipSpace(s string) (rest string) {
	n := 0
	for n < len(s) && octetTypes[s[n]]&isSpace != 0 {
		n++
	}
	return s[n:]
}
// expectToken consumes the longest leading run of RFC 2616 token octets from
// s, returning it together with the unconsumed remainder.
func expectToken(s string) (token, rest string) {
	n := 0
	for n < len(s) && octetTypes[s[n]]&isToken != 0 {
		n++
	}
	return s[:n], s[n:]
}
// expectTokenOrQuoted consumes either a bare token or a double-quoted string
// (with backslash escapes) from the start of s, returning the decoded value
// and the remainder. An unterminated quoted string yields two empty strings.
func expectTokenOrQuoted(s string) (value string, rest string) {
	if !strings.HasPrefix(s, "\"") {
		return expectToken(s)
	}
	s = s[1:]
	for i := 0; i < len(s); i++ {
		switch s[i] {
		case '"':
			// Fast path: closing quote reached with no escapes.
			return s[:i], s[i+1:]
		case '\\':
			// Slow path: rebuild the value byte-by-byte, dropping each
			// backslash and keeping the byte that follows it.
			p := make([]byte, len(s)-1)
			j := copy(p, s[:i])
			escape := true
			for i++; i < len(s); i++ {
				b := s[i]
				switch {
				case escape:
					escape = false
					p[j] = b
					j++
				case b == '\\':
					escape = true
				case b == '"':
					return string(p[:j]), s[i+1:]
				default:
					p[j] = b
					j++
				}
			}
			// Ran off the end without a closing quote.
			return "", ""
		}
	}
	// No closing quote found and no escape encountered.
	return "", ""
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/event/reporter.go | registry/app/event/reporter.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package event
import (
"context"
"fmt"
"github.com/harness/gitness/registry/app/api/openapi/contracts/artifact"
)
// PackageType is a numeric code for an artifact package format; the valid
// values are the iota constants declared below.
type PackageType int32

// ArtifactDetails is the event payload describing the artifact an event
// refers to.
type ArtifactDetails struct {
	RegistryID   int64       `json:"registry_id,omitempty"`
	RegistryName string      `json:"registry_name,omitempty"`
	ImagePath    string      `json:"image_path,omitempty"` // format = image:tag
	PackageType  PackageType `json:"package_type,omitempty"`
	ManifestID   int64       `json:"manifest_id,omitempty"`
}

// PackageType constants using iota.
// NOTE(review): these are untyped integer constants rather than typed
// PackageType values; confirm whether callers rely on that before typing them.
const (
	PackageTypeDOCKER = iota
	PackageTypeGENERIC
	PackageTypeHELM
	PackageTypeMAVEN
)

// PackageTypeValue maps the API-level package type strings (from the artifact
// contract package) to their numeric PackageType codes.
var PackageTypeValue = map[string]PackageType{
	string(artifact.PackageTypeDOCKER):  PackageTypeDOCKER,
	string(artifact.PackageTypeGENERIC): PackageTypeGENERIC,
	string(artifact.PackageTypeHELM):    PackageTypeHELM,
	string(artifact.PackageTypeMAVEN):   PackageTypeMAVEN,
}
// GetPackageTypeFromString returns the PackageType constant corresponding to
// the given string value, or an error for an unrecognized value.
func GetPackageTypeFromString(value string) (PackageType, error) {
	val, ok := PackageTypeValue[value]
	if !ok {
		return 0, fmt.Errorf("invalid PackageType string value: %s", value)
	}
	return val, nil
}
// Reporter publishes registry events for the space identified by spacePath.
type Reporter interface {
	ReportEvent(
		ctx context.Context, payload any, spacePath string,
	) // format of spacePath = acctID/orgID/projectID
}

// Noop is a Reporter that silently discards every event.
type Noop struct {
}

// ReportEvent implements Reporter as a no-op.
func (*Noop) ReportEvent(_ context.Context, _ any, _ string) {
	// no implementation
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/manifest/descriptor.go | registry/app/manifest/descriptor.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package manifest
import (
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// Descriptor describes targeted content. Used in conjunction with a blob
// store, a descriptor can be used to fetch, store and target any kind of
// blob. The struct also describes the wire protocol format. Fields should
// only be added but never changed.
type Descriptor struct {
	// MediaType describes the type of the content. All text based formats are
	// encoded as utf-8.
	MediaType string `json:"mediaType,omitempty"`
	// Digest uniquely identifies the content. A byte stream can be verified
	// against this digest.
	Digest digest.Digest `json:"digest,omitempty"`
	// Size in bytes of content.
	Size int64 `json:"size,omitempty"`
	// URLs contains the source URLs of this content.
	URLs []string `json:"urls,omitempty"`
	// Annotations contains arbitrary metadata relating to the targeted content.
	Annotations map[string]string `json:"annotations,omitempty"`
	// Platform describes the platform which the image in the manifest runs on.
	// This should only be used when referring to a manifest.
	Platform *v1.Platform `json:"platform,omitempty"`
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/manifest/manifests.go | registry/app/manifest/manifests.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package manifest
import (
"context"
"fmt"
"mime"
"github.com/opencontainers/go-digest"
)
// Manifest represents a registry object specifying a set of
// references and an optional target.
type Manifest interface {
	// References returns a list of objects which make up this manifest.
	// A reference is anything which can be represented by a
	// distribution.Descriptor. These can consist of layers, resources or other
	// manifests.
	//
	// While no particular order is required, implementations should return
	// them from highest to lowest priority. For example, one might want to
	// return the base layer before the top layer.
	References() []Descriptor
	// Payload provides the serialized format of the manifest, in addition to
	// the media type.
	Payload() (mediaType string, payload []byte, err error)
}

// ManifestV2 is a Manifest with schema-2-style accessors for the image
// config descriptor and the ordered layer list.
type ManifestV2 interface {
	Manifest
	// Version returns the schema version and media type of the manifest.
	Version() Versioned
	// Config returns the descriptor of the image configuration blob.
	Config() Descriptor
	// Layers returns the descriptors of the manifest's layers.
	Layers() []Descriptor
	// TotalSize is the sum of the size of the manifest payload, layer and config
	// blob sizes.
	TotalSize() int64
	// DistributableLayers is a slice of distributable image layers. This is a subset of Layers, excluding items with
	// media types that are known to identify non-distributable layers.
	DistributableLayers() []Descriptor
}

// ManifestOCI extends ManifestV2 with property descriptions from the
// OCI Image Manifest specification (v1.1).
// https://github.com/opencontainers/image-spec/blob/main/manifest.md#image-manifest-property-descriptions
type ManifestOCI interface {
	ManifestV2
	// ArtifactType: this OPTIONAL property contains the type of an artifact
	// when the manifest is used for an artifact.
	ArtifactType() string
	// Subject: this OPTIONAL property specifies a descriptor of another
	// manifest. This value, used by the referrers API, indicates a
	// relationship to the specified manifest.
	Subject() Descriptor
	// Annotations returns the manifest's arbitrary metadata map.
	Annotations() map[string]string
}

// ManifestBuilder creates a manifest allowing one to include dependencies.
// Instances can be obtained from a version-specific manifest package. Manifest
// specific data is passed into the function which creates the builder.
type ManifestBuilder interface {
	// Build creates the manifest from this builder.
	Build(ctx context.Context) (Manifest, error)
	// References returns a list of objects which have been added to this
	// builder. The dependencies are returned in the order they were added,
	// which should be from base to head.
	References() []Descriptor
	// AppendReference includes the given object in the manifest after any
	// existing dependencies. If the add fails, such as when adding an
	// unsupported dependency, an error may be returned.
	//
	// The destination of the reference is dependent on the manifest type and
	// the dependency type.
	AppendReference(dependency Describable) error
}

// ManifestEnumerator enables iterating over manifests.
type ManifestEnumerator interface {
	// Enumerate calls ingester for each manifest.
	Enumerate(ctx context.Context, ingester func(digest.Digest) error) error
}

// Describable is an interface for descriptors.
type Describable interface {
	Descriptor() Descriptor
}
// ManifestMediaTypes returns the media types for which an unmarshal function
// has been registered, excluding the default (empty media type) entry.
func ManifestMediaTypes() (mediaTypes []string) {
	for mt := range mappings {
		if mt == "" {
			continue
		}
		mediaTypes = append(mediaTypes, mt)
	}
	return mediaTypes
}
// UnmarshalFunc implements manifest unmarshalling a given MediaType.
type UnmarshalFunc func([]byte) (Manifest, Descriptor, error)

// mappings maps a media type to its registered UnmarshalFunc; the entry under
// the empty string, if any, is the default used when no specific type matches.
var mappings = make(map[string]UnmarshalFunc)
// UnmarshalManifest looks up the manifest unmarshal function registered for
// the media type named by the Content-Type header value ctHeader (parameters
// such as charset are stripped first) and applies it to p. When no function
// is registered for the type, the default (empty media type) registration is
// used; if neither exists an error is returned.
func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) {
	var mediaType string
	if ctHeader != "" {
		parsed, _, err := mime.ParseMediaType(ctHeader)
		if err != nil {
			return nil, Descriptor{}, err
		}
		mediaType = parsed
	}
	fn, ok := mappings[mediaType]
	if !ok {
		// Fall back to the registered default, if any.
		fn, ok = mappings[""]
		if !ok {
			return nil, Descriptor{}, fmt.Errorf(
				"unsupported manifest media type and no default available: %s",
				mediaType,
			)
		}
	}
	return fn(p)
}
// RegisterManifestSchema registers an UnmarshalFunc for a given media type.
// Registering the same media type twice is an error; the first registration
// is kept.
func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error {
	if _, exists := mappings[mediaType]; exists {
		return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType)
	}
	mappings[mediaType] = u
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/manifest/errors.go | registry/app/manifest/errors.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package manifest
import (
"errors"
"fmt"
"strings"
"github.com/opencontainers/go-digest"
)
// ErrAccessDenied is returned when an access to a requested resource is
// denied.
var ErrAccessDenied = errors.New("access denied")

// ErrManifestNotModified is returned when a conditional manifest GetByTag
// returns nil due to the client indicating it has the latest version.
var ErrManifestNotModified = errors.New("manifest not modified")

// ErrUnsupported is returned when an unimplemented or unsupported action is
// performed.
var ErrUnsupported = errors.New("operation unsupported")

// ErrSchemaV1Unsupported is returned when a client tries to upload a schema v1
// manifest but the registry is configured to reject it.
var ErrSchemaV1Unsupported = errors.New("manifest schema v1 unsupported")
// TagUnknownError is returned if the given tag is not known by the tag
// service.
type TagUnknownError struct {
	Tag string
}

// Error implements the error interface.
func (e TagUnknownError) Error() string {
	return "unknown tag=" + e.Tag
}
// RegistryUnknownError is returned if the named repository is not known by
// the registry.
type RegistryUnknownError struct {
	Name string
}

// Error implements the error interface.
func (e RegistryUnknownError) Error() string {
	return "unknown registry name=" + e.Name
}
// RegistryNameInvalidError should be used to denote an invalid registry
// name. Reason may set, indicating the cause of invalidity.
type RegistryNameInvalidError struct {
Name string
Reason error
}
func (err RegistryNameInvalidError) Error() string {
return fmt.Sprintf("registry name %q invalid: %v", err.Name, err.Reason)
}
// UnknownError is returned if the manifest is not known by the registry.
type UnknownError struct {
	Name string
	Tag  string
}

// Error implements the error interface.
func (e UnknownError) Error() string {
	return "unknown manifest name=" + e.Name + " tag=" + e.Tag
}
// UnknownRevisionError is returned when a manifest cannot be found by
// revision within a registry.
type UnknownRevisionError struct {
	Name     string
	Revision digest.Digest
}

// Error implements the error interface.
func (err UnknownRevisionError) Error() string {
	return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision)
}
// UnverifiedError is returned when the registry is unable to verify
// the manifest.
type UnverifiedError struct{}

// Error implements the error interface.
func (UnverifiedError) Error() string {
	return "unverified manifest"
}
// ReferencesExceedLimitError is returned when a manifest has too many
// references.
type ReferencesExceedLimitError struct {
	References int
	Limit      int
}

// Error implements the error interface.
func (e ReferencesExceedLimitError) Error() string {
	return fmt.Sprintf("%d manifest references exceed reference limit of %d", e.References, e.Limit)
}
// PayloadSizeExceedsLimitError is returned when a manifest is bigger than
// the configured payload size limit.
type PayloadSizeExceedsLimitError struct {
	PayloadSize int
	Limit       int
}

// Error implements the error interface.
func (e PayloadSizeExceedsLimitError) Error() string {
	return fmt.Sprintf("manifest payload size of %d exceeds limit of %d", e.PayloadSize, e.Limit)
}
// ManifestVerificationErrors provides a type to collect errors encountered
// during manifest verification. Currently, it accepts errors of all types,
// but it may be narrowed to those involving manifest verification.
type VerificationErrors []error
func (errs VerificationErrors) Error() string {
parts := make([]string, 0, len(errs))
for _, err := range errs {
parts = append(parts, err.Error())
}
return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ","))
}
// BlobUnknownError is returned when a referenced blob cannot be found.
type BlobUnknownError struct {
	Digest digest.Digest
}

// Error implements the error interface.
func (err BlobUnknownError) Error() string {
	return fmt.Sprintf("unknown blob %v on manifest", err.Digest)
}
// ManifestNameInvalidError should be used to denote an invalid manifest
// name. Reason may set, indicating the cause of invalidity.
type NameInvalidError struct {
Name string
Reason error
}
func (err NameInvalidError) Error() string {
return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/manifest/versioned.go | registry/app/manifest/versioned.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package manifest
// Versioned provides a struct with the manifest schemaVersion and mediaType.
// Incoming content with unknown schema version can be decoded against this
// struct to check the version before choosing a full unmarshaller.
type Versioned struct {
	// SchemaVersion is the image manifest schema that this image follows.
	SchemaVersion int `json:"schemaVersion"`
	// MediaType is the media type of this schema.
	MediaType string `json:"mediaType,omitempty"`
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/manifest/doc.go | registry/app/manifest/doc.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package manifest
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/manifest/ocischema/index.go | registry/app/manifest/ocischema/index.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ocischema
import (
"encoding/json"
"errors"
"fmt"
"github.com/harness/gitness/registry/app/manifest"
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// IndexSchemaVersion provides a pre-initialized version structure for OCI Image
// Indices.
var IndexSchemaVersion = manifest.Versioned{
	SchemaVersion: 2,
	MediaType:     v1.MediaTypeImageIndex,
}

// init registers the OCI image index unmarshal func with the package-level
// manifest schema registry so manifest.UnmarshalManifest can decode indices.
func init() {
	imageIndexFunc := func(b []byte) (manifest.Manifest, manifest.Descriptor, error) {
		// Reject payloads that carry manifest-only fields before decoding.
		if err := validateIndex(b); err != nil {
			return nil, manifest.Descriptor{}, err
		}
		m := new(DeserializedImageIndex)
		err := m.UnmarshalJSON(b)
		if err != nil {
			return nil, manifest.Descriptor{}, err
		}
		// An absent mediaType is tolerated; any other value must match the
		// OCI index media type exactly.
		if m.MediaType != "" && m.MediaType != v1.MediaTypeImageIndex {
			err = fmt.Errorf(
				"if present, mediaType in image index should be '%s' not '%s'",
				v1.MediaTypeImageIndex, m.MediaType,
			)
			return nil, manifest.Descriptor{}, err
		}
		// Descriptor digest/size are computed over the raw payload bytes.
		dgst := digest.FromBytes(b)
		return m, manifest.Descriptor{
			MediaType:   v1.MediaTypeImageIndex,
			Digest:      dgst,
			Size:        int64(len(b)),
			Annotations: m.Annotations(),
		}, err
	}
	err := manifest.RegisterManifestSchema(v1.MediaTypeImageIndex, imageIndexFunc)
	if err != nil {
		panic(fmt.Sprintf("Unable to register OCI Image Index: %s", err))
	}
}
// ImageIndex references manifests for various platforms.
type ImageIndex struct {
	manifest.Versioned

	// ArtifactType is an OPTIONAL property containing the type of an
	// artifact when the manifest is used for an artifact. This MUST be set
	// when config.mediaType is set to the empty value.
	ArtifactType string `json:"artifactType,omitempty"`

	// Manifests references a list of manifests.
	Manifests []manifest.Descriptor `json:"manifests"`

	// Annotations is an optional field that contains arbitrary metadata for
	// the image index.
	Annotations map[string]string `json:"annotations,omitempty"`

	// Subject is an OPTIONAL descriptor of another manifest. This value,
	// used by the referrers API, indicates a relationship to the specified
	// manifest.
	Subject *manifest.Descriptor `json:"subject,omitempty"`
}

// References returns the distribution descriptors for the referenced image
// manifests.
func (ii ImageIndex) References() []manifest.Descriptor {
	return ii.Manifests
}
// DeserializedImageIndex wraps ImageIndex with a copy of the original
// JSON.
type DeserializedImageIndex struct {
	ImageIndex

	// canonical is the canonical byte representation of the Manifest.
	canonical []byte
}

// FromDescriptors takes a slice of descriptors and a map of annotations, and
// returns a DeserializedImageIndex which contains the resulting index
// and its JSON representation. If annotations is nil or empty then the
// annotations property will be omitted from the JSON representation.
func FromDescriptors(descriptors []manifest.Descriptor, annotations map[string]string) (
	*DeserializedImageIndex, error,
) {
	return fromDescriptorsWithMediaType(descriptors, annotations, v1.MediaTypeImageIndex)
}
// fromDescriptorsWithMediaType builds a DeserializedImageIndex with an
// explicitly chosen media type. FromDescriptors is the public entry point;
// tests call this directly to control the media type.
func fromDescriptorsWithMediaType(
	descriptors []manifest.Descriptor,
	annotations map[string]string,
	mediaType string,
) (_ *DeserializedImageIndex, err error) {
	// Copy the descriptor slice so later mutations by the caller cannot
	// leak into the index.
	manifests := append(make([]manifest.Descriptor, 0, len(descriptors)), descriptors...)

	index := ImageIndex{
		Versioned: manifest.Versioned{
			SchemaVersion: IndexSchemaVersion.SchemaVersion,
			MediaType:     mediaType,
		},
		Manifests:   manifests,
		Annotations: annotations,
	}

	out := &DeserializedImageIndex{ImageIndex: index}
	// The indented serialization becomes the canonical, digest-stable bytes.
	out.canonical, err = json.MarshalIndent(&index, "", "   ")
	return out, err
}
// UnmarshalJSON populates a new ImageIndex struct from JSON data, retaining
// the raw payload bytes as the canonical representation.
func (m *DeserializedImageIndex) UnmarshalJSON(b []byte) error {
	m.canonical = make([]byte, len(b))
	// store manifest list in canonical
	copy(m.canonical, b)

	// Unmarshal canonical JSON into ManifestList object
	var manifestList ImageIndex
	if err := json.Unmarshal(m.canonical, &manifestList); err != nil {
		return err
	}

	m.ImageIndex = manifestList
	return nil
}

// MarshalJSON returns the contents of canonical. If canonical is empty, an
// error is returned rather than re-marshaling, so the byte-for-byte
// (digest-stable) representation is never silently regenerated.
func (m *DeserializedImageIndex) MarshalJSON() ([]byte, error) {
	if len(m.canonical) > 0 {
		return m.canonical, nil
	}
	return nil, errors.New("JSON representation not initialized in DeserializedImageIndex")
}

// Payload returns the raw content of the manifest list. The contents can be
// used to calculate the content identifier.
func (m DeserializedImageIndex) Payload() (string, []byte, error) {
	// Default to the OCI index media type when the payload omitted it.
	mediaType := m.MediaType
	if m.MediaType == "" {
		mediaType = v1.MediaTypeImageIndex
	}
	return mediaType, m.canonical, nil
}
// validateIndex returns an error if the byte slice is invalid JSON or if it
// carries manifest-only fields ("config"/"layers"), which indicate the bytes
// are an image manifest rather than an image index.
func validateIndex(b []byte) error {
	// Probe only the manifest-specific fields; an index must not carry them.
	var probe struct {
		Config any `json:"config,omitempty"`
		Layers any `json:"layers,omitempty"`
	}
	if err := json.Unmarshal(b, &probe); err != nil {
		return err
	}
	switch {
	case probe.Config != nil, probe.Layers != nil:
		return errors.New("index: expected index but found manifest")
	default:
		return nil
	}
}
// ArtifactType returns the OPTIONAL artifactType property of the index.
func (m *DeserializedImageIndex) ArtifactType() string { return m.ImageIndex.ArtifactType }

// Subject returns the subject descriptor, or a zero Descriptor when unset.
func (m *DeserializedImageIndex) Subject() manifest.Descriptor {
	if m.ImageIndex.Subject == nil {
		return manifest.Descriptor{}
	}
	return *m.ImageIndex.Subject
}

// Annotations returns the index annotations, normalizing nil to an empty map
// so callers never need a nil check.
func (m *DeserializedImageIndex) Annotations() map[string]string {
	if m.ImageIndex.Annotations == nil {
		return map[string]string{}
	}
	return m.ImageIndex.Annotations
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/manifest/ocischema/manifest.go | registry/app/manifest/ocischema/manifest.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ocischema
import (
"encoding/json"
"errors"
"fmt"
"github.com/harness/gitness/registry/app/manifest"
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// SchemaVersion provides a pre-initialized version structure for OCI Image
// Manifests.
var SchemaVersion = manifest.Versioned{
	SchemaVersion: 2,
	MediaType:     v1.MediaTypeImageManifest,
}

// init registers the OCI image manifest unmarshal func with the
// package-level manifest schema registry.
func init() {
	ocischemaFunc := func(b []byte) (manifest.Manifest, manifest.Descriptor, error) {
		// Reject payloads carrying index-only fields before decoding.
		if err := validateManifest(b); err != nil {
			return nil, manifest.Descriptor{}, err
		}
		m := new(DeserializedManifest)
		err := m.UnmarshalJSON(b)
		if err != nil {
			return nil, manifest.Descriptor{}, err
		}
		// Descriptor digest/size are computed over the raw payload bytes.
		dgst := digest.FromBytes(b)
		return m, manifest.Descriptor{
			MediaType:   v1.MediaTypeImageManifest,
			Digest:      dgst,
			Size:        int64(len(b)),
			Annotations: m.Annotations(),
		}, err
	}
	err := manifest.RegisterManifestSchema(v1.MediaTypeImageManifest, ocischemaFunc)
	if err != nil {
		panic(fmt.Sprintf("Unable to register manifest: %s", err))
	}
}
// Manifest defines an ocischema manifest.
type Manifest struct {
	manifest.Versioned

	// ArtifactType is an OPTIONAL property containing the type of an
	// artifact when the manifest is used for an artifact. This MUST be set
	// when config.mediaType is set to the empty value.
	ArtifactType string `json:"artifactType,omitempty"`

	// Config references the image configuration as a blob.
	Config manifest.Descriptor `json:"config"`

	// Layers lists descriptors for the layers referenced by the
	// configuration.
	Layers []manifest.Descriptor `json:"layers"`

	// Subject is an OPTIONAL descriptor of another manifest. This value,
	// used by the referrers API, indicates a relationship to the specified
	// manifest.
	Subject *manifest.Descriptor `json:"subject,omitempty"`

	// Annotations contains arbitrary metadata for the image manifest.
	Annotations map[string]string `json:"annotations,omitempty"`
}

// References returns the descriptors of this manifest's references: the
// config descriptor first, followed by the layers in order.
func (m Manifest) References() []manifest.Descriptor {
	references := make([]manifest.Descriptor, 0, 1+len(m.Layers))
	references = append(references, m.Config)
	references = append(references, m.Layers...)
	return references
}

// Target returns the target (config descriptor) of this manifest.
func (m Manifest) Target() manifest.Descriptor {
	return m.Config
}
// DeserializedManifest wraps Manifest with a copy of the original JSON.
// It satisfies the distribution.Manifest interface.
type DeserializedManifest struct {
	Manifest

	// canonical is the canonical byte representation of the Manifest.
	canonical []byte
}

// FromStruct takes a Manifest structure, marshals it to JSON, and returns a
// DeserializedManifest which contains the manifest and its JSON representation.
func FromStruct(m Manifest) (*DeserializedManifest, error) {
	var deserialized DeserializedManifest
	deserialized.Manifest = m

	var err error
	// The indented serialization becomes the canonical, digest-stable bytes.
	deserialized.canonical, err = json.MarshalIndent(&m, "", "   ")
	return &deserialized, err
}
// UnmarshalJSON populates a new Manifest struct from JSON data, retaining the
// raw payload bytes as the canonical representation. An absent mediaType is
// tolerated; any other value must match the OCI image manifest type.
func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
	m.canonical = make([]byte, len(b))
	// store manifest in canonical
	copy(m.canonical, b)

	// Unmarshal canonical JSON into Manifest object
	var mfst Manifest
	if err := json.Unmarshal(m.canonical, &mfst); err != nil {
		return err
	}

	if mfst.MediaType != "" && mfst.MediaType != v1.MediaTypeImageManifest {
		return fmt.Errorf(
			"if present, mediaType in manifest should be '%s' not '%s'",
			v1.MediaTypeImageManifest, mfst.MediaType,
		)
	}

	m.Manifest = mfst
	return nil
}
// MarshalJSON returns the contents of canonical. If canonical is empty, an
// error is returned rather than re-marshaling, so the digest-stable byte
// representation is never silently regenerated.
func (m *DeserializedManifest) MarshalJSON() ([]byte, error) {
	if len(m.canonical) > 0 {
		return m.canonical, nil
	}
	return nil, errors.New("JSON representation not initialized in DeserializedManifest")
}

// Payload returns the raw content of the manifest. The contents can be used
// to calculate the content identifier.
func (m DeserializedManifest) Payload() (string, []byte, error) {
	return v1.MediaTypeImageManifest, m.canonical, nil
}
// validateManifest returns an error if the byte slice is invalid JSON or if
// it carries a "manifests" field, which belongs to an image index rather than
// an image manifest.
func validateManifest(b []byte) error {
	// Probe only the index-specific field; a manifest must not carry it.
	var probe struct {
		Manifests any `json:"manifests,omitempty"`
	}
	if err := json.Unmarshal(b, &probe); err != nil {
		return err
	}
	if probe.Manifests == nil {
		return nil
	}
	return errors.New("ocimanifest: expected manifest but found index")
}
// Version returns the manifest's schema/media-type version information.
// Media type can be either Docker (`application/vnd.docker.distribution.manifest.v2+json`) or OCI (empty).
// We need to make it explicit if empty, otherwise we're not able to distinguish between media types.
func (m *DeserializedManifest) Version() manifest.Versioned {
	if m.Versioned.MediaType == "" {
		m.Versioned.MediaType = v1.MediaTypeImageManifest
	}
	return m.Versioned
}

// Config returns the config descriptor (same as Target).
func (m *DeserializedManifest) Config() manifest.Descriptor { return m.Target() }

// Layers returns the layer descriptors referenced by the manifest.
func (m *DeserializedManifest) Layers() []manifest.Descriptor { return m.Manifest.Layers }

// DistributableLayers returns the subset of layers that may be pushed to a
// registry, filtering out OCI non-distributable layer media types.
func (m *DeserializedManifest) DistributableLayers() []manifest.Descriptor {
	var ll []manifest.Descriptor
	for _, l := range m.Layers() {
		switch l.MediaType {
		case v1.MediaTypeImageLayerNonDistributable, v1.MediaTypeImageLayerNonDistributableGzip:
			continue
		}
		ll = append(ll, l)
	}
	return ll
}

// ArtifactType returns the OPTIONAL artifactType property.
func (m *DeserializedManifest) ArtifactType() string { return m.Manifest.ArtifactType }

// Subject returns the subject descriptor, or a zero Descriptor when unset.
func (m *DeserializedManifest) Subject() manifest.Descriptor {
	if m.Manifest.Subject == nil {
		return manifest.Descriptor{}
	}
	return *m.Manifest.Subject
}

// Annotations returns the manifest annotations, normalizing nil to an empty
// map so callers never need a nil check.
func (m *DeserializedManifest) Annotations() map[string]string {
	if m.Manifest.Annotations == nil {
		return map[string]string{}
	}
	return m.Manifest.Annotations
}

// TotalSize returns the sum of all layer sizes, the config blob size, and the
// size of the canonical manifest JSON itself.
func (m *DeserializedManifest) TotalSize() int64 {
	var layersSize int64
	for _, layer := range m.Layers() {
		layersSize += layer.Size
	}
	return layersSize + m.Config().Size + int64(len(m.canonical))
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/manifest/schema2/manifest.go | registry/app/manifest/schema2/manifest.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schema2
import (
"encoding/json"
"errors"
"fmt"
"github.com/harness/gitness/registry/app/manifest"
"github.com/opencontainers/go-digest"
)
const (
	// MediaTypeManifest specifies the mediaType for the current version.
	MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json"

	// MediaTypeImageConfig specifies the mediaType for the image configuration.
	MediaTypeImageConfig = "application/vnd.docker.container.image.v1+json"

	// MediaTypePluginConfig specifies the mediaType for plugin configuration.
	MediaTypePluginConfig = "application/vnd.docker.plugin.v1+json"

	// MediaTypeLayer is the mediaType used for layers referenced by the
	// manifest.
	MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip"

	// MediaTypeForeignLayer is the mediaType used for layers that must be
	// downloaded from foreign URLs.
	MediaTypeForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"

	// MediaTypeUncompressedLayer is the mediaType used for layers which
	// are not compressed.
	MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar"
)

// SchemaVersion provides a pre-initialized version structure for this
// package's version of the manifest.
var SchemaVersion = manifest.Versioned{
	SchemaVersion: 2,
	MediaType:     MediaTypeManifest,
}

// init registers the Docker schema2 unmarshal func with the package-level
// manifest schema registry.
func init() {
	schema2Func := func(b []byte) (manifest.Manifest, manifest.Descriptor, error) {
		m := new(DeserializedManifest)
		err := m.UnmarshalJSON(b)
		if err != nil {
			return nil, manifest.Descriptor{}, err
		}
		// Descriptor digest/size are computed over the raw payload bytes.
		dgst := digest.FromBytes(b)
		return m, manifest.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err
	}
	err := manifest.RegisterManifestSchema(MediaTypeManifest, schema2Func)
	if err != nil {
		panic(fmt.Sprintf("Unable to register manifest: %s", err))
	}
}
// Manifest defines a schema2 manifest.
type Manifest struct {
	manifest.Versioned

	// Config references the image configuration as a blob.
	Config manifest.Descriptor `json:"config"`

	// Layers lists descriptors for the layers referenced by the
	// configuration.
	Layers []manifest.Descriptor `json:"layers"`
}

// References returns the descriptors of this manifest's references: the
// config descriptor first, followed by the layers in order.
func (m Manifest) References() []manifest.Descriptor {
	references := make([]manifest.Descriptor, 0, 1+len(m.Layers))
	references = append(references, m.Config)
	references = append(references, m.Layers...)
	return references
}

// Target returns the target (config descriptor) of this manifest.
func (m Manifest) Target() manifest.Descriptor {
	return m.Config
}

// DeserializedManifest wraps Manifest with a copy of the original JSON.
// It satisfies the distribution.Manifest interface.
type DeserializedManifest struct {
	Manifest

	// canonical is the canonical byte representation of the Manifest.
	canonical []byte
}

// FromStruct takes a Manifest structure, marshals it to JSON, and returns a
// DeserializedManifest which contains the manifest and its JSON representation.
func FromStruct(m Manifest) (*DeserializedManifest, error) {
	var deserialized DeserializedManifest
	deserialized.Manifest = m

	var err error
	// The indented serialization becomes the canonical bytes; the companion
	// test pins this exact three-space-indent output.
	deserialized.canonical, err = json.MarshalIndent(&m, "", "   ")
	return &deserialized, err
}
// UnmarshalJSON populates a new Manifest struct from JSON data, retaining the
// raw payload bytes as the canonical representation. Unlike the OCI flavor,
// schema2 requires the mediaType field to be present and exact.
func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
	m.canonical = make([]byte, len(b))
	// store manifest in canonical
	copy(m.canonical, b)

	// Unmarshal canonical JSON into Manifest object
	var mfst Manifest
	if err := json.Unmarshal(m.canonical, &mfst); err != nil {
		return err
	}

	if mfst.MediaType != MediaTypeManifest {
		return fmt.Errorf(
			"mediaType in manifest should be '%s' not '%s'",
			MediaTypeManifest, mfst.MediaType,
		)
	}

	m.Manifest = mfst
	return nil
}

// MarshalJSON returns the contents of canonical. If canonical is empty, an
// error is returned rather than re-marshaling, preserving the byte-for-byte
// payload used for digest calculation.
func (m *DeserializedManifest) MarshalJSON() ([]byte, error) {
	if len(m.canonical) > 0 {
		return m.canonical, nil
	}
	return nil, errors.New("JSON representation not initialized in DeserializedManifest")
}
// Version returns the schema/media-type version information of the manifest.
func (m *DeserializedManifest) Version() manifest.Versioned { return m.Versioned }

// Config returns the config descriptor (same as Target).
func (m *DeserializedManifest) Config() manifest.Descriptor { return m.Target() }

// Layers returns the layer descriptors referenced by the manifest.
func (m *DeserializedManifest) Layers() []manifest.Descriptor { return m.Manifest.Layers }

// DistributableLayers returns the subset of layers that may be pushed to a
// registry, filtering out foreign (URL-downloaded) layers.
func (m *DeserializedManifest) DistributableLayers() []manifest.Descriptor {
	var ll []manifest.Descriptor
	for _, l := range m.Layers() {
		if l.MediaType != MediaTypeForeignLayer {
			ll = append(ll, l)
		}
	}
	return ll
}

// TotalSize returns the sum of all layer sizes, the config blob size, and the
// size of the canonical manifest JSON itself.
func (m *DeserializedManifest) TotalSize() int64 {
	var layersSize int64
	for _, layer := range m.Layers() {
		layersSize += layer.Size
	}
	return layersSize + m.Config().Size + int64(len(m.canonical))
}

// Payload returns the raw content of the manifest. The contents can be used to
// calculate the content identifier.
func (m DeserializedManifest) Payload() (string, []byte, error) {
	return m.MediaType, m.canonical, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/manifest/schema2/manifest_test.go | registry/app/manifest/schema2/manifest_test.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schema2
import (
"bytes"
"encoding/json"
"reflect"
"testing"
"github.com/harness/gitness/registry/app/manifest"
)
const expectedManifestSerialization = `{
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"config": {
"mediaType": "application/vnd.docker.container.image.v1+json",
"digest": "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
"size": 985
},
"layers": [
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"digest": "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b",
"size": 153263
}
]
}`
// makeTestManifest builds a minimal schema2 Manifest with one config blob and
// one layer, using the given mediaType so tests can exercise media-type
// validation.
func makeTestManifest(mediaType string) Manifest {
	return Manifest{
		Versioned: manifest.Versioned{
			SchemaVersion: 2,
			MediaType:     mediaType,
		},
		Config: manifest.Descriptor{
			MediaType: MediaTypeImageConfig,
			Digest:    "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
			Size:      985,
		},
		Layers: []manifest.Descriptor{
			{
				MediaType: MediaTypeLayer,
				Digest:    "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b",
				Size:      153263,
			},
		},
	}
}
// TestManifest verifies round-trip serialization of a schema2 manifest:
// payload media type, canonical bytes, unmarshal equality, and the target and
// reference accessors.
func TestManifest(t *testing.T) {
	mfst := makeTestManifest(MediaTypeManifest)

	deserialized, err := FromStruct(mfst)
	if err != nil {
		t.Fatalf("error creating DeserializedManifest: %v", err)
	}

	mediaType, canonical, _ := deserialized.Payload()
	if mediaType != MediaTypeManifest {
		t.Fatalf("unexpected media type: %s", mediaType)
	}

	// Check that the canonical field is the same as json.MarshalIndent
	// with these parameters.
	expected, err := json.MarshalIndent(&mfst, "", "   ")
	if err != nil {
		t.Fatalf("error marshaling manifest: %v", err)
	}
	if !bytes.Equal(expected, canonical) {
		t.Fatalf("manifest bytes not equal:\nexpected:\n%s\nactual:\n%s\n", string(expected), string(canonical))
	}

	// Check that canonical field matches expected value.
	if !bytes.Equal([]byte(expectedManifestSerialization), canonical) {
		t.Fatalf(
			"manifest bytes not equal:\nexpected:\n%s\nactual:\n%s\n",
			expectedManifestSerialization,
			string(canonical),
		)
	}

	var unmarshalled DeserializedManifest
	if err := json.Unmarshal(deserialized.canonical, &unmarshalled); err != nil {
		t.Fatalf("error unmarshaling manifest: %v", err)
	}
	if !reflect.DeepEqual(&unmarshalled, deserialized) {
		t.Fatalf("manifests are different after unmarshaling: %v != %v", unmarshalled, *deserialized)
	}

	target := deserialized.Target()
	if target.Digest != "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b" {
		t.Fatalf("unexpected digest in target: %s", target.Digest.String())
	}
	if target.MediaType != MediaTypeImageConfig {
		t.Fatalf("unexpected media type in target: %s", target.MediaType)
	}
	if target.Size != 985 {
		t.Fatalf("unexpected size in target: %d", target.Size)
	}

	references := deserialized.References()
	if len(references) != 2 {
		t.Fatalf("unexpected number of references: %d", len(references))
	}
	if !reflect.DeepEqual(references[0], target) {
		t.Fatalf("first reference should be target: %v != %v", references[0], target)
	}

	// Test the second reference. The diagnostics below previously printed
	// references[0] (a copy-paste slip); they now print the element actually
	// under test.
	if references[1].Digest != "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b" {
		t.Fatalf("unexpected digest in reference: %s", references[1].Digest.String())
	}
	if references[1].MediaType != MediaTypeLayer {
		t.Fatalf("unexpected media type in reference: %s", references[1].MediaType)
	}
	if references[1].Size != 153263 {
		t.Fatalf("unexpected size in reference: %d", references[1].Size)
	}
}
// mediaTypeTest round-trips a manifest with the given mediaType through the
// registered schema2 unmarshal func and asserts the expected outcome.
// shouldError reports whether manifest.UnmarshalManifest must reject the
// payload.
func mediaTypeTest(t *testing.T, mediaType string, shouldError bool) {
	mfst := makeTestManifest(mediaType)

	deserialized, err := FromStruct(mfst)
	if err != nil {
		t.Fatalf("error creating DeserializedManifest: %v", err)
	}

	unmarshalled, descriptor, err := manifest.UnmarshalManifest(
		MediaTypeManifest,
		deserialized.canonical,
	)

	if shouldError {
		if err == nil {
			t.Fatalf("bad content type should have produced error")
		}
		return
	}
	if err != nil {
		t.Fatalf("error unmarshaling manifest, %v", err)
	}

	asManifest, ok := unmarshalled.(*DeserializedManifest)
	if !ok {
		// Fatalf stops the test via runtime.Goexit, so no explicit return is
		// needed (the previous `return` here was unreachable).
		t.Fatalf("Error: unmarshalled is not of type *DeserializedManifest")
	}

	if asManifest.MediaType != mediaType {
		t.Fatalf("Bad media type '%v' as unmarshalled", asManifest.MediaType)
	}

	if descriptor.MediaType != MediaTypeManifest {
		t.Fatalf("Bad media type '%v' for descriptor", descriptor.MediaType)
	}

	unmarshalledMediaType, _, _ := unmarshalled.Payload()
	if unmarshalledMediaType != MediaTypeManifest {
		t.Fatalf("Bad media type '%v' for payload", unmarshalledMediaType)
	}
}

// TestMediaTypes exercises empty, correct, and corrupted media types.
func TestMediaTypes(t *testing.T) {
	mediaTypeTest(t, "", true)
	mediaTypeTest(t, MediaTypeManifest, false)
	mediaTypeTest(t, MediaTypeManifest+"XXX", true)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/manifest/manifestlist/manifestlist.go | registry/app/manifest/manifestlist/manifestlist.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package manifestlist
import (
"encoding/json"
"errors"
"fmt"
"github.com/harness/gitness/registry/app/manifest"
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
const (
	// MediaTypeManifestList specifies the mediaType for manifest lists.
	MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json"
)

// SchemaVersion provides a pre-initialized version structure for this
// package's version of the manifest.
var SchemaVersion = manifest.Versioned{
	SchemaVersion: 2,
	MediaType:     MediaTypeManifestList,
}

// init registers the Docker manifest-list unmarshal func with the
// package-level manifest schema registry.
//
// NOTE(review): unlike the ocischema index path, this func performs no
// structural validation of the payload (validateManifestList below is never
// called); confirm whether that is intentional.
func init() {
	manifestListFunc := func(b []byte) (manifest.Manifest, manifest.Descriptor, error) {
		m := new(DeserializedManifestList)
		err := m.UnmarshalJSON(b)
		if err != nil {
			return nil, manifest.Descriptor{}, err
		}
		// The mediaType field must be present and exact for Docker lists.
		if m.MediaType != MediaTypeManifestList {
			err = fmt.Errorf(
				"mediaType in manifest list should be '%s' not '%s'",
				MediaTypeManifestList, m.MediaType,
			)
			return nil, manifest.Descriptor{}, err
		}
		// Descriptor digest/size are computed over the raw payload bytes.
		dgst := digest.FromBytes(b)
		return m, manifest.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifestList}, err
	}
	err := manifest.RegisterManifestSchema(MediaTypeManifestList, manifestListFunc)
	if err != nil {
		panic(fmt.Sprintf("Unable to register manifest: %s", err))
	}
}
// PlatformSpec specifies a platform where a particular image manifest is
// applicable.
type PlatformSpec struct {
	// Architecture field specifies the CPU architecture, for example
	// `amd64` or `ppc64`.
	Architecture string `json:"architecture"`

	// OS specifies the operating system, for example `linux` or `windows`.
	OS string `json:"os"`

	// OSVersion is an optional field specifying the operating system
	// version, for example `10.0.10586`.
	OSVersion string `json:"os.version,omitempty"`

	// OSFeatures is an optional field specifying an array of strings,
	// each listing a required OS feature (for example on Windows `win32k`).
	OSFeatures []string `json:"os.features,omitempty"`

	// Variant is an optional field specifying a variant of the CPU, for
	// example `ppc64le` to specify a little-endian version of a PowerPC CPU.
	Variant string `json:"variant,omitempty"`

	// Features is an optional field specifying an array of strings, each
	// listing a required CPU feature (for example `sse4` or `aes`).
	Features []string `json:"features,omitempty"`
}

// A ManifestDescriptor references a platform-specific manifest.
type ManifestDescriptor struct {
	manifest.Descriptor

	// Platform specifies which platform the manifest pointed to by the
	// descriptor runs on.
	Platform PlatformSpec `json:"platform"`
}

// ManifestList references manifests for various platforms.
type ManifestList struct {
	manifest.Versioned

	// Manifests references a list of manifests.
	Manifests []ManifestDescriptor `json:"manifests"`
}
// References returns the distribution descriptors for the referenced image
// manifests, with each descriptor's Platform populated from the
// platform-specific spec recorded alongside it.
func (m ManifestList) References() []manifest.Descriptor {
	dependencies := make([]manifest.Descriptor, 0, len(m.Manifests))
	for _, md := range m.Manifests {
		dep := md.Descriptor
		dep.Platform = &v1.Platform{
			Architecture: md.Platform.Architecture,
			OS:           md.Platform.OS,
			OSVersion:    md.Platform.OSVersion,
			OSFeatures:   md.Platform.OSFeatures,
			Variant:      md.Platform.Variant,
		}
		dependencies = append(dependencies, dep)
	}
	return dependencies
}
// DeserializedManifestList wraps ManifestList with a copy of the original
// JSON.
type DeserializedManifestList struct {
	ManifestList

	// canonical is the canonical byte representation of the Manifest.
	canonical []byte
}

// FromDescriptors takes a slice of descriptors, and returns a
// DeserializedManifestList which contains the resulting manifest list
// and its JSON representation.
func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) {
	return fromDescriptorsWithMediaType(descriptors, MediaTypeManifestList)
}

// fromDescriptorsWithMediaType is for testing purposes, it's useful to be able to specify the media type explicitly.
func fromDescriptorsWithMediaType(
	descriptors []ManifestDescriptor,
	mediaType string,
) (*DeserializedManifestList, error) {
	m := ManifestList{
		Versioned: manifest.Versioned{
			SchemaVersion: SchemaVersion.SchemaVersion,
			MediaType:     mediaType,
		},
	}

	// Copy the descriptor slice so later caller mutations cannot leak in.
	m.Manifests = make([]ManifestDescriptor, len(descriptors))
	copy(m.Manifests, descriptors)

	deserialized := DeserializedManifestList{
		ManifestList: m,
	}

	var err error
	// The indented serialization becomes the canonical, digest-stable bytes.
	deserialized.canonical, err = json.MarshalIndent(&m, "", "   ")
	return &deserialized, err
}
// UnmarshalJSON populates a new ManifestList struct from JSON data.
func (m *DeserializedManifestList) UnmarshalJSON(b []byte) error {
m.canonical = make([]byte, len(b))
// store manifest list in canonical
copy(m.canonical, b)
// Unmarshal canonical JSON into ManifestList object
var manifestList ManifestList
if err := json.Unmarshal(m.canonical, &manifestList); err != nil {
return err
}
m.ManifestList = manifestList
return nil
}
// MarshalJSON returns the contents of canonical. If canonical is empty,
// an error is returned: the type never re-marshals the inner struct, since
// doing so could change the bytes a digest was computed over.
func (m *DeserializedManifestList) MarshalJSON() ([]byte, error) {
	if len(m.canonical) == 0 {
		return nil, errors.New("JSON representation not initialized in DeserializedManifestList")
	}
	return m.canonical, nil
}
// Payload returns the raw content of the manifest list. The contents can be
// used to calculate the content identifier.
func (m DeserializedManifestList) Payload() (string, []byte, error) {
	mediaType := m.MediaType
	return mediaType, m.canonical, nil
}
// validateManifestList returns an error if the byte slice is invalid JSON or if it
// contains fields that belong to a manifest.
func validateManifestList(b []byte) error {
	// Probe only for the two fields that appear on single-image manifests
	// ("config" and "layers") but never on a manifest list.
	var probe struct {
		Config any `json:"config,omitempty"`
		Layers any `json:"layers,omitempty"`
	}
	if err := json.Unmarshal(b, &probe); err != nil {
		return err
	}
	if probe.Config != nil || probe.Layers != nil {
		return errors.New("manifestlist: expected list but found manifest")
	}
	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/manifest/manifestlist/manifestlist_test.go | registry/app/manifest/manifestlist/manifestlist_test.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package manifestlist
import (
"bytes"
"encoding/json"
"reflect"
"testing"
"github.com/harness/gitness/registry/app/manifest"
"github.com/harness/gitness/registry/app/manifest/schema2"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// expectedManifestListSerialization is the exact canonical JSON that
// fromDescriptorsWithMediaType produces for the fixture built by
// makeTestManifestList; TestManifestList compares it byte-for-byte, so the
// indentation and key order here must not be reformatted.
const expectedManifestListSerialization = `{
   "schemaVersion": 2,
   "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
   "manifests": [
      {
         "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
         "digest": "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
         "size": 985,
         "platform": {
            "architecture": "amd64",
            "os": "linux",
            "features": [
               "sse4"
            ]
         }
      },
      {
         "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
         "digest": "sha256:6346340964309634683409684360934680934608934608934608934068934608",
         "size": 2392,
         "platform": {
            "architecture": "sun4m",
            "os": "sunos"
         }
      }
   ]
}`
// makeTestManifestList builds the shared two-entry fixture (an amd64/linux
// descriptor with a feature list and a sun4m/sunos descriptor) and the
// DeserializedManifestList derived from it under the given media type.
// It fails the test immediately if construction errors.
func makeTestManifestList(t *testing.T, mediaType string) ([]ManifestDescriptor, *DeserializedManifestList) {
	descriptors := []ManifestDescriptor{
		{
			Descriptor: manifest.Descriptor{
				MediaType: "application/vnd.docker.distribution.manifest.v2+json",
				Digest:    "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
				Size:      985,
			},
			Platform: PlatformSpec{
				Architecture: "amd64",
				OS:           "linux",
				Features:     []string{"sse4"},
			},
		},
		{
			Descriptor: manifest.Descriptor{
				MediaType: "application/vnd.docker.distribution.manifest.v2+json",
				Digest:    "sha256:6346340964309634683409684360934680934608934608934608934068934608",
				Size:      2392,
			},
			Platform: PlatformSpec{
				Architecture: "sun4m",
				OS:           "sunos",
			},
		},
	}
	list, err := fromDescriptorsWithMediaType(descriptors, mediaType)
	if err != nil {
		t.Fatalf("error creating DeserializedManifestList: %v", err)
	}
	return descriptors, list
}
// TestManifestList exercises the full round trip of a manifest list:
// payload media type, canonical-bytes stability, JSON re-unmarshaling,
// and the References projection.
func TestManifestList(t *testing.T) {
	manifestDescriptors, deserialized := makeTestManifestList(t, MediaTypeManifestList)
	mediaType, canonical, _ := deserialized.Payload()
	if mediaType != MediaTypeManifestList {
		t.Fatalf("unexpected media type: %s", mediaType)
	}
	// Check that the canonical field is the same as json.MarshalIndent
	// with these parameters.
	expected, err := json.MarshalIndent(&deserialized.ManifestList, "", "   ")
	if err != nil {
		t.Fatalf("error marshaling manifest list: %v", err)
	}
	if !bytes.Equal(expected, canonical) {
		t.Fatalf("manifest bytes not equal:\nexpected:\n%s\nactual:\n%s\n", string(expected), string(canonical))
	}
	// Check that the canonical field has the expected value.
	if !bytes.Equal([]byte(expectedManifestListSerialization), canonical) {
		t.Fatalf(
			"manifest bytes not equal:\nexpected:\n%s\nactual:\n%s\n",
			expectedManifestListSerialization,
			string(canonical),
		)
	}
	// Round-trip: unmarshaling the canonical bytes must reproduce the
	// original DeserializedManifestList exactly (including canonical bytes).
	var unmarshalled DeserializedManifestList
	if err := json.Unmarshal(deserialized.canonical, &unmarshalled); err != nil {
		t.Fatalf("error unmarshaling manifest: %v", err)
	}
	if !reflect.DeepEqual(&unmarshalled, deserialized) {
		t.Fatalf("manifests are different after unmarshaling: %v != %v", unmarshalled, *deserialized)
	}
	// References must expose one OCI-style descriptor per manifest entry,
	// with the PlatformSpec faithfully mapped onto v1.Platform.
	references := deserialized.References()
	if len(references) != 2 {
		t.Fatalf("unexpected number of references: %d", len(references))
	}
	for i := range references {
		platform := manifestDescriptors[i].Platform
		expectedPlatform := &v1.Platform{
			Architecture: platform.Architecture,
			OS:           platform.OS,
			OSFeatures:   platform.OSFeatures,
			OSVersion:    platform.OSVersion,
			Variant:      platform.Variant,
		}
		if !reflect.DeepEqual(references[i].Platform, expectedPlatform) {
			t.Fatalf("unexpected value %d returned by References: %v", i, references[i])
		}
		// With Platform cleared, the remainder must equal the source Descriptor.
		references[i].Platform = nil
		if !reflect.DeepEqual(references[i], manifestDescriptors[i].Descriptor) {
			t.Fatalf("unexpected value %d returned by References: %v", i, references[i])
		}
	}
}
func mediaTypeTest(contentType string, mediaType string, shouldError bool) func(*testing.T) {
return func(t *testing.T) {
var m *DeserializedManifestList
_, m = makeTestManifestList(t, mediaType)
_, canonical, err := m.Payload()
if err != nil {
t.Fatalf("error getting payload, %v", err)
}
unmarshalled, descriptor, err := manifest.UnmarshalManifest(
contentType,
canonical,
)
if shouldError {
if err == nil {
t.Fatalf("bad content type should have produced error")
}
return
}
if err != nil {
t.Fatalf("error unmarshaling manifest, %v", err)
}
asManifest, ok := unmarshalled.(*DeserializedManifestList)
if !ok {
t.Fatalf("Error: unmarshalled is not of type *DeserializedManifestLis")
return
}
if asManifest.MediaType != mediaType {
t.Fatalf("Bad media type '%v' as unmarshalled", asManifest.MediaType)
}
if descriptor.MediaType != contentType {
t.Fatalf("Bad media type '%v' for descriptor", descriptor.MediaType)
}
unmarshalledMediaType, _, _ := unmarshalled.Payload()
if unmarshalledMediaType != contentType {
t.Fatalf("Bad media type '%v' for payload", unmarshalledMediaType)
}
}
}
func TestMediaTypes(t *testing.T) {
t.Run("ManifestList_No_MediaType", mediaTypeTest(MediaTypeManifestList, "", true))
t.Run("ManifestList", mediaTypeTest(MediaTypeManifestList, MediaTypeManifestList, false))
t.Run("ManifestList_Bad_MediaType", mediaTypeTest(MediaTypeManifestList, MediaTypeManifestList+"XXX", true))
}
func TestValidateManifestList(t *testing.T) {
man := schema2.Manifest{
Config: manifest.Descriptor{Size: 1},
Layers: []manifest.Descriptor{{Size: 2}},
}
manifestList := ManifestList{
Manifests: []ManifestDescriptor{
{Descriptor: manifest.Descriptor{Size: 3}},
},
}
t.Run(
"valid", func(t *testing.T) {
b, err := json.Marshal(manifestList)
if err != nil {
t.Fatal("unexpected error marshaling manifest list", err)
}
if err := validateManifestList(b); err != nil {
t.Error("list should be valid", err)
}
},
)
t.Run(
"invalid", func(t *testing.T) {
b, err := json.Marshal(man)
if err != nil {
t.Fatal("unexpected error marshaling manifest", err)
}
if err := validateManifestList(b); err == nil {
t.Error("manifest should not be valid")
}
},
)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/driver/walk.go | registry/app/driver/walk.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package driver
import (
"context"
"errors"
"path/filepath"
"sort"
"strings"
"github.com/rs/zerolog/log"
)
// ErrSkipDir is used as a return value from onFileFunc to indicate that
// the directory named in the call is to be skipped. It is not returned
// as an error by any function.
var ErrSkipDir = errors.New("skip this directory")
// ErrFilledBuffer is used as a return value from onFileFunc to indicate
// that the requested number of entries has been reached and the walk can
// stop.
var ErrFilledBuffer = errors.New("we have enough entries")
// WalkFn is called once per file by Walk.
// Returning ErrSkipDir skips descent into a directory; returning
// ErrFilledBuffer stops the walk without error; any other non-nil error
// aborts the walk and is propagated to the caller.
type WalkFn func(fileInfo FileInfo) error
// WalkFallback traverses a filesystem defined within driver, starting
// from the given path, calling f on each file. It uses the List method and Stat to drive itself.
// If the returned error from the WalkFn is ErrSkipDir the directory will not be entered and Walk
// will continue the traversal. If the returned error from the WalkFn is ErrFilledBuffer, the walk
// stops.
//
// Fix: the hint-loop error check was a parenthesized condition that invoked
// errors.As before the nil check; reordered for idiomatic short-circuiting
// (behavior unchanged).
func WalkFallback(
	ctx context.Context,
	driver StorageDriver,
	from string,
	f WalkFn,
	options ...func(*WalkOptions),
) error {
	walkOptions := &WalkOptions{}
	for _, o := range options {
		o(walkOptions)
	}
	startAfterHint := walkOptions.StartAfterHint
	// Ensure that we are checking the hint is contained within from by adding a "/".
	// Add to both in case the hint and form are the same, which would still count.
	rel, err := filepath.Rel(from, startAfterHint)
	if err != nil || strings.HasPrefix(rel, "..") {
		// The startAfterHint is outside from, so check if we even need to walk anything.
		// Replace any path separators with \x00 so that the sort works in a depth-first way.
		if strings.ReplaceAll(startAfterHint, "/", "\x00") < strings.ReplaceAll(from, "/", "\x00") {
			_, err := doWalkFallback(ctx, driver, from, "", f)
			return err
		}
		return nil
	}
	// The startAfterHint is within from.
	// Walk up the tree until we hit from - we know it is contained.
	// Ensure startAfterHint is never deeper than a child of the base
	// directory so that doWalkFallback doesn't have to worry about
	// depth-first comparisons
	base := startAfterHint
	for strings.HasPrefix(base, from) {
		_, err = doWalkFallback(ctx, driver, base, startAfterHint, f)
		// A vanished hint path is benign; any other error aborts the walk.
		if err != nil && !errors.As(err, &PathNotFoundError{}) {
			return err
		}
		if base == from {
			break
		}
		startAfterHint = base
		base, _ = filepath.Split(startAfterHint)
		if len(base) > 1 {
			base = strings.TrimSuffix(base, "/")
		}
	}
	return nil
}
// doWalkFallback performs a depth first walk using recursion.
// from is the directory that this iteration of the function should walk.
// startAfterHint is the child within from to start the walk after.
// It should only ever be a child of from, or the empty string.
// It returns false when iteration should stop (buffer filled or a recursive
// call signalled a stop) and true when the caller may continue.
//
// Fix: when Stat reported PathNotFoundError the previous code logged the
// event but then fell through, passing a nil FileInfo to f and calling
// fileInfo.IsDir() — a nil dereference. A `continue` now skips the vanished
// entry, matching the documented "ignore it" intent.
func doWalkFallback(
	ctx context.Context,
	driver StorageDriver,
	from string,
	startAfterHint string,
	f WalkFn,
) (bool, error) {
	children, err := driver.List(ctx, from)
	if err != nil {
		return false, err
	}
	sort.Strings(children)
	for _, child := range children {
		// The startAfterHint has been sanitised in WalkFallback and will either be
		// empty, or be suitable for an <= check for this _from_.
		if child <= startAfterHint {
			continue
		}
		fileInfo, err := driver.Stat(ctx, child)
		if err != nil {
			if !errors.As(err, &PathNotFoundError{}) {
				return false, err
			}
			// repository was removed in between listing and enumeration. Ignore it.
			log.Ctx(ctx).Info().Interface("path", child).Msg("ignoring deleted path")
			continue
		}
		err = f(fileInfo)
		switch {
		case err == nil && fileInfo.IsDir():
			if ok, err := doWalkFallback(ctx, driver, child, startAfterHint, f); err != nil || !ok {
				return ok, err
			}
		case errors.Is(err, ErrFilledBuffer):
			return false, nil // no error but stop iteration
		case err != nil:
			return false, err
		}
	}
	return true, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/driver/storagedriver.go | registry/app/driver/storagedriver.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package driver
import (
"context"
"encoding/json"
"fmt"
"io"
"regexp"
"strconv"
"strings"
)
// Version is a string representing the storage driver version, of the form
// Major.Minor.
// The registry must accept storage drivers with equal major version and greater
// minor version, but may not be compatible with older storage driver versions.
type Version string

// Major returns the major (primary) component of a version.
// A missing or non-numeric major part yields 0.
func (version Version) Major() uint {
	head, _, _ := strings.Cut(string(version), ".")
	n, _ := strconv.ParseUint(head, 10, 0)
	return uint(n)
}

// Minor returns the minor (secondary) component of a version.
// A missing or non-numeric minor part yields 0.
func (version Version) Minor() uint {
	_, tail, _ := strings.Cut(string(version), ".")
	n, _ := strconv.ParseUint(tail, 10, 0)
	return uint(n)
}
// CurrentVersion is the current storage driver Version.
// See the Version doc above for the major/minor compatibility rules.
const CurrentVersion Version = "0.1"
// WalkOptions provides options to the walk function that may adjust its behaviour.
type WalkOptions struct {
	// If StartAfterHint is set, the walk may start with the first item lexographically
	// after the hint, but it is not guaranteed and drivers may start the walk from the path.
	StartAfterHint string
}

// WithStartAfterHint returns an option setter that records startAfterHint
// on the WalkOptions it is applied to.
func WithStartAfterHint(startAfterHint string) func(*WalkOptions) {
	return func(opts *WalkOptions) {
		opts.StartAfterHint = startAfterHint
	}
}
// StorageDriver defines methods that a Storage Driver must implement for a
// filesystem-like key/value object storage. Storage Drivers are automatically
// registered via an internal registration mechanism, and generally created
// via the StorageDriverFactory interface
// (https://godoc.org/github.com/distribution/distribution/registry/storage/driver/factory).
// Please see the aforementioned factory package for example code showing how to get an instance
// of a StorageDriver.
type StorageDriver interface {
	StorageDeleter
	// Name returns the human-readable "name" of the driver, useful in error
	// messages and logging. By convention, this will just be the registration
	// name, but drivers may provide other information here.
	Name() string
	// GetContent retrieves the content stored at "path" as a []byte.
	// This should primarily be used for small objects.
	GetContent(ctx context.Context, path string) ([]byte, error)
	// PutContent stores the []byte content at a location designated by "path".
	// This should primarily be used for small objects.
	PutContent(ctx context.Context, path string, content []byte) error
	// Reader retrieves an io.ReadCloser for the content stored at "path"
	// with a given byte offset.
	// May be used to resume reading a stream by providing a nonzero offset.
	Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error)
	// Writer returns a FileWriter which will store the content written to it
	// at the location designated by "path" after the call to Commit.
	// A path may be appended to if it has not been committed, or if the
	// existing committed content is zero length.
	//
	// The behaviour of appending to paths with non-empty committed content is
	// undefined. Specific implementations may document their own behavior.
	//
	// NOTE(review): the "a" parameter appears to select append mode per the
	// discussion above — confirm against concrete driver implementations.
	Writer(ctx context.Context, path string, a bool) (FileWriter, error)
	// Stat retrieves the FileInfo for the given path, including the current
	// size in bytes and the creation time.
	Stat(ctx context.Context, path string) (FileInfo, error)
	// List returns a list of the objects that are direct descendants of the
	// given path.
	List(ctx context.Context, path string) ([]string, error)
	// Move moves an object stored at sourcePath to destPath, removing the
	// original object.
	Move(ctx context.Context, sourcePath string, destPath string) error
	// RedirectURL returns a URL which the client of the request r may use
	// to retrieve the content stored at path. Returning the empty string
	// signals that the request may not be redirected.
	RedirectURL(ctx context.Context, method string, path string, filename string) (string, error)
	// Walk traverses a filesystem defined within driver, starting
	// from the given path, calling f on each file.
	// If the returned error from the WalkFn is ErrSkipDir and fileInfo refers
	// to a directory, the directory will not be entered and Walk
	// will continue the traversal.
	// If the returned error from the WalkFn is ErrFilledBuffer, processing stops.
	Walk(ctx context.Context, path string, f WalkFn, options ...func(*WalkOptions)) error
	// CopyObject performs a server-side copy of an object from one location to another.
	// For S3-compatible storage, this uses the efficient CopyObject API call instead of download-then-upload.
	CopyObject(ctx context.Context, srcKey, destBucket, destKey string) error
}
// StorageDeleter defines methods that a Storage Driver must implement to delete objects.
// This allows using a narrower interface than StorageDriver when we only need the delete functionality, such as when
// mocking a storage driver for testing online garbage collection.
// StorageDriver embeds this interface, so every driver satisfies it.
type StorageDeleter interface {
	// Delete recursively deletes all objects stored at "path" and its subpaths.
	Delete(ctx context.Context, path string) error
}
// FileWriter provides an abstraction for an opened writable file-like object in
// the storage backend. The FileWriter must flush all content written to it on
// the call to Close, but is only required to make its content readable on a
// call to Commit.
type FileWriter interface {
	io.WriteCloser
	// Size returns the number of bytes written to this FileWriter.
	Size() int64
	// Cancel removes any written content from this FileWriter.
	Cancel(context.Context) error
	// Commit flushes all content written to this FileWriter and makes it
	// available for future calls to StorageDriver.GetContent and
	// StorageDriver.Reader.
	Commit(context.Context) error
}
// PathRegexp is the regular expression which each file path must match. A
// file path is absolute, beginning with a slash and containing a positive
// number of path components separated by slashes, where each component is
// restricted to alphanumeric characters or a period, underscore, or
// hyphen.
// Note: the bare root "/" does NOT match; callers that accept it (e.g.
// base.Stat/List/Walk) special-case it explicitly.
var PathRegexp = regexp.MustCompile(`^(/[A-Za-z0-9._-]+)+$`)
// UnsupportedMethodError may be returned in the case where a
// StorageDriver implementation does not support an optional method.
type UnsupportedMethodError struct {
	DriverName string
}

// Error implements the error interface.
func (err UnsupportedMethodError) Error() string {
	return err.DriverName + ": unsupported method"
}

// PathNotFoundError is returned when operating on a nonexistent path.
type PathNotFoundError struct {
	Path       string
	DriverName string
}

// Error implements the error interface.
func (err PathNotFoundError) Error() string {
	return err.DriverName + ": Path not found: " + err.Path
}

// InvalidPathError is returned when the provided path is malformed.
type InvalidPathError struct {
	Path       string
	DriverName string
}

// Error implements the error interface.
func (err InvalidPathError) Error() string {
	return err.DriverName + ": invalid path: " + err.Path
}

// InvalidOffsetError is returned when attempting to read or write from an
// invalid offset.
type InvalidOffsetError struct {
	Path       string
	Offset     int64
	DriverName string
}

// Error implements the error interface.
func (err InvalidOffsetError) Error() string {
	return err.DriverName + ": invalid offset: " + strconv.FormatInt(err.Offset, 10) + " for path: " + err.Path
}

// Error is a catch-all error type which captures an error string and
// the driver type on which it occurred.
type Error struct {
	DriverName string
	Detail     error
}

// Error implements the error interface.
func (err Error) Error() string {
	return fmt.Sprintf("%s: %s", err.DriverName, err.Detail)
}

// MarshalJSON serializes the error as {"driver": ..., "detail": ...}.
func (err Error) MarshalJSON() ([]byte, error) {
	wire := struct {
		DriverName string `json:"driver"`
		Detail     string `json:"detail"`
	}{
		DriverName: err.DriverName,
		Detail:     err.Detail.Error(),
	}
	return json.Marshal(wire)
}
// StorageDriverError provides the envelope for multiple errors
// for use within the storagedriver implementations.
type StorageDriverError struct {
	DriverName string
	Errs       []error
}

// Compile-time check that StorageDriverError satisfies error.
var _ error = StorageDriverError{}

// Error renders "<driver>: <nil>" for no wrapped errors, the single wrapped
// error, or a newline-separated "errors:" list for multiple.
func (e StorageDriverError) Error() string {
	switch len(e.Errs) {
	case 0:
		return fmt.Sprintf("%s: <nil>", e.DriverName)
	case 1:
		return fmt.Sprintf("%s: %s", e.DriverName, e.Errs[0].Error())
	default:
		var sb strings.Builder
		sb.WriteString("errors:\n")
		for _, err := range e.Errs {
			sb.WriteString(err.Error())
			sb.WriteString("\n")
		}
		return fmt.Sprintf("%s: %s", e.DriverName, sb.String())
	}
}

// MarshalJSON converts slice of errors into the format
// that is serializable by JSON. An empty Errs slice serializes as [],
// never null.
func (e StorageDriverError) MarshalJSON() ([]byte, error) {
	details := make([]string, 0, len(e.Errs))
	for _, err := range e.Errs {
		details = append(details, err.Error())
	}
	return json.Marshal(struct {
		DriverName string   `json:"driver"`
		Details    []string `json:"details"`
	}{
		DriverName: e.DriverName,
		Details:    details,
	})
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/driver/fileinfo.go | registry/app/driver/fileinfo.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package driver
import "time"
// FileInfo returns information about a given path. Inspired by os.FileInfo,
// it elides the base name method for a full path instead.
type FileInfo interface {
	// Path provides the full path of the target of this file info.
	Path() string
	// Size returns current length in bytes of the file. The return value can
	// be used to write to the end of the file at path. The value is
	// meaningless if IsDir returns true.
	Size() int64
	// ModTime returns the modification time for the file. For backends that
	// don't have a modification time, the creation time should be returned.
	ModTime() time.Time
	// IsDir returns true if the path is a directory.
	IsDir() bool
}
// FileInfoFields provides the exported fields for implementing FileInfo
// interface in storagedriver implementations. It should be used with
// InternalFileInfo.
type FileInfoFields struct {
	// Path provides the full path of the target of this file info.
	Path string
	// Size is current length in bytes of the file. The value of this field
	// can be used to write to the end of the file at path. The value is
	// meaningless if IsDir is set to true.
	Size int64
	// ModTime returns the modification time for the file. For backends that
	// don't have a modification time, the creation time should be returned.
	ModTime time.Time
	// IsDir returns true if the path is a directory.
	IsDir bool
}
// FileInfoInternal implements the FileInfo interface. This should only be
// used by storagedriver implementations that don't have a specialized
// FileInfo type. Each accessor simply exposes the corresponding field of
// the embedded FileInfoFields.
type FileInfoInternal struct {
	FileInfoFields
}
// Compile-time checks that both the value and pointer forms satisfy FileInfo.
var (
	_ FileInfo = FileInfoInternal{}
	_ FileInfo = &FileInfoInternal{}
)
// Path provides the full path of the target of this file info.
func (fi FileInfoInternal) Path() string {
	return fi.FileInfoFields.Path
}
// Size returns current length in bytes of the file. The return value can
// be used to write to the end of the file at path. The value is
// meaningless if IsDir returns true.
func (fi FileInfoInternal) Size() int64 {
	return fi.FileInfoFields.Size
}
// ModTime returns the modification time for the file. For backends that
// don't have a modification time, the creation time should be returned.
func (fi FileInfoInternal) ModTime() time.Time {
	return fi.FileInfoFields.ModTime
}
// IsDir returns true if the path is a directory.
func (fi FileInfoInternal) IsDir() bool {
	return fi.FileInfoFields.IsDir
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/driver/base/base.go | registry/app/driver/base/base.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package base provides a base implementation of the storage driver that can
// be used to implement common checks. The goal is to increase the amount of
// code sharing.
//
// The canonical approach to use this class is to embed in the exported driver
// struct such that calls are proxied through this implementation. First,
// declare the internal driver, as follows:
//
// type driver struct { ... internal ...}
//
// The resulting type should implement StorageDriver such that it can be the
// target of a Base struct. The exported type can then be declared as follows:
//
// type Driver struct {
// Base
// }
//
// Because Driver embeds Base, it effectively implements Base. If the driver
// needs to intercept a call, before going to base, Driver should implement
// that method. Effectively, Driver can intercept calls before coming in and
// driver implements the actual logic.
//
// To further shield the embed from other packages, it is recommended to
// employ a private embed struct:
//
// type baseEmbed struct {
// base.Base
// }
//
// Then, declare driver to embed baseEmbed, rather than Base directly:
//
// type Driver struct {
// baseEmbed
// }
//
// The type now implements StorageDriver, proxying through Base, without
// exporting an unnecessary field.
package base
import (
"context"
"errors"
"io"
"github.com/harness/gitness/registry/app/dist_temp/dcontext"
"github.com/harness/gitness/registry/app/driver"
"github.com/rs/zerolog/log"
)
// NOTE(review): this init is a no-op. It appears vestigial (upstream used
// init for driver registration) — confirm whether it can be removed.
func init() {
}
// Base provides a wrapper around a storagedriver implementation that provides
// common path and bounds checking.
// The embedded StorageDriver performs the actual storage operations; Base
// validates paths/offsets first and stamps the driver name onto returned
// errors via setDriverName.
type Base struct {
	driver.StorageDriver
}
// setDriverName formats errors received from the storage driver: known
// driver error types are re-stamped with this driver's name, and anything
// else is wrapped in a driver.Error envelope. A nil error passes through.
func (base *Base) setDriverName(e error) error {
	if e == nil {
		return nil
	}
	var (
		unsupported driver.UnsupportedMethodError
		notFound    driver.PathNotFoundError
		badPath     driver.InvalidPathError
		badOffset   driver.InvalidOffsetError
	)
	// errors.As both matches and extracts in one call per case.
	switch {
	case errors.As(e, &unsupported):
		unsupported.DriverName = base.StorageDriver.Name()
		return unsupported
	case errors.As(e, &notFound):
		notFound.DriverName = base.StorageDriver.Name()
		return notFound
	case errors.As(e, &badPath):
		badPath.DriverName = base.StorageDriver.Name()
		return badPath
	case errors.As(e, &badOffset):
		badOffset.DriverName = base.StorageDriver.Name()
		return badOffset
	default:
		return driver.Error{
			DriverName: base.StorageDriver.Name(),
			Detail:     e,
		}
	}
}
// GetContent wraps GetContent of the underlying storage driver, rejecting
// malformed paths and stamping the driver name onto any returned error.
func (base *Base) GetContent(ctx context.Context, path string) ([]byte, error) {
	ctx, done := dcontext.WithTrace(ctx)
	defer done("%s.GetContent(%q)", base.Name(), path)
	// Reject malformed paths before touching the backend.
	if !driver.PathRegexp.MatchString(path) {
		return nil, driver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
	}
	content, err := base.StorageDriver.GetContent(ctx, path)
	return content, base.setDriverName(err)
}
// PutContent wraps PutContent of the underlying storage driver, rejecting
// malformed paths and stamping the driver name onto any returned error.
func (base *Base) PutContent(ctx context.Context, path string, content []byte) error {
	ctx, done := dcontext.WithTrace(ctx)
	defer done("%s.PutContent(%q)", base.Name(), path)
	if !driver.PathRegexp.MatchString(path) {
		return driver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
	}
	return base.setDriverName(base.StorageDriver.PutContent(ctx, path, content))
}
// Reader wraps Reader of the underlying storage driver, validating the
// offset and path before delegating.
func (base *Base) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
	ctx, done := dcontext.WithTrace(ctx)
	defer done("%s.Reader(%q, %d)", base.Name(), path, offset)
	// A negative offset can never be valid, regardless of backend.
	if offset < 0 {
		return nil, driver.InvalidOffsetError{Path: path, Offset: offset, DriverName: base.StorageDriver.Name()}
	}
	if !driver.PathRegexp.MatchString(path) {
		return nil, driver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
	}
	reader, err := base.StorageDriver.Reader(ctx, path, offset)
	return reader, base.setDriverName(err)
}
// Writer wraps Writer of the underlying storage driver. The boolean "a" is
// passed through unchanged (see driver.StorageDriver.Writer for semantics).
func (base *Base) Writer(ctx context.Context, path string, a bool) (driver.FileWriter, error) {
	ctx, done := dcontext.WithTrace(ctx)
	defer done("%s.Writer(%q, %v)", base.Name(), path, a)
	if !driver.PathRegexp.MatchString(path) {
		return nil, driver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
	}
	fw, err := base.StorageDriver.Writer(ctx, path, a)
	return fw, base.setDriverName(err)
}
// Stat wraps Stat of the underlying storage driver. The root path "/" is
// accepted even though it does not match PathRegexp.
func (base *Base) Stat(ctx context.Context, path string) (driver.FileInfo, error) {
	ctx, done := dcontext.WithTrace(ctx)
	defer done("%s.Stat(%q)", base.Name(), path)
	if path != "/" && !driver.PathRegexp.MatchString(path) {
		return nil, driver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
	}
	info, err := base.StorageDriver.Stat(ctx, path)
	return info, base.setDriverName(err)
}
// List wraps List of the underlying storage driver. The root path "/" is
// accepted even though it does not match PathRegexp.
func (base *Base) List(ctx context.Context, path string) ([]string, error) {
	ctx, done := dcontext.WithTrace(ctx)
	defer done("%s.List(%q)", base.Name(), path)
	if path != "/" && !driver.PathRegexp.MatchString(path) {
		return nil, driver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
	}
	entries, err := base.StorageDriver.List(ctx, path)
	return entries, base.setDriverName(err)
}
// Move wraps Move of the underlying storage driver, validating both the
// source and destination paths first (source checked before destination).
//
// Fix: the trace format string was missing its closing parenthesis
// ("%s.Move(%q, %q" -> "%s.Move(%q, %q)").
func (base *Base) Move(ctx context.Context, sourcePath string, destPath string) error {
	ctx, done := dcontext.WithTrace(ctx)
	defer done("%s.Move(%q, %q)", base.Name(), sourcePath, destPath)
	if !driver.PathRegexp.MatchString(sourcePath) {
		return driver.InvalidPathError{Path: sourcePath, DriverName: base.StorageDriver.Name()}
	}
	if !driver.PathRegexp.MatchString(destPath) {
		return driver.InvalidPathError{Path: destPath, DriverName: base.StorageDriver.Name()}
	}
	return base.setDriverName(base.StorageDriver.Move(ctx, sourcePath, destPath))
}
// Delete wraps Delete of underlying storage driver.
//
// The path must match driver.PathRegexp; driver errors are annotated with the
// driver name.
func (base *Base) Delete(ctx context.Context, path string) error {
	ctx, done := dcontext.WithTrace(ctx)
	defer done("%s.Delete(%q)", base.Name(), path)
	if !driver.PathRegexp.MatchString(path) {
		return driver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
	}
	return base.setDriverName(base.StorageDriver.Delete(ctx, path))
}
// RedirectURL wraps RedirectURL of the underlying storage driver.
//
// Unlike the sibling wrappers it does not open a dcontext trace span; it logs
// through zerolog instead.
// NOTE(review): the generated URL is logged at Info level below. If the
// underlying driver returns pre-signed URLs, that may leak time-limited
// credentials into logs — confirm and consider demoting to Debug/redacting.
func (base *Base) RedirectURL(ctx context.Context, method string, path string, filename string) (string, error) {
	log.Ctx(ctx).Info().Msgf("RedirectURL(%q, %q)", method, path)
	if !driver.PathRegexp.MatchString(path) {
		return "", driver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
	}
	str, e := base.StorageDriver.RedirectURL(ctx, method, path, filename)
	log.Ctx(ctx).Info().Msgf("Redirect URL generated %s", str)
	return str, base.setDriverName(e)
}
// Walk wraps Walk of underlying storage driver.
//
// The root path "/" is always accepted; any other path must match
// driver.PathRegexp.
func (base *Base) Walk(ctx context.Context, path string, f driver.WalkFn, options ...func(*driver.WalkOptions)) error {
	ctx, done := dcontext.WithTrace(ctx)
	defer done("%s.Walk(%q)", base.Name(), path)
	if path != "/" && !driver.PathRegexp.MatchString(path) {
		return driver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
	}
	err := base.StorageDriver.Walk(ctx, path, f, options...)
	return base.setDriverName(err)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/driver/base/regulator_test.go | registry/app/driver/base/regulator_test.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package base
import (
"fmt"
"sync"
"testing"
"time"
)
// TestRegulatorEnterExit exercises the enter/exit pairing under full
// saturation: a first group of goroutines consumes the entire limit and
// blocks, a second group must then wait, and once the first group releases
// its slots the second group must make progress. Finally the available
// count must return to the configured limit.
func TestRegulatorEnterExit(t *testing.T) {
	const limit = 500
	r, ok := NewRegulator(nil, limit).(*regulator)
	if !ok {
		t.Fatalf("Error: r is not of type *regulator")
		return
	}
	// Repeat the whole saturation cycle to shake out ordering races.
	for range 50 {
		run := make(chan struct{})
		var firstGroupReady sync.WaitGroup
		var firstGroupDone sync.WaitGroup
		firstGroupReady.Add(limit)
		firstGroupDone.Add(limit)
		// First group: each goroutine takes one slot, then parks on `run`
		// while still holding it.
		for range limit {
			go func() {
				r.enter()
				firstGroupReady.Done()
				<-run
				r.exit()
				firstGroupDone.Done()
			}()
		}
		firstGroupReady.Wait()
		// now we exhausted all the limit, let's run a little bit more
		var secondGroupReady sync.WaitGroup
		var secondGroupDone sync.WaitGroup
		for range 50 {
			secondGroupReady.Add(1)
			secondGroupDone.Add(1)
			go func() {
				secondGroupReady.Done()
				r.enter()
				r.exit()
				secondGroupDone.Done()
			}()
		}
		secondGroupReady.Wait()
		// allow the first group to return resources
		close(run)
		done := make(chan struct{})
		go func() {
			secondGroupDone.Wait()
			close(done)
		}()
		// The second group must unblock within the timeout once slots free up.
		select {
		case <-done:
		case <-time.After(5 * time.Second):
			t.Fatal("some r.enter() are still locked")
		}
		firstGroupDone.Wait()
		// All slots must have been returned (read is race-free: every
		// goroutine has finished at this point).
		if r.available != limit {
			t.Fatalf("r.available: got %d, want %d", r.available, limit)
		}
	}
}
// TestGetLimitFromParameter checks parsing, clamping to the minimum, the
// nil-means-default behaviour, and error reporting for bad input types.
func TestGetLimitFromParameter(t *testing.T) {
	cases := []struct {
		Input    any
		Expected uint64
		Min      uint64
		Default  uint64
		Err      error
	}{
		{"foo", 0, 5, 5, fmt.Errorf("parameter must be an integer, 'foo' invalid")},
		{"50", 50, 5, 5, nil},
		{"5", 25, 25, 50, nil}, // lower than Min returns Min
		{nil, 50, 25, 50, nil}, // nil returns default
		{812, 812, 25, 50, nil},
	}
	for _, tc := range cases {
		name := fmt.Sprint(tc.Input)
		t.Run(name, func(t *testing.T) {
			got, err := GetLimitFromParameter(tc.Input, tc.Min, tc.Default)
			if err != nil && tc.Err != nil && err.Error() != tc.Err.Error() {
				t.Fatalf("GetLimitFromParameter error, expected %#v got %#v", tc.Err, err)
			}
			if got != tc.Expected {
				t.Fatalf("GetLimitFromParameter result error, expected %d got %d", tc.Expected, got)
			}
		})
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/driver/base/regulator.go | registry/app/driver/base/regulator.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package base
import (
"context"
"fmt"
"io"
"reflect"
"strconv"
"sync"
storagedriver "github.com/harness/gitness/registry/app/driver"
)
// regulator wraps a StorageDriver and bounds how many wrapped calls may run
// concurrently. The embedded sync.Cond's Locker guards `available`; waiters
// block in enter() until a slot is released by exit().
type regulator struct {
	storagedriver.StorageDriver
	*sync.Cond
	// available is the number of concurrency slots currently free; always
	// read/written while holding the Cond's lock.
	available uint64
}
// GetLimitFromParameter takes an interface type as decoded from the YAML
// configuration and returns a uint64 representing the maximum number of
// concurrent calls given a minimum limit and default.
//
// If the parameter supplied is of an invalid type this returns an error.
func GetLimitFromParameter(param any, mn, def uint64) (uint64, error) {
	result := def
	switch typed := param.(type) {
	case nil:
		// absent parameter: keep the default
	case string:
		parsed, err := strconv.ParseUint(typed, 0, 64)
		if err != nil {
			return parsed, fmt.Errorf("parameter must be an integer, '%v' invalid", param)
		}
		result = parsed
	case uint64:
		result = typed
	case int, int32, int64:
		signed := reflect.ValueOf(typed).Convert(reflect.TypeOf(param)).Int()
		// A negative value cast to uint64 would wrap around to a huge
		// limit; clamp to the minimum instead.
		if signed > 0 {
			result = uint64(signed)
		} else {
			result = mn
		}
	case uint, uint32:
		result = reflect.ValueOf(typed).Convert(reflect.TypeOf(param)).Uint()
	default:
		return 0, fmt.Errorf("invalid value '%#v'", param)
	}
	// Never return less than the configured floor.
	if result < mn {
		return mn, nil
	}
	return result, nil
}
// NewRegulator wraps the given driver and is used to regulate concurrent calls
// to the given storage driver to a maximum of the given limit. This is useful
// for storage drivers that would otherwise create an unbounded number of OS
// threads if allowed to be called unregulated.
func NewRegulator(driver storagedriver.StorageDriver, limit uint64) storagedriver.StorageDriver {
return ®ulator{
StorageDriver: driver,
Cond: sync.NewCond(&sync.Mutex{}),
available: limit,
}
}
// enter claims one concurrency slot, blocking on the condition variable
// until a slot is free.
func (r *regulator) enter() {
	r.L.Lock()
	defer r.L.Unlock()
	for r.available == 0 {
		r.Wait()
	}
	r.available--
}
// exit releases one concurrency slot and wakes a single waiter, which will
// re-check availability under the lock.
func (r *regulator) exit() {
	r.L.Lock()
	defer r.L.Unlock()
	r.Signal()
	r.available++
}
// Name returns the human-readable "name" of the driver, useful in error
// messages and logging. By convention, this will just be the registration
// name, but drivers may provide other information here.
// Even this read-only call consumes one concurrency slot for its duration.
func (r *regulator) Name() string {
	r.enter()
	defer r.exit()
	return r.StorageDriver.Name()
}
// GetContent retrieves the content stored at "path" as a []byte.
// This should primarily be used for small objects.
// The delegated call holds one concurrency slot for its duration.
func (r *regulator) GetContent(ctx context.Context, path string) ([]byte, error) {
	r.enter()
	defer r.exit()
	return r.StorageDriver.GetContent(ctx, path)
}
// PutContent stores the []byte content at a location designated by "path".
// This should primarily be used for small objects.
// The delegated call holds one concurrency slot for its duration.
func (r *regulator) PutContent(ctx context.Context, path string, content []byte) error {
	r.enter()
	defer r.exit()
	return r.StorageDriver.PutContent(ctx, path, content)
}
// Reader retrieves an io.ReadCloser for the content stored at "path"
// with a given byte offset.
// May be used to resume reading a stream by providing a nonzero offset.
// Note the slot is released when this call returns, not when the returned
// reader is closed.
func (r *regulator) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
	r.enter()
	defer r.exit()
	return r.StorageDriver.Reader(ctx, path, offset)
}
// Writer stores the contents of the provided io.ReadCloser at a
// location designated by the given path.
// May be used to resume writing a stream by providing a nonzero offset.
// The offset must be no larger than the CurrentSize for this path.
// Note the slot is released when this call returns, not when the returned
// writer is committed or closed.
func (r *regulator) Writer(ctx context.Context, path string, a bool) (storagedriver.FileWriter, error) {
	r.enter()
	defer r.exit()
	return r.StorageDriver.Writer(ctx, path, a)
}
// Stat retrieves the FileInfo for the given path, including the current
// size in bytes and the creation time.
// The delegated call holds one concurrency slot for its duration.
func (r *regulator) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
	r.enter()
	defer r.exit()
	return r.StorageDriver.Stat(ctx, path)
}
// List returns a list of the objects that are direct descendants of the
// given path.
// The delegated call holds one concurrency slot for its duration.
func (r *regulator) List(ctx context.Context, path string) ([]string, error) {
	r.enter()
	defer r.exit()
	return r.StorageDriver.List(ctx, path)
}
// Move moves an object stored at sourcePath to destPath, removing the
// original object.
// The delegated call holds one concurrency slot for its duration.
func (r *regulator) Move(ctx context.Context, sourcePath string, destPath string) error {
	r.enter()
	defer r.exit()
	return r.StorageDriver.Move(ctx, sourcePath, destPath)
}
// Delete recursively deletes all objects stored at "path" and its subpaths.
// The delegated call holds one concurrency slot for its duration.
func (r *regulator) Delete(ctx context.Context, path string) error {
	r.enter()
	defer r.exit()
	return r.StorageDriver.Delete(ctx, path)
}
// RedirectURL returns a URL which may be used to retrieve the content stored at
// the given path.
// The delegated call holds one concurrency slot for its duration.
func (r *regulator) RedirectURL(ctx context.Context, method string, path string, filename string) (string, error) {
	r.enter()
	defer r.exit()
	return r.StorageDriver.RedirectURL(ctx, method, path, filename)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/driver/gcs/gcs_test.go | registry/app/driver/gcs/gcs_test.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gcs
import (
"context"
"fmt"
"os"
"testing"
"github.com/harness/gitness/registry/app/dist_temp/dcontext"
storagedriver "github.com/harness/gitness/registry/app/driver"
"github.com/harness/gitness/registry/app/driver/testsuites"
"cloud.google.com/go/storage"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/googleapi"
"google.golang.org/api/option"
)
var (
	// gcsDriverConstructor builds a GCS-backed StorageDriver rooted at the
	// given directory; assigned in init below.
	gcsDriverConstructor func(rootDirectory string) (storagedriver.StorageDriver, error)
	// skipCheck skips the calling test/benchmark when the required GCS
	// environment variables are absent; assigned in init below.
	skipCheck func(tb testing.TB)
)
// init wires up skipCheck and gcsDriverConstructor from the
// REGISTRY_STORAGE_GCS_BUCKET and GOOGLE_APPLICATION_CREDENTIALS environment
// variables, so the suite runs only when real GCS credentials are configured.
func init() {
	bucket := os.Getenv("REGISTRY_STORAGE_GCS_BUCKET")
	credentials := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
	// Skip GCS storage driver tests if environment variable parameters are not provided
	skipCheck = func(tb testing.TB) {
		tb.Helper()
		if bucket == "" || credentials == "" {
			tb.Skip("The following environment variables must be set to enable these tests: " +
				"REGISTRY_STORAGE_GCS_BUCKET, GOOGLE_APPLICATION_CREDENTIALS")
		}
	}
	gcsDriverConstructor = func(rootDirectory string) (storagedriver.StorageDriver, error) {
		jsonKey, err := os.ReadFile(credentials)
		if err != nil {
			panic(fmt.Sprintf("Error reading JSON key : %v", err))
		}
		var ts oauth2.TokenSource
		var email string
		var privateKey []byte
		// Prefer Application Default Credentials; fall back to parsing the
		// key file as a JWT service-account config.
		ts, err = google.DefaultTokenSource(dcontext.Background(), storage.ScopeFullControl)
		if err != nil {
			// Assume that the file contents are within the environment variable since it exists
			// but does not contain a valid file path
			jwtConfig, err := google.JWTConfigFromJSON(jsonKey, storage.ScopeFullControl)
			if err != nil {
				panic(fmt.Sprintf("Error reading JWT config : %s", err))
			}
			email = jwtConfig.Email
			privateKey = jwtConfig.PrivateKey
			if len(privateKey) == 0 {
				panic("Error reading JWT config : missing private_key property")
			}
			if email == "" {
				panic("Error reading JWT config : missing client_email property")
			}
			ts = jwtConfig.TokenSource(dcontext.Background())
		}
		// NOTE(review): the storage client authenticates from jsonKey while
		// the HTTP client below uses ts — confirm both paths expect the same
		// identity.
		gcs, err := storage.NewClient(dcontext.Background(), option.WithCredentialsJSON(jsonKey))
		if err != nil {
			panic(fmt.Sprintf("Error initializing gcs client : %v", err))
		}
		parameters := driverParameters{
			bucket:         bucket,
			rootDirectory:  rootDirectory,
			email:          email,
			privateKey:     privateKey,
			client:         oauth2.NewClient(dcontext.Background(), ts),
			chunkSize:      defaultChunkSize,
			gcs:            gcs,
			maxConcurrency: 8,
		}
		return New(context.Background(), parameters)
	}
}
// newDriverConstructor returns a DriverConstructor that builds drivers
// rooted in a fresh per-test temporary directory.
func newDriverConstructor(tb testing.TB) testsuites.DriverConstructor {
	rootDirectory := tb.TempDir()
	constructor := func() (storagedriver.StorageDriver, error) {
		return gcsDriverConstructor(rootDirectory)
	}
	return constructor
}
// TestGCSDriverSuite runs the shared storage-driver conformance suite
// against a GCS-backed driver (skipped without GCS credentials).
func TestGCSDriverSuite(t *testing.T) {
	skipCheck(t)
	testsuites.Driver(t, newDriverConstructor(t), false)
}
// BenchmarkGCSDriverSuite runs the shared storage-driver benchmark suite
// against a GCS-backed driver (skipped without GCS credentials).
func BenchmarkGCSDriverSuite(b *testing.B) {
	skipCheck(b)
	testsuites.BenchDriver(b, newDriverConstructor(b))
}
// Test Committing a FileWriter without having called Write.
// The committed object must exist, be readable, and have zero length.
func TestCommitEmpty(t *testing.T) {
	skipCheck(t)
	validRoot := t.TempDir()
	driver, err := gcsDriverConstructor(validRoot)
	if err != nil {
		t.Fatalf("unexpected error creating rooted driver: %v", err)
	}
	filename := "/test" //nolint:goconst
	ctx := dcontext.Background()
	writer, err := driver.Writer(ctx, filename, false)
	// Best-effort cleanup; registered before the error check so it runs even
	// if later assertions fail.
	// nolint:errcheck
	defer driver.Delete(ctx, filename)
	if err != nil {
		t.Fatalf("driver.Writer: unexpected error: %v", err)
	}
	err = writer.Commit(context.Background())
	if err != nil {
		t.Fatalf("writer.Commit: unexpected error: %v", err)
	}
	err = writer.Close()
	if err != nil {
		t.Fatalf("writer.Close: unexpected error: %v", err)
	}
	if writer.Size() != 0 {
		t.Fatalf("writer.Size: %d != 0", writer.Size())
	}
	readContents, err := driver.GetContent(ctx, filename)
	if err != nil {
		t.Fatalf("driver.GetContent: unexpected error: %v", err)
	}
	if len(readContents) != 0 {
		t.Fatalf("len(driver.GetContent(..)): %d != 0", len(readContents))
	}
}
// Test Committing a FileWriter after having written exactly
// defaultChunksize bytes.
// This hits the boundary where the buffer is exactly one chunk: the write
// fills the buffer, and Commit must flush it and finalize the object.
func TestCommit(t *testing.T) {
	skipCheck(t)
	validRoot := t.TempDir()
	driver, err := gcsDriverConstructor(validRoot)
	if err != nil {
		t.Fatalf("unexpected error creating rooted driver: %v", err)
	}
	filename := "/test"
	ctx := dcontext.Background()
	contents := make([]byte, defaultChunkSize)
	writer, err := driver.Writer(ctx, filename, false)
	// nolint:errcheck
	defer driver.Delete(ctx, filename)
	if err != nil {
		t.Fatalf("driver.Writer: unexpected error: %v", err)
	}
	_, err = writer.Write(contents)
	if err != nil {
		t.Fatalf("writer.Write: unexpected error: %v", err)
	}
	err = writer.Commit(context.Background())
	if err != nil {
		t.Fatalf("writer.Commit: unexpected error: %v", err)
	}
	err = writer.Close()
	if err != nil {
		t.Fatalf("writer.Close: unexpected error: %v", err)
	}
	if writer.Size() != int64(len(contents)) {
		t.Fatalf("writer.Size: %d != %d", writer.Size(), len(contents))
	}
	readContents, err := driver.GetContent(ctx, filename)
	if err != nil {
		t.Fatalf("driver.GetContent: unexpected error: %v", err)
	}
	if len(readContents) != len(contents) {
		t.Fatalf("len(driver.GetContent(..)): %d != %d", len(readContents), len(contents))
	}
}
// TestRetry verifies that retry surfaces the final error unchanged for a
// retryable 503 googleapi error, a non-retryable 404, and a plain error.
func TestRetry(t *testing.T) {
	skipCheck(t)
	cases := []struct {
		gen  func() error
		want string
	}{
		{
			gen: func() error {
				return &googleapi.Error{
					Code:    503,
					Message: "google api error",
				}
			},
			want: "googleapi: Error 503: google api error",
		},
		{
			gen: func() error {
				return &googleapi.Error{
					Code:    404,
					Message: "google api error",
				}
			},
			want: "googleapi: Error 404: google api error",
		},
		{
			gen: func() error {
				return fmt.Errorf("error")
			},
			want: "error",
		},
	}
	for _, tc := range cases {
		err := retry(tc.gen)
		got := "<nil>"
		if err != nil {
			got = err.Error()
		}
		if got != tc.want {
			t.Fatalf("expected %v, observed %v\n", tc.want, got)
		}
	}
}
// TestEmptyRootList checks that listing "/" through drivers configured with
// an empty or "/" root directory yields only well-formed paths (matching
// storagedriver.PathRegexp) after content is written via a rooted driver.
func TestEmptyRootList(t *testing.T) {
	skipCheck(t)
	validRoot := t.TempDir()
	rootedDriver, err := gcsDriverConstructor(validRoot)
	if err != nil {
		t.Fatalf("unexpected error creating rooted driver: %v", err)
	}
	emptyRootDriver, err := gcsDriverConstructor("")
	if err != nil {
		t.Fatalf("unexpected error creating empty root driver: %v", err)
	}
	slashRootDriver, err := gcsDriverConstructor("/")
	if err != nil {
		t.Fatalf("unexpected error creating slash root driver: %v", err)
	}
	filename := "/test"
	contents := []byte("contents")
	ctx := dcontext.Background()
	err = rootedDriver.PutContent(ctx, filename, contents)
	if err != nil {
		t.Fatalf("unexpected error creating content: %v", err)
	}
	defer func() {
		err := rootedDriver.Delete(ctx, filename)
		if err != nil {
			t.Fatalf("failed to remove %v due to %v\n", filename, err)
		}
	}()
	keys, err := emptyRootDriver.List(ctx, "/")
	if err != nil {
		t.Fatalf("unexpected error listing empty root content: %v", err)
	}
	for _, path := range keys {
		if !storagedriver.PathRegexp.MatchString(path) {
			t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp)
		}
	}
	keys, err = slashRootDriver.List(ctx, "/")
	if err != nil {
		t.Fatalf("unexpected error listing slash root content: %v", err)
	}
	for _, path := range keys {
		if !storagedriver.PathRegexp.MatchString(path) {
			t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp)
		}
	}
}
// TestMoveDirectory checks that moving a directory returns an error.
// GCS has no real directories, so Move on a prefix is expected to fail.
func TestMoveDirectory(t *testing.T) {
	skipCheck(t)
	validRoot := t.TempDir()
	driver, err := gcsDriverConstructor(validRoot)
	if err != nil {
		t.Fatalf("unexpected error creating rooted driver: %v", err)
	}
	ctx := dcontext.Background()
	contents := []byte("contents")
	// Create a regular file.
	err = driver.PutContent(ctx, "/parent/dir/foo", contents)
	if err != nil {
		t.Fatalf("unexpected error creating content: %v", err)
	}
	defer func() {
		err := driver.Delete(ctx, "/parent")
		if err != nil {
			t.Fatalf("failed to remove /parent due to %v\n", err)
		}
	}()
	err = driver.Move(ctx, "/parent/dir", "/parent/other")
	if err == nil {
		t.Fatal("Moving directory /parent/dir /parent/other should have return a non-nil error")
	}
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/driver/gcs/gcs.go | registry/app/driver/gcs/gcs.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gcs
import (
"bytes"
"context"
"crypto/md5" //nolint:gosec
"encoding/json"
"errors"
"fmt"
"io"
"math/rand"
"net/http"
"net/url"
"os"
"reflect"
"regexp"
"strconv"
"strings"
"time"
storagedriver "github.com/harness/gitness/registry/app/driver"
"github.com/harness/gitness/registry/app/driver/base"
"github.com/harness/gitness/registry/app/driver/factory"
"cloud.google.com/go/storage"
"github.com/rs/zerolog/log"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"golang.org/x/oauth2/jwt"
"google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
)
const (
	driverName = "gcs"
	// dummyProjectID is a placeholder when the real project is unknown.
	dummyProjectID = "<unknown>"
	// minChunkSize is 256KB; GCS resumable uploads require chunk sizes that
	// are multiples of this value.
	minChunkSize = 256 * 1024
	// defaultChunkSize is the upload buffer size (16MB) used unless
	// overridden by the "chunksize" parameter.
	defaultChunkSize = 16 * 1024 * 1024
	// defaultMaxConcurrency / minConcurrency bound the regulator wrapping
	// this driver.
	defaultMaxConcurrency = 50
	minConcurrency        = 25
	// uploadSessionContentType marks in-progress upload-session objects so
	// readers can treat them as not-found.
	uploadSessionContentType = "application/x-docker-upload-session"
	blobContentType          = "application/octet-stream"
	// maxTries is the retry budget used by retry().
	maxTries = 5
)
// rangeHeader matches "bytes=<start>-<end>" values returned for resumable
// upload status.
// NOTE(review): the first group is written "([0-9])+", which captures only the
// LAST digit of the start offset; if any caller reads submatch 1 this is a bug
// ("([0-9]+)" was probably intended) — confirm against usage.
var rangeHeader = regexp.MustCompile(`^bytes=([0-9])+-([0-9]+)$`)

// Compile-time assertion that *writer implements storagedriver.FileWriter.
var _ storagedriver.FileWriter = &writer{}
// driverParameters is a struct that encapsulates all of the driver parameters after all values have been set.
type driverParameters struct {
	// bucket is the GCS bucket name.
	bucket string
	// email/privateKey identify the service account (used for signing); may
	// be empty when Application Default Credentials are in use.
	email      string
	privateKey []byte
	// client is the authenticated HTTP client used for raw API calls.
	client *http.Client
	// rootDirectory is the key prefix under which all objects are stored.
	rootDirectory string
	// chunkSize is the upload buffer size; must be a positive multiple of
	// minChunkSize.
	chunkSize int
	// gcs is the Cloud Storage API client.
	gcs *storage.Client
	// maxConcurrency limits the number of concurrent driver operations
	// to GCS, which ultimately increases reliability of many simultaneous
	// pushes by ensuring we aren't DoSing our own server with many
	// connections.
	maxConcurrency uint64
}
// init registers this driver with the storage-driver factory under
// driverName so it can be instantiated from configuration.
func init() {
	factory.Register(driverName, &gcsDriverFactory{})
}
// gcsDriverFactory implements the factory.StorageDriverFactory interface.
type gcsDriverFactory struct{}

// Create StorageDriver from parameters.
// It simply delegates to FromParameters.
func (factory *gcsDriverFactory) Create(
	ctx context.Context,
	parameters map[string]interface{},
) (storagedriver.StorageDriver, error) {
	return FromParameters(ctx, parameters)
}
// Compile-time assertion that *driver implements storagedriver.StorageDriver.
var _ storagedriver.StorageDriver = &driver{}

// driver is a storagedriver.StorageDriver implementation backed by GCS
// Objects are stored at absolute keys in the provided bucket.
type driver struct {
	// client is the authenticated HTTP client used for raw API calls
	// (e.g. resumable upload sessions).
	client *http.Client
	// bucket is the handle for all object operations.
	bucket *storage.BucketHandle
	// email/privateKey are used for URL signing; may be empty under
	// Application Default Credentials.
	email      string
	privateKey []byte
	// rootDirectory prefixes every object key (empty or ends with "/").
	rootDirectory string
	// chunkSize is the resumable-upload buffer size, a multiple of minChunkSize.
	chunkSize int
}
// Wrapper wraps `driver` with a throttler, ensuring that no more than N
// GCS actions can occur concurrently. The default limit is 50
// (defaultMaxConcurrency).
type Wrapper struct {
	baseEmbed
}

// baseEmbed embeds base.Base so Wrapper picks up the path-validation and
// tracing wrappers.
type baseEmbed struct {
	base.Base
}
// FromParameters constructs a new Driver with a given parameters map.
// Required parameters:
// - bucket.
//
// Optional parameters: rootdirectory, chunksize (a multiple of 256KB),
// keyfile or credentials (service-account auth; otherwise Application
// Default Credentials are used), useragent, and maxconcurrency.
func FromParameters(ctx context.Context, parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
	bucket, ok := parameters["bucket"]
	if !ok || fmt.Sprint(bucket) == "" {
		return nil, fmt.Errorf("no bucket parameter provided")
	}
	rootDirectory, ok := parameters["rootdirectory"]
	if !ok {
		rootDirectory = ""
	}
	chunkSize := defaultChunkSize
	chunkSizeParam, ok := parameters["chunksize"]
	if ok {
		switch v := chunkSizeParam.(type) {
		case string:
			vv, err := strconv.Atoi(v)
			if err != nil {
				return nil, fmt.Errorf("chunksize must be an integer, %v invalid", chunkSizeParam)
			}
			chunkSize = vv
		case int, uint, int32, uint32, uint64, int64:
			chunkSize = int(reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int())
		default:
			// Fix: corrected typo in the error message ("valud" -> "value").
			return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam)
		}
		if chunkSize < minChunkSize {
			return nil, fmt.Errorf("chunksize %#v must be larger than or equal to %d", chunkSize, minChunkSize)
		}
		if chunkSize%minChunkSize != 0 {
			return nil, fmt.Errorf("chunksize should be a multiple of %d", minChunkSize)
		}
	}
	var ts oauth2.TokenSource
	jwtConf := new(jwt.Config)
	var err error
	var gcs *storage.Client
	var options []option.ClientOption
	// Credential resolution order: explicit keyfile, inline credentials map,
	// then Application Default Credentials.
	//nolint:nestif
	if keyfile, ok := parameters["keyfile"]; ok {
		jsonKey, err := os.ReadFile(fmt.Sprint(keyfile))
		if err != nil {
			return nil, err
		}
		jwtConf, err = google.JWTConfigFromJSON(jsonKey, storage.ScopeFullControl)
		if err != nil {
			return nil, err
		}
		ts = jwtConf.TokenSource(ctx)
		options = append(options, option.WithCredentialsFile(fmt.Sprint(keyfile)))
	} else if credentials, ok := parameters["credentials"]; ok {
		credentialMap, ok := credentials.(map[interface{}]interface{})
		if !ok {
			return nil, fmt.Errorf("the credentials were not specified in the correct format")
		}
		// Re-key the YAML map with string keys so it can be marshalled to JSON.
		stringMap := map[string]interface{}{}
		for k, v := range credentialMap {
			key, ok := k.(string)
			if !ok {
				return nil, fmt.Errorf("one of the credential keys was not a string: %s", fmt.Sprint(k))
			}
			stringMap[key] = v
		}
		data, err := json.Marshal(stringMap)
		if err != nil {
			return nil, fmt.Errorf("failed to marshal gcs credentials to json")
		}
		jwtConf, err = google.JWTConfigFromJSON(data, storage.ScopeFullControl)
		if err != nil {
			return nil, err
		}
		ts = jwtConf.TokenSource(ctx)
		options = append(options, option.WithCredentialsJSON(data))
	} else {
		var err error
		// DefaultTokenSource is a convenience method. It first calls FindDefaultCredentials,
		// then uses the credentials to construct an http.Client or an oauth2.TokenSource.
		// https://pkg.go.dev/golang.org/x/oauth2/google#hdr-Credentials
		ts, err = google.DefaultTokenSource(ctx, storage.ScopeFullControl)
		if err != nil {
			return nil, err
		}
	}
	if userAgent, ok := parameters["useragent"]; ok {
		if ua, ok := userAgent.(string); ok && ua != "" {
			options = append(options, option.WithUserAgent(ua))
		}
	}
	gcs, err = storage.NewClient(ctx, options...)
	if err != nil {
		return nil, err
	}
	maxConcurrency, err := base.GetLimitFromParameter(parameters["maxconcurrency"], minConcurrency,
		defaultMaxConcurrency)
	if err != nil {
		return nil, fmt.Errorf("maxconcurrency config error: %w", err)
	}
	params := driverParameters{
		bucket:         fmt.Sprint(bucket),
		rootDirectory:  fmt.Sprint(rootDirectory),
		email:          jwtConf.Email,
		privateKey:     jwtConf.PrivateKey,
		client:         oauth2.NewClient(ctx, ts),
		chunkSize:      chunkSize,
		maxConcurrency: maxConcurrency,
		gcs:            gcs,
	}
	return New(ctx, params)
}
// New constructs a new driver from fully-resolved parameters, validating the
// chunk size and normalizing the root directory to "prefix/" (or "").
func New(_ context.Context, params driverParameters) (storagedriver.StorageDriver, error) {
	root := strings.Trim(params.rootDirectory, "/")
	if root != "" {
		root += "/"
	}
	if params.chunkSize <= 0 || params.chunkSize%minChunkSize != 0 {
		return nil, fmt.Errorf("invalid chunksize: %d is not a positive multiple of %d", params.chunkSize, minChunkSize)
	}
	d := &driver{
		bucket:        params.gcs.Bucket(params.bucket),
		rootDirectory: root,
		email:         params.email,
		privateKey:    params.privateKey,
		client:        params.client,
		chunkSize:     params.chunkSize,
	}
	wrapped := &Wrapper{
		baseEmbed: baseEmbed{
			Base: base.Base{
				StorageDriver: base.NewRegulator(d, params.maxConcurrency),
			},
		},
	}
	return wrapped, nil
}
// Implement the storagedriver.StorageDriver interface

// Name returns the registration name of this driver ("gcs").
func (d *driver) Name() string {
	return driverName
}
// GetContent retrieves the content stored at "path" as a []byte.
// This should primarily be used for small objects. A missing object is
// reported as storagedriver.PathNotFoundError.
func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
	reader, err := d.bucket.Object(d.pathToKey(path)).NewReader(ctx)
	if errors.Is(err, storage.ErrObjectNotExist) {
		return nil, storagedriver.PathNotFoundError{Path: path}
	}
	if err != nil {
		return nil, err
	}
	defer reader.Close()
	return io.ReadAll(reader)
}
// PutContent stores the []byte content at a location designated by "path".
// This should primarily be used for small objects. The derived context is
// cancelled on return to release any in-flight upload resources.
func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	obj := d.bucket.Object(d.pathToKey(path))
	return d.putContent(ctx, obj, contents, blobContentType, nil)
}
// Reader retrieves an io.ReadCloser for the content stored at "path"
// with a given byte offset.
// May be used to resume reading a stream by providing a nonzero offset.
//
// Error mapping: a missing object (or HTTP 404) becomes PathNotFoundError; an
// out-of-range offset becomes InvalidOffsetError, except when the offset
// equals the object size, which yields an empty reader. Objects still marked
// as in-progress upload sessions are hidden and reported as not found.
func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
	obj := d.bucket.Object(d.pathToKey(path))
	// NOTE(milosgajdos): If length is negative, the object is read until the end
	// See: https://pkg.go.dev/cloud.google.com/go/storage#ObjectHandle.NewRangeReader
	r, err := obj.NewRangeReader(ctx, offset, -1)
	if err != nil { //nolint:nestif
		if errors.Is(err, storage.ErrObjectNotExist) {
			return nil, storagedriver.PathNotFoundError{Path: path}
		}
		var status *googleapi.Error
		if errors.As(err, &status) {
			switch status.Code {
			case http.StatusNotFound:
				return nil, storagedriver.PathNotFoundError{Path: path}
			case http.StatusRequestedRangeNotSatisfiable:
				// Distinguish "read exactly at EOF" (legal, empty reader)
				// from a genuinely invalid offset.
				attrs, err := obj.Attrs(ctx)
				if err != nil {
					return nil, err
				}
				if offset == attrs.Size {
					return io.NopCloser(bytes.NewReader([]byte{})), nil
				}
				return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset}
			}
		}
		return nil, err
	}
	// Hide partially-uploaded session objects from readers.
	if r.Attrs.ContentType == uploadSessionContentType {
		r.Close()
		return nil, storagedriver.PathNotFoundError{Path: path}
	}
	return r, nil
}
// Writer returns a FileWriter which will store the content written to it
// at the location designated by "path" after the call to Commit.
// When appendMode is set, existing upload state is loaded via init.
func (d *driver) Writer(ctx context.Context, path string, appendMode bool) (storagedriver.FileWriter, error) {
	fw := &writer{
		ctx:    ctx,
		driver: d,
		object: d.bucket.Object(d.pathToKey(path)),
		buffer: make([]byte, d.chunkSize),
	}
	if !appendMode {
		return fw, nil
	}
	if err := fw.init(ctx); err != nil {
		return nil, err
	}
	return fw, nil
}
// writer buffers writes in chunkSize-sized pieces and streams them to a GCS
// resumable upload session; leftover bytes are persisted as an
// upload-session object so the write can be resumed later.
type writer struct {
	// ctx is the context the writer was created with; used by Write/Close.
	ctx    context.Context
	object *storage.ObjectHandle
	driver *driver
	// size is the total committed size; offset is how many bytes have been
	// pushed to the upload session so far.
	size   int64
	offset int64
	// closed/cancelled/committed track the writer's lifecycle state.
	closed    bool
	cancelled bool
	committed bool
	// sessionURI is the resumable-upload session URL ("" until started).
	sessionURI string
	// buffer holds not-yet-uploaded bytes; buffSize is how much of it is used.
	buffer   []byte
	buffSize int
}
// Cancel removes any written content from this FileWriter by deleting the
// backing object; a nonexistent object is not an error.
func (w *writer) Cancel(ctx context.Context) error {
	w.closed = true
	w.cancelled = true
	err := w.object.Delete(ctx)
	if errors.Is(err, storage.ErrObjectNotExist) {
		return nil
	}
	return err
}
// Close flushes full chunks to the upload session and then persists any
// remaining buffered bytes — together with the session URI and current
// offset as object metadata — so the upload can be resumed later via init.
// Close is idempotent.
func (w *writer) Close() error {
	if w.closed {
		return nil
	}
	w.closed = true
	err := w.writeChunk(w.ctx)
	if err != nil {
		return err
	}
	// Copy the remaining bytes from the buffer to the upload session
	// Normally buffSize will be smaller than minChunkSize. However, in the
	// unlikely event that the upload session failed to start, this number could be higher.
	// In this case we can safely clip the remaining bytes to the minChunkSize
	if w.buffSize > minChunkSize {
		w.buffSize = minChunkSize
	}
	// commit the writes by updating the upload session
	metadata := map[string]string{
		"Session-URI": w.sessionURI,
		"Offset":      strconv.FormatInt(w.offset, 10),
	}
	err = retry(func() error {
		err := w.driver.putContent(w.ctx, w.object, w.buffer[0:w.buffSize], uploadSessionContentType, metadata)
		if err != nil {
			return err
		}
		w.size = w.offset + int64(w.buffSize)
		w.buffSize = 0
		return nil
	})
	return err
}
// putContent uploads content as a single object with the given content type
// and metadata, guarding the upload with an MD5 checksum so truncated or
// corrupted data is rejected by GCS.
func (d *driver) putContent(
	ctx context.Context,
	obj *storage.ObjectHandle,
	content []byte,
	contentType string,
	metadata map[string]string,
) error {
	wc := obj.NewWriter(ctx)
	wc.Metadata = metadata
	wc.ContentType = contentType
	wc.ChunkSize = d.chunkSize
	// NOTE(milosgajdos): Apparently it's posisble to to upload 0-byte content to GCS.
	// Setting MD5 on the Writer helps to prevent presisting that data.
	// If set, the uploaded data is rejected if its MD5 hash does not match this field.
	// See: https://pkg.go.dev/cloud.google.com/go/storage#ObjectAttrs
	//
	// Fix: Writer attributes (including MD5) must be set BEFORE the first
	// Write on the storage.Writer — they are ignored once the upload has
	// started — so compute and assign the checksum before streaming content.
	h := md5.New() //nolint:gosec
	h.Write(content)
	wc.MD5 = h.Sum(nil)
	if _, err := bytes.NewReader(content).WriteTo(wc); err != nil {
		return err
	}
	return wc.Close()
}
// Commit flushes all content written to this FileWriter and makes it
// available for future calls to StorageDriver.GetContent and
// StorageDriver.Reader.
//
// If no resumable session was ever started, the buffered bytes are uploaded
// as a plain object; otherwise the remaining buffer is pushed through the
// session with the final total size so GCS finalizes the object.
func (w *writer) Commit(ctx context.Context) error {
	if w.closed {
		return fmt.Errorf("already closed")
	}
	w.closed = true
	// no session started yet just perform a simple upload
	if w.sessionURI == "" {
		err := retry(func() error {
			err := w.driver.putContent(ctx, w.object, w.buffer[0:w.buffSize], blobContentType, nil)
			if err != nil {
				return err
			}
			w.committed = true
			w.size = w.offset + int64(w.buffSize)
			w.buffSize = 0
			return nil
		})
		return err
	}
	// Finalize the resumable session: `size` is the total object size, which
	// tells GCS this is the last chunk.
	size := w.offset + int64(w.buffSize)
	var written int
	// loop must be performed at least once to ensure the file is committed even when
	// the buffer is empty
	for {
		n, err := w.putChunk(ctx, w.sessionURI, w.buffer[written:w.buffSize], w.offset, size)
		written += int(n)
		w.offset += n
		w.size = w.offset
		if err != nil {
			// Keep the unsent tail at the start of the buffer so a retry of
			// Commit could resume from it.
			w.buffSize = copy(w.buffer, w.buffer[written:w.buffSize])
			return err
		}
		if written == w.buffSize {
			break
		}
	}
	w.committed = true
	w.buffSize = 0
	return nil
}
// writeChunk uploads as much buffered data as possible, in multiples of
// minChunkSize, to the resumable session — starting the session lazily
// on first use. Any remainder that does not fill a whole multiple stays
// in the buffer.
func (w *writer) writeChunk(ctx context.Context) error {
	var err error
	// chunks can be uploaded only in multiples of minChunkSize
	// chunkSize is a multiple of minChunkSize less than or equal to buffSize
	chunkSize := w.buffSize - (w.buffSize % minChunkSize)
	if chunkSize == 0 {
		return nil
	}
	// if there is no sessionURI yet, obtain one by starting the session
	if w.sessionURI == "" {
		w.sessionURI, err = w.newSession()
	}
	if err != nil {
		return err
	}
	// totalSize of -1 marks the upload as still in progress (total unknown).
	n, err := w.putChunk(ctx, w.sessionURI, w.buffer[0:chunkSize], w.offset, -1)
	w.offset += n
	if w.offset > w.size {
		w.size = w.offset
	}
	// shift the remaining bytes to the start of the buffer
	w.buffSize = copy(w.buffer, w.buffer[int(n):w.buffSize])
	return err
}
// Write buffers p, flushing chunk-aligned data to the upload session
// whenever the buffer fills. It returns the number of bytes consumed
// from p, which may be short if a chunk upload fails mid-way.
func (w *writer) Write(p []byte) (int, error) {
	if w.closed {
		return 0, fmt.Errorf("already closed")
	} else if w.cancelled {
		return 0, fmt.Errorf("already cancelled")
	}
	var (
		written int
		err     error
	)
	for written < len(p) {
		n := copy(w.buffer[w.buffSize:], p[written:])
		w.buffSize += n
		if w.buffSize == cap(w.buffer) {
			// Buffer is full: push data to GCS before accepting more input.
			err = w.writeChunk(w.ctx)
			if err != nil {
				break
			}
		}
		written += n
	}
	w.size = w.offset + int64(w.buffSize)
	return written, err
}
// Size returns the number of bytes written to this FileWriter
// (the committed offset plus whatever is still buffered).
func (w *writer) Size() int64 {
	return w.size
}
// init resumes a previously-interrupted upload: it validates the stored
// object, restores the committed offset and session URI from the
// object's metadata (written by Close), and refills the in-memory
// buffer with the partial content stored alongside them.
func (w *writer) init(ctx context.Context) error {
	attrs, err := w.object.Attrs(ctx)
	if err != nil {
		return err
	}
	// NOTE(milosgajdos): when PUSH abruptly finishes by
	// calling a single commit and then closes the stream
	// attrs.ContentType ends up being set to application/octet-stream
	// We must handle this case so the upload can resume.
	if attrs.ContentType != uploadSessionContentType &&
		attrs.ContentType != blobContentType {
		return storagedriver.PathNotFoundError{Path: w.object.ObjectName()}
	}
	offset := int64(0)
	// NOTE(milosgajdos): if a client creates an empty blob, then
	// closes the stream and then attempts to append to it, the offset
	// will be empty, in which case strconv.ParseInt will return error
	// See: https://pkg.go.dev/strconv#ParseInt
	if attrs.Metadata["Offset"] != "" {
		offset, err = strconv.ParseInt(attrs.Metadata["Offset"], 10, 64)
		if err != nil {
			return err
		}
	}
	// Re-read the stored partial content back into the buffer (up to
	// the buffer's capacity).
	r, err := w.object.NewReader(ctx)
	if err != nil {
		return err
	}
	defer r.Close()
	for err == nil && w.buffSize < len(w.buffer) {
		var n int
		n, err = r.Read(w.buffer[w.buffSize:])
		w.buffSize += n
	}
	if err != nil && !errors.Is(err, io.EOF) {
		return err
	}
	// NOTE(milosgajdos): if a client closes an existing session and then attempts
	// to append to an existing blob, the session will be empty; recreate it
	if w.sessionURI = attrs.Metadata["Session-URI"]; w.sessionURI == "" {
		w.sessionURI, err = w.newSession()
		if err != nil {
			return err
		}
	}
	w.offset = offset
	w.size = offset + int64(w.buffSize)
	return nil
}
// request is a retryable unit of work executed by retry.
type request func() error

// retry runs req up to maxTries times with exponential backoff plus up
// to one second of random jitter, retrying only on throttling (429) or
// server-side (5xx) googleapi errors; any other failure is returned
// immediately.
func retry(req request) error {
	backoff := time.Second
	var err error
	for i := 0; i < maxTries; i++ {
		err = req()
		if err == nil {
			return nil
		}
		// Use errors.As so wrapped *googleapi.Error values are also
		// recognized as retryable (a plain type assertion misses them).
		var gerr *googleapi.Error
		if !errors.As(err, &gerr) || (gerr.Code != http.StatusTooManyRequests && gerr.Code < http.StatusInternalServerError) {
			return err
		}
		// Sleep (backoff - 1s) plus 0-1000ms of jitter; the backoff
		// doubles each attempt, capped after the 5th iteration.
		time.Sleep(backoff - time.Second + (time.Duration(rand.Int31n(1000)) * time.Millisecond)) //nolint:gosec
		if i <= 4 {
			backoff *= 2
		}
	}
	return err
}
// Stat retrieves the FileInfo for the given path, including the current
// size in bytes and the creation time.
func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
	var fi storagedriver.FileInfoFields
	// try to get as file
	obj, err := d.bucket.Object(d.pathToKey(path)).Attrs(ctx)
	if err == nil {
		// In-progress upload sessions are internal state and must not
		// surface as regular files.
		if obj.ContentType == uploadSessionContentType {
			return nil, storagedriver.PathNotFoundError{Path: path}
		}
		fi = storagedriver.FileInfoFields{
			Path:    path,
			Size:    obj.Size,
			ModTime: obj.Updated,
			IsDir:   false,
		}
		return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil
	}
	// try to get as folder
	dirpath := d.pathToDirKey(path)
	query := &storage.Query{
		Prefix: dirpath,
	}
	obj, err = d.bucket.Objects(ctx, query).Next()
	if err != nil {
		if errors.Is(err, iterator.Done) {
			// No object carries this prefix: the path does not exist.
			return nil, storagedriver.PathNotFoundError{Path: path}
		}
		return nil, err
	}
	fi = storagedriver.FileInfoFields{
		Path:  path,
		IsDir: true,
	}
	if obj.Name == dirpath {
		// A placeholder object exists for the directory itself; surface
		// its size and modification time.
		fi.Size = obj.Size
		fi.ModTime = obj.Updated
	}
	return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil
}
// List returns a list of the objects that are direct descendants of the
// given path.
func (d *driver) List(ctx context.Context, path string) ([]string, error) {
	query := &storage.Query{
		Delimiter: "/",
		Prefix:    d.pathToDirKey(path),
	}
	objects := d.bucket.Objects(ctx, query)
	list := make([]string, 0, 64)
	for {
		object, err := objects.Next()
		if err != nil {
			if errors.Is(err, iterator.Done) {
				break
			}
			return nil, err
		}
		// GCS does not guarantee strong consistency between
		// DELETE and LIST operations. Check that the object is not deleted,
		// and filter out any objects with a non-zero time-deleted
		if object.Deleted.IsZero() && object.ContentType != uploadSessionContentType && object.Name != "" {
			list = append(list, d.keyToPath(object.Name))
		}
		// Entries with an empty Name but a Prefix are the "directories"
		// produced by the "/" delimiter.
		if object.Name == "" && object.Prefix != "" {
			subpath := d.keyToPath(object.Prefix)
			list = append(list, subpath)
		}
	}
	if path != "/" && len(list) == 0 {
		// Treat empty response as missing directory, since we don't actually
		// have directories in Google Cloud Storage.
		return nil, storagedriver.PathNotFoundError{Path: path}
	}
	return list, nil
}
// Move moves an object stored at sourcePath to destPath, removing the
// original object. The operation is copy-then-delete; a failed delete
// is logged but not treated as an error (see below).
func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error {
	srcKey, dstKey := d.pathToKey(sourcePath), d.pathToKey(destPath)
	src := d.bucket.Object(srcKey)
	_, err := d.bucket.Object(dstKey).CopierFrom(src).Run(ctx)
	if err != nil {
		var status *googleapi.Error
		if errors.As(err, &status) {
			if status.Code == http.StatusNotFound {
				return storagedriver.PathNotFoundError{Path: srcKey}
			}
		}
		return fmt.Errorf("move %q to %q: %w", srcKey, dstKey, err)
	}
	err = src.Delete(ctx)
	// if deleting the file fails, log the error, but do not fail; the file was successfully copied,
	// and the original should eventually be cleaned when purging the uploads folder.
	if err != nil {
		log.Info().Ctx(ctx).Msgf("error deleting %v: %v", sourcePath, err)
	}
	return nil
}
// listAll recursively lists all names of objects stored at "prefix" and its subpaths.
func (d *driver) listAll(ctx context.Context, prefix string) ([]string, error) {
	it := d.bucket.Objects(ctx, &storage.Query{
		Prefix:   prefix,
		Versions: false,
	})
	names := make([]string, 0, 64)
	for {
		obj, err := it.Next()
		if errors.Is(err, iterator.Done) {
			return names, nil
		}
		if err != nil {
			return nil, err
		}
		// DELETE and LIST are only eventually consistent on GCS, so the
		// iterator may yield already-deleted entries; skip anything that
		// carries a non-zero deletion timestamp.
		if obj.Deleted.IsZero() {
			names = append(names, obj.Name)
		}
	}
}
// Delete recursively deletes all objects stored at "path" and its subpaths.
// If no objects exist under the directory prefix, the path is treated as
// a single object instead.
func (d *driver) Delete(ctx context.Context, path string) error {
	prefix := d.pathToDirKey(path)
	keys, err := d.listAll(ctx, prefix)
	if err != nil {
		return err
	}
	if len(keys) > 0 {
		// NOTE(milosgajdos): d.listAll calls (BucketHandle).Objects
		// See: https://pkg.go.dev/cloud.google.com/go/storage#BucketHandle.Objects
		// docs: Objects will be iterated over lexicographically by name.
		// This means we don't have to reverse order the slice; we can
		// range over the keys slice in reverse order
		for i := len(keys) - 1; i >= 0; i-- {
			key := keys[i]
			err := d.bucket.Object(key).Delete(ctx)
			// GCS only guarantees eventual consistency, so listAll might return
			// paths that no longer exist. If this happens, just ignore any not
			// found error. Use errors.As (as Move does) so wrapped
			// *googleapi.Error values are recognized too.
			var status *googleapi.Error
			if errors.As(err, &status) && status.Code == http.StatusNotFound {
				err = nil
			}
			if err != nil {
				return err
			}
		}
		return nil
	}
	// No children under the prefix: delete the path as a single object.
	err = d.bucket.Object(d.pathToKey(path)).Delete(ctx)
	if errors.Is(err, storage.ErrObjectNotExist) {
		return storagedriver.PathNotFoundError{Path: path}
	}
	return err
}
// RedirectURL returns a URL which may be used to retrieve the content stored at
// the given path, possibly using the given options.
// Only GET and HEAD are signable; any other method yields an empty URL.
func (d *driver) RedirectURL(_ context.Context, method string, path string, filename string) (string, error) {
	switch method {
	case http.MethodGet, http.MethodHead:
		// signable; fall through
	default:
		return "", nil
	}
	opts := &storage.SignedURLOptions{
		GoogleAccessID: d.email,
		PrivateKey:     d.privateKey,
		Method:         method,
		Expires:        time.Now().Add(20 * time.Minute),
	}
	if filename != "" {
		// Force a download filename on the signed URL.
		opts.QueryParameters = url.Values{
			"response-content-disposition": {fmt.Sprintf("attachment; filename=\"%s\"", filename)},
		}
	}
	return d.bucket.SignedURL(d.pathToKey(path), opts)
}
// Walk traverses a filesystem defined within driver, starting
// from the given path, calling f on each file.
// Traversal is delegated to storagedriver.WalkFallback.
func (d *driver) Walk(
	ctx context.Context,
	path string,
	f storagedriver.WalkFn,
	options ...func(*storagedriver.WalkOptions),
) error {
	return storagedriver.WalkFallback(ctx, d, path, f, options...)
}
// newSession starts a resumable upload session for the writer's object
// via the JSON upload API and returns the session URI that GCS hands
// back in the Location response header.
func (w *writer) newSession() (uri string, err error) {
	u := &url.URL{
		Scheme:   "https",
		Host:     "www.googleapis.com",
		Path:     fmt.Sprintf("/upload/storage/v1/b/%v/o", w.object.BucketName()),
		RawQuery: fmt.Sprintf("uploadType=resumable&name=%v", w.object.ObjectName()),
	}
	req, err := http.NewRequestWithContext(w.ctx, http.MethodPost, u.String(), nil)
	if err != nil {
		return "", err
	}
	req.Header.Set("X-Upload-Content-Type", blobContentType)
	req.Header.Set("Content-Length", "0")
	// The request has no body, so reusing it across retry attempts is safe.
	err = retry(func() error {
		resp, err := w.driver.client.Do(req)
		if err != nil {
			return err
		}
		defer resp.Body.Close()
		err = googleapi.CheckMediaResponse(resp)
		if err != nil {
			return err
		}
		uri = resp.Header.Get("Location")
		return nil
	})
	return uri, err
}
// putChunk uploads one chunk of the resumable session starting at byte
// offset "from". A totalSize >= 0 finalizes the upload ("bytes a-b/N");
// totalSize < 0 leaves it open ("bytes a-b/*"), in which case GCS
// answers 308 and reports the committed range in the Range header.
// It returns the number of bytes the server acknowledged.
func (w *writer) putChunk(ctx context.Context, sessionURI string, chunk []byte, from int64, totalSize int64) (
	int64,
	error,
) {
	length := int64(len(chunk))
	to := from + length - 1
	size := "*"
	if totalSize >= 0 {
		size = strconv.FormatInt(totalSize, 10)
	}
	bytesPut := int64(0)
	err := retry(func() error {
		// Build the request inside the retry closure: the body reader is
		// consumed by each attempt, so reusing one request would resend
		// an empty body on retries.
		req, err := http.NewRequestWithContext(ctx, http.MethodPut, sessionURI, bytes.NewReader(chunk))
		if err != nil {
			return err
		}
		req.Header.Set("Content-Type", blobContentType)
		if from == to+1 {
			// Empty chunk: finalize/query the session without payload bytes.
			req.Header.Set("Content-Range", fmt.Sprintf("bytes */%s", size))
		} else {
			req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%s", from, to, size))
		}
		req.Header.Set("Content-Length", strconv.FormatInt(length, 10))
		resp, err := w.driver.client.Do(req)
		if err != nil {
			return err
		}
		defer resp.Body.Close()
		if totalSize < 0 && resp.StatusCode == http.StatusPermanentRedirect {
			// 308: the session is still open; parse the committed range.
			groups := rangeHeader.FindStringSubmatch(resp.Header.Get("Range"))
			if len(groups) < 3 {
				// Guard against a missing/malformed Range header, which
				// would otherwise panic on the index below.
				return fmt.Errorf("unexpected Range header %q in resumable upload response", resp.Header.Get("Range"))
			}
			end, err := strconv.ParseInt(groups[2], 10, 64)
			if err != nil {
				return err
			}
			bytesPut = end - from + 1
			return nil
		}
		err = googleapi.CheckMediaResponse(resp)
		if err != nil {
			return err
		}
		bytesPut = to - from + 1
		return nil
	})
	return bytesPut, err
}
// pathToKey maps a storage-API path to the object key in the bucket,
// prefixing the configured root directory and trimming the trailing slash.
func (d *driver) pathToKey(path string) string {
	return strings.TrimSpace(strings.TrimRight(d.rootDirectory+strings.TrimLeft(path, "/"), "/"))
}

// pathToDirKey returns the key prefix used for "directory" operations
// (the object key with a trailing slash).
func (d *driver) pathToDirKey(path string) string {
	return d.pathToKey(path) + "/"
}

// keyToPath is the inverse of pathToKey: it strips the root directory
// prefix and normalizes the result to a single-leading-slash path.
func (d *driver) keyToPath(key string) string {
	return "/" + strings.Trim(strings.TrimPrefix(key, d.rootDirectory), "/")
}

// CopyObject is not supported by the GCS driver yet.
func (d *driver) CopyObject(_ context.Context, _, _, _ string) error {
	return fmt.Errorf("not yet implemented")
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/driver/gcs/doc.go | registry/app/driver/gcs/doc.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gcs
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/driver/filesystem/driver.go | registry/app/driver/filesystem/driver.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package filesystem
import (
"bufio"
"bytes"
"context"
"errors"
"fmt"
"io"
"io/fs"
"os"
"path"
"time"
storagedriver "github.com/harness/gitness/registry/app/driver"
"github.com/harness/gitness/registry/app/driver/base"
"github.com/harness/gitness/registry/app/driver/factory"
"github.com/rs/zerolog/log"
)
const (
	// driverName is the key under which this driver registers itself.
	driverName = "filesystem"
	// defaultRootDirectory is used when no "rootdirectory" parameter is given.
	defaultRootDirectory = "/var/lib/registry"
	// defaultMaxThreads is used when no "maxthreads" parameter is given.
	defaultMaxThreads = uint64(100)

	// minThreads is the minimum value for the maxthreads configuration
	// parameter. If the driver's parameters are less than this we set
	// the parameters to minThreads.
	minThreads = uint64(25)
)
// GetDriverName returns the registered name of the filesystem driver.
func GetDriverName() string {
	return driverName
}
// DriverParameters represents all configuration options available for the
// filesystem driver.
type DriverParameters struct {
	// RootDirectory is the directory under which all stored files live.
	RootDirectory string
	// MaxThreads is passed to base.NewRegulator (see New) to bound concurrency.
	MaxThreads uint64
}
// TODO: figure-out why init is not called automatically
// Register logs that the filesystem driver is being registered; the
// actual factory registration happens in init below.
func Register(ctx context.Context) {
	log.Ctx(ctx).Info().Msgf("registering filesystem driver")
}

func init() {
	// Make the driver constructible via the shared driver factory.
	factory.Register(driverName, &filesystemDriverFactory{})
}
// filesystemDriverFactory implements the factory.StorageDriverFactory interface.
type filesystemDriverFactory struct{}

// Create builds a filesystem Driver from the given parameter map.
// The context is unused.
func (factory *filesystemDriverFactory) Create(
	_ context.Context,
	parameters map[string]any,
) (storagedriver.StorageDriver, error) {
	return FromParameters(parameters)
}
// driver is the unregulated filesystem implementation; all paths are
// resolved relative to rootDirectory.
type driver struct {
	rootDirectory string
}

// CopyObject is not implemented for the filesystem driver.
func (d *driver) CopyObject(_ context.Context, _, _, _ string) error {
	//TODO implement me
	panic("implement me")
}

// BatchCopyObjects is not implemented for the filesystem driver.
func (d *driver) BatchCopyObjects(_ context.Context, _ string, _ []string, _ int) error {
	//TODO implement me
	panic("implement me")
}
// baseEmbed wraps base.Base so Driver can embed it anonymously.
type baseEmbed struct {
	base.Base
}

// Driver is a storagedriver.StorageDriver implementation backed by a local
// filesystem. All provided paths will be subpaths of the RootDirectory.
type Driver struct {
	baseEmbed
}
// FromParameters constructs a new Driver with a given parameters map
// Optional Parameters:
// - rootdirectory
// - maxthreads.
func FromParameters(parameters map[string]any) (*Driver, error) {
	params, err := fromParametersImpl(parameters)
	if err != nil {
		return nil, err
	}
	if params == nil {
		return nil, nil
	}
	return New(*params), nil
}
// fromParametersImpl parses the raw parameter map into DriverParameters,
// falling back to the package defaults for anything unset.
// It returns an error when the "maxthreads" value cannot be interpreted.
func fromParametersImpl(parameters map[string]any) (*DriverParameters, error) {
	var (
		err           error
		maxThreads    = defaultMaxThreads
		rootDirectory = defaultRootDirectory
	)
	if parameters != nil {
		if rootDir, ok := parameters["rootdirectory"]; ok {
			rootDirectory = fmt.Sprint(rootDir)
		}
		maxThreads, err = base.GetLimitFromParameter(parameters["maxthreads"], minThreads, defaultMaxThreads)
		if err != nil {
			// Wrap with %w (instead of flattening via %s + Error()) so
			// callers can inspect the underlying error with errors.Is/As.
			return nil, fmt.Errorf("maxthreads config error: %w", err)
		}
	}
	params := &DriverParameters{
		RootDirectory: rootDirectory,
		MaxThreads:    maxThreads,
	}
	return params, nil
}
// New constructs a new Driver with a given rootDirectory.
// The raw filesystem driver is wrapped in a regulator that bounds the
// number of concurrent operations to params.MaxThreads.
func New(params DriverParameters) *Driver {
	fsDriver := &driver{rootDirectory: params.RootDirectory}
	return &Driver{
		baseEmbed: baseEmbed{
			Base: base.Base{
				StorageDriver: base.NewRegulator(fsDriver, params.MaxThreads),
			},
		},
	}
}
// Implement the storagedriver.StorageDriver interface

// Name returns the driver's registered name ("filesystem").
func (d *driver) Name() string {
	return driverName
}
// GetContent retrieves the content stored at "path" as a []byte.
func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
	reader, err := d.Reader(ctx, path, 0)
	if err != nil {
		return nil, err
	}
	defer reader.Close()
	data, err := io.ReadAll(reader)
	if err != nil {
		return nil, err
	}
	return data, nil
}
// PutContent stores the []byte content at a location designated by "path".
// On a failed copy the partially written file is removed via Cancel;
// on success the write is finalized via Commit.
func (d *driver) PutContent(ctx context.Context, subPath string, contents []byte) error {
	writer, err := d.Writer(ctx, subPath, false)
	if err != nil {
		return err
	}
	// Deferred Close runs after Commit/Cancel; its error is discarded.
	defer writer.Close()
	_, err = io.Copy(writer, bytes.NewReader(contents))
	if err != nil {
		// Surface both the write error and any cleanup failure.
		if cErr := writer.Cancel(ctx); cErr != nil {
			return errors.Join(err, cErr)
		}
		return err
	}
	return writer.Commit(ctx)
}
// Reader retrieves an io.ReadCloser for the content stored at "path" with a
// given byte offset. The caller is responsible for closing the returned file.
func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
	file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0o644)
	log.Ctx(ctx).Info().Msgf("Opening file %s %s", d.fullPath(path), d.rootDirectory)
	if err != nil {
		if errors.Is(err, fs.ErrNotExist) {
			return nil, storagedriver.PathNotFoundError{Path: path}
		}
		return nil, err
	}
	seekPos, err := file.Seek(offset, io.SeekStart)
	if err != nil {
		file.Close()
		return nil, err
	} else if seekPos < offset {
		// Defensive: the file position did not reach the requested offset.
		file.Close()
		return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset}
	}
	return file, nil
}
// Writer returns a FileWriter for subPath, creating parent directories
// as needed. With appendFlag=false the file is truncated; with
// appendFlag=true writing continues from the current end of the file.
func (d *driver) Writer(_ context.Context, subPath string, appendFlag bool) (storagedriver.FileWriter, error) {
	fullPath := d.fullPath(subPath)
	parentDir := path.Dir(fullPath)
	if err := os.MkdirAll(parentDir, 0o777); err != nil {
		return nil, err
	}
	fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0o666)
	if err != nil {
		return nil, err
	}
	var offset int64
	if !appendFlag {
		err := fp.Truncate(0)
		if err != nil {
			fp.Close()
			return nil, err
		}
	} else {
		// Seek to EOF and remember the offset as the writer's starting size.
		n, err := fp.Seek(0, io.SeekEnd)
		if err != nil {
			fp.Close()
			return nil, err
		}
		offset = n
	}
	return newFileWriter(fp, offset), nil
}
// Stat retrieves the FileInfo for the given path, including the current size
// in bytes and the creation time.
func (d *driver) Stat(_ context.Context, subPath string) (storagedriver.FileInfo, error) {
	info, err := os.Stat(d.fullPath(subPath))
	switch {
	case err == nil:
		return fileInfo{
			path:     subPath,
			FileInfo: info,
		}, nil
	case errors.Is(err, fs.ErrNotExist):
		// Map the OS-level "not found" onto the driver's error type.
		return nil, storagedriver.PathNotFoundError{Path: subPath}
	default:
		return nil, err
	}
}
// List returns a list of the objects that are direct descendants of the given
// path.
func (d *driver) List(_ context.Context, subPath string) ([]string, error) {
	dir, err := os.Open(d.fullPath(subPath))
	if err != nil {
		if errors.Is(err, fs.ErrNotExist) {
			return nil, storagedriver.PathNotFoundError{Path: subPath}
		}
		return nil, err
	}
	defer dir.Close()
	names, err := dir.Readdirnames(0)
	if err != nil {
		return nil, err
	}
	// Rebase each entry name onto the requested path.
	entries := make([]string, 0, len(names))
	for _, name := range names {
		entries = append(entries, path.Join(subPath, name))
	}
	return entries, nil
}
// Move moves an object stored at sourcePath to destPath, removing the original
// object.
func (d *driver) Move(_ context.Context, sourcePath string, destPath string) error {
	src := d.fullPath(sourcePath)
	dst := d.fullPath(destPath)
	if _, err := os.Stat(src); errors.Is(err, fs.ErrNotExist) {
		return storagedriver.PathNotFoundError{Path: sourcePath}
	}
	// Ensure the destination's parent directory exists before renaming.
	if err := os.MkdirAll(path.Dir(dst), 0o777); err != nil {
		return err
	}
	return os.Rename(src, dst)
}
// Delete recursively deletes all objects stored at "path" and its subpaths.
func (d *driver) Delete(_ context.Context, subPath string) error {
	target := d.fullPath(subPath)
	if _, err := os.Stat(target); err != nil {
		if errors.Is(err, fs.ErrNotExist) {
			// Missing target is reported as a driver-level not-found.
			return storagedriver.PathNotFoundError{Path: subPath}
		}
		return err
	}
	return os.RemoveAll(target)
}
// RedirectURL returns a URL which may be used to retrieve the content stored at the given path.
// The filesystem driver has no redirect mechanism, so it always returns
// an empty URL and no error.
func (d *driver) RedirectURL(_ context.Context, _ string, _ string, _ string) (string, error) {
	return "", nil
}
// Walk traverses a filesystem defined within driver, starting
// from the given path, calling f on each file and directory.
// Traversal is delegated to storagedriver.WalkFallback.
func (d *driver) Walk(
	ctx context.Context,
	path string,
	f storagedriver.WalkFn,
	options ...func(*storagedriver.WalkOptions),
) error {
	return storagedriver.WalkFallback(ctx, d, path, f, options...)
}
// fullPath returns the absolute path of a key within the Driver's storage.
// path.Join also cleans the resulting path.
func (d *driver) fullPath(subPath string) string {
	return path.Join(d.rootDirectory, subPath)
}
// fileInfo adapts an os.FileInfo to the storagedriver.FileInfo
// interface, carrying the driver-relative path alongside it.
type fileInfo struct {
	os.FileInfo
	path string
}

// Compile-time interface satisfaction check.
var _ storagedriver.FileInfo = fileInfo{}

// Path provides the full path of the target of this file info.
func (fi fileInfo) Path() string {
	return fi.path
}

// Size returns current length in bytes of the file. The return value can
// be used to write to the end of the file at path. The value is
// meaningless if IsDir returns true.
func (fi fileInfo) Size() int64 {
	if fi.IsDir() {
		return 0
	}
	return fi.FileInfo.Size()
}

// ModTime returns the modification time for the file. For backends that
// don't have a modification time, the creation time should be returned.
func (fi fileInfo) ModTime() time.Time {
	return fi.FileInfo.ModTime()
}

// IsDir returns true if the path is a directory.
func (fi fileInfo) IsDir() bool {
	return fi.FileInfo.IsDir()
}
type fileWriter struct {
file *os.File
size int64
bw *bufio.Writer
closed bool
committed bool
cancelled bool
}
func newFileWriter(file *os.File, size int64) *fileWriter {
return &fileWriter{
file: file,
size: size,
bw: bufio.NewWriter(file),
}
}
func (fw *fileWriter) Write(p []byte) (int, error) {
if fw.closed {
return 0, fmt.Errorf("already closed")
} else if fw.committed {
return 0, fmt.Errorf("already committed")
} else if fw.cancelled {
return 0, fmt.Errorf("already cancelled")
}
n, err := fw.bw.Write(p)
fw.size += int64(n)
return n, err
}
func (fw *fileWriter) Size() int64 {
return fw.size
}
func (fw *fileWriter) Close() error {
if fw.closed {
return fmt.Errorf("already closed")
}
if err := fw.bw.Flush(); err != nil {
return err
}
if err := fw.file.Sync(); err != nil {
return err
}
if err := fw.file.Close(); err != nil {
return err
}
fw.closed = true
return nil
}
func (fw *fileWriter) Cancel(_ context.Context) error {
if fw.closed {
return fmt.Errorf("already closed")
}
fw.cancelled = true
fw.file.Close()
return os.Remove(fw.file.Name())
}
func (fw *fileWriter) Commit(_ context.Context) error {
if fw.closed {
return fmt.Errorf("already closed")
} else if fw.committed {
return fmt.Errorf("already committed")
} else if fw.cancelled {
return fmt.Errorf("already cancelled")
}
if err := fw.bw.Flush(); err != nil {
return err
}
if err := fw.file.Sync(); err != nil {
return err
}
fw.committed = true
return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/driver/s3-aws/s3.go | registry/app/driver/s3-aws/s3.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package s3 provides a storagedriver.StorageDriver implementation to
// store blobs in Amazon S3 cloud storage.
//
// This package leverages the official aws client library for interfacing with
// S3.
//
// Because S3 is a key, value store the Stat call does not support last modification
// time for directories (directories are an abstraction for key, value stores)
//
// Keep in mind that S3 guarantees only read-after-write consistency for new
// objects, but no read-after-update or list-after-write consistency.
package s3
import (
"bytes"
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"math"
"net/http"
"path/filepath"
"reflect"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/harness/gitness/registry/app/dist_temp/dcontext"
storagedriver "github.com/harness/gitness/registry/app/driver"
"github.com/harness/gitness/registry/app/driver/base"
"github.com/harness/gitness/registry/app/driver/factory"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/rs/zerolog/log"
)
// driverName is the key under which this driver registers itself.
const driverName = "s3aws"

// minChunkSize defines the minimum multipart upload chunk size
// S3 API requires multipart upload chunks to be at least 5MB.
const minChunkSize = 5 * 1024 * 1024

// maxChunkSize defines the maximum multipart upload chunk size allowed by S3.
// S3 API requires max upload chunk to be 5GB.
const maxChunkSize = 5 * 1024 * 1024 * 1024

// defaultChunkSize is the chunk size used when none is configured.
const defaultChunkSize = 2 * minChunkSize

const (
	// defaultMultipartCopyChunkSize defines the default chunk size for all
	// but the last Upload Part - Copy operation of a multipart copy.
	// Empirically, 32 MB is optimal.
	defaultMultipartCopyChunkSize = 32 * 1024 * 1024

	// defaultMultipartCopyMaxConcurrency defines the default maximum number
	// of concurrent Upload Part - Copy operations for a multipart copy.
	defaultMultipartCopyMaxConcurrency = 100

	// defaultMultipartCopyThresholdSize defines the default object size
	// above which multipart copy will be used. (PUT Object - Copy is used
	// for objects at or below this size.) Empirically, 32 MB is optimal.
	defaultMultipartCopyThresholdSize = 32 * 1024 * 1024
)

// listMax is the largest amount of objects you can request from S3 in a list call.
const listMax = 1000

// noStorageClass defines the value to be used if storage class is not supported by the S3 endpoint.
const noStorageClass = "NONE"

// r2Regions is a comma-separated list of Cloudflare R2 location hints
// accepted in addition to real AWS regions (parsed in init).
const r2Regions = "wnam, enam, weur, eeur, apac, oc"

// s3StorageClasses lists all compatible (instant retrieval) S3 storage classes.
var s3StorageClasses = []string{
	noStorageClass,
	s3.StorageClassStandard,
	s3.StorageClassReducedRedundancy,
	s3.StorageClassStandardIa,
	s3.StorageClassOnezoneIa,
	s3.StorageClassIntelligentTiering,
	s3.StorageClassOutposts,
	s3.StorageClassGlacierIr,
}

// validRegions maps known s3 region identifiers to region descriptors.
// Populated in init from the AWS SDK's endpoint metadata plus r2Regions.
var validRegions = map[string]struct{}{}

// validObjectACLs contains known s3 object Acls. Populated in init.
var validObjectACLs = map[string]struct{}{}
// DriverParameters A struct that encapsulates all of the driver parameters after all values have been set.
type DriverParameters struct {
	AccessKey                   string
	SecretKey                   string
	Bucket                      string
	Region                      string
	RegionEndpoint              string
	ForcePathStyle              bool
	Encrypt                     bool
	KeyID                       string
	Secure                      bool
	SkipVerify                  bool
	V4Auth                      bool
	ChunkSize                   int64
	MultipartCopyChunkSize      int64
	MultipartCopyMaxConcurrency int64
	MultipartCopyThresholdSize  int64
	RootDirectory               string
	StorageClass                string
	UserAgent                   string
	ObjectACL                   string
	SessionToken                string
	UseDualStack                bool
	Accelerate                  bool
	LogLevel                    aws.LogLevelType
}

// GetDriverName returns the registered name of the s3 driver ("s3aws").
func GetDriverName() string {
	return driverName
}
func init() {
	// Seed validRegions with every region known to the AWS SDK's
	// endpoint metadata.
	partitions := endpoints.DefaultPartitions()
	for _, p := range partitions {
		for region := range p.Regions() {
			validRegions[region] = struct{}{}
		}
	}
	// Add the default Cloudflare R2 regions
	// NOTE(review): strings.SplitSeq requires a recent Go language
	// version (iterator-returning split) — confirm go.mod matches.
	for region := range strings.SplitSeq(r2Regions, ",") {
		validRegions[strings.TrimSpace(region)] = struct{}{}
	}
	// Seed validObjectACLs with the SDK's canned ACL constants.
	for _, objectACL := range []string{
		s3.ObjectCannedACLPrivate,
		s3.ObjectCannedACLPublicRead,
		s3.ObjectCannedACLPublicReadWrite,
		s3.ObjectCannedACLAuthenticatedRead,
		s3.ObjectCannedACLAwsExecRead,
		s3.ObjectCannedACLBucketOwnerRead,
		s3.ObjectCannedACLBucketOwnerFullControl,
	} {
		validObjectACLs[objectACL] = struct{}{}
	}
	// NOTE(review): only driverName ("s3aws") is registered here; the
	// previous comment claimed an additional default "s3" registration
	// that does not happen in this function.
	factory.Register(driverName, &s3DriverFactory{})
}
// TODO: figure-out why init is not called automatically
// Register logs that the s3 driver is being registered; the actual
// factory registration happens in init above.
func Register(ctx context.Context) {
	log.Ctx(ctx).Info().Msgf("registering s3 driver")
}

// s3DriverFactory implements the factory.StorageDriverFactory interface.
type s3DriverFactory struct{}

// Create builds an S3-backed storage driver from the given parameter map.
func (factory *s3DriverFactory) Create(ctx context.Context, parameters map[string]any) (
	storagedriver.StorageDriver,
	error,
) {
	return FromParameters(ctx, parameters)
}
// Compile-time check that driver satisfies the StorageDriver interface.
var _ storagedriver.StorageDriver = &driver{}

// driver carries the S3 client plus the resolved configuration used by
// all storage operations.
type driver struct {
	S3                          *s3.S3
	Bucket                      string
	ChunkSize                   int64
	Encrypt                     bool
	KeyID                       string
	MultipartCopyChunkSize      int64
	MultipartCopyMaxConcurrency int64
	MultipartCopyThresholdSize  int64
	RootDirectory               string
	StorageClass                string
	ObjectACL                   string
	// pool is presumably a scratch-buffer pool; its use is not visible
	// in this portion of the file — TODO confirm.
	pool *sync.Pool
}
// CopyObject copies srcKey from the driver's bucket to destBucket/destKey.
// Objects at or below MultipartCopyThresholdSize use a single CopyObject
// call; larger objects use a multipart copy. The destination is verified
// with a HeadObject call before returning.
func (d *driver) CopyObject(ctx context.Context, srcKey, destBucket, destKey string) error {
	// Get source object info to determine size
	srcPath := strings.TrimPrefix(srcKey, "/")
	fileInfo, err := d.Stat(ctx, srcPath)
	if err != nil {
		return fmt.Errorf("failed to get source object info: %w", err)
	}
	// For objects <= threshold size, use simple copy
	if fileInfo.Size() <= d.MultipartCopyThresholdSize {
		copySource := fmt.Sprintf("/%s%s", d.Bucket, srcKey)
		_, err := d.S3.CopyObjectWithContext(ctx, &s3.CopyObjectInput{
			Bucket:     aws.String(destBucket),
			CopySource: aws.String(copySource),
			Key:        aws.String(destKey),
		})
		if err != nil {
			return err
		}
	} else {
		// For large objects, use multipart copy
		err = d.performMultipartCopy(ctx, d.Bucket, srcKey, destBucket, destKey, fileInfo.Size())
		if err != nil {
			return err
		}
	}
	// Verify the destination object exists
	_, err = d.S3.HeadObjectWithContext(ctx, &s3.HeadObjectInput{
		Bucket: aws.String(destBucket),
		Key:    aws.String(destKey),
	})
	return err
}
// performMultipartCopy performs multipart copy for large objects across buckets.
// The source is split into MultipartCopyChunkSize byte ranges; up to
// MultipartCopyMaxConcurrency UploadPartCopy calls run in parallel, and the
// multipart upload is completed on success or aborted on the first failure.
func (d *driver) performMultipartCopy(
	ctx context.Context, srcBucket, srcKey, destBucket, destKey string, objectSize int64,
) error {
	log.Ctx(ctx).Trace().Msgf("[AWS] CreateMultipartUpload: %s/%s -> %s/%s", srcBucket, srcKey, destBucket, destKey)
	// Create multipart upload
	createResp, err := d.S3.CreateMultipartUploadWithContext(
		ctx, &s3.CreateMultipartUploadInput{
			Bucket:               aws.String(destBucket),
			Key:                  aws.String(destKey),
			ContentType:          d.getContentType(),
			ACL:                  d.getACL(),
			SSEKMSKeyId:          d.getSSEKMSKeyID(),
			ServerSideEncryption: d.getEncryptionMode(),
			StorageClass:         d.getStorageClass(),
		},
	)
	if err != nil {
		return err
	}

	// Ceiling division: the final part may be shorter than the chunk size.
	numParts := (objectSize + d.MultipartCopyChunkSize - 1) / d.MultipartCopyChunkSize
	completedParts := make([]*s3.CompletedPart, numParts)
	// Buffered to numParts so every worker can report without blocking even
	// if this function returns early after the first failure.
	errChan := make(chan error, numParts)
	// Semaphore bounding the number of in-flight UploadPartCopy calls.
	limiter := make(chan struct{}, d.MultipartCopyMaxConcurrency)

	for i := range completedParts {
		i := int64(i)
		go func() {
			limiter <- struct{}{}
			// Byte range for this part; the last part is clamped to object end.
			firstByte := i * d.MultipartCopyChunkSize
			lastByte := firstByte + d.MultipartCopyChunkSize - 1
			if lastByte >= objectSize {
				lastByte = objectSize - 1
			}
			log.Ctx(ctx).Trace().Msgf("[AWS] [%d] UploadPartCopy: %s/%s -> %s/%s", i, srcBucket, srcKey, destBucket,
				destKey)
			uploadResp, err := d.S3.UploadPartCopyWithContext(
				ctx, &s3.UploadPartCopyInput{
					Bucket:          aws.String(destBucket),
					CopySource:      aws.String(srcBucket + "/" + strings.TrimPrefix(srcKey, "/")),
					Key:             aws.String(destKey),
					PartNumber:      aws.Int64(i + 1),
					UploadId:        createResp.UploadId,
					CopySourceRange: aws.String(fmt.Sprintf("bytes=%d-%d", firstByte, lastByte)),
				},
			)
			if err == nil {
				// Each goroutine writes a distinct slice index, so no lock is needed.
				completedParts[i] = &s3.CompletedPart{
					ETag:       uploadResp.CopyPartResult.ETag,
					PartNumber: aws.Int64(i + 1),
				}
			}
			errChan <- err
			<-limiter
		}()
	}

	// Collect one result per part; abort the whole upload on the first error.
	for range completedParts {
		err := <-errChan
		if err != nil {
			// Abort the multipart upload on error
			_, abortErr := d.S3.AbortMultipartUploadWithContext(ctx, &s3.AbortMultipartUploadInput{
				Bucket:   aws.String(destBucket),
				Key:      aws.String(destKey),
				UploadId: createResp.UploadId,
			})
			if abortErr != nil {
				log.Ctx(ctx).Error().Err(abortErr).Msg("Failed to abort multipart upload")
			}
			return err
		}
	}

	log.Ctx(ctx).Trace().Msgf("[AWS] CompleteMultipartUpload: %s/%s", destBucket, destKey)
	_, err = d.S3.CompleteMultipartUploadWithContext(
		ctx, &s3.CompleteMultipartUploadInput{
			Bucket:          aws.String(destBucket),
			Key:             aws.String(destKey),
			UploadId:        createResp.UploadId,
			MultipartUpload: &s3.CompletedMultipartUpload{Parts: completedParts},
		},
	)
	return err
}
// BatchCopyObjects copies every key in keys from the driver's bucket to
// destBucket (under the same key), running at most `concurrency` copies in
// parallel. Each key is attempted up to three times with exponential backoff
// between attempts, honoring context cancellation. After all workers finish,
// the first recorded error (if any) is returned.
func (d *driver) BatchCopyObjects(ctx context.Context, destBucket string, keys []string, concurrency int) error {
	const maxAttempts = 3
	total := len(keys)
	// Guard against a zero/negative limit, which would deadlock on the semaphore.
	if concurrency < 1 {
		concurrency = 1
	}
	sem := make(chan struct{}, concurrency)
	// Buffered to total so workers never block when reporting failures.
	errCh := make(chan error, total)
	var wg sync.WaitGroup
	var mu sync.Mutex
	completed := 0

	for _, key := range keys {
		wg.Add(1)
		sem <- struct{}{}
		go func(key string) {
			defer wg.Done()
			defer func() { <-sem }()

			var err error
			for attempt := 1; attempt <= maxAttempts; attempt++ {
				err = d.CopyObject(ctx, key, destBucket, key)
				if err == nil || attempt == maxAttempts {
					// Success, or out of attempts — no point sleeping further.
					break
				}
				// Exponential backoff: 100ms, 200ms, ... honoring cancellation.
				backoff := time.Duration(100<<(attempt-1)) * time.Millisecond
				select {
				case <-time.After(backoff):
				case <-ctx.Done():
					errCh <- fmt.Errorf("copy of key %s canceled: %w", key, ctx.Err())
					return
				}
			}
			if err != nil {
				errCh <- fmt.Errorf("failed to copy key %s after %d retries: %w", key, maxAttempts, err)
				return
			}

			// Update progress under the mutex so counter and log line stay consistent.
			mu.Lock()
			completed++
			log.Ctx(ctx).Info().Msgf("Progress: %d/%d copied", completed, total)
			mu.Unlock()
		}(key)
	}

	wg.Wait()
	close(errCh)

	if len(errCh) > 0 {
		return <-errCh
	}
	return nil
}
// baseEmbed hides the base.Base embedding from the exported Driver type
// while still inheriting its behavior.
type baseEmbed struct {
	base.Base
}

// Driver is a storagedriver.StorageDriver implementation backed by Amazon S3
// Objects are stored at absolute keys in the provided bucket.
type Driver struct {
	baseEmbed
}
// FromParameters constructs a new Driver with a given parameters map
// Required parameters:
// - accesskey
// - secretkey
// - region
// - bucket
// - encrypt.
func FromParameters(ctx context.Context, parameters map[string]any) (*Driver, error) {
	// Providing no values for these is valid in case the user is authenticating
	// with an IAM on an ec2 instance (in which case the instance credentials will
	// be summoned when GetAuth is called).
	accessKey := optionalStringParameter(parameters, "accesskey")
	secretKey := optionalStringParameter(parameters, "secretkey")
	regionEndpoint := optionalStringParameter(parameters, "regionendpoint")

	forcePathStyleBool, err := boolParameter(parameters, "forcepathstyle", "forcePathStyle", true)
	if err != nil {
		return nil, err
	}

	region := optionalStringParameter(parameters, "region")
	if region == "" {
		return nil, fmt.Errorf("no region parameter provided")
	}
	// Don't check the region value if a custom endpoint is provided.
	if regionEndpoint == "" {
		if _, ok := validRegions[region]; !ok {
			return nil, fmt.Errorf("invalid region provided: %v", region)
		}
	}

	bucket := optionalStringParameter(parameters, "bucket")
	if bucket == "" {
		return nil, fmt.Errorf("no bucket parameter provided")
	}

	encryptBool, err := boolParameter(parameters, "encrypt", "encrypt", false)
	if err != nil {
		return nil, err
	}
	secureBool, err := boolParameter(parameters, "secure", "secure", true)
	if err != nil {
		return nil, err
	}
	skipVerifyBool, err := boolParameter(parameters, "skipverify", "skipVerify", false)
	if err != nil {
		return nil, err
	}
	v4Bool, err := boolParameter(parameters, "v4auth", "v4auth", true)
	if err != nil {
		return nil, err
	}

	keyID := optionalStringParameter(parameters, "keyid")

	chunkSize, err := getParameterAsInt64(
		parameters, "chunksize",
		defaultChunkSize, minChunkSize, maxChunkSize,
	)
	if err != nil {
		return nil, err
	}
	multipartCopyChunkSize, err := getParameterAsInt64(
		parameters,
		"multipartcopychunksize",
		defaultMultipartCopyChunkSize,
		minChunkSize,
		maxChunkSize,
	)
	if err != nil {
		return nil, err
	}
	multipartCopyMaxConcurrency, err := getParameterAsInt64(
		parameters,
		"multipartcopymaxconcurrency",
		defaultMultipartCopyMaxConcurrency,
		1,
		math.MaxInt64,
	)
	if err != nil {
		return nil, err
	}
	multipartCopyThresholdSize, err := getParameterAsInt64(
		parameters,
		"multipartcopythresholdsize",
		defaultMultipartCopyThresholdSize,
		0,
		maxChunkSize,
	)
	if err != nil {
		return nil, err
	}

	rootDirectory := optionalStringParameter(parameters, "rootdirectory")

	storageClass, err := storageClassParameter(parameters)
	if err != nil {
		return nil, err
	}

	userAgent := optionalStringParameter(parameters, "useragent")

	objectACL, err := objectACLParameter(parameters)
	if err != nil {
		return nil, err
	}

	useDualStackBool, err := boolParameter(parameters, "usedualstack", "useDualStack", false)
	if err != nil {
		return nil, err
	}

	sessionToken := ""

	accelerateBool, err := boolParameter(parameters, "accelerate", "accelerate", false)
	if err != nil {
		return nil, err
	}

	// Positional initialization mirrors the DriverParameters field order.
	params := DriverParameters{
		accessKey,
		secretKey,
		bucket,
		region,
		regionEndpoint,
		forcePathStyleBool,
		encryptBool,
		keyID,
		secureBool,
		skipVerifyBool,
		v4Bool,
		chunkSize,
		multipartCopyChunkSize,
		multipartCopyMaxConcurrency,
		multipartCopyThresholdSize,
		rootDirectory,
		storageClass,
		userAgent,
		objectACL,
		sessionToken,
		useDualStackBool,
		accelerateBool,
		getS3LogLevelFromParam(parameters["loglevel"]), //nolint:contextcheck
	}

	return New(ctx, params)
}

// optionalStringParameter renders parameters[key] as a string, returning ""
// when the key is absent or nil.
func optionalStringParameter(parameters map[string]any, key string) string {
	v := parameters[key]
	if v == nil {
		return ""
	}
	return fmt.Sprint(v)
}

// boolParameter parses parameters[key] as a boolean, accepting bool values
// and strconv-parseable strings, and returning defaultValue when the key is
// absent. displayName is used verbatim in error messages to preserve the
// original user-facing wording.
func boolParameter(parameters map[string]any, key, displayName string, defaultValue bool) (bool, error) {
	switch v := parameters[key].(type) {
	case string:
		b, err := strconv.ParseBool(v)
		if err != nil {
			return false, fmt.Errorf("the %s parameter should be a boolean", displayName)
		}
		return b, nil
	case bool:
		return v, nil
	case nil:
		return defaultValue, nil
	default:
		return false, fmt.Errorf("the %s parameter should be a boolean", displayName)
	}
}

// storageClassParameter validates the optional "storageclass" parameter
// against the supported S3 storage classes, defaulting to STANDARD.
func storageClassParameter(parameters map[string]any) (string, error) {
	param := parameters["storageclass"]
	if param == nil {
		return s3.StorageClassStandard, nil
	}
	str, ok := param.(string)
	if !ok {
		return "", fmt.Errorf(
			"the storageclass parameter must be one of %v, %v invalid",
			s3StorageClasses,
			param,
		)
	}
	// All valid storage class parameters are UPPERCASE, so be a bit more flexible here
	str = strings.ToUpper(str)
	switch str {
	case noStorageClass,
		s3.StorageClassStandard,
		s3.StorageClassReducedRedundancy,
		s3.StorageClassStandardIa,
		s3.StorageClassOnezoneIa,
		s3.StorageClassIntelligentTiering,
		s3.StorageClassOutposts,
		s3.StorageClassGlacierIr:
		return str, nil
	default:
		return "", fmt.Errorf(
			"the storageclass parameter must be one of %v, %v invalid",
			s3StorageClasses,
			param,
		)
	}
}

// objectACLParameter validates the optional "objectacl" parameter against
// validObjectACLs, defaulting to the private canned ACL.
func objectACLParameter(parameters map[string]any) (string, error) {
	param := parameters["objectacl"]
	if param == nil {
		return s3.ObjectCannedACLPrivate, nil
	}
	str, ok := param.(string)
	if !ok {
		return "", fmt.Errorf(
			"invalid value for objectacl parameter: %v",
			param,
		)
	}
	if _, ok = validObjectACLs[str]; !ok {
		return "", fmt.Errorf(
			"invalid value for objectacl parameter: %v",
			param,
		)
	}
	return str, nil
}
// getS3LogLevelFromParam maps the optional "loglevel" parameter onto an AWS
// SDK log level. Nil, non-string, and unrecognized values all resolve to
// aws.LogOff (non-string values additionally emit a warning).
func getS3LogLevelFromParam(param any) aws.LogLevelType {
	if param == nil {
		return aws.LogOff
	}
	name, ok := param.(string)
	if !ok {
		// Same behavior as before: warn, then fall through to LogOff below.
		log.Ctx(context.Background()).Warn().Msg("Error: param is not of type string")
	}
	switch strings.ToLower(name) {
	case "debug":
		return aws.LogDebug
	case "debugwithsigning":
		return aws.LogDebugWithSigning
	case "debugwithhttpbody":
		return aws.LogDebugWithHTTPBody
	case "debugwithrequestretries":
		return aws.LogDebugWithRequestRetries
	case "debugwithrequesterrors":
		return aws.LogDebugWithRequestErrors
	case "debugwitheventstreambody":
		return aws.LogDebugWithEventStreamBody
	default:
		// Covers "off" and any unrecognized value.
		return aws.LogOff
	}
}
// getParameterAsInt64 reads parameters[name] as an int64, falling back to
// defaultt when the key is absent. String values are parsed with strconv
// (base auto-detected), and other integer kinds are converted via reflection.
// The resulting value must lie within [minSize, maxSize], inclusive.
func getParameterAsInt64(
	parameters map[string]any,
	name string,
	defaultt int64,
	minSize int64,
	maxSize int64,
) (int64, error) {
	value := defaultt
	raw := parameters[name]
	switch typed := raw.(type) {
	case nil:
		// Absent: keep the default.
	case string:
		parsed, err := strconv.ParseInt(typed, 0, 64)
		if err != nil {
			return 0, fmt.Errorf("%s parameter must be an integer, %v invalid", name, raw)
		}
		value = parsed
	case int64:
		value = typed
	case int, uint, int32, uint32, uint64:
		value = reflect.ValueOf(typed).Convert(reflect.TypeOf(value)).Int()
	default:
		return 0, fmt.Errorf("invalid value for %s: %#v", name, raw)
	}

	if value < minSize || value > maxSize {
		return 0, fmt.Errorf(
			"the %s %#v parameter should be a number between %d and %d (inclusive)",
			name,
			value,
			minSize,
			maxSize,
		)
	}
	return value, nil
}
// New constructs a new Driver with the given AWS credentials, region, encryption flag, and
// bucketName.
func New(_ context.Context, params DriverParameters) (*Driver, error) {
	// v2 signing is only allowed against custom (S3-compatible) endpoints;
	// real AWS endpoints require v4 authentication.
	if !params.V4Auth &&
		(params.RegionEndpoint == "" ||
			strings.Contains(params.RegionEndpoint, "s3.amazonaws.com")) {
		return nil, fmt.Errorf("on Amazon S3 this storage driver can only be used with v4 authentication")
	}

	awsConfig := aws.NewConfig().WithLogLevel(params.LogLevel)

	// Static credentials are only set when both halves are provided; otherwise
	// the SDK's default credential resolution applies.
	if params.AccessKey != "" && params.SecretKey != "" {
		creds := credentials.NewStaticCredentials(
			params.AccessKey,
			params.SecretKey,
			params.SessionToken,
		)
		awsConfig.WithCredentials(creds)
	}

	if params.RegionEndpoint != "" {
		awsConfig.WithEndpoint(params.RegionEndpoint)
		awsConfig.WithS3ForcePathStyle(params.ForcePathStyle)
	}

	awsConfig.WithS3UseAccelerate(params.Accelerate)
	awsConfig.WithRegion(params.Region)
	awsConfig.WithDisableSSL(!params.Secure)
	if params.UseDualStack {
		awsConfig.UseDualStackEndpoint = endpoints.DualStackEndpointStateEnabled
	}

	if params.SkipVerify {
		// Clone the default transport so the global http.DefaultTransport is
		// left untouched, then disable certificate verification on the clone.
		httpTransport, ok := http.DefaultTransport.(*http.Transport)
		if !ok {
			return nil, fmt.Errorf("failed to get default transport")
		}
		httpTransport = httpTransport.Clone()
		httpTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true, MinVersion: tls.VersionTLS12}
		awsConfig.WithHTTPClient(
			&http.Client{
				Transport: httpTransport,
			},
		)
	}

	sess, err := session.NewSession(awsConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to create new session with aws config: %w", err)
	}

	if params.UserAgent != "" {
		sess.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler(params.UserAgent))
	}

	s3obj := s3.New(sess)

	// enable S3 compatible signature v2 signing instead
	if !params.V4Auth {
		setv2Handlers(s3obj)
	}

	d := &driver{
		S3:                          s3obj,
		Bucket:                      params.Bucket,
		ChunkSize:                   params.ChunkSize,
		Encrypt:                     params.Encrypt,
		KeyID:                       params.KeyID,
		MultipartCopyChunkSize:      params.MultipartCopyChunkSize,
		MultipartCopyMaxConcurrency: params.MultipartCopyMaxConcurrency,
		MultipartCopyThresholdSize:  params.MultipartCopyThresholdSize,
		RootDirectory:               params.RootDirectory,
		StorageClass:                params.StorageClass,
		ObjectACL:                   params.ObjectACL,
		// Pool of reusable write buffers, each pre-sized to ChunkSize.
		pool: &sync.Pool{
			New: func() any {
				return &buffer{
					data: make([]byte, 0, params.ChunkSize),
				}
			},
		},
	}

	return &Driver{
		baseEmbed: baseEmbed{
			Base: base.Base{
				StorageDriver: d,
			},
		},
	}, nil
}
// Implement the storagedriver.StorageDriver interface

// Name returns the driver's registered name (the package-level driverName
// constant).
func (d *driver) Name() string {
	return driverName
}
// GetContent retrieves the content stored at "path" as a []byte.
// The underlying reader (an S3 response body) is closed before returning so
// the HTTP connection can be reused by the transport.
func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
	reader, err := d.Reader(ctx, path, 0)
	if err != nil {
		return nil, err
	}
	// Previously the body was never closed, leaking the connection.
	defer reader.Close()
	return io.ReadAll(reader)
}
// PutContent stores the []byte content at a location designated by "path".
func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error {
	log.Ctx(ctx).Trace().Msgf("[AWS] PutContent: %s", path)
	input := &s3.PutObjectInput{
		Bucket:               aws.String(d.Bucket),
		Key:                  aws.String(d.s3Path(path)),
		ContentType:          d.getContentType(),
		ACL:                  d.getACL(),
		ServerSideEncryption: d.getEncryptionMode(),
		SSEKMSKeyId:          d.getSSEKMSKeyID(),
		StorageClass:         d.getStorageClass(),
		Body:                 bytes.NewReader(contents),
	}
	_, err := d.S3.PutObjectWithContext(ctx, input)
	return parseError(path, err)
}
// Reader retrieves an io.ReadCloser for the content stored at "path",
// starting at the given byte offset.
func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
	log.Ctx(ctx).Trace().Msgf("[AWS] GetObject: %s", path)
	byteRange := "bytes=" + strconv.FormatInt(offset, 10) + "-"
	resp, err := d.S3.GetObjectWithContext(ctx, &s3.GetObjectInput{
		Bucket: aws.String(d.Bucket),
		Key:    aws.String(d.s3Path(path)),
		Range:  aws.String(byteRange),
	})
	if err == nil {
		return resp.Body, nil
	}

	// An offset past the end of the object yields an empty reader instead of
	// an error, matching the semantics of a zero-length read.
	var s3Err awserr.Error
	if errors.As(err, &s3Err) && s3Err.Code() == "InvalidRange" {
		return io.NopCloser(bytes.NewReader(nil)), nil
	}
	return nil, parseError(path, err)
}
// Writer returns a FileWriter which will store the content written to it
// at the location designated by "path" after the call to Commit.
// It only allows appending to paths with zero size committed content,
// in which the existing content is overridden with the new content.
// It returns storagedriver.Error when appending to paths
// with non-zero committed content.
func (d *driver) Writer(ctx context.Context, path string, appendMode bool) (storagedriver.FileWriter, error) {
	key := d.s3Path(path)
	if !appendMode {
		// Fresh write: start a brand-new multipart upload for this key.
		log.Ctx(ctx).Trace().Msgf("[AWS] CreateMultipartUpload: %s", path)
		resp, err := d.S3.CreateMultipartUploadWithContext(
			ctx, &s3.CreateMultipartUploadInput{
				Bucket:               aws.String(d.Bucket),
				Key:                  aws.String(key),
				ContentType:          d.getContentType(),
				ACL:                  d.getACL(),
				ServerSideEncryption: d.getEncryptionMode(),
				SSEKMSKeyId:          d.getSSEKMSKeyID(),
				StorageClass:         d.getStorageClass(),
			},
		)
		if err != nil {
			return nil, err
		}
		return d.newWriter(ctx, key, *resp.UploadId, nil), nil
	}

	// Append mode: search for an in-progress multipart upload to resume,
	// paging through ListMultipartUploads results.
	listMultipartUploadsInput := &s3.ListMultipartUploadsInput{
		Bucket: aws.String(d.Bucket),
		Prefix: aws.String(key),
	}
	for {
		log.Ctx(ctx).Trace().Msgf("[AWS] ListMultipartUploads: %s", path)
		resp, err := d.S3.ListMultipartUploadsWithContext(ctx, listMultipartUploadsInput)
		if err != nil {
			return nil, parseError(path, err)
		}

		// resp.Uploads can only be empty on the first call
		// if there were no more results to return after the first call, resp.IsTruncated would have been false
		// and the loop would be exited without recalling ListMultipartUploads
		if len(resp.Uploads) == 0 {
			fi, err := d.Stat(ctx, path)
			if err != nil {
				return nil, parseError(path, err)
			}

			if fi.Size() == 0 {
				// Zero-size committed content may be overridden with a new upload.
				log.Ctx(ctx).Trace().Msgf("[AWS] CreateMultipartUpload: %s", path)
				resp, err := d.S3.CreateMultipartUploadWithContext(
					ctx, &s3.CreateMultipartUploadInput{
						Bucket:               aws.String(d.Bucket),
						Key:                  aws.String(key),
						ContentType:          d.getContentType(),
						ACL:                  d.getACL(),
						ServerSideEncryption: d.getEncryptionMode(),
						SSEKMSKeyId:          d.getSSEKMSKeyID(),
						StorageClass:         d.getStorageClass(),
					},
				)
				if err != nil {
					return nil, err
				}
				return d.newWriter(ctx, key, *resp.UploadId, nil), nil
			}
			return nil, storagedriver.Error{
				DriverName: driverName,
				Detail:     fmt.Errorf("append to zero-size path %s unsupported", path),
			}
		}

		var allParts []*s3.Part
		for _, multi := range resp.Uploads {
			if key != *multi.Key {
				continue
			}

			// Gather every already-uploaded part (ListParts is paginated) so
			// the writer can resume exactly where the previous upload stopped.
			log.Ctx(ctx).Trace().Msgf("[AWS] ListParts: %s", path)
			partsList, err := d.S3.ListPartsWithContext(
				ctx, &s3.ListPartsInput{
					Bucket:   aws.String(d.Bucket),
					Key:      aws.String(key),
					UploadId: multi.UploadId,
				},
			)
			if err != nil {
				return nil, parseError(path, err)
			}
			allParts = append(allParts, partsList.Parts...)
			for *partsList.IsTruncated {
				log.Ctx(ctx).Trace().Msgf("[AWS] ListParts: %s", path)
				partsList, err = d.S3.ListPartsWithContext(
					ctx, &s3.ListPartsInput{
						Bucket:           aws.String(d.Bucket),
						Key:              aws.String(key),
						UploadId:         multi.UploadId,
						PartNumberMarker: partsList.NextPartNumberMarker,
					},
				)
				if err != nil {
					return nil, parseError(path, err)
				}
				allParts = append(allParts, partsList.Parts...)
			}
			return d.newWriter(ctx, key, *multi.UploadId, allParts), nil
		}

		// resp.NextUploadIdMarker must have at least one element or we would have returned not found
		listMultipartUploadsInput.UploadIdMarker = resp.NextUploadIdMarker

		// from the s3 api docs, IsTruncated "specifies whether (true) or not (false) all of the results were returned"
		// if everything has been returned, break
		if resp.IsTruncated == nil || !*resp.IsTruncated {
			break
		}
	}
	return nil, storagedriver.PathNotFoundError{Path: path}
}
// Stat retrieves the FileInfo for the given path, including the current size
// in bytes and the creation time.
// A single-key ListObjectsV2 is used instead of HeadObject so that "virtual
// directories" (key prefixes with no object at the exact path) can also be
// detected.
func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
	log.Ctx(ctx).Trace().Msgf("[AWS] ListObjectsV2: %s", path)
	resp, err := d.S3.ListObjectsV2WithContext(
		ctx, &s3.ListObjectsV2Input{
			Bucket:  aws.String(d.Bucket),
			Prefix:  aws.String(d.s3Path(path)),
			MaxKeys: aws.Int64(1),
		},
	)
	if err != nil {
		return nil, err
	}

	fi := storagedriver.FileInfoFields{
		Path: path,
	}

	switch {
	case len(resp.Contents) == 1:
		// A key was found under the prefix: either the object itself, or a
		// child key — in which case the path behaves like a directory.
		if *resp.Contents[0].Key != d.s3Path(path) {
			fi.IsDir = true
		} else {
			fi.IsDir = false
			fi.Size = *resp.Contents[0].Size
			fi.ModTime = *resp.Contents[0].LastModified
		}
	case len(resp.CommonPrefixes) == 1:
		fi.IsDir = true
	default:
		return nil, storagedriver.PathNotFoundError{Path: path}
	}

	return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil
}
// List returns a list of the objects that are direct descendants of the given path.
func (d *driver) List(ctx context.Context, opath string) ([]string, error) {
	path := opath
	// Normalize to a trailing slash so the prefix matches only descendants.
	if path != "/" && path[len(path)-1] != '/' {
		path += "/"
	}

	// This is to cover for the cases when the rootDirectory of the driver is either "" or "/".
	// In those cases, there is no root prefix to replace and we must actually add a "/" to all
	// results in order to keep them as valid paths as recognized by storagedriver.PathRegexp
	prefix := ""
	if d.s3Path("") == "" {
		prefix = "/"
	}

	log.Ctx(ctx).Trace().Msgf("[AWS] ListObjectsV2: %s", path)
	resp, err := d.S3.ListObjectsV2WithContext(
		ctx, &s3.ListObjectsV2Input{
			Bucket:    aws.String(d.Bucket),
			Prefix:    aws.String(d.s3Path(path)),
			Delimiter: aws.String("/"),
			MaxKeys:   aws.Int64(listMax),
		},
	)
	if err != nil {
		return nil, parseError(opath, err)
	}

	files := []string{}
	directories := []string{}

	// Page through the listing, translating S3 keys back into driver paths.
	for {
		for _, key := range resp.Contents {
			files = append(files, strings.Replace(*key.Key, d.s3Path(""), prefix, 1))
		}

		for _, commonPrefix := range resp.CommonPrefixes {
			commonPrefix := *commonPrefix.Prefix
			// Drop the trailing delimiter so directories read as plain paths.
			directories = append(
				directories,
				strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), prefix, 1),
			)
		}

		if *resp.IsTruncated {
			log.Ctx(ctx).Trace().Msgf("[AWS] ListObjectsV2: %s", path)
			resp, err = d.S3.ListObjectsV2WithContext(
				ctx, &s3.ListObjectsV2Input{
					Bucket:            aws.String(d.Bucket),
					Prefix:            aws.String(d.s3Path(path)),
					Delimiter:         aws.String("/"),
					MaxKeys:           aws.Int64(listMax),
					ContinuationToken: resp.NextContinuationToken,
				},
			)
			if err != nil {
				return nil, err
			}
		} else {
			break
		}
	}

	if opath != "/" {
		if len(files) == 0 && len(directories) == 0 {
			// Treat empty response as missing directory, since we don't actually
			// have directories in s3.
			return nil, storagedriver.PathNotFoundError{Path: opath}
		}
	}

	return append(files, directories...), nil
}
// Move moves an object stored at sourcePath to destPath, removing the original
// object.
func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error {
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | true |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/driver/s3-aws/s3_v2_signer.go | registry/app/driver/s3-aws/s3_v2_signer.go | // Source: https://github.com/pivotal-golang/s3cli
// Copyright (c) 2013 Damien Le Berrigaud and Nick Wade
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package s3
import (
"context"
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"net/http"
"net/url"
"sort"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/rs/zerolog/log"
)
// signer carries the state needed to compute a V2-style signature for a
// single HTTP request.
type signer struct {
	// Values that must be populated from the request.
	Request     *http.Request
	Time        time.Time
	Credentials *credentials.Credentials
	Query       url.Values
	// stringToSign and signature are populated by Sign.
	stringToSign string
	signature    string
}
// s3ParamsToSign is the set of query (sub-resource) parameters that must be
// included in the canonicalized resource of a V2 string-to-sign.
var s3ParamsToSign = map[string]bool{
	"acl":                          true,
	"location":                     true,
	"logging":                      true,
	"notification":                 true,
	"partNumber":                   true,
	"policy":                       true,
	"requestPayment":               true,
	"torrent":                      true,
	"uploadId":                     true,
	"uploads":                      true,
	"versionId":                    true,
	"versioning":                   true,
	"versions":                     true,
	"response-content-type":        true,
	"response-content-language":    true,
	"response-expires":             true,
	"response-cache-control":       true,
	"response-content-disposition": true,
	"response-content-encoding":    true,
	"website":                      true,
	"delete":                       true,
}
// setv2Handlers will setup v2 signature signing on the S3 driver.
// It replaces the SDK's default Sign handler chain with the local V2 Sign
// function and re-adds the content-length handler.
func setv2Handlers(svc *s3.S3) {
	svc.Handlers.Build.PushBack(
		func(r *request.Request) {
			parsedURL, err := url.Parse(r.HTTPRequest.URL.String())
			if err != nil {
				// NOTE(review): Fatal aborts the whole process on a malformed
				// URL inside a request handler; consider setting r.Error instead.
				log.Ctx(context.Background()).Fatal().Msgf("Failed to parse URL: %v", err)
			}
			// Force the URL's path to be emitted verbatim when the request is
			// written, so the signed path matches the bytes on the wire.
			r.HTTPRequest.URL.Opaque = parsedURL.Path
		},
	)

	svc.Handlers.Sign.Clear()
	svc.Handlers.Sign.PushBack(Sign)
	svc.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
}
// Sign requests with signature version 2.
//
// Will sign the requests with the service config's Credentials object
// Signing is skipped if the credentials is the credentials.AnonymousCredentials
// object.
func Sign(req *request.Request) {
	// If the request does not need to be signed ignore the signing of the
	// request if the AnonymousCredentials object is used.
	if req.Config.Credentials == credentials.AnonymousCredentials {
		return
	}

	v2 := signer{
		Request:     req.HTTPRequest,
		Time:        req.Time,
		Credentials: req.Config.Credentials,
	}
	// NOTE(review): Fatal terminates the whole process on a signing failure;
	// consider surfacing the error via req.Error instead.
	err := v2.Sign(req.Context())
	if err != nil {
		log.Ctx(req.Context()).Fatal().Msgf("Error in signing s3: %v", err)
	}
}
// Sign computes a V2-style signature over the wrapped HTTP request and
// attaches it either as a query parameter (Expires/pre-signed mode) or as an
// Authorization header.
// NOTE(review): classic AWS V2 signing uses HMAC-SHA1; this implementation
// signs with HMAC-SHA256 — confirm the target S3-compatible endpoint expects
// SHA-256 here.
func (v2 *signer) Sign(ctx context.Context) error {
	credValue, err := v2.Credentials.Get()
	if err != nil {
		return err
	}
	accessKey := credValue.AccessKeyID
	var (
		md5, ctype, date, xamz string
		xamzDate               bool
		sarray                 []string
		smap                   map[string]string
		sharray                []string
	)

	headers := v2.Request.Header
	// NOTE(review): params is a detached copy returned by URL.Query();
	// values written into it below (AWSAccessKeyId, Signature) are never
	// encoded back into the request URL — verify pre-signed (Expires) mode
	// actually works as intended.
	params := v2.Request.URL.Query()
	parsedURL, err := url.Parse(v2.Request.URL.String())
	if err != nil {
		return err
	}
	host, canonicalPath := parsedURL.Host, parsedURL.Path
	// NOTE(review): keys are written directly into the header map, bypassing
	// net/http canonicalization ("date", "x-amz-security-token") — confirm
	// the transport emits them as intended.
	v2.Request.Header["Host"] = []string{host}
	v2.Request.Header["date"] = []string{v2.Time.In(time.UTC).Format(time.RFC1123)}
	if credValue.SessionToken != "" {
		v2.Request.Header["x-amz-security-token"] = []string{credValue.SessionToken}
	}

	// Collect the headers that participate in the string-to-sign:
	// Content-MD5, Content-Type, Date, and all x-amz-* headers (sorted).
	smap = make(map[string]string)
	for k, v := range headers {
		k = strings.ToLower(k)
		switch k {
		case "content-md5":
			md5 = v[0]
		case "content-type":
			ctype = v[0]
		case "date":
			if !xamzDate {
				date = v[0]
			}
		default:
			if strings.HasPrefix(k, "x-amz-") {
				vall := strings.Join(v, ",")
				smap[k] = k + ":" + vall
				// x-amz-date supersedes the Date header in the signature.
				if k == "x-amz-date" {
					xamzDate = true
					date = ""
				}
				sharray = append(sharray, k)
			}
		}
	}
	if len(sharray) > 0 {
		sort.StringSlice(sharray).Sort()
		for _, h := range sharray {
			sarray = append(sarray, smap[h])
		}
		xamz = strings.Join(sarray, "\n") + "\n"
	}

	// Pre-signed mode: "Expires" replaces the Date line and the access key is
	// exposed as a query parameter.
	expires := false
	if v, ok := params["Expires"]; ok {
		expires = true
		date = v[0]
		params["AWSAccessKeyId"] = []string{accessKey}
	}

	// Append the sub-resource query parameters to the canonical resource.
	sarray = sarray[0:0]
	for k, v := range params {
		if s3ParamsToSign[k] {
			for _, vi := range v {
				if vi == "" {
					sarray = append(sarray, k)
				} else {
					sarray = append(sarray, k+"="+vi)
				}
			}
		}
	}
	if len(sarray) > 0 {
		sort.StringSlice(sarray).Sort()
		canonicalPath = canonicalPath + "?" + strings.Join(sarray, "&")
	}

	v2.stringToSign = strings.Join(
		[]string{
			v2.Request.Method,
			md5,
			ctype,
			date,
			xamz + canonicalPath,
		}, "\n",
	)
	hash := hmac.New(sha256.New, []byte(credValue.SecretAccessKey))
	hash.Write([]byte(v2.stringToSign))
	v2.signature = base64.StdEncoding.EncodeToString(hash.Sum(nil))

	if expires {
		params["Signature"] = []string{v2.signature}
	} else {
		headers["Authorization"] = []string{"AWS " + accessKey + ":" + v2.signature}
	}

	log.Ctx(ctx).Debug().
		Interface("string-to-sign", v2.stringToSign).
		Interface("signature", v2.signature).
		Msg("request signature")

	return nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/driver/factory/factory.go | registry/app/driver/factory/factory.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package factory
import (
"context"
"fmt"
"github.com/harness/gitness/registry/app/driver"
)
// driverFactories stores an internal mapping between storage driver names and their respective
// factories. It is populated exclusively through Register (not concurrency safe).
var driverFactories = make(map[string]StorageDriverFactory)

// StorageDriverFactory is a factory interface for creating storagedriver.StorageDriver interfaces
// Storage drivers should call Register() with a factory to make the driver available by name.
// Individual StorageDriver implementations generally register with the factory via the Register
// func (below) in their init() funcs, and as such they should be imported anonymously before use.
// See below for an example of how to register and get a StorageDriver for S3
//
//	import _ "github.com/distribution/distribution/v3/registry/storage/driver/s3-aws"
//	s3Driver, err = factory.Create("s3", storageParams)
//	// assuming no error, s3Driver is the StorageDriver that communicates with S3 according to storageParams
type StorageDriverFactory interface {
	// Create returns a new storagedriver.StorageDriver with the given parameters
	// Parameters will vary by driver and may be ignored
	// Each parameter key must only consist of lowercase letters and numbers
	Create(ctx context.Context, parameters map[string]any) (driver.StorageDriver, error)
}
// Register makes a storage driver available by the provided name.
// If Register is called twice with the same name or if driver factory is nil, it panics.
// Additionally, it is not concurrency safe. Most Storage Drivers call this function
// in their init() functions. See the documentation for StorageDriverFactory for more.
func Register(name string, factory StorageDriverFactory) {
	if factory == nil {
		panic("Must not provide nil StorageDriverFactory")
	}
	if _, registered := driverFactories[name]; registered {
		panic(fmt.Sprintf("StorageDriverFactory named %s already registered", name))
	}
	driverFactories[name] = factory
}
// Create a new storagedriver.StorageDriver with the given name and
// parameters. To use a driver, the StorageDriverFactory must first be
// registered with the given name. If no drivers are found, an
// InvalidStorageDriverError is returned.
func Create(ctx context.Context, name string, parameters map[string]any) (driver.StorageDriver, error) {
	factory, found := driverFactories[name]
	if !found {
		return nil, InvalidStorageDriverError{name}
	}
	return factory.Create(ctx, parameters)
}
// InvalidStorageDriverError records an attempt to construct an unregistered storage driver.
type InvalidStorageDriverError struct {
	Name string
}

// Error renders the missing-driver message, satisfying the error interface.
func (e InvalidStorageDriverError) Error() string {
	return fmt.Sprintf("StorageDriver not registered: %s", e.Name)
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/driver/testsuites/testsuites.go | registry/app/driver/testsuites/testsuites.go | // Source: https://github.com/distribution/distribution
// Copyright 2014 https://github.com/distribution/distribution Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package testsuites
import (
"bytes"
"context"
crand "crypto/rand"
"crypto/sha256"
"crypto/tls"
"io"
"math/rand"
"net/http"
"os"
"path"
"sort"
"sync"
"testing"
"time"
storagedriver "github.com/harness/gitness/registry/app/driver"
"github.com/stretchr/testify/suite"
)
// randomBytes pre-allocates all of the memory sizes needed for the test. If
// anything panics while accessing randomBytes, just make this number bigger.
var randomBytes = make([]byte, 128<<20)

// init fills the shared buffer once so individual tests can derive random
// content from it instead of generating fresh bytes every time.
func init() {
	_, _ = crand.Read(randomBytes) // always returns len(randomBytes) and nil error
}
// DriverConstructor is a function which returns a new
// storagedriver.StorageDriver.
// Implementations typically close over driver-specific configuration.
type DriverConstructor func() (storagedriver.StorageDriver, error)
// DriverTeardown is a function which cleans up a suite's
// storagedriver.StorageDriver.
// It may be nil when no cleanup is required (see TearDownSuite).
type DriverTeardown func() error
// DriverSuite is a [suite.Suite] test suite designed to test a
// storagedriver.StorageDriver.
type DriverSuite struct {
	suite.Suite
	// Constructor builds the driver under test; invoked once from SetupSuite.
	Constructor DriverConstructor
	// Teardown, when non-nil, is invoked from TearDownSuite for cleanup.
	Teardown DriverTeardown
	// StorageDriver is the driver instance under test, embedded so suite
	// methods can call driver operations directly.
	storagedriver.StorageDriver
	// ctx is the context passed to every driver call in the suite.
	ctx context.Context
	// skipVerify disables TLS certificate verification when fetching
	// redirect URLs (see TestRedirectURL).
	skipVerify bool
}
// Driver runs [DriverSuite] for the given [DriverConstructor].
func Driver(t *testing.T, driverConstructor DriverConstructor, skipVerify bool) {
	ds := &DriverSuite{
		Constructor: driverConstructor,
		ctx:         context.Background(),
		skipVerify:  skipVerify,
	}
	suite.Run(t, ds)
}
// SetupSuite implements [suite.SetupAllSuite] interface.
// It constructs the driver under test once for the whole suite.
func (suite *DriverSuite) SetupSuite() {
	sd, err := suite.Constructor()
	suite.Require().NoError(err)
	suite.StorageDriver = sd
}
// TearDownSuite implements [suite.TearDownAllSuite].
// It runs the optional Teardown hook exactly once, after all tests.
func (suite *DriverSuite) TearDownSuite() {
	if suite.Teardown == nil {
		return
	}
	suite.Require().NoError(suite.Teardown())
}
// TearDownTest implements [suite.TearDownTestSuite].
// This causes the suite to abort if any files are left around in the storage
// driver.
func (suite *DriverSuite) TearDownTest() {
	leftovers, _ := suite.StorageDriver.List(suite.ctx, "/")
	if len(leftovers) == 0 {
		return
	}
	suite.T().Fatalf("Storage driver did not clean up properly. Offending files: %#v", leftovers)
}
// TestRootExists ensures that all storage drivers have a root path by default.
func (suite *DriverSuite) TestRootExists() {
	if _, err := suite.StorageDriver.List(suite.ctx, "/"); err != nil {
		suite.T().Fatalf(`the root path "/" should always exist: %v`, err)
	}
}
// TestValidPaths checks that various valid file paths are accepted by the
// storage driver.
func (suite *DriverSuite) TestValidPaths() {
	contents := randomContents(64)
	validFiles := []string{
		"/a",
		"/2",
		"/aa",
		"/a.a",
		"/0-9/abcdefg",
		"/abcdefg/z.75",
		"/abc/1.2.3.4.5-6_zyx/123.z/4",
		"/docker/docker-registry",
		"/123.abc",
		"/abc./abc",
		"/.abc",
		"/a--b",
		"/a-.b",
		"/_.abc",
		"/Docker/docker-registry",
		"/Abc/Cba",
	}
	for _, filename := range validFiles {
		err := suite.StorageDriver.PutContent(suite.ctx, filename, contents)
		// Deferred inside the loop on purpose: cleanup for every written
		// path runs when the test function returns, after all paths have
		// been exercised.
		defer suite.deletePath(firstPart(filename))
		suite.Require().NoError(err)
		// A round-trip read must return exactly what was written.
		received, err := suite.StorageDriver.GetContent(suite.ctx, filename)
		suite.Require().NoError(err)
		suite.Require().Equal(contents, received)
	}
}
// deletePath removes path and everything under it, retrying once (with a
// pause) if a listing still shows entries — accommodates eventually
// consistent backends.
func (suite *DriverSuite) deletePath(path string) {
	for tries := 2; tries > 0; tries-- {
		err := suite.StorageDriver.Delete(suite.ctx, path)
		// A missing path is fine: the test may not have written anything there.
		if _, ok := err.(storagedriver.PathNotFoundError); ok { //nolint:errorlint
			err = nil
		}
		suite.Require().NoError(err)
		// Done as soon as nothing is listed under path anymore.
		paths, _ := suite.StorageDriver.List(suite.ctx, path)
		if len(paths) == 0 {
			break
		}
		time.Sleep(time.Second * 2)
	}
}
// TestInvalidPaths checks that various invalid file paths are rejected by the
// storage driver.
func (suite *DriverSuite) TestInvalidPaths() {
	contents := randomContents(64)
	invalidFiles := []string{
		"",
		"/",
		"abc",
		"123.abc",
		"//bcd",
		"/abc_123/",
	}
	for _, filename := range invalidFiles {
		err := suite.StorageDriver.PutContent(suite.ctx, filename, contents)
		// only delete if file was successfully written
		if err == nil {
			defer suite.deletePath(firstPart(filename))
		}
		suite.Require().Error(err)
		suite.Require().IsType(err, storagedriver.InvalidPathError{})
		suite.Require().Contains(err.Error(), suite.Name())
		// Reads of invalid paths must fail with the same typed error.
		_, err = suite.StorageDriver.GetContent(suite.ctx, filename)
		suite.Require().Error(err)
		suite.Require().IsType(err, storagedriver.InvalidPathError{})
		suite.Require().Contains(err.Error(), suite.Name())
	}
}
// TestWriteRead1 tests a simple write-read workflow.
func (suite *DriverSuite) TestWriteRead1() {
	suite.writeReadCompare(randomPath(32), []byte("a"))
}
// TestWriteRead2 tests a simple write-read workflow with unicode data.
func (suite *DriverSuite) TestWriteRead2() {
	suite.writeReadCompare(randomPath(32), []byte("\xc3\x9f"))
}
// TestWriteRead3 tests a simple write-read workflow with a small string.
func (suite *DriverSuite) TestWriteRead3() {
	suite.writeReadCompare(randomPath(32), randomContents(32))
}
// TestWriteRead4 tests a simple write-read workflow with 1MB of data.
func (suite *DriverSuite) TestWriteRead4() {
	suite.writeReadCompare(randomPath(32), randomContents(1024*1024))
}
// TestWriteReadNonUTF8 tests that non-utf8 data may be written to the storage
// driver safely.
func (suite *DriverSuite) TestWriteReadNonUTF8() {
	suite.writeReadCompare(randomPath(32), []byte{0x80, 0x80, 0x80, 0x80})
}
// TestTruncate tests that putting smaller contents than an original file does
// remove the excess contents.
func (suite *DriverSuite) TestTruncate() {
	filename := randomPath(32)
	// Write a large payload, then overwrite the same path with a smaller
	// one; the second read must not see any remnant of the first write.
	suite.writeReadCompare(filename, randomContents(1024*1024))
	suite.writeReadCompare(filename, randomContents(1024))
}
// TestReadNonexistent tests reading content from an empty path.
func (suite *DriverSuite) TestReadNonexistent() {
	missing := randomPath(32)
	_, err := suite.StorageDriver.GetContent(suite.ctx, missing)
	suite.Require().Error(err)
	suite.Require().IsType(err, storagedriver.PathNotFoundError{})
	suite.Require().Contains(err.Error(), suite.Name())
}
// TestWriteReadStreams1 tests a simple write-read streaming workflow.
func (suite *DriverSuite) TestWriteReadStreams1() {
	suite.writeReadCompareStreams(randomPath(32), []byte("a"))
}
// TestWriteReadStreams2 tests a simple write-read streaming workflow with
// unicode data.
func (suite *DriverSuite) TestWriteReadStreams2() {
	suite.writeReadCompareStreams(randomPath(32), []byte("\xc3\x9f"))
}
// TestWriteReadStreams3 tests a simple write-read streaming workflow with a
// small amount of data.
func (suite *DriverSuite) TestWriteReadStreams3() {
	suite.writeReadCompareStreams(randomPath(32), randomContents(32))
}
// TestWriteReadStreams4 tests a simple write-read streaming workflow with 1MB
// of data.
func (suite *DriverSuite) TestWriteReadStreams4() {
	suite.writeReadCompareStreams(randomPath(32), randomContents(1024*1024))
}
// TestWriteReadStreamsNonUTF8 tests that non-utf8 data may be written to the
// storage driver safely.
func (suite *DriverSuite) TestWriteReadStreamsNonUTF8() {
	suite.writeReadCompareStreams(randomPath(32), []byte{0x80, 0x80, 0x80, 0x80})
}
// TestWriteReadLargeStreams tests that a 5GB file may be written to the storage
// driver safely.
func (suite *DriverSuite) TestWriteReadLargeStreams() {
	if testing.Short() {
		suite.T().Skip("Skipping test in short mode")
	}
	filename := randomPath(32)
	defer suite.deletePath(firstPart(filename))
	checksum := sha256.New()
	var fileSize int64 = 5 * 1024 * 1024 * 1024
	contents := newRandReader(fileSize)
	writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false)
	suite.Require().NoError(err)
	// Tee the upload through a checksum so it can be verified against the
	// read-back below without holding 5GB in memory.
	written, err := io.Copy(writer, io.TeeReader(contents, checksum))
	suite.Require().NoError(err)
	suite.Require().Equal(fileSize, written)
	err = writer.Commit(context.Background())
	suite.Require().NoError(err)
	err = writer.Close()
	suite.Require().NoError(err)
	reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0)
	suite.Require().NoError(err)
	defer reader.Close()
	writtenChecksum := sha256.New()
	// Previously this assertion was wrapped in a redundant `if err != nil`
	// guard; NoError already passes for a nil error, so assert directly.
	_, err = io.Copy(writtenChecksum, reader)
	suite.Require().NoError(err)
	suite.Require().Equal(checksum.Sum(nil), writtenChecksum.Sum(nil))
}
// TestReaderWithOffset tests that the appropriate data is streamed when
// reading with a given offset.
func (suite *DriverSuite) TestReaderWithOffset() {
	filename := randomPath(32)
	defer suite.deletePath(firstPart(filename))
	chunkSize := int64(32)
	contentsChunk1 := randomContents(chunkSize)
	contentsChunk2 := randomContents(chunkSize)
	contentsChunk3 := randomContents(chunkSize)
	// Store the three chunks concatenated as a single object.
	err := suite.StorageDriver.PutContent(suite.ctx, filename,
		append(append(contentsChunk1, contentsChunk2...), contentsChunk3...))
	suite.Require().NoError(err)
	// Offset 0 must return the whole object.
	reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0)
	suite.Require().NoError(err)
	defer reader.Close()
	readContents, err := io.ReadAll(reader)
	suite.Require().NoError(err)
	suite.Require().Equal(append(append(contentsChunk1, contentsChunk2...), contentsChunk3...), readContents)
	// Offset of one chunk skips exactly the first chunk.
	reader, err = suite.StorageDriver.Reader(suite.ctx, filename, chunkSize)
	suite.Require().NoError(err)
	defer reader.Close()
	readContents, err = io.ReadAll(reader)
	suite.Require().NoError(err)
	suite.Require().Equal(append(contentsChunk2, contentsChunk3...), readContents)
	// Offset of two chunks leaves only the final chunk.
	reader, err = suite.StorageDriver.Reader(suite.ctx, filename, chunkSize*2)
	suite.Require().NoError(err)
	defer reader.Close()
	readContents, err = io.ReadAll(reader)
	suite.Require().NoError(err)
	suite.Require().Equal(contentsChunk3, readContents)
	// Ensure we get invalid offset for negative offsets.
	reader, err = suite.StorageDriver.Reader(suite.ctx, filename, -1)
	suite.Require().IsType(err, storagedriver.InvalidOffsetError{})
	suite.Require().Equal(int64(-1), err.(storagedriver.InvalidOffsetError).Offset) //nolint:errorlint,errcheck
	suite.Require().Equal(filename, err.(storagedriver.InvalidOffsetError).Path) //nolint:errorlint,errcheck
	suite.Require().Nil(reader)
	suite.Require().Contains(err.Error(), suite.Name())
	// Read past the end of the content and make sure we get a reader that
	// returns 0 bytes and io.EOF
	reader, err = suite.StorageDriver.Reader(suite.ctx, filename, chunkSize*3)
	suite.Require().NoError(err)
	defer reader.Close()
	buf := make([]byte, chunkSize)
	n, err := reader.Read(buf)
	suite.Require().ErrorIs(err, io.EOF)
	suite.Require().Equal(0, n)
	// Check the N-1 boundary condition, ensuring we get 1 byte then io.EOF.
	reader, err = suite.StorageDriver.Reader(suite.ctx, filename, chunkSize*3-1)
	suite.Require().NoError(err)
	defer reader.Close()
	n, err = reader.Read(buf)
	suite.Require().Equal(1, n)
	// We don't care whether the io.EOF comes on the this read or the first
	// zero read, but the only error acceptable here is io.EOF.
	if err != nil {
		suite.Require().ErrorIs(err, io.EOF)
	}
	// Any more reads should result in zero bytes and io.EOF
	n, err = reader.Read(buf)
	suite.Require().Equal(0, n)
	suite.Require().ErrorIs(err, io.EOF)
}
// TestContinueStreamAppendLarge tests that a stream write can be appended to without
// corrupting the data with a large chunk size.
func (suite *DriverSuite) TestContinueStreamAppendLarge() {
	// The azure driver exercises a smaller chunk size than the others.
	var chunkSize int64 = 10 * 1024 * 1024
	if suite.Name() == "azure" {
		chunkSize = 4 * 1024 * 1024
	}
	suite.testContinueStreamAppend(chunkSize)
}
// TestContinueStreamAppendSmall is the same as TestContinueStreamAppendLarge, but only
// with a tiny chunk size in order to test corner cases for some cloud storage drivers.
func (suite *DriverSuite) TestContinueStreamAppendSmall() {
	const tinyChunk int64 = 32
	suite.testContinueStreamAppend(tinyChunk)
}
// testContinueStreamAppend writes chunkSize*3 bytes across three separate
// Writer sessions, re-opening the path in append mode between them, and
// verifies the final object equals the full stream.
func (suite *DriverSuite) testContinueStreamAppend(chunkSize int64) {
	filename := randomPath(32)
	defer suite.deletePath(firstPart(filename))
	var fullContents bytes.Buffer
	// Tee everything written so the expected contents are captured locally.
	contents := io.TeeReader(newRandReader(chunkSize*3), &fullContents)
	writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false)
	suite.Require().NoError(err)
	nn, err := io.CopyN(writer, contents, chunkSize)
	suite.Require().NoError(err)
	suite.Require().Equal(chunkSize, nn)
	err = writer.Close()
	suite.Require().NoError(err)
	curSize := writer.Size()
	suite.Require().Equal(chunkSize, curSize)
	// Re-open in append mode; the reported size must match what was written so far.
	writer, err = suite.StorageDriver.Writer(suite.ctx, filename, true)
	suite.Require().NoError(err)
	suite.Require().Equal(curSize, writer.Size())
	nn, err = io.CopyN(writer, contents, chunkSize)
	suite.Require().NoError(err)
	suite.Require().Equal(chunkSize, nn)
	err = writer.Close()
	suite.Require().NoError(err)
	curSize = writer.Size()
	suite.Require().Equal(2*chunkSize, curSize)
	// Third and final append session is committed before closing.
	writer, err = suite.StorageDriver.Writer(suite.ctx, filename, true)
	suite.Require().NoError(err)
	suite.Require().Equal(curSize, writer.Size())
	nn, err = io.CopyN(writer, contents, chunkSize)
	suite.Require().NoError(err)
	suite.Require().Equal(chunkSize, nn)
	err = writer.Commit(context.Background())
	suite.Require().NoError(err)
	err = writer.Close()
	suite.Require().NoError(err)
	received, err := suite.StorageDriver.GetContent(suite.ctx, filename)
	suite.Require().NoError(err)
	suite.Require().Equal(fullContents.Bytes(), received)
}
// TestReadNonexistentStream tests that reading a stream for a nonexistent path
// fails.
func (suite *DriverSuite) TestReadNonexistentStream() {
	missing := randomPath(32)
	// Both a zero and a non-zero offset must report a typed not-found error.
	for _, offset := range []int64{0, 64} {
		_, err := suite.StorageDriver.Reader(suite.ctx, missing, offset)
		suite.Require().Error(err)
		suite.Require().IsType(err, storagedriver.PathNotFoundError{})
		suite.Require().Contains(err.Error(), suite.Name())
	}
}
// TestWriteZeroByteStreamThenAppend tests if zero byte file handling works for append to a Stream.
func (suite *DriverSuite) TestWriteZeroByteStreamThenAppend() {
	filename := randomPath(32)
	defer suite.deletePath(firstPart(filename))
	chunkSize := int64(32)
	contentsChunk1 := randomContents(chunkSize)
	// Open a Writer
	writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false)
	suite.Require().NoError(err)
	// Commit and close the Writer without writing anything, producing a
	// zero-byte file.
	err = writer.Commit(context.Background())
	suite.Require().NoError(err)
	err = writer.Close()
	suite.Require().NoError(err)
	curSize := writer.Size()
	suite.Require().Equal(int64(0), curSize)
	// Open a Reader
	reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0)
	suite.Require().NoError(err)
	defer reader.Close()
	// Check the file is empty
	buf := make([]byte, chunkSize)
	n, err := reader.Read(buf)
	suite.Require().ErrorIs(err, io.EOF)
	suite.Require().Equal(0, n)
	// Open a Writer for Append
	awriter, err := suite.StorageDriver.Writer(suite.ctx, filename, true)
	suite.Require().NoError(err)
	// Write small bytes to AppendWriter
	nn, err := io.Copy(awriter, bytes.NewReader(contentsChunk1))
	suite.Require().NoError(err)
	suite.Require().Equal(int64(len(contentsChunk1)), nn)
	// Close the AppendWriter
	err = awriter.Commit(context.Background())
	suite.Require().NoError(err)
	err = awriter.Close()
	suite.Require().NoError(err)
	appendSize := awriter.Size()
	suite.Require().Equal(int64(len(contentsChunk1)), appendSize)
	// Open a Reader
	reader, err = suite.StorageDriver.Reader(suite.ctx, filename, 0)
	suite.Require().NoError(err)
	defer reader.Close()
	// Read back: the appended data must be exactly what was written.
	readContents, err := io.ReadAll(reader)
	suite.Require().NoError(err)
	suite.Require().Equal(contentsChunk1, readContents)
}
// TestWriteZeroByteContentThenAppend tests if zero byte file handling works for append to PutContent.
func (suite *DriverSuite) TestWriteZeroByteContentThenAppend() {
	filename := randomPath(32)
	defer suite.deletePath(firstPart(filename))
	chunkSize := int64(32)
	contentsChunk1 := randomContents(chunkSize)
	// A nil payload creates a zero-byte file.
	err := suite.StorageDriver.PutContent(suite.ctx, filename, nil)
	suite.Require().NoError(err)
	// Open a Reader
	reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0)
	suite.Require().NoError(err)
	defer reader.Close()
	// Check the file is empty
	buf := make([]byte, chunkSize)
	n, err := reader.Read(buf)
	suite.Require().ErrorIs(err, io.EOF)
	suite.Require().Equal(0, n)
	// Open a Writer for Append
	awriter, err := suite.StorageDriver.Writer(suite.ctx, filename, true)
	suite.Require().NoError(err)
	// Write small bytes to AppendWriter
	nn, err := io.Copy(awriter, bytes.NewReader(contentsChunk1))
	suite.Require().NoError(err)
	suite.Require().Equal(int64(len(contentsChunk1)), nn)
	// Close the AppendWriter
	err = awriter.Commit(context.Background())
	suite.Require().NoError(err)
	err = awriter.Close()
	suite.Require().NoError(err)
	appendSize := awriter.Size()
	suite.Require().Equal(int64(len(contentsChunk1)), appendSize)
	// Open a Reader
	reader, err = suite.StorageDriver.Reader(suite.ctx, filename, 0)
	suite.Require().NoError(err)
	defer reader.Close()
	// Read back: the appended data must be exactly what was written.
	readContents, err := io.ReadAll(reader)
	suite.Require().NoError(err)
	suite.Require().Equal(contentsChunk1, readContents)
}
// TestList checks the returned list of keys after populating a directory tree.
func (suite *DriverSuite) TestList() {
	rootDirectory := "/" + randomFilename(int64(8+rand.Intn(8))) //nolint:gosec
	defer suite.deletePath(rootDirectory)
	// Listing a never-created path must return a typed PathNotFoundError.
	doesnotexist := path.Join(rootDirectory, "nonexistent")
	_, err := suite.StorageDriver.List(suite.ctx, doesnotexist)
	suite.Require().Equal(err, storagedriver.PathNotFoundError{
		Path:       doesnotexist,
		DriverName: suite.StorageDriver.Name(),
	})
	// Populate root/parent with 50 child files of random names.
	parentDirectory := rootDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) //nolint:gosec
	childFiles := make([]string, 50)
	for i := 0; i < len(childFiles); i++ {
		childFile := parentDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) //nolint:gosec
		childFiles[i] = childFile
		err := suite.StorageDriver.PutContent(suite.ctx, childFile, randomContents(32))
		suite.Require().NoError(err)
	}
	sort.Strings(childFiles)
	// Each level must list exactly its direct children.
	keys, err := suite.StorageDriver.List(suite.ctx, "/")
	suite.Require().NoError(err)
	suite.Require().Equal([]string{rootDirectory}, keys)
	keys, err = suite.StorageDriver.List(suite.ctx, rootDirectory)
	suite.Require().NoError(err)
	suite.Require().Equal([]string{parentDirectory}, keys)
	keys, err = suite.StorageDriver.List(suite.ctx, parentDirectory)
	suite.Require().NoError(err)
	// Ordering of List results is not guaranteed, so sort before comparing.
	sort.Strings(keys)
	suite.Require().Equal(childFiles, keys)
	// A few checks to add here (check out #819 for more discussion on this):
	// 1. Ensure that all paths are absolute.
	// 2. Ensure that listings only include direct children.
	// 3. Ensure that we only respond to directory listings that end with a slash (maybe?).
}
// TestMove checks that a moved object no longer exists at the source path and
// does exist at the destination.
func (suite *DriverSuite) TestMove() {
	contents := randomContents(32)
	srcPath := randomPath(32)
	dstPath := randomPath(32)
	defer suite.deletePath(firstPart(srcPath))
	defer suite.deletePath(firstPart(dstPath))
	suite.Require().NoError(suite.StorageDriver.PutContent(suite.ctx, srcPath, contents))
	suite.Require().NoError(suite.StorageDriver.Move(suite.ctx, srcPath, dstPath))
	// The destination must now hold the original contents...
	received, err := suite.StorageDriver.GetContent(suite.ctx, dstPath)
	suite.Require().NoError(err)
	suite.Require().Equal(contents, received)
	// ...and the source must be gone.
	_, err = suite.StorageDriver.GetContent(suite.ctx, srcPath)
	suite.Require().Error(err)
	suite.Require().IsType(err, storagedriver.PathNotFoundError{})
	suite.Require().Contains(err.Error(), suite.Name())
}
// TestMoveOverwrite checks that a moved object no longer exists at the source
// path and overwrites the contents at the destination.
func (suite *DriverSuite) TestMoveOverwrite() {
	srcPath := randomPath(32)
	dstPath := randomPath(32)
	srcContents := randomContents(32)
	dstContents := randomContents(64)
	defer suite.deletePath(firstPart(srcPath))
	defer suite.deletePath(firstPart(dstPath))
	suite.Require().NoError(suite.StorageDriver.PutContent(suite.ctx, srcPath, srcContents))
	suite.Require().NoError(suite.StorageDriver.PutContent(suite.ctx, dstPath, dstContents))
	suite.Require().NoError(suite.StorageDriver.Move(suite.ctx, srcPath, dstPath))
	// The destination now holds the source's contents, not its old ones.
	received, err := suite.StorageDriver.GetContent(suite.ctx, dstPath)
	suite.Require().NoError(err)
	suite.Require().Equal(srcContents, received)
	// The source no longer exists.
	_, err = suite.StorageDriver.GetContent(suite.ctx, srcPath)
	suite.Require().Error(err)
	suite.Require().IsType(err, storagedriver.PathNotFoundError{})
	suite.Require().Contains(err.Error(), suite.Name())
}
// TestMoveNonexistent checks that moving a nonexistent key fails and does not
// delete the data at the destination path.
func (suite *DriverSuite) TestMoveNonexistent() {
	contents := randomContents(32)
	srcPath := randomPath(32)
	dstPath := randomPath(32)
	defer suite.deletePath(firstPart(dstPath))
	suite.Require().NoError(suite.StorageDriver.PutContent(suite.ctx, dstPath, contents))
	// Moving from a path that was never written must fail with not-found...
	err := suite.StorageDriver.Move(suite.ctx, srcPath, dstPath)
	suite.Require().Error(err)
	suite.Require().IsType(err, storagedriver.PathNotFoundError{})
	suite.Require().Contains(err.Error(), suite.Name())
	// ...and must leave the destination's contents intact.
	received, err := suite.StorageDriver.GetContent(suite.ctx, dstPath)
	suite.Require().NoError(err)
	suite.Require().Equal(contents, received)
}
// TestMoveInvalid provides various checks for invalid moves.
func (suite *DriverSuite) TestMoveInvalid() {
	contents := randomContents(32)
	// Create a regular file.
	suite.Require().NoError(suite.StorageDriver.PutContent(suite.ctx, "/notadir", contents))
	defer suite.deletePath("/notadir")
	// Moving a non-existent file underneath a regular file must fail.
	err := suite.StorageDriver.Move(suite.ctx, "/notadir/foo", "/notadir/bar")
	suite.Require().Error(err)
}
// TestDelete checks that the delete operation removes data from the storage
// driver.
func (suite *DriverSuite) TestDelete() {
	filename := randomPath(32)
	contents := randomContents(32)
	defer suite.deletePath(firstPart(filename))
	suite.Require().NoError(suite.StorageDriver.PutContent(suite.ctx, filename, contents))
	suite.Require().NoError(suite.StorageDriver.Delete(suite.ctx, filename))
	// A read after delete must fail with a typed not-found error.
	_, err := suite.StorageDriver.GetContent(suite.ctx, filename)
	suite.Require().Error(err)
	suite.Require().IsType(err, storagedriver.PathNotFoundError{})
	suite.Require().Contains(err.Error(), suite.Name())
}
// TestRedirectURL checks that the RedirectURL method functions properly,
// but only if it is implemented.
func (suite *DriverSuite) TestRedirectURL() {
	filename := randomPath(32)
	contents := randomContents(32)
	defer suite.deletePath(firstPart(filename))
	err := suite.StorageDriver.PutContent(suite.ctx, filename, contents)
	suite.Require().NoError(err)
	url, err := suite.StorageDriver.RedirectURL(suite.ctx, http.MethodGet, filename, filename)
	if url == "" && err == nil {
		// RedirectURL is unimplemented for this driver; nothing to verify.
		return
	}
	suite.Require().NoError(err)
	client := http.DefaultClient
	if suite.skipVerify {
		// Allow self-signed certificates when the caller asked for it.
		httpTransport := http.DefaultTransport.(*http.Transport).Clone() //nolint:errcheck
		httpTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} //nolint:gosec
		client = &http.Client{
			Transport: httpTransport,
		}
	}
	req, err := http.NewRequestWithContext(suite.ctx, http.MethodGet, url, nil)
	suite.Require().NoError(err)
	response, err := client.Do(req)
	suite.Require().NoError(err)
	defer response.Body.Close()
	// The redirect URL must serve exactly the stored contents.
	read, err := io.ReadAll(response.Body)
	suite.Require().NoError(err)
	suite.Require().Equal(contents, read)
	url, err = suite.StorageDriver.RedirectURL(suite.ctx, http.MethodHead, filename, filename)
	if url == "" && err == nil {
		return
	}
	suite.Require().NoError(err)
	// Previously the error from NewRequestWithContext was discarded here;
	// surface it so a malformed redirect URL fails the test immediately.
	req, err = http.NewRequestWithContext(suite.ctx, http.MethodHead, url, nil)
	suite.Require().NoError(err)
	response, err = client.Do(req)
	suite.Require().NoError(err)
	defer response.Body.Close()
	suite.Require().Equal(200, response.StatusCode)
	suite.Require().Equal(int64(32), response.ContentLength)
}
// TestDeleteNonexistent checks that removing a nonexistent key fails.
func (suite *DriverSuite) TestDeleteNonexistent() {
	missing := randomPath(32)
	err := suite.StorageDriver.Delete(suite.ctx, missing)
	suite.Require().Error(err)
	suite.Require().IsType(err, storagedriver.PathNotFoundError{})
	suite.Require().Contains(err.Error(), suite.Name())
}
// TestDeleteFolder checks that deleting a folder removes all child elements.
func (suite *DriverSuite) TestDeleteFolder() {
	dirname := randomPath(32)
	filename1 := randomPath(32)
	filename2 := randomPath(32)
	filename3 := randomPath(32)
	contents := randomContents(32)
	defer suite.deletePath(firstPart(dirname))
	err := suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename1), contents)
	suite.Require().NoError(err)
	err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename2), contents)
	suite.Require().NoError(err)
	err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename3), contents)
	suite.Require().NoError(err)
	// Deleting a single file removes only that file...
	err = suite.StorageDriver.Delete(suite.ctx, path.Join(dirname, filename1))
	suite.Require().NoError(err)
	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1))
	suite.Require().Error(err)
	suite.Require().IsType(err, storagedriver.PathNotFoundError{})
	suite.Require().Contains(err.Error(), suite.Name())
	// ...while its siblings survive.
	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2))
	suite.Require().NoError(err)
	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3))
	suite.Require().NoError(err)
	// Deleting the directory removes every remaining child.
	err = suite.StorageDriver.Delete(suite.ctx, dirname)
	suite.Require().NoError(err)
	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1))
	suite.Require().Error(err)
	suite.Require().IsType(err, storagedriver.PathNotFoundError{})
	suite.Require().Contains(err.Error(), suite.Name())
	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2))
	suite.Require().Error(err)
	suite.Require().IsType(err, storagedriver.PathNotFoundError{})
	suite.Require().Contains(err.Error(), suite.Name())
	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3))
	suite.Require().Error(err)
	suite.Require().IsType(err, storagedriver.PathNotFoundError{})
	suite.Require().Contains(err.Error(), suite.Name())
}
// TestDeleteOnlyDeletesSubpaths checks that deleting path A does not
// delete path B when A is a prefix of B but B is not a subpath of A (so that
// deleting "/a" does not delete "/ab"). This matters for services like S3 that
// do not implement directories.
func (suite *DriverSuite) TestDeleteOnlyDeletesSubpaths() {
	dirname := randomPath(32)
	filename := randomPath(32)
	contents := randomContents(32)
	defer suite.deletePath(firstPart(dirname))
	// Write two files whose names share a prefix, and two directories whose
	// names share a prefix.
	err := suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename), contents)
	suite.Require().NoError(err)
	err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename+"suffix"), contents)
	suite.Require().NoError(err)
	err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, dirname, filename), contents)
	suite.Require().NoError(err)
	err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, dirname+"suffix", filename), contents)
	suite.Require().NoError(err)
	// Deleting the file must not touch the file whose name merely extends it.
	err = suite.StorageDriver.Delete(suite.ctx, path.Join(dirname, filename))
	suite.Require().NoError(err)
	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename))
	suite.Require().Error(err)
	suite.Require().IsType(err, storagedriver.PathNotFoundError{})
	suite.Require().Contains(err.Error(), suite.Name())
	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename+"suffix"))
	suite.Require().NoError(err)
	// Deleting the directory must not touch the prefix-sharing sibling directory.
	err = suite.StorageDriver.Delete(suite.ctx, path.Join(dirname, dirname))
	suite.Require().NoError(err)
	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, dirname, filename))
	suite.Require().Error(err)
	suite.Require().IsType(err, storagedriver.PathNotFoundError{})
	suite.Require().Contains(err.Error(), suite.Name())
	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, dirname+"suffix", filename))
	suite.Require().NoError(err)
}
// TestStatCall runs verifies the implementation of the storagedriver's Stat call.
func (suite *DriverSuite) TestStatCall() {
	content := randomContents(4096)
	dirPath := randomPath(32)
	fileName := randomFilename(32)
	filePath := path.Join(dirPath, fileName)
	defer suite.deletePath(firstPart(dirPath))
	// Call on non-existent file/dir, check error.
	fi, err := suite.StorageDriver.Stat(suite.ctx, dirPath)
	suite.Require().Error(err)
	suite.Require().IsType(err, storagedriver.PathNotFoundError{})
	suite.Require().Contains(err.Error(), suite.Name())
	suite.Require().Nil(fi)
	// Stat on the file path must also fail before anything is written.
	fi, err = suite.StorageDriver.Stat(suite.ctx, filePath)
	suite.Require().Error(err)
	suite.Require().IsType(err, storagedriver.PathNotFoundError{})
	suite.Require().Contains(err.Error(), suite.Name())
	suite.Require().Nil(fi)
	err = suite.StorageDriver.PutContent(suite.ctx, filePath, content)
	suite.Require().NoError(err)
	// Call on regular file, check results
	fi, err = suite.StorageDriver.Stat(suite.ctx, filePath)
	suite.Require().NoError(err)
	suite.Require().NotNil(fi)
	suite.Require().Equal(filePath, fi.Path())
	suite.Require().Equal(int64(len(content)), fi.Size())
	suite.Require().False(fi.IsDir())
	createdTime := fi.ModTime()
	// Sleep and modify the file
	time.Sleep(time.Second * 10)
	content = randomContents(4096)
	err = suite.StorageDriver.PutContent(suite.ctx, filePath, content)
	suite.Require().NoError(err)
	fi, err = suite.StorageDriver.Stat(suite.ctx, filePath)
	suite.Require().NoError(err)
	suite.Require().NotNil(fi)
	time.Sleep(time.Second * 5) // allow changes to propagate (eventual consistency)
	// Check if the modification time is after the creation time.
	// In case of cloud storage services, storage frontend nodes might have
	// time drift between them, however that should be solved with sleeping
	// before update.
	modTime := fi.ModTime()
	if !modTime.After(createdTime) {
		suite.T().Errorf("modtime (%s) is before the creation time (%s)", modTime, createdTime)
	}
	// Call on directory (do not check ModTime as dirs don't need to support it)
	fi, err = suite.StorageDriver.Stat(suite.ctx, dirPath)
	suite.Require().NoError(err)
	suite.Require().NotNil(fi)
	suite.Require().Equal(dirPath, fi.Path())
	suite.Require().Equal(int64(0), fi.Size())
	suite.Require().True(fi.IsDir())
	// The storage healthcheck performs this exact call to Stat.
	// PathNotFoundErrors are not considered health check failures.
	_, err = suite.StorageDriver.Stat(suite.ctx, "/")
	// Some drivers will return a not found here, while others will not
	// return an error at all. If we get an error, ensure it's a not found.
	if err != nil {
		suite.Require().IsType(err, storagedriver.PathNotFoundError{})
	}
}
// TestPutContentMultipleTimes checks that if storage driver can overwrite the content
// in the subsequent puts. Validates that PutContent does not have to work
// with an offset like Writer does and overwrites the file entirely
// rather than writing the data to the [0,len(data)) of the file.
func (suite *DriverSuite) TestPutContentMultipleTimes() {
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | true |
harness/harness | https://github.com/harness/harness/blob/a087eef054a8fc8317f4fda02d3c7ee599b71fec/registry/app/factory/package.go | registry/app/factory/package.go | // Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package factory
import (
"github.com/harness/gitness/registry/app/api/interfaces"
)
// PackageFactory is a registry of PackageHelper implementations keyed by
// package type. Helpers must be registered before lookup; Get returns nil
// for unknown types.
type PackageFactory interface {
	// Register adds helper under the key reported by its GetPackageType
	// method. An already-registered type is kept unchanged (first wins).
	Register(helper interfaces.PackageHelper)
	// Get returns the helper for packageType, or nil when none is registered.
	Get(packageType string) interfaces.PackageHelper
	// GetAllPackageTypes returns the registered package-type keys
	// (map iteration order — unspecified).
	GetAllPackageTypes() []string
	// IsValidPackageType reports whether packageType is registered.
	// The empty string is treated as valid (acts as "no filter").
	IsValidPackageType(packageType string) bool
}
// packageFactory is the default PackageFactory implementation: a plain
// map from package type to its helper.
// NOTE(review): no locking — assumes all Register calls happen during
// startup, before concurrent reads; confirm with callers.
type packageFactory struct {
	factory map[string]interfaces.PackageHelper
}
// NewPackageFactory returns an empty PackageFactory; populate it with
// Register before use.
func NewPackageFactory() PackageFactory {
	return &packageFactory{
		factory: map[string]interfaces.PackageHelper{},
	}
}
// Register stores helper under the package type it reports. When a
// helper is already registered for that type, the existing entry is
// kept and this call is a no-op.
func (f *packageFactory) Register(helper interfaces.PackageHelper) {
	key := helper.GetPackageType()
	if _, exists := f.factory[key]; exists {
		return
	}
	f.factory[key] = helper
}
// Get returns the helper registered for packageType, or nil when the
// type is unknown. A single map index suffices: the zero value of an
// interface-valued map entry is already nil, so the original
// existence-check-then-lookup performed the same lookup twice.
func (f *packageFactory) Get(packageType string) interfaces.PackageHelper {
	return f.factory[packageType]
}
// GetAllPackageTypes returns the registered package-type keys in
// unspecified (map iteration) order. Returns nil when nothing is
// registered, matching the original behavior.
func (f *packageFactory) GetAllPackageTypes() []string {
	if len(f.factory) == 0 {
		return nil
	}
	// Pre-size to avoid repeated append growth.
	packageTypes := make([]string, 0, len(f.factory))
	for packageType := range f.factory {
		packageTypes = append(packageTypes, packageType)
	}
	return packageTypes
}
// IsValidPackageType reports whether packageType names a registered
// helper. An empty packageType is always accepted, so callers can pass
// "" to mean "no package-type filter".
func (f *packageFactory) IsValidPackageType(packageType string) bool {
	if packageType == "" {
		return true
	}
	_, registered := f.factory[packageType]
	return registered
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
// Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package conformanceutils
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"net/url"
"path"
)
// Client represents a Maven registry client.
// It wraps a single reusable *http.Client and carries the base URL,
// bearer token, and a debug flag that gates verbose logging in Do.
type Client struct {
	baseURL string // registry root; request paths are joined onto its path
	token   string // sent as "Authorization: Bearer <token>" by Do; empty disables auth
	client  *http.Client
	debug   bool // when true, Do logs request/response details
}
// NewClient creates a new Maven registry client targeting baseURL and
// authenticating with the given bearer token. debug enables verbose
// request/response logging in Do.
func NewClient(baseURL, token string, debug bool) *Client {
	c := &Client{client: &http.Client{}}
	c.baseURL = baseURL
	c.token = token
	c.debug = debug
	return c
}
// Request represents an HTTP request.
// Build one via Client.NewRequest, mutate it with SetHeader / SetBody,
// then pass it to Client.Do.
type Request struct {
	method  string            // HTTP verb, e.g. "GET"
	path    string            // URL path joined onto the client's base URL by Do
	headers map[string]string // extra headers applied by Do
	body    any               // []byte/string sent verbatim; other types JSON-marshalled by Do
}
// Response represents an HTTP response.
// Body is fully read into memory by Do before the Response is returned.
type Response struct {
	StatusCode int
	Headers    http.Header
	Body       []byte
}
// NewRequest creates a new request for the given HTTP method and URL
// path (relative to the client's base URL) with an empty header set.
func (c *Client) NewRequest(method, urlPath string) *Request {
	req := &Request{
		method:  method,
		path:    urlPath,
		headers: map[string]string{},
	}
	return req
}
// SetHeader sets a request header.
// The value replaces any previously set value for key; headers are
// copied onto the outgoing request by Client.Do.
func (r *Request) SetHeader(key, value string) {
	r.headers[key] = value
}
// SetBody sets the request body.
// body may be a []byte or string (sent verbatim) or any other value,
// which Client.Do JSON-marshals before sending.
func (r *Request) SetBody(body any) {
	r.body = body
}
type ReaderFile struct {
*bytes.Buffer
}
func (r *ReaderFile) Close() error {
return nil
}
func NewReaderFile(data []byte) *ReaderFile {
return &ReaderFile{bytes.NewBuffer(data)}
}
// SetBody installs body as the payload of req and sets ContentLength to
// match. ReaderFile already implements io.ReadCloser (its Close is a
// no-op returning nil, same as io.NopCloser), so the previous
// io.NopCloser wrapper was redundant and has been dropped.
func (c *Client) SetBody(req *http.Request, body []byte) {
	req.Body = NewReaderFile(body)
	req.ContentLength = int64(len(body))
}
// SetHeader sets key to value on the given outgoing request, replacing
// any existing value for that header.
func (c *Client) SetHeader(req *http.Request, key, value string) {
	req.Header.Set(key, value)
}
// Do executes req against the client's base URL and returns the
// response with its body fully read into memory.
//
// Behavior:
//   - req.path is cleaned and joined onto the base URL's path.
//   - A []byte or string body is sent verbatim; any other non-nil body
//     is JSON-marshalled, and Content-Type defaults to application/json.
//   - When a token is configured it is sent as a Bearer Authorization
//     header; caller-supplied headers are applied afterwards and may
//     override it.
//   - HTTP error statuses (>= 400) are logged but NOT converted into a
//     Go error; callers must inspect Response.StatusCode.
func (c *Client) Do(req *Request) (*Response, error) {
	u, err := url.Parse(c.baseURL)
	if err != nil {
		return nil, fmt.Errorf("invalid base URL: %w", err)
	}
	cleanPath := path.Clean(req.path)
	if len(cleanPath) > 0 && cleanPath[0] == '/' {
		cleanPath = cleanPath[1:]
	}
	// Special handling for Gitness Maven registry paths
	// Gitness may require additional path components or structure
	u.Path = path.Join(u.Path, cleanPath)

	// Serialize the body: raw bytes/strings pass through untouched,
	// everything else is JSON-encoded.
	var bodyReader io.Reader
	if req.body != nil {
		var bodyBytes []byte
		switch v := req.body.(type) {
		case []byte:
			bodyBytes = v
		case string:
			bodyBytes = []byte(v)
		default:
			bodyBytes, err = json.Marshal(req.body)
			if err != nil {
				return nil, fmt.Errorf("failed to marshal request body: %w", err)
			}
		}
		bodyReader = bytes.NewReader(bodyBytes)
	}

	// NOTE(review): context.Background() means callers cannot cancel or
	// time out the request — acceptable for conformance tests.
	httpReq, err := http.NewRequestWithContext(context.Background(), req.method, u.String(), bodyReader)
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}

	// Only log request details if DEBUG is enabled.
	if c.debug {
		log.Printf("Making %s request to: %s\n", httpReq.Method, httpReq.URL)
		if httpReq.Body != nil {
			log.Printf("Request Body: %v\n", httpReq.Body)
		}
		for k, v := range httpReq.Header {
			log.Printf("Request Header: %s: %v\n", k, v)
		}
	}

	// Set authorization header with PAT token for Gitness authentication.
	if c.token != "" {
		// Always use Bearer token authentication for Gitness
		httpReq.Header.Set("Authorization", "Bearer "+c.token)
		if c.debug {
			log.Printf("Using Gitness Bearer token for authentication\n")
		}
	} else {
		// Always log authentication warnings regardless of debug setting
		log.Printf("WARNING: No authentication token provided\n")
	}

	// Apply caller-supplied headers (these may override Authorization).
	for k, v := range req.headers {
		httpReq.Header.Set(k, v)
	}
	// Set default content-type if not set.
	if req.body != nil && httpReq.Header.Get("Content-Type") == "" {
		httpReq.Header.Set("Content-Type", "application/json")
	}

	resp, err := c.client.Do(httpReq)
	if err != nil {
		// Always log critical errors
		log.Printf("ERROR: Failed to execute request: %v\n", err)
		return nil, fmt.Errorf("failed to execute request: %w", err)
	}
	defer resp.Body.Close()

	// Print response details only if DEBUG is enabled
	if c.debug {
		log.Printf("Response Status: %d %s\n", resp.StatusCode, resp.Status)
		for k, v := range resp.Header {
			log.Printf("Response Header: %s: %v\n", k, v)
		}
	}

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		// Always log critical errors
		log.Printf("ERROR: Failed to read response body: %v\n", err)
		return nil, fmt.Errorf("failed to read response body: %w", err)
	}

	// Fix: the body used to be dumped on every request regardless of the
	// debug flag, and error bodies were therefore logged twice. Gate the
	// generic dump behind debug; error bodies are still always logged below.
	if c.debug && len(body) > 0 {
		log.Printf("Response Body: %s\n", string(body))
	}
	if resp.StatusCode >= 400 {
		log.Printf("Error response: %d %s\n", resp.StatusCode, resp.Status)
		log.Printf("Error response body: %s\n", string(body))
	}

	return &Response{
		StatusCode: resp.StatusCode,
		Headers:    resp.Header,
		Body:       body,
	}, nil
}
| go | Apache-2.0 | a087eef054a8fc8317f4fda02d3c7ee599b71fec | 2026-01-07T08:36:08.091982Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.