text
stringlengths 11
4.05M
|
|---|
/*
Tencent is pleased to support the open source community by making Basic Service Configuration Platform available.
Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except
in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under
the License is distributed on an "as IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the specific language governing permissions and
limitations under the License.
*/
package table
import (
"errors"
"bscp.io/pkg/criteria/validator"
)
// TemplateSpace is the ORM model of a template space (模版空间),
// persisted in the template_spaces table.
type TemplateSpace struct {
	// ID is the primary key of the row.
	ID uint32 `json:"id" gorm:"primaryKey"`
	// Spec holds user-settable fields; stored flattened into the row.
	Spec *TemplateSpaceSpec `json:"spec" gorm:"embedded"`
	// Attachment links the space to its owning business.
	Attachment *TemplateSpaceAttachment `json:"attachment" gorm:"embedded"`
	// Revision records create/update audit information.
	Revision *Revision `json:"revision" gorm:"embedded"`
}
// TableName is the template space's database table name.
// It satisfies gorm's Tabler interface.
func (t *TemplateSpace) TableName() string {
	const tableName = "template_spaces"
	return tableName
}
// AppID implements the AuditRes interface; a template space is not tied
// to a single application, so it always reports the zero app ID.
func (t *TemplateSpace) AppID() uint32 {
	var none uint32
	return none
}
// ResID implements the AuditRes interface; the audited resource ID is the
// template space's own primary key.
func (t *TemplateSpace) ResID() uint32 { return t.ID }
// ResType implements the AuditRes interface, naming the audited resource kind.
func (t *TemplateSpace) ResType() string {
	const resType = "template_space"
	return resType
}
// ValidateCreate validate template space is valid or not when create it.
// The ID must be unset (the DB assigns it), and spec, attachment and
// revision must all be present and individually valid.
func (t *TemplateSpace) ValidateCreate() error {
	if t.ID > 0 {
		return errors.New("id should not be set")
	}
	// Run the per-section checks in a fixed order so the first failure
	// (and therefore the reported error) matches the original contract.
	steps := []func() error{
		func() error {
			if t.Spec == nil {
				return errors.New("spec not set")
			}
			return t.Spec.ValidateCreate()
		},
		func() error {
			if t.Attachment == nil {
				return errors.New("attachment not set")
			}
			return t.Attachment.Validate()
		},
		func() error {
			if t.Revision == nil {
				return errors.New("revision not set")
			}
			return t.Revision.ValidateCreate()
		},
	}
	for _, step := range steps {
		if err := step(); err != nil {
			return err
		}
	}
	return nil
}
// ValidateUpdate validate template space is valid or not when update it.
// The ID must identify an existing row; spec is optional on update, while
// attachment and revision remain mandatory.
func (t *TemplateSpace) ValidateUpdate() error {
	if t.ID <= 0 {
		return errors.New("id should be set")
	}
	// Spec is only validated when the caller supplied one.
	if t.Spec != nil {
		if err := t.Spec.ValidateUpdate(); err != nil {
			return err
		}
	}
	if t.Attachment == nil {
		return errors.New("attachment should be set")
	}
	if err := t.Attachment.Validate(); err != nil {
		return err
	}
	if t.Revision == nil {
		return errors.New("revision not set")
	}
	return t.Revision.ValidateUpdate()
}
// ValidateDelete validate the template space's info when delete it:
// a concrete ID plus a valid attachment (to scope the delete to a business).
func (t *TemplateSpace) ValidateDelete() error {
	switch {
	case t.ID <= 0:
		return errors.New("template space id should be set")
	case t.Attachment == nil:
		return errors.New("attachment should be set")
	}
	return t.Attachment.Validate()
}
// TemplateSpaceSpec defines all the specifics for template space set by user.
type TemplateSpaceSpec struct {
	// Name is the user-visible template space name.
	Name string `json:"name" gorm:"column:name"`
	// Memo is an optional free-form description.
	Memo string `json:"memo" gorm:"column:memo"`
}
// ValidateCreate validate template space spec when it is created.
// Both the name and the memo are checked, so an invalid memo is rejected
// up front instead of only being caught later by ValidateUpdate (the
// original validated the name only, which was inconsistent with update).
func (t *TemplateSpaceSpec) ValidateCreate() error {
	if err := validator.ValidateName(t.Name); err != nil {
		return err
	}
	// Same memo rule as ValidateUpdate: memo is optional (not required).
	if err := validator.ValidateMemo(t.Memo, false); err != nil {
		return err
	}
	return nil
}
// ValidateUpdate validate template space spec when it is updated.
// Only the memo is re-checked here; memo is optional (not required).
func (t *TemplateSpaceSpec) ValidateUpdate() error {
	return validator.ValidateMemo(t.Memo, false)
}
// TemplateSpaceAttachment defines the template space attachments.
type TemplateSpaceAttachment struct {
	// BizID is the owning business; every template space belongs to one.
	BizID uint32 `json:"biz_id" gorm:"column:biz_id"`
}
// Validate whether template space attachment is valid or not.
// BizID is unsigned, so the only invalid value is the zero value; the
// original `<= 0` comparison on a uint32 was a misleading way to say `== 0`.
func (t *TemplateSpaceAttachment) Validate() error {
	if t.BizID == 0 {
		return errors.New("invalid attachment biz id")
	}
	return nil
}
|
package service
import (
"context"
"net/url"
"github.com/ONSdigital/dp-api-clients-go/v2/health"
"github.com/ONSdigital/dp-net/v2/handlers/reverseproxy"
"github.com/ONSdigital/florence/assets"
"github.com/ONSdigital/florence/config"
"github.com/ONSdigital/florence/directors"
"github.com/ONSdigital/florence/service/modifiers"
"github.com/ONSdigital/log.go/log"
"github.com/gorilla/mux"
"github.com/gorilla/websocket"
"github.com/pkg/errors"
)
var (
	// getAsset and getAssetETag are indirections over the embedded asset
	// helpers so tests can stub them.
	getAsset     = assets.Asset
	getAssetETag = assets.GetAssetETag
	// upgrader promotes HTTP connections to websockets with default options.
	upgrader = websocket.Upgrader{}
)
// Service contains all the configs, server and clients to run Florence
type Service struct {
	// version is the running build's version string, served to the UI.
	version      string
	Config       *config.Config
	// healthClient talks to the API router for health checks.
	healthClient *health.Client
	HealthCheck  HealthChecker
	Router       *mux.Router
	Server       HTTPServer
	ServiceList  *ExternalServiceList
}
// Run the service: build the Service, wire the API-router health client and
// healthcheck, register checkers, create the router, then start the
// healthcheck and the HTTP server. The server runs in a goroutine; any
// listen/serve failure is reported on svcErrors rather than returned.
func Run(ctx context.Context, cfg *config.Config, serviceList *ExternalServiceList, buildTime, gitCommit, version string, svcErrors chan error) (svc *Service, err error) {
	log.Event(ctx, "running service", log.INFO)
	// Initialise Service struct
	svc = &Service{
		version:     version,
		Config:      cfg,
		ServiceList: serviceList,
	}
	// Get health client for api router
	svc.healthClient = serviceList.GetHealthClient("api-router", cfg.APIRouterURL)
	// Get healthcheck with checkers
	svc.HealthCheck, err = serviceList.GetHealthCheck(cfg, buildTime, gitCommit, version)
	if err != nil {
		log.Event(ctx, "failed to create health check", log.FATAL, log.Error(err))
		return nil, err
	}
	if err := svc.registerCheckers(ctx, cfg); err != nil {
		return nil, errors.Wrap(err, "unable to register checkers")
	}
	// Create Router and HTTP Server. The router needs the healthcheck
	// handler, so it is built after the healthcheck.
	svc.Router, err = svc.createRouter(ctx, cfg)
	if err != nil {
		return nil, err
	}
	svc.Server = serviceList.GetHTTPServer(cfg.BindAddr, svc.Router)
	// Start Healthcheck and HTTP Server
	svc.HealthCheck.Start(ctx)
	go func() {
		if err := svc.Server.ListenAndServe(); err != nil {
			svcErrors <- errors.Wrap(err, "failure in http listen and serve")
		}
	}()
	return svc, nil
}
// createRouter creates a Router with the necessary reverse proxies for services that florence needs to call,
// and handlers legacy index files.
// CMD API calls (recipe, import and dataset APIs) are proxied through the API router.
// NOTE: registration order matters — the trailing "/{uri:.*}" route is a
// catch-all that forwards anything unmatched to the frontend router.
func (svc *Service) createRouter(ctx context.Context, cfg *config.Config) (router *mux.Router, err error) {
	// Parse every upstream URL up front so a bad config fails fast.
	apiRouterURL, err := url.Parse(cfg.APIRouterURL)
	if err != nil {
		log.Event(ctx, "error parsing API router URL", log.FATAL, log.Error(err))
		return nil, err
	}
	frontendRouterURL, err := url.Parse(cfg.FrontendRouterURL)
	if err != nil {
		log.Event(ctx, "error parsing frontend router URL", log.FATAL, log.Error(err))
		return nil, err
	}
	tableURL, err := url.Parse(cfg.TableRendererURL)
	if err != nil {
		log.Event(ctx, "error parsing table renderer URL", log.FATAL, log.Error(err))
		return nil, err
	}
	datasetControllerURL, err := url.Parse(cfg.DatasetControllerURL)
	if err != nil {
		log.Event(ctx, "error parsing dataset controller URL", log.FATAL, log.Error(err))
		return nil, err
	}
	// Build one reverse proxy per upstream; the Director argument is the
	// path prefix stripped/handled by that proxy, and the identity modifier
	// rewrites auth-related responses where needed.
	frontendRouterProxy := reverseproxy.Create(frontendRouterURL, directors.Director(""), nil)
	apiRouterProxy := reverseproxy.Create(apiRouterURL, directors.Director("/api"), modifiers.IdentityResponseModifier)
	tableProxy := reverseproxy.Create(tableURL, directors.Director("/table"), nil)
	datasetControllerProxy := reverseproxy.Create(datasetControllerURL, directors.Director("/dataset-controller"), nil)
	cantabularMetadataExtractorAPIProxy := reverseproxy.Create(apiRouterURL, directors.FixedVersionDirector(cfg.APIRouterVersion, ""), nil)
	// The following proxies and their associated routes are deprecated and should be removed once the client side code has been updated to match
	zebedeeProxy := reverseproxy.Create(apiRouterURL, directors.Director("/zebedee"), nil)
	importAPIProxy := reverseproxy.Create(apiRouterURL, directors.FixedVersionDirector(cfg.APIRouterVersion, "/import"), nil)
	datasetAPIProxy := reverseproxy.Create(apiRouterURL, directors.FixedVersionDirector(cfg.APIRouterVersion, "/dataset"), nil)
	recipeAPIProxy := reverseproxy.Create(apiRouterURL, directors.FixedVersionDirector(cfg.APIRouterVersion, ""), nil)
	topicsProxy := reverseproxy.Create(apiRouterURL, directors.FixedVersionDirector(cfg.APIRouterVersion, ""), nil)
	imageAPIProxy := reverseproxy.Create(apiRouterURL, directors.FixedVersionDirector(cfg.APIRouterVersion, "/image"), nil)
	uploadServiceAPIProxy := reverseproxy.Create(apiRouterURL, directors.FixedVersionDirector(cfg.APIRouterVersion, ""), nil)
	filesAPIProxy := reverseproxy.Create(apiRouterURL, directors.FixedVersionDirector(cfg.APIRouterVersion, ""), nil)
	downloadServiceProxy := reverseproxy.Create(apiRouterURL, directors.FixedVersionDirector(cfg.APIRouterVersion, ""), nil)
	identityAPIProxy := reverseproxy.Create(apiRouterURL, directors.FixedVersionDirector(cfg.APIRouterVersion, ""), modifiers.IdentityResponseModifier)
	// End of deprecated proxies
	router = mux.NewRouter()
	router.HandleFunc("/health", svc.HealthCheck.Handler)
	// Feature-flagged route groups: each block is only registered when the
	// corresponding shared-config flag is enabled.
	if cfg.SharedConfig.EnableNewUpload {
		router.Handle("/upload-new", uploadServiceAPIProxy)
		router.Handle("/files{uri:.*}", filesAPIProxy)
		router.Handle("/downloads-new{uri:.*}", downloadServiceProxy)
	}
	router.Handle("/upload", uploadServiceAPIProxy)
	router.Handle("/upload/{id}", uploadServiceAPIProxy)
	if cfg.SharedConfig.EnableDatasetImport {
		router.Handle("/recipes{uri:.*}", recipeAPIProxy)
		router.Handle("/import{uri:.*}", importAPIProxy)
		router.Handle("/dataset/{uri:.*}", datasetAPIProxy)
		router.Handle("/instances/{uri:.*}", datasetAPIProxy)
		router.Handle("/dataset-controller/{uri:.*}", datasetControllerProxy)
		if cfg.SharedConfig.EnableCantabularJourney {
			router.Handle("/cantabular-metadata/{uri:.*}", cantabularMetadataExtractorAPIProxy)
		}
	}
	if cfg.SharedConfig.EnableNewSignIn {
		router.Handle("/tokens", identityAPIProxy)
		router.Handle("/tokens/{uri:.*}", identityAPIProxy)
		router.Handle("/users", identityAPIProxy)
		router.Handle("/users/{uri:.*}", identityAPIProxy)
		router.Handle("/groups/{uri:.*}", identityAPIProxy)
		router.Handle("/groups", identityAPIProxy)
		router.Handle("/password-reset", identityAPIProxy)
		router.Handle("/password-reset/{uri:.*}", identityAPIProxy)
	}
	router.Handle("/image/{uri:.*}", imageAPIProxy)
	router.Handle("/zebedee{uri:/.*}", zebedeeProxy)
	router.Handle("/table/{uri:.*}", tableProxy)
	router.Handle("/topics", topicsProxy)
	router.Handle("/topics/{uri:.*}", topicsProxy)
	if !cfg.SharedConfig.EnableNewSignIn {
		// Roots for !EnableNewSignIn Florence React app
		router.HandleFunc("/cookies", DeleteHttpCookie()).Methods("DELETE")
	}
	// Florence endpoints
	router.HandleFunc("/florence/dist/{uri:.*}", staticFiles)
	router.HandleFunc("/florence/", redirectToFlorence)
	router.HandleFunc("/florence/index.html", redirectToFlorence)
	router.Path("/florence/publishing-queue").HandlerFunc(legacyIndexFile(cfg))
	router.Path("/florence/reports").HandlerFunc(legacyIndexFile(cfg))
	router.Path("/florence/workspace").HandlerFunc(legacyIndexFile(cfg))
	router.HandleFunc("/florence/websocket", websocketHandler(svc.version))
	router.Path("/florence{uri:.*}").HandlerFunc(refactoredIndexFile(cfg))
	// API and Frontend Routers
	router.Handle("/api/{uri:.*}", apiRouterProxy)
	router.Handle("/{uri:.*}", frontendRouterProxy)
	return router, nil
}
// Close gracefully shuts the service down in the required order, with timeout
// (healthcheck first, then the HTTP server). It returns the context error on
// timeout, or a generic error if any component failed to shut down.
// NOTE(review): hasShutdownError is written in the goroutine and read after
// <-ctx.Done() without synchronization — on the cancel path this is ordered
// by the channel receive, but on the timeout path it looks like a data race;
// confirm with -race.
func (svc *Service) Close(ctx context.Context) error {
	timeout := svc.Config.GracefulShutdownTimeout
	log.Event(ctx, "commencing graceful shutdown", log.Data{"graceful_shutdown_timeout": timeout}, log.INFO)
	ctx, cancel := context.WithTimeout(ctx, timeout)
	hasShutdownError := false
	go func() {
		defer cancel()
		// stop healthcheck, as it depends on everything else
		if svc.ServiceList.HealthCheck {
			svc.HealthCheck.Stop()
		}
		// stop any incoming requests
		if err := svc.Server.Shutdown(ctx); err != nil {
			log.Event(ctx, "failed to shutdown http server", log.Error(err), log.ERROR)
			hasShutdownError = true
		}
	}()
	// wait for shutdown success (via cancel) or failure (timeout)
	<-ctx.Done()
	// timeout expired
	if ctx.Err() == context.DeadlineExceeded {
		log.Event(ctx, "shutdown timed out", log.ERROR, log.Error(ctx.Err()))
		return ctx.Err()
	}
	// other error
	if hasShutdownError {
		err := errors.New("failed to shutdown gracefully")
		log.Event(ctx, "failed to shutdown gracefully ", log.ERROR, log.Error(err))
		return err
	}
	log.Event(ctx, "graceful shutdown was successful", log.INFO)
	return nil
}
// registerCheckers adds every health checker to the healthcheck; currently
// only the API-router client is registered. Individual failures are logged
// and folded into a single summary error.
func (svc *Service) registerCheckers(ctx context.Context, cfg *config.Config) (err error) {
	if err = svc.HealthCheck.AddCheck("API router", svc.healthClient.Checker); err != nil {
		log.Event(ctx, "error adding check for api router health client", log.ERROR, log.Error(err))
		return errors.New("Error(s) registering checkers for healthcheck")
	}
	return nil
}
|
package controllers
import (
"bytes"
"encoding/json"
"errors"
"io/ioutil"
"net/http"
"os"
"strconv"
"strings"
"time"
"github.com/flux-web/flux-web/conf"
"github.com/flux-web/flux-web/models"
"github.com/astaxie/beego"
"github.com/astaxie/beego/context"
"github.com/astaxie/beego/httplib"
"github.com/astaxie/beego/logs"
)
const (
	// statusUpToDate is the release status broadcast when the wanted image
	// is already (or becomes) the deployed one.
	statusUpToDate = "up to date"
	// actionUpdateRelease labels release-result messages sent to clients.
	actionUpdateRelease = "updateRelease"
)
// WorkloadController serves the workload list/release HTTP endpoints,
// embedding beego.Controller for request/response plumbing.
type WorkloadController struct {
	Config conf.Config
	beego.Controller
}
// jobResponse is the subset of a flux job-status reply this package reads.
type jobResponse struct {
	// Result is never decoded from JSON (tag "-"); only Err/StatusString
	// are inspected by callers.
	Result       interface{} `json:"-"`
	Err          string      `json:"Err"`
	StatusString string      `json:"StatusString"`
}
// l is the package-wide logger.
var l = logs.GetLogger()

// flux holds the flux daemon base URL (from FLUX_URL) and the API paths
// this package calls against it.
var flux = models.Flux{
	FluxUrl:            os.Getenv("FLUX_URL"),
	SyncApi:            "/api/flux/v6/sync?ref=",
	JobApi:             "/api/flux/v6/jobs?id=",
	UpdateManifestsApi: "/api/flux/v9/update-manifests",
	ListImagesApi:      "/api/flux/v10/images?namespace=",
	ListServices:       "/api/flux/v11/services?namespace=",
}
// ListWorkloads responds with the JSON-encoded workloads of the namespace
// from the ":ns" route parameter, built by combining the namespace's flux
// images and services.
// Fixes over the original: the models.NewImages error was silently
// overwritten, the services error path kept executing after SetStatus(500),
// and the json.Marshal error was ignored.
func (this *WorkloadController) ListWorkloads() {
	ns := this.Ctx.Input.Param(":ns")
	res, err := httplib.Get(flux.FluxUrl + flux.ListImagesApi + ns).Debug(true).Bytes()
	if err != nil {
		l.Panic(err.Error())
	}
	images, err := models.NewImages(res)
	if err != nil {
		l.Printf("Found error: " + err.Error())
		this.Ctx.Output.SetStatus(500)
		return
	}
	services, err := models.NewServices(flux.FluxUrl + flux.ListServices + ns)
	if err != nil {
		l.Printf("Found error: " + err.Error())
		this.Ctx.Output.SetStatus(500)
		return
	}
	workloads := models.NewWorkloads(images, services)
	workloadsResponse, err := json.Marshal(workloads)
	if err != nil {
		l.Printf("Found error: " + err.Error())
		this.Ctx.Output.SetStatus(500)
		return
	}
	this.Ctx.Output.Body(workloadsResponse)
}
// ReleaseWorkloads handles a release request: it toggles the workload's
// automation flag if it differs from the requested state, then either
// triggers an image update or, when the wanted image is already deployed,
// broadcasts an "up to date" result immediately.
// Fix over the original: the services-fetch error path now returns after
// SetStatus(500) instead of continuing with a zero-valued services object.
func (this *WorkloadController) ReleaseWorkloads() {
	releaseRequest, err := models.NewReleseRequest(this.Ctx.Input.RequestBody)
	if err != nil {
		l.Printf("Found error: " + err.Error())
		this.Ctx.Output.SetStatus(500)
		return
	}
	services, err := models.NewServices(flux.FluxUrl + flux.ListServices + releaseRequest.Namespace)
	if err != nil {
		l.Printf("Found error: " + err.Error())
		this.Ctx.Output.SetStatus(500)
		return
	}
	// Align the workload's automation flag with what the request asks for.
	currentStatusAutomated := services.GetStatusAutomateByWorkload(releaseRequest.Workload)
	if currentStatusAutomated != releaseRequest.Automated {
		l.Printf("Switch automated workload stature from %t to %t\n", currentStatusAutomated, releaseRequest.Automated)
		err = this.automateWorkload(releaseRequest)
		if err != nil {
			l.Printf("Found error: " + err.Error())
			this.Ctx.Output.SetStatus(500)
			return
		}
	}
	if !services.WantedImageAlreadyDeployed(releaseRequest.Workload, releaseRequest.Target) {
		this.updateWorkload(releaseRequest)
	} else {
		// Nothing to roll out — tell listeners the workload is current.
		r := models.ReleaseResult{
			Status:    statusUpToDate,
			Workload:  releaseRequest.Workload,
			Container: releaseRequest.Container,
			Tag:       releaseRequest.Target,
			Action:    actionUpdateRelease,
		}
		broadcastReleaseResult(r)
		l.Printf("Image %s is already deployed!\n", releaseRequest.Target)
	}
	this.Ctx.WriteString("Done")
}
// updateWorkload triggers a flux release job for the request and answers the
// HTTP caller immediately; the sync result is awaited in the background and
// broadcast to websocket clients when it completes.
func (this *WorkloadController) updateWorkload(releaseRequest models.ReleaseRequest) {
	payload, err := releaseRequest.GetReleaseRequestJSON(this.Config.FluxUser)
	if err != nil {
		l.Printf("Found error: " + err.Error())
		this.Ctx.Output.SetStatus(500)
		return
	}
	jobID, err := triggerJob(payload)
	if err != nil {
		l.Printf("Found error: " + err.Error())
		this.Ctx.Output.SetStatus(500)
		return
	}
	this.Ctx.WriteString("Done")
	// Arguments are evaluated at the go statement, so the direct call is
	// equivalent to the original closure wrapper.
	go waitForSync(jobID, releaseRequest)
}
// automateWorkload submits a job that flips the workload's automation flag
// and polls flux (every PollInterval ms, up to PollTimeout s) until the job
// reports success.
// Fixes over the original: time.Tick leaked its ticker (it can never be
// stopped); a time.NewTicker with defer Stop releases it, and the labelled
// break is replaced by a direct return.
func (this *WorkloadController) automateWorkload(releaseRequest models.ReleaseRequest) error {
	jsonRequest, err := releaseRequest.GetAutomatedRequestJSON(this.Config.FluxUser)
	if err != nil {
		return err
	}
	jobID, err := triggerJob(jsonRequest)
	if err != nil {
		return err
	}
	timeout := time.After(time.Duration(this.Config.PollTimeout) * time.Second)
	ticker := time.NewTicker(time.Duration(this.Config.PollInterval) * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-timeout:
			l.Printf("jobID: " + jobID + " timed out")
			return errors.New("timeout while automateWorkload")
		case <-ticker.C:
			l.Printf("waiting for jobID: " + jobID + " to finish...")
			jobStatus, err := fetchJobstatus(flux.FluxUrl + flux.JobApi + jobID)
			if err != nil {
				return err
			}
			if jobStatus.StatusString == statusSucceeded {
				l.Printf("automate for workload" + releaseRequest.Workload + " is done!")
				return nil
			}
		}
	}
}
// waitForSync resolves the job's syncID, polls flux until the sync queue for
// it drains, then broadcasts the release result via broadcastReleaseResult.
// The result starts as "release failed" and is upgraded to "up to date" only
// when flux reports an empty sync queue.
// NOTE(review): the polling loop has no timeout — if flux never returns "[]"
// for this syncID the goroutine polls forever; confirm whether a bound is
// needed.
func waitForSync(jobID string, newreleaseRequest models.ReleaseRequest) {
	l.Printf("getting syncID...")
	var releaseResult models.ReleaseResult
	releaseResult.Workload = newreleaseRequest.Workload
	releaseResult.Container = newreleaseRequest.Container
	releaseResult.Tag = newreleaseRequest.Target
	releaseResult.Status = "release failed"
	releaseResult.Action = actionUpdateRelease
	syncID, err := getSyncID(jobID)
	if err != nil {
		l.Printf(err.Error())
		// Only "no changes found" is pushed to clients; other errors are
		// logged and dropped without any broadcast.
		if err.Error() == "no changes found" {
			releaseResult.Status = err.Error()
			broadcastReleaseResult(releaseResult)
		}
		return
	}
	l.Printf("found new syncID: " + syncID)
	for {
		l.Printf("waiting for syncID: " + syncID + " to finish...")
		resp, err := httplib.Get(flux.FluxUrl + flux.SyncApi + syncID).String()
		if err != nil {
			l.Printf(err.Error())
			break
		}
		// An empty JSON array means no pending syncs — the release landed.
		if resp == "[]" {
			releaseResult.Status = statusUpToDate
			l.Printf("release for " + newreleaseRequest.Workload + " is done!")
			break
		}
		time.Sleep(time.Millisecond * 300)
	}
	broadcastReleaseResult(releaseResult)
}
// getSyncID polls the flux job API once per second until the job carries a
// revision (the syncID), the job reports an error, or the request/decoding
// fails. Fix over the original: errors are returned as-is instead of being
// re-wrapped with errors.New(err.Error()), which destroyed their identity.
// NOTE(review): there is still no upper bound on the polling loop — confirm
// whether callers rely on it eventually timing out elsewhere.
func getSyncID(jobID string) (string, error) {
	l.Printf("getting syncID...")
	for {
		resp, err := httplib.Get(flux.FluxUrl + flux.JobApi + jobID).Bytes()
		if err != nil {
			l.Println(err.Error())
			return "", err
		}
		job, err := models.NewJob(resp)
		if err != nil {
			return "", err
		}
		switch {
		case job.Result.Revision != "":
			l.Println("got syncID: " + job.Result.Revision)
			return job.Result.Revision, nil
		case job.Err != "":
			l.Printf("job error: " + job.Err)
			return "", errors.New(job.Err)
		default:
			l.Printf("job status: " + job.StatusString)
		}
		time.Sleep(time.Second)
	}
}
// triggerJob POSTs the given JSON body to flux's update-manifests API and
// returns the job ID from the response (quotes stripped).
// Fixes over the original: l.Panic before a return made the return
// unreachable (now log-and-return); errors are returned directly instead of
// errors.New(err.Error()); the redundant string(jobID) conversion is gone;
// the status check is an early return.
func triggerJob(requestBody []byte) (string, error) {
	resp, err := http.Post(flux.FluxUrl+flux.UpdateManifestsApi, "application/json", bytes.NewBuffer(requestBody))
	if err != nil {
		l.Printf("Error_triggerJob_01: " + err.Error())
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return "", errors.New("Job request statuscode is: " + strconv.Itoa(resp.StatusCode))
	}
	bodyBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		l.Printf(err.Error())
		return "", err
	}
	l.Println(string(bodyBytes))
	// The body is the quoted job ID, e.g. "abc123" — strip the quotes.
	jobID := strings.Replace(string(bodyBytes), "\"", "", -1)
	l.Println("job " + jobID + " triggered")
	return jobID, nil
}
// GetImages fetches the flux images for a namespace. params[0], when given,
// overrides the DEFAULT_NAMESPACE env var; params[1], when given, keeps only
// images whose ID contains that substring.
func GetImages(params ...string) []models.Image {
	namespace := os.Getenv("DEFAULT_NAMESPACE")
	if len(params) > 0 {
		namespace = params[0]
		l.Printf(namespace)
	}
	res, err := httplib.Get(flux.FluxUrl + flux.ListImagesApi + namespace).Debug(true).Bytes()
	if err != nil {
		l.Panic(err.Error())
	}
	images, err := models.NewImages(res)
	if err != nil {
		l.Panic(err.Error())
	}
	if len(params) > 1 {
		// Filter in place, reusing the backing array; keeps relative order,
		// same as the original delete-while-iterating loop.
		filter := params[1]
		kept := images[:0]
		for _, img := range images {
			if strings.Contains(img.ID, filter) {
				kept = append(kept, img)
			}
		}
		images = kept
	}
	return images
}
// Auth is a beego filter that rejects requests with 401 when the READ_ONLY
// environment variable is true, or when it is not a parseable boolean at all.
func Auth(c *context.Context) {
	readOnly, err := strconv.ParseBool(os.Getenv("READ_ONLY"))
	if err != nil {
		c.Abort(401, "Not boolean value for READ_ONLY")
		return
	}
	if readOnly {
		c.Abort(401, "Not authorized")
	}
}
// fetchJobstatus GETs the flux job-status URL (20s timeout) and decodes the
// JSON body into a jobResponse.
// Fix over the original: the response body was never closed, leaking the
// connection; deferred Close added. The redundant resp == nil check is gone —
// net/http guarantees a non-nil *Response when err is nil.
func fetchJobstatus(url string) (jobResponse, error) {
	jobres := jobResponse{}
	client := http.Client{
		Timeout: time.Second * 20,
	}
	resp, err := client.Get(url)
	if err != nil {
		return jobres, err
	}
	defer resp.Body.Close()
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return jobres, err
	}
	if err := json.Unmarshal(data, &jobres); err != nil {
		return jobres, err
	}
	return jobres, nil
}
// broadcastReleaseResult marshals the release result and pushes it onto the
// hub's broadcast channel.
// Fix over the original: on a marshal failure it now returns instead of
// broadcasting a nil payload to every connected client.
func broadcastReleaseResult(r models.ReleaseResult) {
	jsonString, err := json.Marshal(r)
	if err != nil {
		l.Println(err)
		return
	}
	h.broadcast <- jsonString
}
|
package xmodule
import (
"log"
"time"
)
// module wraps a particle via embedding; it is the unit handed to callers
// of newModule and carries the Input/Output methods below.
type module struct {
	particle
}
// particle is the injected instance state: it records the time at which
// Insert was last called.
type particle struct {
	// pTime is the timestamp set by Insert.
	pTime time.Time
}
// Insert stamps the receiver with the current time and returns a copy of it.
// Fix over the original: the declared return type is particle (a value) but
// the body returned the *particle receiver, which does not compile; the
// receiver is now dereferenced.
func (p *particle) Insert() particle {
	p.pTime = time.Now()
	return *p
}
// Remove logs the nanosecond component of the given particle's timestamp.
func (p *particle) Remove(old particle) {
	ns := old.pTime.Nanosecond()
	log.Print(ns)
}
// new module struct
func newModule()(module) {
newModule := new module
return newModule
}
// Input accepts arbitrary data for the module; the current implementation
// discards it.
func (h *module) Input(data interface{}) {
}
// Output returns the module's output; the current implementation always
// yields an empty string.
func (h *module) Output() (data interface{}) {
	return ""
}
|
package main
import "fmt"
// main prints the losers for a sample game of 5 friends with step 3.
func main() {
	// Other examples:
	// fmt.Println(circularGameLosers(5, 2)) // [4 5]
	// fmt.Println(circularGameLosers(4, 4)) // [2 3 4]
	losers := circularGameLosers(5, 3)
	fmt.Println(losers)
}
// circularGameLosers returns, in increasing order, the friends (numbered
// 1..n in a circle) who never receive the ball when it starts at friend 1
// and the i-th pass moves it i*k friends clockwise; the game ends as soon
// as any friend receives the ball a second time.
func circularGameLosers(n int, k int) []int {
	received := make(map[int]bool, n)
	pos, step := 1, 1
	for !received[pos] {
		received[pos] = true
		pos += k * step
		step++
		// Wrap back into 1..n (position n maps to n, not 0).
		if pos > n {
			pos %= n
			if pos == 0 {
				pos = n
			}
		}
	}
	var losers []int
	for friend := 1; friend <= n; friend++ {
		if !received[friend] {
			losers = append(losers, friend)
		}
	}
	return losers
}
|
package main
import (
"encoding/json"
"fmt"
"log"
"os"
"testing"
"time"
nats "github.com/nats-io/nats.go"
)
// TestNewJobOrder is an integration test: it connects to the NATS server
// named by CLIENT_ID/NATS_SERVER_ADDR, posts a two-item job order, and
// verifies the job-orders handler both acknowledges it and lists it back.
func TestNewJobOrder(t *testing.T) {
	clientID := os.Getenv("CLIENT_ID")
	natsServers := os.Getenv("NATS_SERVER_ADDR")
	connector, err := Connect(clientID, natsServers)
	ErrorIfNotNil(t, err, fmt.Sprintf("Problem connecting to NATS server, %v", err))
	defer connector.Shutdown()
	t.Run("Add job order and verify saved in job orders handler", func(t *testing.T) {
		// Post job order to Job Orders Handler
		jobOrder := JobOrder{}
		uuid1 := NewUuid()
		item := Item{uuid1, "computer table", 2, "", ""}
		jobOrder.Items = append(jobOrder.Items, item)
		uuid2 := NewUuid()
		item = Item{uuid2, "chair", 2, "", ""}
		jobOrder.Items = append(jobOrder.Items, item)
		uuid3 := NewUuid()
		jobOrder.JobOrderID = uuid3
		response, err := postJobOrder(connector.NATS(), jobOrder)
		ErrorIfNotNil(t, err, fmt.Sprintf("Error, job order handler could not post job order id %v", uuid3))
		// The handler acknowledges with a fixed message echoing the ID.
		if response.Message != "Received job order "+uuid3 {
			LogAndFail(t, fmt.Sprintf("Error, job order handler could not post job order %s.", uuid3))
			// t.Logf("Error, job order handler could not post job order %s.", uuid3)
			// t.Fail()
		}
		// get list of job orders from sales orders handler, regardless of status
		jobOrders, err := listJobOrders(connector.NATS())
		ErrorIfNotNil(t, err, fmt.Sprintf("%v", uuid3))
		for _, jobOrder := range jobOrders {
			if jobOrder.JobOrderID == uuid3 {
				return
			}
		}
		// Posted sales order not found in Job Order Handler list
		t.Fail()
	})
}
// listJobOrders requests the full job-order list over NATS (500ms timeout)
// and decodes the JSON reply.
// Fix over the original: the request error was dropped from the returned
// message; it is now wrapped with %w so callers can inspect it.
func listJobOrders(conn *nats.Conn) ([]JobOrder, error) {
	resp, err := conn.Request("All.JobOrder.List", nil, 500*time.Millisecond)
	if err != nil {
		return nil, fmt.Errorf("request 'All.JobOrder.List': %w", err)
	}
	if resp == nil {
		return nil, fmt.Errorf("request 'All.JobOrder.List': empty response")
	}
	var jobOrders []JobOrder
	if err := json.Unmarshal(resp.Data, &jobOrders); err != nil {
		return nil, fmt.Errorf("unmarshal jobOrders: %w", err)
	}
	return jobOrders, nil
}
// postJobOrder sends the job order over NATS (500ms timeout) and decodes the
// handler's acknowledgement.
// Fixes over the original: the request error was dropped from the returned
// message (now wrapped with %w), the final error said "marshaling" where it
// meant unmarshaling, and the log line misspelled "Reponse".
func postJobOrder(conn *nats.Conn, jobOrder JobOrder) (Response, error) {
	body, err := json.Marshal(jobOrder)
	if err != nil {
		return Response{}, fmt.Errorf("marshal job order: %w", err)
	}
	resp, err := conn.Request("Process.New.JobOrder", body, 500*time.Millisecond)
	if err != nil {
		return Response{}, fmt.Errorf("request 'Process.New.JobOrder': %w", err)
	}
	var response Response
	if err := json.Unmarshal(resp.Data, &response); err != nil {
		return Response{}, fmt.Errorf("unmarshal response: %w", err)
	}
	log.Println("+++ Response, \n\t ", response)
	return response, nil
}
|
package savior_test
import (
"io/ioutil"
"path/filepath"
"testing"
"github.com/itchio/wharf/wtest"
"github.com/itchio/savior"
"github.com/stretchr/testify/assert"
)
// Test_FolderSink verifies savior.FolderSink honours Entry.WriteOffset:
// a full write of "foobar" followed by a 2-byte write at offset 1 must leave
// the file reading "fee" (the second write truncates/overwrites from the
// offset onward — hence only 3 bytes survive).
func Test_FolderSink(t *testing.T) {
	assert := assert.New(t)
	dir, err := ioutil.TempDir("", "foldersink-test")
	wtest.Must(t, err)
	fs := &savior.FolderSink{
		Directory: dir,
	}
	entry := &savior.Entry{
		Kind:          savior.EntryKindFile,
		Mode:          0644,
		CanonicalPath: "secret",
		WriteOffset:   0,
	}
	{
		// First pass: write the whole file from offset 0.
		w, err := fs.GetWriter(entry)
		wtest.Must(t, err)
		_, err = w.Write([]byte("foobar"))
		wtest.Must(t, err)
		err = w.Close()
		wtest.Must(t, err)
	}
	entry.WriteOffset = 1
	{
		// Second pass: resume at offset 1 and overwrite with "ee".
		w, err := fs.GetWriter(entry)
		wtest.Must(t, err)
		_, err = w.Write([]byte("ee"))
		wtest.Must(t, err)
		err = w.Close()
		wtest.Must(t, err)
	}
	bs, err := ioutil.ReadFile(filepath.Join(dir, "secret"))
	wtest.Must(t, err)
	s := string(bs)
	assert.EqualValues("fee", s)
}
|
package public
import (
"github.com/tal-tech/go-zero/core/logx"
"net/http"
"tpay_backend/merchantapi/internal/common"
_func "tpay_backend/merchantapi/internal/handler/func"
"github.com/tal-tech/go-zero/rest/httpx"
logic_ "tpay_backend/merchantapi/internal/logic/public"
"tpay_backend/merchantapi/internal/svc"
)
// UploadFileHandler returns an http.HandlerFunc that accepts a multipart
// upload in the "file" form field from a logged-in user and stores it via
// the upload-file logic, replying with JSON.
// Fix over the original: the multipart.File returned by r.FormFile was
// discarded with _, leaking the open file/temp-file handle; it is now
// captured and closed.
func UploadFileHandler(ctx *svc.ServiceContext) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// Extract the uploaded file from the multipart form.
		file, fileHeader, err := r.FormFile("file")
		if err != nil {
			logx.Errorf("获取文件信息出错,err=[%v]", err)
			httpx.Error(w, common.NewCodeError(common.UploadFail))
			return
		}
		defer file.Close()
		userId, errs := _func.GetLoginedUserIdRequestHeader(r)
		if errs != nil {
			httpx.Error(w, errs)
			return
		}
		l := logic_.NewUploadFileLogic(r.Context(), ctx)
		resp, err := l.UploadFile(userId, fileHeader)
		if err != nil {
			httpx.Error(w, err)
		} else {
			common.OkJson(w, resp)
		}
	}
}
|
package progressbar
import (
"fmt"
"time"
)
// DownloadBar renders a textual progress bar for a download, tracking total
// size, progress so far, and a rolling speed sample.
type DownloadBar struct {
	// octetsToDownload is the expected total size in bytes.
	octetsToDownload int64
	// octetsDownloaded is the running byte count (set via SetBytesDownloaded).
	octetsDownloaded int64
	progressTheme    ProgressTheme
	// previousRenderAt / previousDownloadOctetCount are the last speed
	// sample's timestamp and byte count.
	previousRenderAt             time.Time
	previousDownloadOctetCount   int64
	downloadSpeedOctetsPerSecond int64
}
// NewDownloadBar builds a bar for the given total size and theme, seeding
// the speed sampler with the current time.
func NewDownloadBar(octetsToDownload int64, progressTheme ProgressTheme) *DownloadBar {
	bar := &DownloadBar{
		octetsToDownload: octetsToDownload,
		progressTheme:    progressTheme,
		previousRenderAt: time.Now(),
	}
	return bar
}
// calculateSpeed refreshes the rolling download speed, sampling at most once
// per second: bytes gained since the last sample divided by elapsed time.
func (b *DownloadBar) calculateSpeed(now time.Time) {
	elapsed := now.Sub(b.previousRenderAt)
	if elapsed < time.Second {
		return
	}
	delta := b.octetsDownloaded - b.previousDownloadOctetCount
	b.previousDownloadOctetCount = b.octetsDownloaded
	b.previousRenderAt = now
	b.downloadSpeedOctetsPerSecond = int64(float64(delta) / elapsed.Seconds())
}
// Render refreshes the speed sample and returns the bar line: either the
// themed prefix plus " Done!" at 100%, or prefix, downloaded/total sizes,
// current speed and percentage.
func (b *DownloadBar) Render() string {
	b.calculateSpeed(time.Now())
	progress := b.Progress()
	if progress == 100 {
		return b.progressTheme.RenderPrefix(progress) + " Done!"
	}
	return fmt.Sprintf("%v %v / %v (%v/s) (%3d%%)",
		b.progressTheme.RenderPrefix(progress),
		OctetCountToIEC(b.octetsDownloaded),
		OctetCountToIEC(b.octetsToDownload),
		OctetCountToIEC(b.downloadSpeedOctetsPerSecond),
		progress)
}
// Progress returns the download percentage, capped at 100 when more bytes
// than expected have arrived, and 0 when the total is unknown (zero).
func (b *DownloadBar) Progress() Percentage {
	var p Percentage
	if b.octetsDownloaded > b.octetsToDownload {
		return 100
	}
	if b.octetsToDownload != 0 {
		p = int(b.octetsDownloaded * 100 / b.octetsToDownload)
	}
	return p
}
// SetBytesDownloaded records the absolute number of bytes received so far;
// callers invoke it as the download progresses.
func (b *DownloadBar) SetBytesDownloaded(octetsDownloaded int64) {
	b.octetsDownloaded = octetsDownloaded
}
|
package client
import (
"context"
"github.com/openzipkin/zipkin-go"
zipkinhttpclient "github.com/openzipkin/zipkin-go/middleware/http"
"github.com/openzipkin/zipkin-go/model"
zipkinhttp "github.com/openzipkin/zipkin-go/reporter/http"
"io/ioutil"
"log"
"net/http"
"testing"
"time"
)
// TestClient_Ping is an integration test: it pings a local service through a
// zipkin-traced HTTP client and reports the spans to a remote collector.
// Fixes over the original: the errors from zipkin.NewEndpoint (remote) and
// zipkin.NewTracer were silently ignored; they are now checked.
func TestClient_Ping(t *testing.T) {
	reporter := zipkinhttp.NewReporter("http://100.100.62.190:9411/api/v2/spans")
	// create our tracer's local endpoint (how the service is identified in Zipkin).
	localEndpoint, err := zipkin.NewEndpoint("pong", "localhost:0")
	if err != nil {
		t.Fatalf("could not create endpoint: %v", err)
	}
	remoteEndpoint, err := zipkin.NewEndpoint("courierv3", "100.100.62.190:8081")
	if err != nil {
		t.Fatalf("could not create remote endpoint: %v", err)
	}
	// create our tracer instance.
	tracer, err := zipkin.NewTracer(reporter, zipkin.WithLocalEndpoint(localEndpoint))
	if err != nil {
		t.Fatalf("could not create tracer: %v", err)
	}
	// create global zipkin traced http client
	client, err := zipkinhttpclient.NewClient(tracer, zipkinhttpclient.ClientTrace(true))
	if err != nil {
		t.Fatalf("could not create client: %+v\n", err)
	}
	req, err := http.NewRequest("GET", "http://localhost:8081/ping", nil)
	if err != nil {
		t.Fatalf("could not create http request: %+v\n", err)
	}
	span := tracer.StartSpan("ping", zipkin.RemoteEndpoint(remoteEndpoint))
	req = req.WithContext(zipkin.NewContext(req.Context(), span))
	res, err := client.DoWithAppSpan(req, "pong")
	if err != nil {
		t.Fatalf("could not ping: %v", err)
	}
	defer res.Body.Close()
	b, err := ioutil.ReadAll(res.Body)
	if err != nil {
		t.Fatalf("could not read: %v", err)
	}
	log.Printf("%+v", string(b))
	// Give the async reporter time to flush spans to the collector.
	time.Sleep(5 * time.Second)
}
// TestTracer is an integration test that creates a client-kind span by hand,
// tags it with a remote endpoint, and flushes it to the collector.
// Fix over the original: the error from zipkin.NewEndpoint for the remote
// endpoint was ignored; it is now checked before use.
func TestTracer(t *testing.T) {
	var (
		serviceName        = "courierv4"
		serviceHostPort    = "localhost:8000"
		zipkinHTTPEndpoint = "http://100.100.62.190:9411/api/v2/spans"
	)
	// create an instance of the HTTP Reporter.
	reporter := zipkinhttp.NewReporter(zipkinHTTPEndpoint)
	// create our tracer's local endpoint (how the service is identified in Zipkin).
	localEndpoint, err := zipkin.NewEndpoint(serviceName, serviceHostPort)
	if err != nil {
		t.Fatalf("could not new endpoint: %+v", err)
	}
	// create our tracer instance.
	tracer, err := zipkin.NewTracer(reporter, zipkin.WithLocalEndpoint(localEndpoint))
	if err != nil {
		t.Fatalf("could not new tracer: %+v", err)
	}
	span, _ := tracer.StartSpanFromContext(context.Background(), "ping", zipkin.Kind(model.Client))
	remoteEndpoint, err := zipkin.NewEndpoint("myservicev1", "100.100.62.190:8081")
	if err != nil {
		t.Fatalf("could not new remote endpoint: %+v", err)
	}
	span.SetRemoteEndpoint(remoteEndpoint)
	log.Printf("%+v\n", span)
	span.Finish()
	span.Flush()
	// Give the async reporter time to flush the span to the collector.
	time.Sleep(5 * time.Second)
}
|
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// addTwoNumbers sums two non-negative integers stored as linked lists in
// reverse digit order (least significant digit first) and returns the sum
// in the same representation.
func addTwoNumbers(l1 *ListNode, l2 *ListNode) *ListNode {
	var head ListNode // sentinel; head.Next is the real result
	tail := &head
	carry := 0
	for l1 != nil || l2 != nil {
		sum := carry
		if l1 != nil {
			sum += l1.Val
			l1 = l1.Next
		}
		if l2 != nil {
			sum += l2.Val
			l2 = l2.Next
		}
		carry = sum / 10
		tail.Next = &ListNode{sum % 10, nil}
		tail = tail.Next
	}
	// A leftover carry can only ever be 1.
	if carry != 0 {
		tail.Next = &ListNode{1, nil}
	}
	return head.Next
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"errors"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
cloudkmspb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/cloudkms/cloudkms_go_proto"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudkms"
)
// EkmConnectionServer implements the gRPC interface for EkmConnection.
// It is stateless; the conversion helpers below do the proto <-> DCL mapping.
type EkmConnectionServer struct{}
// ProtoToCloudkmsEkmConnectionServiceResolvers converts an
// EkmConnectionServiceResolvers object from its proto representation,
// returning nil for a nil input.
func ProtoToCloudkmsEkmConnectionServiceResolvers(p *cloudkmspb.CloudkmsEkmConnectionServiceResolvers) *cloudkms.EkmConnectionServiceResolvers {
	if p == nil {
		return nil
	}
	resolvers := &cloudkms.EkmConnectionServiceResolvers{
		ServiceDirectoryService: dcl.StringOrNil(p.GetServiceDirectoryService()),
		EndpointFilter:          dcl.StringOrNil(p.GetEndpointFilter()),
		Hostname:                dcl.StringOrNil(p.GetHostname()),
	}
	for _, cert := range p.GetServerCertificates() {
		converted := ProtoToCloudkmsEkmConnectionServiceResolversServerCertificates(cert)
		resolvers.ServerCertificates = append(resolvers.ServerCertificates, *converted)
	}
	return resolvers
}
// ProtoToCloudkmsEkmConnectionServiceResolversServerCertificates converts an
// EkmConnectionServiceResolversServerCertificates object from its proto
// representation. Returns nil for nil input.
func ProtoToCloudkmsEkmConnectionServiceResolversServerCertificates(p *cloudkmspb.CloudkmsEkmConnectionServiceResolversServerCertificates) *cloudkms.EkmConnectionServiceResolversServerCertificates {
	if p == nil {
		return nil
	}
	obj := &cloudkms.EkmConnectionServiceResolversServerCertificates{
		RawDer: dcl.StringOrNil(p.GetRawDer()),
		Parsed: dcl.Bool(p.GetParsed()),
		Issuer: dcl.StringOrNil(p.GetIssuer()),
		Subject: dcl.StringOrNil(p.GetSubject()),
		NotBeforeTime: dcl.StringOrNil(p.GetNotBeforeTime()),
		NotAfterTime: dcl.StringOrNil(p.GetNotAfterTime()),
		SerialNumber: dcl.StringOrNil(p.GetSerialNumber()),
		Sha256Fingerprint: dcl.StringOrNil(p.GetSha256Fingerprint()),
	}
	for _, r := range p.GetSubjectAlternativeDnsNames() {
		obj.SubjectAlternativeDnsNames = append(obj.SubjectAlternativeDnsNames, r)
	}
	return obj
}
// ProtoToEkmConnection converts a EkmConnection resource from its proto representation.
// NOTE(review): unlike the object converters above there is no nil guard
// here; generated proto getters are nil-receiver-safe, so a nil p
// presumably yields an empty resource — confirm against the generator.
func ProtoToEkmConnection(p *cloudkmspb.CloudkmsEkmConnection) *cloudkms.EkmConnection {
	obj := &cloudkms.EkmConnection{
		Name: dcl.StringOrNil(p.GetName()),
		CreateTime: dcl.StringOrNil(p.GetCreateTime()),
		Etag: dcl.StringOrNil(p.GetEtag()),
		Project: dcl.StringOrNil(p.GetProject()),
		Location: dcl.StringOrNil(p.GetLocation()),
	}
	for _, r := range p.GetServiceResolvers() {
		obj.ServiceResolvers = append(obj.ServiceResolvers, *ProtoToCloudkmsEkmConnectionServiceResolvers(r))
	}
	return obj
}
// CloudkmsEkmConnectionServiceResolversToProto converts an
// EkmConnectionServiceResolvers object to its proto representation.
// Returns nil for nil input.
func CloudkmsEkmConnectionServiceResolversToProto(o *cloudkms.EkmConnectionServiceResolvers) *cloudkmspb.CloudkmsEkmConnectionServiceResolvers {
	if o == nil {
		return nil
	}
	p := &cloudkmspb.CloudkmsEkmConnectionServiceResolvers{}
	p.SetServiceDirectoryService(dcl.ValueOrEmptyString(o.ServiceDirectoryService))
	p.SetEndpointFilter(dcl.ValueOrEmptyString(o.EndpointFilter))
	p.SetHostname(dcl.ValueOrEmptyString(o.Hostname))
	sServerCertificates := make([]*cloudkmspb.CloudkmsEkmConnectionServiceResolversServerCertificates, len(o.ServerCertificates))
	for i, r := range o.ServerCertificates {
		sServerCertificates[i] = CloudkmsEkmConnectionServiceResolversServerCertificatesToProto(&r)
	}
	p.SetServerCertificates(sServerCertificates)
	return p
}
// CloudkmsEkmConnectionServiceResolversServerCertificatesToProto converts an
// EkmConnectionServiceResolversServerCertificates object to its proto
// representation. Returns nil for nil input.
func CloudkmsEkmConnectionServiceResolversServerCertificatesToProto(o *cloudkms.EkmConnectionServiceResolversServerCertificates) *cloudkmspb.CloudkmsEkmConnectionServiceResolversServerCertificates {
	if o == nil {
		return nil
	}
	p := &cloudkmspb.CloudkmsEkmConnectionServiceResolversServerCertificates{}
	p.SetRawDer(dcl.ValueOrEmptyString(o.RawDer))
	p.SetParsed(dcl.ValueOrEmptyBool(o.Parsed))
	p.SetIssuer(dcl.ValueOrEmptyString(o.Issuer))
	p.SetSubject(dcl.ValueOrEmptyString(o.Subject))
	p.SetNotBeforeTime(dcl.ValueOrEmptyString(o.NotBeforeTime))
	p.SetNotAfterTime(dcl.ValueOrEmptyString(o.NotAfterTime))
	p.SetSerialNumber(dcl.ValueOrEmptyString(o.SerialNumber))
	p.SetSha256Fingerprint(dcl.ValueOrEmptyString(o.Sha256Fingerprint))
	sSubjectAlternativeDnsNames := make([]string, len(o.SubjectAlternativeDnsNames))
	for i, r := range o.SubjectAlternativeDnsNames {
		sSubjectAlternativeDnsNames[i] = r
	}
	p.SetSubjectAlternativeDnsNames(sSubjectAlternativeDnsNames)
	return p
}
// EkmConnectionToProto converts a EkmConnection resource to its proto representation.
func EkmConnectionToProto(resource *cloudkms.EkmConnection) *cloudkmspb.CloudkmsEkmConnection {
	p := &cloudkmspb.CloudkmsEkmConnection{}
	p.SetName(dcl.ValueOrEmptyString(resource.Name))
	p.SetCreateTime(dcl.ValueOrEmptyString(resource.CreateTime))
	p.SetEtag(dcl.ValueOrEmptyString(resource.Etag))
	p.SetProject(dcl.ValueOrEmptyString(resource.Project))
	p.SetLocation(dcl.ValueOrEmptyString(resource.Location))
	sServiceResolvers := make([]*cloudkmspb.CloudkmsEkmConnectionServiceResolvers, len(resource.ServiceResolvers))
	for i, r := range resource.ServiceResolvers {
		sServiceResolvers[i] = CloudkmsEkmConnectionServiceResolversToProto(&r)
	}
	p.SetServiceResolvers(sServiceResolvers)
	return p
}
// applyEkmConnection handles the gRPC request by passing it to the underlying
// EkmConnection Apply() method: the proto resource is converted to its DCL
// form, applied, and the result converted back to proto.
func (s *EkmConnectionServer) applyEkmConnection(ctx context.Context, c *cloudkms.Client, request *cloudkmspb.ApplyCloudkmsEkmConnectionRequest) (*cloudkmspb.CloudkmsEkmConnection, error) {
	p := ProtoToEkmConnection(request.GetResource())
	res, err := c.ApplyEkmConnection(ctx, p)
	if err != nil {
		return nil, err
	}
	r := EkmConnectionToProto(res)
	return r, nil
}
// ApplyCloudkmsEkmConnection handles the gRPC request by building a client
// from the request's service account file and delegating to applyEkmConnection.
func (s *EkmConnectionServer) ApplyCloudkmsEkmConnection(ctx context.Context, request *cloudkmspb.ApplyCloudkmsEkmConnectionRequest) (*cloudkmspb.CloudkmsEkmConnection, error) {
	cl, err := createConfigEkmConnection(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyEkmConnection(ctx, cl, request)
}
// DeleteCloudkmsEkmConnection handles the gRPC request for EkmConnection
// deletion. The service exposes no delete endpoint, so this always errors.
func (s *EkmConnectionServer) DeleteCloudkmsEkmConnection(ctx context.Context, request *cloudkmspb.DeleteCloudkmsEkmConnectionRequest) (*emptypb.Empty, error) {
	return nil, errors.New("no delete endpoint for EkmConnection")
}
// ListCloudkmsEkmConnection handles the gRPC request by passing it to the
// underlying EkmConnectionList() method, converting each returned resource
// to its proto representation.
func (s *EkmConnectionServer) ListCloudkmsEkmConnection(ctx context.Context, request *cloudkmspb.ListCloudkmsEkmConnectionRequest) (*cloudkmspb.ListCloudkmsEkmConnectionResponse, error) {
	cl, err := createConfigEkmConnection(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListEkmConnection(ctx, request.GetProject(), request.GetLocation())
	if err != nil {
		return nil, err
	}
	var protos []*cloudkmspb.CloudkmsEkmConnection
	for _, r := range resources.Items {
		rp := EkmConnectionToProto(r)
		protos = append(protos, rp)
	}
	p := &cloudkmspb.ListCloudkmsEkmConnectionResponse{}
	p.SetItems(protos)
	return p, nil
}
// createConfigEkmConnection builds a cloudkms client configured with the
// given service account credentials file and a test user agent.
// (Parameter renamed from snake_case service_account_file to Go-style
// MixedCaps; no behavioral change.)
func createConfigEkmConnection(ctx context.Context, serviceAccountFile string) (*cloudkms.Client, error) {
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return cloudkms.NewClient(conf), nil
}
|
package server_request_handler
import "net"
import "log"
import "io"
import "encoding/json"
import . "../packet"
// ServerRequestHandler owns the listening socket and the single accepted
// client connection used for the length-prefixed JSON request/reply exchange.
type ServerRequestHandler struct{
	Listen net.Listener // accepting socket, bound in NewSRH
	Connection net.Conn // the one active client connection
}
// NewSRH binds a listener on the given protocol and port, blocks until a
// single client connects, and stores both handles on the receiver.
// The original discarded the errors from Listen and Accept, which left nil
// fields that panic on first use; failures now abort with a clear message.
func (srh *ServerRequestHandler) NewSRH(protocol string, port string) {
	ln, err := net.Listen(protocol, port)
	if err != nil {
		log.Fatal("listen error: ", err)
	}
	conn, err := ln.Accept()
	if err != nil {
		log.Fatal("accept error: ", err)
	}
	srh.Listen = ln
	srh.Connection = conn
}
// Send marshals pkt as JSON and writes it to the connection preceded by the
// JSON-encoded byte length of the payload (the framing Receive expects).
// Fixes from the original: the first Marshal error was overwritten by the
// second, and both frames were written before any error check. Errors are
// now checked before anything goes on the wire, and write errors are fatal.
func (srh ServerRequestHandler) Send(pkt Packet) {
	encoded, err := json.Marshal(pkt)
	if err != nil {
		log.Fatal("Encoding error", err)
	}
	encodedSize, err := json.Marshal(len(encoded))
	if err != nil {
		log.Fatal("Encoding error", err)
	}
	if _, err := srh.Connection.Write(encodedSize); err != nil {
		log.Fatal("Write error", err)
	}
	if _, err := srh.Connection.Write(encoded); err != nil {
		log.Fatal("Write error", err)
	}
}
// Receive reads one length-prefixed JSON packet from the connection and
// decodes it; counterpart of Send.
func (srh *ServerRequestHandler) Receive() Packet {
	var pkt Packet
	// NOTE(review): the size prefix is read as exactly 3 bytes and then
	// JSON-decoded, so this only framing-matches Send while the marshaled
	// length is exactly 3 digits (payloads of 100-999 bytes) — confirm.
	var masPktSize int64
	size := make([]byte, 3)
	io.ReadFull(srh.Connection,size) // read errors ignored; a short read yields a zero-size frame
	_ = json.Unmarshal(size, &masPktSize)
	packetMsh := make([]byte, masPktSize)
	io.ReadFull(srh.Connection,packetMsh)
	// Unmarshal errors ignored: a malformed payload returns a zero Packet.
	_ = json.Unmarshal(packetMsh, &pkt)
	return pkt
}
|
// Copyright 2017 The OpenSDS Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
/*
This module defines some essential configuration infos for all storage drivers.
*/
package config
import (
"io/ioutil"
log "github.com/golang/glog"
"github.com/sodafoundation/dock/pkg/model"
"gopkg.in/yaml.v2"
)
// PoolProperties describes the statically configured attributes of a
// storage pool as declared in a driver's YAML configuration file.
type PoolProperties struct {
	// The storage type of the storage pool.
	// One of: "block", "file" or "object".
	StorageType string `yaml:"storageType,omitempty"`
	// The locality that pool belongs to.
	AvailabilityZone string `yaml:"availabilityZone,omitempty"`
	// Map of keys and StoragePoolExtraSpec object that represents the properties
	// of the pool, such as supported capabilities.
	// +optional
	Extras model.StoragePoolExtraSpec `yaml:"extras,omitempty"`
	// The volumes belong to the pool can be attached more than once.
	MultiAttach bool `yaml:"multiAttach,omitempty"`
}
// Parse reads the YAML file at p and unmarshals it into conf, returning
// conf for convenience alongside any error.
// Fix: the original called log.Fatalf, which terminates the process and
// made the error returns unreachable — callers could never see the error.
// Failures are now logged at error level and returned.
func Parse(conf interface{}, p string) (interface{}, error) {
	confYaml, err := ioutil.ReadFile(p)
	if err != nil {
		log.Errorf("Read config yaml file (%s) failed, reason:(%v)", p, err)
		return nil, err
	}
	if err = yaml.Unmarshal(confYaml, conf); err != nil {
		log.Errorf("Parse error: %v", err)
		return nil, err
	}
	return conf, nil
}
|
package data
import (
"testing"
)
// TestSession covers the Session lifecycle: Check, User lookup, Delete, and
// ResetSessions. Each subtest resets the database and creates a fresh
// session for the shared package-level user fixture.
// Fix: typo "Failedt" in the Reset subtest's error message.
func TestSession(t *testing.T) {
	// setupSession creates the fixture user and a session for it,
	// failing the test immediately on any error.
	setupSession := func(t *testing.T) (session Session) {
		t.Helper()
		err := user.Create()
		if err != nil {
			t.Fatal(err)
		}
		gotUser, err := UserByUserIdStr(user.UserIdStr)
		if err != nil {
			t.Fatal(err)
		}
		session, err = gotUser.CreateSession()
		if err != nil {
			t.Fatal(err, "- Failed to create the session.")
		}
		return
	}
	// Check test
	t.Run("Check", func(t *testing.T) {
		reset(t)
		session := setupSession(t)
		_, err := session.Check()
		if err != nil {
			t.Error(err, "- Failed to check the session.")
		}
	})
	// User test
	t.Run("User", func(t *testing.T) {
		reset(t)
		session := setupSession(t)
		gotUser, err := session.User()
		if err != nil {
			t.Error(err, "- Failed to get the session from the user.")
		}
		want := user.UserIdStr
		got := gotUser.UserIdStr
		assertCorrectMessage(t, want, got)
	})
	// Delete test
	t.Run("Delete", func(t *testing.T) {
		reset(t)
		session := setupSession(t)
		err := session.Delete()
		if err != nil {
			t.Error(err, "- Failed to delete the session.")
		}
	})
	// ResetSessions test: create two users with one session each, then
	// wipe the whole session table.
	t.Run("Reset", func(t *testing.T) {
		reset(t)
		users := []User{
			{
				Name:      "Taro",
				UserIdStr: "taroId",
				Email:     "taro@gmail.com",
				Password:  "taroPass",
				ImagePath: "default.png",
			},
			{
				Name:      "Hana",
				UserIdStr: "hanaId",
				Email:     "hana@gmail.com",
				Password:  "hanaPass",
				ImagePath: "default.png",
			},
		}
		sessions := []Session{}
		for _, user := range users {
			err := user.Create()
			if err != nil {
				t.Fatal(err)
			}
			gotUser, err := UserByUserIdStr(user.UserIdStr)
			if err != nil {
				t.Fatal(err)
			}
			session, err := gotUser.CreateSession()
			if err != nil {
				t.Fatal(err, "- Failed to create the session.")
			}
			sessions = append(sessions, session)
		}
		if err := ResetSessions(); err != nil {
			t.Error(err, "- Failed to reset the session table.")
		}
	})
}
|
//
// Copyright (c) 2016-2017, Arista Networks, Inc. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// * Neither the name of Arista Networks nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
// BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
// IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
package cvpapi
import (
"errors"
"testing"
)
// Test_CvpGetAllRolesRetError_UnitTest verifies that a client transport
// error is wrapped with the "GetAllRoles:" prefix and propagated.
func Test_CvpGetAllRolesRetError_UnitTest(t *testing.T) {
	clientErr := errors.New("Client error")
	expectedErr := errors.New("GetAllRoles: Client error")
	client := NewMockClient("", clientErr)
	api := NewCvpRestAPI(client)
	_, err := api.GetAllRoles(0, 0)
	// Fix: guard against nil — the original dereferenced err.Error()
	// unconditionally, which panics instead of failing cleanly when the
	// API unexpectedly succeeds.
	if err == nil {
		t.Fatalf("Expected Client error: [%v] Got: nil", expectedErr)
	}
	if err.Error() != expectedErr.Error() {
		t.Fatalf("Expected Client error: [%v] Got: [%v]", expectedErr, err)
	}
}
// Test_CvpGetAllRolesJsonError_UnitTest: truncated JSON must surface an
// unmarshal error.
func Test_CvpGetAllRolesJsonError_UnitTest(t *testing.T) {
	client := NewMockClient("{", nil)
	api := NewCvpRestAPI(client)
	if _, err := api.GetAllRoles(0, 0); err == nil {
		t.Fatal("JSON unmarshal error should be returned")
	}
}
// Test_CvpGetAllRolesEmptyJsonError_UnitTest: an empty response body must
// surface an unmarshal error.
func Test_CvpGetAllRolesEmptyJsonError_UnitTest(t *testing.T) {
	client := NewMockClient("", nil)
	api := NewCvpRestAPI(client)
	if _, err := api.GetAllRoles(0, 0); err == nil {
		t.Fatal("JSON unmarshal error should be returned")
	}
}
// Test_CvpGetAllRolesReturnError_UnitTest: a CVP error payload
// (errorCode/errorMessage) must be reported as an error.
func Test_CvpGetAllRolesReturnError_UnitTest(t *testing.T) {
	respStr := `{"errorCode": "112498",
		"errorMessage": "Unauthorized User"}`
	client := NewMockClient(respStr, nil)
	api := NewCvpRestAPI(client)
	if _, err := api.GetAllRoles(0, 0); err == nil {
		t.Fatal("Error should be returned")
	}
}
func Test_CvpGetAllRolesValid_UnitTest(t *testing.T) {
client := NewMockClient("{}", nil)
api := NewCvpRestAPI(client)
_, err := api.GetAllRoles(0, 0)
if err != nil {
t.Fatalf("Valid case failed with error: %v", err)
}
}
// Test_CvpGetAllRolesValid2_UnitTest verifies that a realistic two-role
// payload (network-admin rw, network-operator mostly r) plus a per-role
// user count parses without error.
func Test_CvpGetAllRolesValid2_UnitTest(t *testing.T) {
	data := `{
		"total": 2,
		"roles": [
			{
				"name": "network-admin",
				"key": "network-admin",
				"description": "",
				"moduleListSize": 19,
				"createdBy": "cvp system",
				"createdOn": 1548362619198,
				"moduleList": [
					{
						"name": "image",
						"mode": "rw"
					},
					{
						"name": "ztp",
						"mode": "rw"
					},
					{
						"name": "configlet",
						"mode": "rw"
					},
					{
						"name": "task",
						"mode": "rw"
					},
					{
						"name": "inventory",
						"mode": "rw"
					},
					{
						"name": "label",
						"mode": "rw"
					},
					{
						"name": "danz",
						"mode": "rw"
					},
					{
						"name": "aaa",
						"mode": "rw"
					},
					{
						"name": "account",
						"mode": "rw"
					},
					{
						"name": "snapshot",
						"mode": "rw"
					},
					{
						"name": "changeControl",
						"mode": "rw"
					},
					{
						"name": "ssl",
						"mode": "rw"
					},
					{
						"name": "purge",
						"mode": "rw"
					},
					{
						"name": "cvpTheme",
						"mode": "rw"
					},
					{
						"name": "networkProvisioning",
						"mode": "rw"
					},
					{
						"name": "audit",
						"mode": "rw"
					},
					{
						"name": "workflow",
						"mode": "rw"
					},
					{
						"name": "cloudManager",
						"mode": "rw"
					},
					{
						"name": "publicCloudAccounts",
						"mode": "rw"
					}
				]
			},
			{
				"name": "network-operator",
				"key": "network-operator",
				"description": "",
				"moduleListSize": 18,
				"createdBy": "cvp system",
				"createdOn": 1548362619219,
				"moduleList": [
					{
						"name": "image",
						"mode": "r"
					},
					{
						"name": "ztp",
						"mode": "r"
					},
					{
						"name": "configlet",
						"mode": "r"
					},
					{
						"name": "task",
						"mode": "r"
					},
					{
						"name": "inventory",
						"mode": "r"
					},
					{
						"name": "label",
						"mode": "r"
					},
					{
						"name": "danz",
						"mode": "r"
					},
					{
						"name": "aaa",
						"mode": "r"
					},
					{
						"name": "account",
						"mode": "r"
					},
					{
						"name": "snapshot",
						"mode": "r"
					},
					{
						"name": "changeControl",
						"mode": "r"
					},
					{
						"name": "ssl",
						"mode": "r"
					},
					{
						"name": "purge",
						"mode": "r"
					},
					{
						"name": "cvpTheme",
						"mode": "rw"
					},
					{
						"name": "networkProvisioning",
						"mode": "r"
					},
					{
						"name": "audit",
						"mode": "r"
					},
					{
						"name": "workflow",
						"mode": "r"
					},
					{
						"name": "cloudManager",
						"mode": "r"
					}
				]
			}
		],
		"users": {
			"network-admin": 2,
			"network-operator": 5
		}
	}`
	client := NewMockClient(data, nil)
	api := NewCvpRestAPI(client)
	_, err := api.GetAllRoles(0, 0)
	if err != nil {
		t.Fatalf("Valid case failed with error: %v", err)
	}
}
// Test_CvpGetRoleRetError_UnitTest verifies that a client transport error
// is wrapped with the "GetRole:" prefix and propagated.
func Test_CvpGetRoleRetError_UnitTest(t *testing.T) {
	clientErr := errors.New("Client error")
	expectedErr := errors.New("GetRole: Client error")
	client := NewMockClient("", clientErr)
	api := NewCvpRestAPI(client)
	_, err := api.GetRole("role")
	// Fix: guard against nil — the original dereferenced err.Error()
	// unconditionally, which panics rather than failing cleanly.
	if err == nil {
		t.Fatalf("Expected Client error: [%v] Got: nil", expectedErr)
	}
	if err.Error() != expectedErr.Error() {
		t.Fatalf("Expected Client error: [%v] Got: [%v]", expectedErr, err)
	}
}
// Test_CvpGetRoleJsonError_UnitTest: truncated JSON must surface an
// unmarshal error.
func Test_CvpGetRoleJsonError_UnitTest(t *testing.T) {
	client := NewMockClient("{", nil)
	api := NewCvpRestAPI(client)
	if _, err := api.GetRole("role"); err == nil {
		t.Fatal("JSON unmarshal error should be returned")
	}
}
// Test_CvpGetRoleEmptyJsonError_UnitTest: an empty response body must
// surface an unmarshal error.
func Test_CvpGetRoleEmptyJsonError_UnitTest(t *testing.T) {
	client := NewMockClient("", nil)
	api := NewCvpRestAPI(client)
	if _, err := api.GetRole("role"); err == nil {
		t.Fatal("JSON unmarshal error should be returned")
	}
}
// Test_CvpGetRoleReturnError_UnitTest: a CVP error payload
// (errorCode/errorMessage) must be reported as an error.
func Test_CvpGetRoleReturnError_UnitTest(t *testing.T) {
	respStr := `{"errorCode": "112498",
		"errorMessage": "Unauthorized User"}`
	client := NewMockClient(respStr, nil)
	api := NewCvpRestAPI(client)
	if _, err := api.GetRole("role"); err == nil {
		t.Fatal("Error should be returned")
	}
}
// Test_CvpGetRoleValid_UnitTest verifies that a realistic single-role
// payload (network-admin with a full rw module list) parses without error.
func Test_CvpGetRoleValid_UnitTest(t *testing.T) {
	data := `{
		"name": "network-admin",
		"key": "network-admin",
		"description": "",
		"moduleListSize": 19,
		"createdBy": "cvp system",
		"createdOn": 1548362619198,
		"moduleList": [
			{
				"name": "image",
				"mode": "rw"
			},
			{
				"name": "ztp",
				"mode": "rw"
			},
			{
				"name": "configlet",
				"mode": "rw"
			},
			{
				"name": "task",
				"mode": "rw"
			},
			{
				"name": "inventory",
				"mode": "rw"
			},
			{
				"name": "label",
				"mode": "rw"
			},
			{
				"name": "danz",
				"mode": "rw"
			},
			{
				"name": "aaa",
				"mode": "rw"
			},
			{
				"name": "account",
				"mode": "rw"
			},
			{
				"name": "snapshot",
				"mode": "rw"
			},
			{
				"name": "changeControl",
				"mode": "rw"
			},
			{
				"name": "ssl",
				"mode": "rw"
			},
			{
				"name": "purge",
				"mode": "rw"
			},
			{
				"name": "cvpTheme",
				"mode": "rw"
			},
			{
				"name": "networkProvisioning",
				"mode": "rw"
			},
			{
				"name": "audit",
				"mode": "rw"
			},
			{
				"name": "workflow",
				"mode": "rw"
			},
			{
				"name": "cloudManager",
				"mode": "rw"
			},
			{
				"name": "publicCloudAccounts",
				"mode": "rw"
			}
		]
	}`
	client := NewMockClient(data, nil)
	api := NewCvpRestAPI(client)
	_, err := api.GetRole("network-admin")
	if err != nil {
		t.Fatalf("Valid case failed with error: %v", err)
	}
}
|
package account
import (
"github.com/cbsinteractive/bitmovin-api-sdk-go/common"
"github.com/cbsinteractive/bitmovin-api-sdk-go/model"
)
// AccountLoginApi exposes the /account/login endpoint of the bitmovin API.
type AccountLoginApi struct {
	apiClient *common.ApiClient // configured HTTP client shared by all calls
}
// NewAccountLoginApi constructs an AccountLoginApi, applying any client
// configuration functions to the underlying ApiClient.
// Fix: the original re-checked the same err after it had already been
// handled; that second branch was dead code and has been removed.
func NewAccountLoginApi(configs ...func(*common.ApiClient)) (*AccountLoginApi, error) {
	apiClient, err := common.NewApiClient(configs...)
	if err != nil {
		return nil, err
	}
	return &AccountLoginApi{apiClient: apiClient}, nil
}
// Create logs in with the supplied credentials and returns the resulting
// account information (or the API error).
func (api *AccountLoginApi) Create(login model.Login) (*model.AccountInformation, error) {
	noExtraParams := func(params *common.RequestParams) {}
	var info *model.AccountInformation
	err := api.apiClient.Post("/account/login", &login, &info, noExtraParams)
	return info, err
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//500. Keyboard Row
//Given a list of words, return the words that can be typed using letters of the alphabet on only one row of an American keyboard like the image below.
//Example 1:
//Input: ["Hello", "Alaska", "Dad", "Peace"]
//Output: ["Alaska", "Dad"]
//Note:
//You may use one character in the keyboard more than once.
//You may assume the input string will only contain letters of alphabet.
//func findWords(words []string) []string {
//}
// Time Is Money
|
package main
import (
"fmt"
"time"
)
// fibonacci streams cap(mychan) Fibonacci numbers (1, 1, 2, 3, ...) into
// the channel, pausing one second between sends, and finally closes the
// channel so the consuming range loop terminates instead of blocking.
func fibonacci(mychan chan int) {
	count := cap(mychan)
	a, b := 1, 1
	for sent := 0; sent < count; sent++ {
		mychan <- a
		a, b = b, a+b
		time.Sleep(time.Second)
	}
	// Remember to close the channel — otherwise ranging over it in the
	// caller would block forever after the last value.
	close(mychan)
}
// main demonstrates a buffered-channel producer: it launches fibonacci as
// a goroutine and prints every value until the producer closes the channel.
func main() {
	pipline := make(chan int, 50)
	go fibonacci(pipline)
	for value := range pipline {
		fmt.Println(value)
	}
}
|
package main
import "fmt"
// main demonstrates Go conditionals: a parity check, a string comparison,
// and a three-way comparison (written as an expressionless switch, the
// idiomatic replacement for an if / else if / else chain).
//
// Syntax notes preserved from the original:
//   - `else` must sit on the same line as the closing `}` of its `if`.
//   - string literals require double quotes; 'sush' is not a valid string.
func main() {
	if 2%2 == 1 {
		fmt.Println("two is odd number")
	} else {
		fmt.Println("two is even number")
	}
	if "sush" != "sushil" {
		fmt.Println("sush is not present in sushil")
	}
	var no = 10
	switch {
	case no > 10:
		fmt.Println("no greater than 10")
	case no < 10:
		fmt.Println("no is less than 10")
	default:
		fmt.Println("no is equal to 10")
	}
}
|
package leetcode
import (
"reflect"
"testing"
)
// TestShiftGrid checks shiftGrid against two fixed grids: a 3x3 grid
// shifted by one cell and a 4x4 grid shifted by a whole row.
func TestShiftGrid(t *testing.T) {
	cases := []struct {
		grid [][]int
		k    int
		want [][]int
	}{
		{
			grid: [][]int{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}},
			k:    1,
			want: [][]int{{9, 1, 2}, {3, 4, 5}, {6, 7, 8}},
		},
		{
			grid: [][]int{{3, 8, 1, 9}, {19, 7, 2, 5}, {4, 6, 11, 10}, {12, 0, 21, 13}},
			k:    4,
			want: [][]int{{12, 0, 21, 13}, {3, 8, 1, 9}, {19, 7, 2, 5}, {4, 6, 11, 10}},
		},
	}
	for _, c := range cases {
		if !reflect.DeepEqual(shiftGrid(c.grid, c.k), c.want) {
			t.Fatal()
		}
	}
}
|
package model
import (
"config"
"constant"
"fmt"
"time"
mgo "gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
)
// MgoDBCntlr bundles one mgo session with the database and collection
// handles derived from it, so several operations can run on the same
// session before Close releases it.
type MgoDBCntlr struct {
	sess *mgo.Session // owning session; released by Close
	db *mgo.Database
	c *mgo.Collection // current collection; switchable via SetTableName
}
// DBNAME is the default database name, taken from configuration.
var (
	DBNAME = config.Conf.DB.DBName
	globalSess *mgo.Session // shared session; per-request handles Clone/Copy it
	mongoURL string // connection URL, assembled once in init
	DefaultSelector = bson.M{}
)
const (
	// USERTABLE is the user collection name.
	USERTABLE = "user"
	// FEEDBACKTABLE is the feedback collection name.
	FEEDBACKTABLE = "feedback"
	// MongoCopyType selects session Copy and MongoCloneType selects Clone
	// in NewMgoDBCntlr's variadic session-type argument.
	MongoCopyType = "1"
	MongoCloneType = "2"
)
// init assembles the MongoDB connection URL from configuration — with
// credentials against the admin database when both user and password are
// set — and dials the shared global session. The package is unusable
// without a database connection, so a dial failure panics.
func init() {
	dbConf := config.Conf.DB
	if dbConf.User != "" && dbConf.PW != "" {
		mongoURL = fmt.Sprintf("mongodb://%s:%s@%s:%s/%s", dbConf.User, dbConf.PW, dbConf.Host, dbConf.Port, dbConf.AdminDBName)
	} else {
		mongoURL = fmt.Sprintf("mongodb://%s:%s", dbConf.Host, dbConf.Port)
	}
	var err error
	globalSess, err = GetDBSession()
	if err != nil {
		panic(err)
	}
}
/****************************************** db session manage ****************************************/
// GetDBSession dials a fresh MongoDB session with a 10-second timeout,
// monotonic read consistency, and a connection pool capped at 1000.
func GetDBSession() (*mgo.Session, error) {
	sess, err := mgo.DialWithTimeout(mongoURL, 10*time.Second)
	if err != nil {
		return nil, err
	}
	sess.SetMode(mgo.Monotonic, true)
	// Lower the pool limit from mgo's default of 4096.
	sess.SetPoolLimit(1000)
	return sess, nil
}
// GetCloneSess returns a clone of the global session (may share the
// underlying socket).
func GetCloneSess() *mgo.Session {
	return globalSess.Clone()
}
// GetCopySess returns a copy of the global session (always takes a fresh
// socket from the pool).
func GetCopySess() *mgo.Session {
	return globalSess.Copy()
}
/********************************************* MgoDBCntlr *******************************************/
/*
NewMgoDBCntlr builds a controller from a variadic argument list:
  one arg:    table name (default database, session Copy)
  two args:   table name, session type ("1" = Copy, "2" = Clone)
  three args: table name, session type, database name

NOTE(review): zero args would panic at args[0], and more than three args
return a controller with nil fields — callers are presumed to always pass
one to three args; confirm.
*/
func NewMgoDBCntlr(args ...string) *MgoDBCntlr {
	mgoSess := &MgoDBCntlr{}
	if len(args) <= 3 {
		// "2" requests Clone; anything else (including absent) uses Copy.
		if len(args) >= 2 && args[1] == "2" {
			mgoSess.sess = globalSess.Clone()
		} else {
			mgoSess.sess = globalSess.Copy()
		}
		if len(args) == 3 {
			mgoSess.db = mgoSess.sess.DB(args[2])
		} else {
			mgoSess.db = mgoSess.sess.DB(DBNAME)
		}
		mgoSess.c = mgoSess.db.C(args[0])
	}
	return mgoSess
}
// Close releases the controller's underlying mgo session back to the pool.
func (this *MgoDBCntlr) Close() {
	this.sess.Close()
}
// SetTableName switches the controller to another collection in the same
// database.
func (this *MgoDBCntlr) SetTableName(tableName string) {
	this.c = this.db.C(tableName)
}
/*
Find runs a single-document query.
args: query(interface{}), result(interface{}), select(interface{})
query and result are required; the optional select is positional, so it
can only be supplied after the first two.
*/
func (this *MgoDBCntlr) Find(args ...interface{}) error {
	var mgoQuery *mgo.Query
	if len(args) < 2 || len(args) > 3 {
		return constant.ErrorOutOfRange
	}
	for i := 0; i < len(args); i++ {
		switch i {
		case 0:
			mgoQuery = this.c.Find(args[i]) // args[0]: query document
		case 2:
			mgoQuery = mgoQuery.Select(args[i]) // args[2]: field selector
		}
	}
	// args[1] receives the decoded document (case 1 is intentionally absent
	// from the switch above).
	return mgoQuery.One(args[1])
}
/*
FindAll runs a multi-document query.
args: query(interface{}), result(interface{}), select(interface{}),
limit(int), skip(int), sort([]string)
query and result are required; the optional args are positional, so to use
a later one every earlier one must also be supplied.
*/
func (this *MgoDBCntlr) FindAll(args ...interface{}) error {
	var mgoQuery *mgo.Query
	if len(args) < 2 || len(args) > 6 {
		return constant.ErrorOutOfRange
	}
	for i := 0; i < len(args); i++ {
		switch i {
		case 0:
			mgoQuery = this.c.Find(args[i]) // args[0]: query document
		case 2:
			mgoQuery = mgoQuery.Select(args[i]) // args[2]: field selector
		case 3:
			if limit, ok := args[i].(int); ok {
				mgoQuery = mgoQuery.Limit(limit)
			}
		case 4:
			// Consistency fix: was args[4]; args[i] is identical here
			// (i == 4) and matches every other case.
			if skip, ok := args[i].(int); ok {
				mgoQuery = mgoQuery.Skip(skip)
			}
		case 5:
			if sort, ok := args[i].([]string); ok {
				mgoQuery = mgoQuery.Sort(sort...)
			}
		}
	}
	// args[1] receives the decoded slice of documents.
	return mgoQuery.All(args[1])
}
// FindCount returns the number of documents matching query.
func (this *MgoDBCntlr) FindCount(query interface{}) (int, error) {
	return this.c.Find(query).Count()
}
// Update modifies the first document matching query.
func (this *MgoDBCntlr) Update(query, update interface{}) error {
	return this.c.Update(query, update)
}
// UpdateAll modifies every document matching query and reports how many
// documents were touched.
func (this *MgoDBCntlr) UpdateAll(query, update interface{}) (*mgo.ChangeInfo, error) {
	return this.c.UpdateAll(query, update)
}
|
package c
import (
"sync"
"testing"
)
// TestChannel exercises a fan-out/fan-in pipeline: four workers consume
// runes from a buffered queue, append a suffix, and one collector logs
// each result while counting the WaitGroup down.
func TestChannel(t *testing.T) {
	wg := new(sync.WaitGroup)
	queue := make(chan rune, 128)
	// worker loops forever: receive a rune, decorate it, forward it.
	// NOTE(review): there is no exit condition, so the worker (and the
	// general collector below) goroutines outlive wg.Wait() — a benign
	// leak for a test, but a leak nonetheless.
	worker := func(queue <-chan rune, resCh chan<- string) {
		t.Log("on worker")
		for {
			str := <-queue
			// do something
			resCh <- string(str) + " [appended]"
		}
	}
	// general fans work out to four workers and drains their results,
	// releasing one WaitGroup slot per processed rune.
	general := func(queue chan rune, wg *sync.WaitGroup) {
		resch := make(chan string)
		for i := 0; i < 4; i++ {
			go worker(queue, resch)
		}
		t.Log("on general")
		for {
			str := <-resch
			// do something
			t.Log(str)
			wg.Done()
		}
	}
	tests := "hellow world"
	go general(queue, wg)
	for _, s := range tests {
		wg.Add(1)
		queue <- s
	}
	t.Log("wait")
	// Wait blocks until every rune has been processed and logged.
	wg.Wait()
}
|
package main
import (
"filesrv"
"imagesrv"
"log"
"net/http"
"os"
"time"
"github.com/gorilla/mux"
// "github.com/jinzhu/gorm"
)
// App holds the application's shared dependencies — currently just the
// HTTP router (a DB handle is sketched out but disabled).
type App struct {
	Router *mux.Router
	// DB *gorm.DB
}
// main constructs the App and starts serving.
func main() {
	a := App{}
	serve(a)
}
// serve wires up the router, registers routes, and listens on the port
// named by the GO_EXPOSED_PORT environment variable.
func serve(a App) {
	a.Router = mux.NewRouter().StrictSlash(true)
	a.Init()
	a.Run(":" + os.Getenv("GO_EXPOSED_PORT"))
}
// Init registers all HTTP routes on the router.
func (a *App) Init() {
	a.initializeRoutes()
}
// Run blocks serving HTTP on the given port; ListenAndServe only returns
// on failure, which is fatal.
func (a *App) Run(port string) {
	log.Fatal(http.ListenAndServe(port, a.Router))
}
// Test is a trivial handler that responds with the literal body "test".
func (a *App) Test(w http.ResponseWriter, r *http.Request) {
	w.Write([]byte("test"))
}
// initializeRoutes registers the health-check root route, mounts the file
// and image sub-services, and installs the request-logging middleware.
func (a *App) initializeRoutes() {
	a.Router.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("server alive!"))
	}).Methods("GET")
	filesrv.ServeHTTP(a.Router)
	imagesrv.ServeHTTP(a.Router)
	a.Router.Use(Logger)
}
func Logger(inner http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
start := time.Now()
inner.ServeHTTP(w, r)
log.Printf(
"%s\t%s\t%s",
r.Method,
r.RequestURI,
time.Since(start),
)
})
}
|
package main
import (
"flag"
"fmt"
"log"
"math/rand"
"time"
"github.com/austindoeswork/NPC3/manager"
"github.com/austindoeswork/NPC3/server"
)
const (
	// port is the TCP address the server binds to.
	port = ":80"
)
var (
	versionFlag = flag.Bool("v", false, "git commit hash")
	// commithash is empty here; presumably injected at link time via
	// -ldflags — confirm against the build scripts.
	commithash string
)
// main parses flags, prints the build commit hash and exits when -v is
// given, then seeds the RNG and starts the server on port 80 with static
// assets from ./static/.
func main() {
	flag.Parse()
	if *versionFlag {
		fmt.Println(commithash)
		return
	}
	rand.Seed(time.Now().UTC().UnixNano())
	mgr := manager.New()
	srv := server.New(port, "./static/", mgr)
	fmt.Println("version: " + commithash)
	fmt.Printf("blastoff @ %s\n", port)
	// Start blocks; any returned error is fatal.
	log.Fatal(srv.Start())
}
|
package jsondiff
import (
"bytes"
"encoding/json"
"fmt"
"io"
"reflect"
"sort"
"gx/ipfs/QmRCUXvrfEEpWfqkLKqiaXE2uVaX73MGSVjLrfHDmzygTg/ansi"
)
// ResolutionType defines a type of comparison: equality, non-equality,
// new sub-diff and so on
type ResolutionType int
const (
	TypeEquals ResolutionType = iota // values are deeply equal
	TypeNotEquals // scalar values differ
	TypeAdded // key/element exists only in B
	TypeRemoved // key/element exists only in A
	TypeDiff // nested structure carrying its own sub-diff
	indentation = "    " // per-level indent used by Format
)
// ANSI escape sequences used by Format to colourize diff output.
var (
	colorStartYellow = ansi.ColorCode("yellow") // changed values
	colorStartRed = ansi.ColorCode("red") // removals
	colorStartGreen = ansi.ColorCode("green") // additions
	colorReset = ansi.ColorCode("reset")
)
// Diff is a result of comparison operation. Provides list
// of items that describe difference between objects piece by piece
type Diff struct {
	items []DiffItem // per-key / per-element comparison results
	hasDiff bool // set once any non-equal item is added
}
// Items returns list of diff items
func (d Diff) Items() []DiffItem { return d.items }
// Add adds new item to diff object, tracking whether any real
// difference has been recorded.
func (d *Diff) Add(item DiffItem) {
	d.items = append(d.items, item)
	if item.Resolution != TypeEquals {
		d.hasDiff = true
	}
}
// IsEqual checks that the diff contains no non-equal element. When it
// returns true there is no difference between the compared objects.
func (d Diff) IsEqual() bool { return !d.hasDiff }
// sort orders the items by key for deterministic output.
func (d *Diff) sort() { sort.Sort(byKey(d.items)) }
// DiffItem defines a difference between 2 items with resolution type
type DiffItem struct {
	Key string // JSON key; empty for array elements
	ValueA interface{} // value on the A side (nil when added)
	Resolution ResolutionType
	ValueB interface{} // value on the B side (nil when removed); holds the []DiffItem sub-diff for TypeDiff
}
// byKey sorts DiffItems lexicographically by Key.
type byKey []DiffItem
func (m byKey) Len() int { return len(m) }
func (m byKey) Less(i, j int) bool { return m[i].Key < m[j].Key }
func (m byKey) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
// Compare produces list of diff items that define difference between
// objects "a" and "b".
// Both inputs are round-tripped through JSON into string-keyed maps, so
// only their JSON-visible fields participate in the comparison.
// Note: if objects are equal, all diff items will have Resolution of
// type TypeEquals
func Compare(a, b interface{}) Diff {
	mapA := map[string]interface{}{}
	mapB := map[string]interface{}{}
	jsonA, _ := json.Marshal(a)
	jsonB, _ := json.Marshal(b)
	// Marshal/unmarshal errors are ignored: a non-marshalable or
	// non-object input simply compares as an empty map.
	json.Unmarshal(jsonA, &mapA)
	json.Unmarshal(jsonB, &mapB)
	return compareStringMaps(mapA, mapB)
}
// Format produces formatted output for a diff that can be printed.
// Uses colourization which may not work with terminals that don't
// support ASCII colouring (Windows is under question).
func Format(diff Diff) []byte {
	buf := bytes.Buffer{}
	writeItems(&buf, "", diff.Items())
	return buf.Bytes()
}
// writeItems renders one JSON-object level of the diff between braces,
// recursing (with a wider prefix) for TypeDiff items. Markers: "<> " /
// "** " for the two sides of a change, "<< " for additions, ">> " for
// removals, coloured yellow/green/red respectively.
func writeItems(writer io.Writer, prefix string, items []DiffItem) {
	writer.Write([]byte{'{'})
	last := len(items) - 1
	prefixNotEqualsA := prefix + "<> "
	prefixNotEqualsB := prefix + "** "
	prefixAdded := prefix + "<< "
	prefixRemoved := prefix + ">> "
	for i, item := range items {
		writer.Write([]byte{'\n'})
		switch item.Resolution {
		case TypeEquals:
			writeItem(writer, prefix, item.Key, item.ValueA, i < last)
		case TypeNotEquals:
			// Changed value: print both sides, A then B.
			writer.Write([]byte(colorStartYellow))
			writeItem(writer, prefixNotEqualsA, item.Key, item.ValueA, i < last)
			writer.Write([]byte{'\n'})
			writeItem(writer, prefixNotEqualsB, item.Key, item.ValueB, i < last)
			writer.Write([]byte(colorReset))
		case TypeAdded:
			writer.Write([]byte(colorStartGreen))
			writeItem(writer, prefixAdded, item.Key, item.ValueB, i < last)
			writer.Write([]byte(colorReset))
		case TypeRemoved:
			writer.Write([]byte(colorStartRed))
			writeItem(writer, prefixRemoved, item.Key, item.ValueA, i < last)
			writer.Write([]byte(colorReset))
		case TypeDiff:
			// ValueB carries the nested sub-diff (see DiffItem).
			subdiff := item.ValueB.([]DiffItem)
			fmt.Fprintf(writer, "%s\"%s\": ", prefix, item.Key)
			writeItems(writer, prefix+indentation, subdiff)
			if i < last {
				writer.Write([]byte{','})
			}
		}
	}
	fmt.Fprintf(writer, "\n%s}", prefix)
}
func writeItem(writer io.Writer, prefix, key string, value interface{}, isNotLast bool) {
fmt.Fprintf(writer, "%s\"%s\": ", prefix, key)
serialized, _ := json.Marshal(value)
writer.Write(serialized)
if isNotLast {
writer.Write([]byte{','})
}
}
// compare classifies the relationship between two decoded JSON values:
// TypeEquals for deep equality, TypeDiff (with a populated sub-diff) when
// both sides are objects or both are arrays, TypeNotEquals otherwise.
func compare(A, B interface{}) (ResolutionType, Diff) {
	if reflect.DeepEqual(A, B) {
		return TypeEquals, Diff{}
	}
	if mapA, ok := A.(map[string]interface{}); ok {
		if mapB, ok := B.(map[string]interface{}); ok {
			return TypeDiff, compareStringMaps(mapA, mapB)
		}
	}
	if arrA, ok := A.([]interface{}); ok {
		if arrB, ok := B.([]interface{}); ok {
			return TypeDiff, compareArrays(arrA, arrB)
		}
	}
	return TypeNotEquals, Diff{}
}
// compareArrays diffs two JSON arrays element-by-element over their common
// prefix; surplus elements in A are reported as removed and surplus
// elements in B as added. Array items carry an empty Key.
func compareArrays(A, B []interface{}) Diff {
	var d Diff
	common := len(A)
	if len(B) < common {
		common = len(B)
	}
	for i := 0; i < common; i++ {
		res, sub := compare(A[i], B[i])
		switch res {
		case TypeEquals:
			d.Add(DiffItem{"", A[i], TypeEquals, nil})
		case TypeNotEquals:
			d.Add(DiffItem{"", A[i], TypeNotEquals, B[i]})
		case TypeDiff:
			d.Add(DiffItem{"", nil, TypeDiff, sub.Items()})
		}
	}
	for _, extra := range A[common:] {
		d.Add(DiffItem{"", extra, TypeRemoved, nil})
	}
	for _, extra := range B[common:] {
		d.Add(DiffItem{"", nil, TypeAdded, extra})
	}
	return d
}
// compareStringMaps builds a key-wise diff of two JSON objects. Keys present
// only in A are reported as removed, keys only in B as added, and shared
// keys are compared recursively. Keys are walked in sorted order and the
// result is sorted, so the output is deterministic.
func compareStringMaps(A, B map[string]interface{}) Diff {
	var result Diff
	for _, key := range sortedKeys(A) {
		valueA := A[key]
		valueB, shared := B[key]
		if !shared {
			result.Add(DiffItem{key, valueA, TypeRemoved, nil})
			continue
		}
		switch kind, nested := compare(valueA, valueB); kind {
		case TypeEquals:
			result.Add(DiffItem{key, valueA, TypeEquals, nil})
		case TypeNotEquals:
			result.Add(DiffItem{key, valueA, TypeNotEquals, valueB})
		case TypeDiff:
			result.Add(DiffItem{key, nil, TypeDiff, nested.Items()})
		}
	}
	for _, key := range sortedKeys(B) {
		if _, shared := A[key]; !shared {
			result.Add(DiffItem{key, nil, TypeAdded, B[key]})
		}
	}
	result.sort()
	return result
}
// sortedKeys returns the keys of m in ascending lexical order, giving
// callers a deterministic iteration sequence over the map.
func sortedKeys(m map[string]interface{}) []string {
	result := make([]string, 0, len(m))
	for key := range m {
		result = append(result, key)
	}
	sort.Strings(result)
	return result
}
|
package main
import (
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"net/url"
"os"
"os/user"
resty "gopkg.in/resty.v0"
)
var username = flag.String("username", "", "Username for onetimesecret.com, only needed the first time with -password")
var password = flag.String("password", "", "Password for onetimesecret.com, only needed the first time with -username")
var config = flag.String("config", "", "Path to ots config file")
var recipient = flag.String("recipient", "", "Email of the person to send the secret to.")
var passphrase = flag.String("passphrase", "", "Passphrase to lock the secret with.")
var secret = flag.String("secret", "", "Secret to send")
// saveConfig persists the given credentials as JSON to <homedir>/.ots
// (mode 0600, since it holds a password) and returns the resulting Config.
func saveConfig(username string, password string, homedir string) (Config, error) {
	conf := Config{username, password}
	encoded, err := json.Marshal(conf)
	if err != nil {
		return Config{}, err
	}
	path := homedir + "/.ots"
	if err := ioutil.WriteFile(path, encoded, 0600); err != nil {
		return Config{}, err
	}
	fmt.Println("Saved credentials at " + path)
	return conf, nil
}
// readConfig loads and parses the JSON credentials file at filepath.
// Missing or malformed files are reported to stdout before the error is
// returned to the caller.
func readConfig(filepath string) (Config, error) {
	raw, err := ioutil.ReadFile(filepath)
	if err != nil {
		fmt.Println("Config doesn't exist. Please run with both -username and -password.")
		return Config{}, err
	}
	var conf Config
	if err := json.Unmarshal(raw, &conf); err != nil {
		fmt.Println("Unable to parse config")
		return Config{}, err
	}
	return conf, nil
}
// getSecretLink shares a secret through the onetimesecret.com API and
// returns the URL at which it can be retrieved exactly once.
//
// On an API-level failure the returned string carries the API's error
// message alongside a non-nil error (preserving the original contract).
func getSecretLink(conf Config, secret Secret) (string, error) {
	// Credentials travel as basic-auth userinfo embedded in the URL.
	auth := conf.Username + ":" + conf.Password
	URL, err := url.Parse("https://" + auth + "@onetimesecret.com")
	if err != nil {
		return "", err
	}
	URL.Path += "/api/v1/share"

	// The API accepts the payload as query parameters; recipient and
	// passphrase are optional.
	params := url.Values{}
	params.Add("secret", secret.Secret)
	params.Add("ttl", secret.TTL)
	if secret.Recipient != "" {
		params.Add("recipient", secret.Recipient)
	}
	if secret.Passphrase != "" {
		params.Add("passphrase", secret.Passphrase)
	}
	URL.RawQuery = params.Encode()
	// (Removed a dead `if err != nil` re-check here: err had already been
	// checked after url.Parse and could not have changed since.)

	resp, err := resty.R().
		SetHeader("Content-Type", "application/json").
		Post(URL.String())
	if err != nil {
		return "", err
	}

	var parsedResp OstResp
	if err := json.Unmarshal([]byte(resp.String()), &parsedResp); err != nil {
		// Previously this panicked; returning the error lets the caller
		// decide how to react (main still treats it as fatal).
		return "", err
	}
	if parsedResp.Message != "" {
		// The API reported a failure; hand its message back for display.
		return parsedResp.Message, errors.New("OTS API returned an error message")
	}
	return "https://onetimesecret.com/secret/" + parsedResp.SecretKey, nil
}
func main() {
flag.Parse()
var conf Config
usr, err := user.Current()
if err != nil {
panic(err)
}
if *config == "" {
*config = usr.HomeDir + "/.ots"
}
if *username != "" && *password != "" {
conf, err = saveConfig(*username, *password, usr.HomeDir)
if err != nil {
panic(err)
}
} else {
conf, err = readConfig(*config)
if err != nil {
panic(err)
}
}
if *secret == "" {
fmt.Println("Not secret provided, exiting.")
os.Exit(0)
}
secret := Secret{*secret, *passphrase, "600", *recipient}
secretLink, err := getSecretLink(conf, secret)
if err != nil {
panic(err)
}
fmt.Println(secretLink)
}
|
// Copyright (C) 2019 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package android
// This file contains all the foundation components for override modules and their base module
// types. Override modules are a kind of opposite of default modules in that they override certain
// properties of an existing base module whereas default modules provide base module data to be
// overridden. However, unlike default and defaultable module pairs, both override and overridable
// modules generate and output build actions, and it is up to product make vars to decide which one
// to actually build and install in the end. In other words, default modules and defaultable modules
// can be compared to abstract classes and concrete classes in C++ and Java. By the same analogy,
// both override and overridable modules act like concrete classes.
//
// There is one more crucial difference from the logic perspective. Unlike default pairs, most Soong
// actions happen in the base (overridable) module by creating a local variant for each override
// module based on it.
import (
"sync"
"github.com/google/blueprint"
"github.com/google/blueprint/proptools"
)
// Interface for override module types, e.g. override_android_app, override_apex
type OverrideModule interface {
	Module

	// getOverridingProperties returns the property structs whose values are
	// applied on top of the base module's properties.
	getOverridingProperties() []interface{}
	setOverridingProperties(properties []interface{})

	// getOverrideModuleProperties returns the properties that name the base
	// module this override applies to.
	getOverrideModuleProperties() *OverrideModuleProperties

	// Internal funcs to handle interoperability between override modules and prebuilts.
	// i.e. cases where an overriding module, too, is overridden by a prebuilt module.
	setOverriddenByPrebuilt(overridden bool)
	getOverriddenByPrebuilt() bool
}
// Base module struct for override module types
type OverrideModuleBase struct {
	// moduleProperties holds the blueprint-visible properties, notably the
	// name of the base module being overridden.
	moduleProperties OverrideModuleProperties

	// overridingProperties are the property structs applied onto the base
	// module's variant during override().
	overridingProperties []interface{}

	// overriddenByPrebuilt records whether a preferred prebuilt in turn
	// overrides this override module.
	overriddenByPrebuilt bool
}

type OverrideModuleProperties struct {
	// Name of the base module to be overridden
	Base *string

	// TODO(jungjw): Add an optional override_name bool flag.
}
// getOverridingProperties returns the property structs this module applies
// on top of its base module.
func (o *OverrideModuleBase) getOverridingProperties() []interface{} {
	return o.overridingProperties
}

// setOverridingProperties records the property structs to apply to the base.
func (o *OverrideModuleBase) setOverridingProperties(properties []interface{}) {
	o.overridingProperties = properties
}

// getOverrideModuleProperties returns the properties naming the base module.
func (o *OverrideModuleBase) getOverrideModuleProperties() *OverrideModuleProperties {
	return &o.moduleProperties
}

// GetOverriddenModuleName returns the name of the base module, or "" if the
// Base property is unset.
func (o *OverrideModuleBase) GetOverriddenModuleName() string {
	return proptools.String(o.moduleProperties.Base)
}

// setOverriddenByPrebuilt marks this override module as itself overridden
// by a preferred prebuilt.
func (o *OverrideModuleBase) setOverriddenByPrebuilt(overridden bool) {
	o.overriddenByPrebuilt = overridden
}

// getOverriddenByPrebuilt reports whether a preferred prebuilt overrides
// this override module.
func (o *OverrideModuleBase) getOverriddenByPrebuilt() bool {
	return o.overriddenByPrebuilt
}
// InitOverrideModule registers an override module's property structs: the
// module's own properties become the overriding set, and the base-naming
// properties are added so "base" can be set in blueprints.
func InitOverrideModule(m OverrideModule) {
	m.setOverridingProperties(m.GetProperties())
	m.AddProperties(m.getOverrideModuleProperties())
}
// Interface for overridable module types, e.g. android_app, apex
type OverridableModule interface {
	Module
	moduleBase() *OverridableModuleBase

	setOverridableProperties(prop []interface{})

	addOverride(o OverrideModule)
	getOverrides() []OverrideModule

	// override applies o's overriding properties onto this (variant) module.
	override(ctx BaseModuleContext, o OverrideModule)
	GetOverriddenBy() string

	setOverridesProperty(overridesProperties *[]string)

	// Due to complications with incoming dependencies, overrides are processed after DepsMutator.
	// So, overridable properties need to be handled in a separate, dedicated deps mutator.
	OverridablePropertiesDepsMutator(ctx BottomUpMutatorContext)
}

// overridableModuleProperties holds mutated (non-blueprint) state: the name
// of the override module applied to this variant, if any.
type overridableModuleProperties struct {
	OverriddenBy string `blueprint:"mutated"`
}
// Base module struct for overridable module types
type OverridableModuleBase struct {
	// List of OverrideModules that override this base module
	overrides []OverrideModule
	// Used to parallelize registerOverrideMutator executions. Note that only addOverride locks this
	// mutex. It is because addOverride and getOverride are used in different mutators, and so are
	// guaranteed to be not mixed. (And, getOverride only reads from overrides, and so don't require
	// mutex locking.)
	overridesLock sync.Mutex

	// overridableProperties are the property structs override modules may
	// replace (set from the module's own properties in InitOverridableModule).
	overridableProperties []interface{}

	// If an overridable module has a property to list other modules that itself overrides, it should
	// set this to a pointer to the property through the InitOverridableModule function, so that
	// override information is propagated and aggregated correctly.
	overridesProperty *[]string

	overridableModuleProperties overridableModuleProperties
}
// InitOverridableModule sets up an overridable module: all of its current
// property structs become overridable, and overridesProperty (which may be
// nil) names the property through which override information is aggregated.
func InitOverridableModule(m OverridableModule, overridesProperty *[]string) {
	m.setOverridableProperties(m.(Module).GetProperties())
	m.setOverridesProperty(overridesProperty)
	m.AddProperties(&m.moduleBase().overridableModuleProperties)
}
// moduleBase returns the embedded OverridableModuleBase, letting interface
// code reach the concrete base struct. Receiver renamed from `o` to `b` for
// consistency with every other OverridableModuleBase method.
func (b *OverridableModuleBase) moduleBase() *OverridableModuleBase {
	return b
}
// setOverridableProperties records which property structs override modules
// may replace.
func (b *OverridableModuleBase) setOverridableProperties(prop []interface{}) {
	b.overridableProperties = prop
}

// addOverride registers an override module; the mutex makes it safe to call
// from parallel mutator executions.
func (b *OverridableModuleBase) addOverride(o OverrideModule) {
	b.overridesLock.Lock()
	b.overrides = append(b.overrides, o)
	b.overridesLock.Unlock()
}

// Should NOT be used in the same mutator as addOverride.
func (b *OverridableModuleBase) getOverrides() []OverrideModule {
	return b.overrides
}

// setOverridesProperty stores the aggregation target for override names
// (may be nil; see the field comment).
func (b *OverridableModuleBase) setOverridesProperty(overridesProperty *[]string) {
	b.overridesProperty = overridesProperty
}
// Overrides a base module with the given OverrideModule.
func (b *OverridableModuleBase) override(ctx BaseModuleContext, o OverrideModule) {
	// Copy each overriding property struct onto the matching (same-typed)
	// overridable struct, replacing existing values.
	for _, p := range b.overridableProperties {
		for _, op := range o.getOverridingProperties() {
			if proptools.TypeEqual(p, op) {
				err := proptools.ExtendProperties(p, op, nil, proptools.OrderReplace)
				if err != nil {
					if propertyErr, ok := err.(*proptools.ExtendPropertyError); ok {
						// Property-level problems become blueprint errors;
						// anything else is a programming error.
						ctx.PropertyErrorf(propertyErr.Property, "%s", propertyErr.Err.Error())
					} else {
						panic(err)
					}
				}
			}
		}
	}
	// Adds the base module to the overrides property, if exists, of the overriding module. See the
	// comment on OverridableModuleBase.overridesProperty for details.
	if b.overridesProperty != nil {
		*b.overridesProperty = append(*b.overridesProperty, ctx.ModuleName())
	}
	b.overridableModuleProperties.OverriddenBy = o.Name()
}
// GetOverriddenBy returns the name of the override module that has overridden this module.
// For example, if an override module foo has its 'base' property set to bar, then another local
// variant of bar is created and its properties are overridden by foo. This method returns foo (the
// override module's name, as recorded by override()) when called from the new local variant. It
// returns "" when called from the original variant of bar.
func (b *OverridableModuleBase) GetOverriddenBy() string {
	return b.overridableModuleProperties.OverriddenBy
}

// OverridablePropertiesDepsMutator is a no-op default; overridable module
// types whose overridable properties introduce dependencies override this.
func (b *OverridableModuleBase) OverridablePropertiesDepsMutator(ctx BottomUpMutatorContext) {
}
// Mutators for override/overridable modules. All the fun happens in these functions. It is critical
// to keep them in this order and not put any order mutators between them.
func RegisterOverridePostDepsMutators(ctx RegisterMutatorsContext) {
	ctx.BottomUp("override_deps", overrideModuleDepsMutator).Parallel()
	ctx.TopDown("register_override", registerOverrideMutator).Parallel()
	ctx.BottomUp("perform_override", performOverrideMutator).Parallel()
	ctx.BottomUp("overridable_deps", overridableModuleDepsMutator).Parallel()
	ctx.BottomUp("replace_deps_on_override", replaceDepsOnOverridingModuleMutator).Parallel()
}

// overrideBaseDependencyTag marks the dependency edge from an override
// module to its base module.
type overrideBaseDependencyTag struct {
	blueprint.BaseDependencyTag
}

var overrideBaseDepTag overrideBaseDependencyTag
// Adds dependency on the base module to the overriding module so that they can be visited in the
// next phase.
func overrideModuleDepsMutator(ctx BottomUpMutatorContext) {
	if module, ok := ctx.Module().(OverrideModule); ok {
		// See if there's a prebuilt module that overrides this override module with prefer flag,
		// in which case we call SkipInstall on the corresponding variant later.
		ctx.VisitDirectDepsWithTag(PrebuiltDepTag, func(dep Module) {
			prebuilt, ok := dep.(PrebuiltInterface)
			if !ok {
				panic("PrebuiltDepTag leads to a non-prebuilt module " + dep.Name())
			}
			if prebuilt.Prebuilt().UsePrebuilt() {
				module.setOverriddenByPrebuilt(true)
				// Note: this return exits only the visitor callback, not the
				// mutator; any remaining prebuilt deps are still visited.
				return
			}
		})
		ctx.AddDependency(ctx.Module(), overrideBaseDepTag, *module.getOverrideModuleProperties().Base)
	}
}
// Visits the base module added as a dependency above, checks the module type, and registers the
// overriding module.
func registerOverrideMutator(ctx TopDownMutatorContext) {
	ctx.VisitDirectDepsWithTag(overrideBaseDepTag, func(base Module) {
		if o, ok := base.(OverridableModule); ok {
			// The base records every module overriding it; variants are
			// created later in performOverrideMutator.
			o.addOverride(ctx.Module().(OverrideModule))
		} else {
			ctx.PropertyErrorf("base", "unsupported base module type")
		}
	})
}
// Now, goes through all overridable modules, finds all modules overriding them, creates a local
// variant for each of them, and performs the actual overriding operation by calling override().
func performOverrideMutator(ctx BottomUpMutatorContext) {
	if b, ok := ctx.Module().(OverridableModule); ok {
		overrides := b.getOverrides()
		if len(overrides) == 0 {
			return
		}
		variants := make([]string, len(overrides)+1)
		// The first variant is for the original, non-overridden, base module.
		variants[0] = ""
		for i, o := range overrides {
			variants[i+1] = o.(Module).Name()
		}
		mods := ctx.CreateLocalVariations(variants...)
		// Make the original variation the default one to depend on if no other override module variant
		// is specified.
		ctx.AliasVariation(variants[0])
		for i, o := range overrides {
			// mods[i+1] lines up with variants[i+1], i.e. the variant named
			// after override module o.
			mods[i+1].(OverridableModule).override(ctx, o)
			if o.getOverriddenByPrebuilt() {
				// The overriding module itself, too, is overridden by a prebuilt. Skip its installation.
				mods[i+1].SkipInstall()
			}
		}
	} else if o, ok := ctx.Module().(OverrideModule); ok {
		// Create a variant of the overriding module with its own name. This matches the above local
		// variant name rule for overridden modules, and thus allows ReplaceDependencies to match the
		// two.
		ctx.CreateLocalVariations(o.Name())
		// To allow dependencies to be added without having to know the above variation.
		ctx.AliasVariation(o.Name())
	}
}
// overridableModuleDepsMutator runs each overridable module's dedicated
// post-override deps hook (see OverridablePropertiesDepsMutator).
func overridableModuleDepsMutator(ctx BottomUpMutatorContext) {
	if b, ok := ctx.Module().(OverridableModule); ok {
		b.OverridablePropertiesDepsMutator(ctx)
	}
}

// replaceDepsOnOverridingModuleMutator rewires incoming dependencies from
// pseudo override modules onto the overridden variants that carry the
// actual build actions.
func replaceDepsOnOverridingModuleMutator(ctx BottomUpMutatorContext) {
	if b, ok := ctx.Module().(OverridableModule); ok {
		if o := b.GetOverriddenBy(); o != "" {
			// Redirect dependencies on the overriding module to this overridden module. Overriding
			// modules are basically pseudo modules, and all build actions are associated to overridden
			// modules. Therefore, dependencies on overriding modules need to be forwarded there as well.
			ctx.ReplaceDependencies(o)
		}
	}
}
|
package exasol
import (
"context"
"crypto/rand"
"crypto/rsa"
"database/sql/driver"
"encoding/base64"
"encoding/hex"
"fmt"
"math/big"
"os/user"
"runtime"
"strconv"
"github.com/gorilla/websocket"
)
// connection is a single websocket-backed session with an Exasol server,
// implementing the database/sql/driver connection interfaces.
type connection struct {
	config    *config
	websocket *websocket.Conn
	ctx       context.Context
	// isClosed tracks whether the session has been closed; checked by every
	// entry point before talking to the server.
	isClosed bool
}
// QueryContext implements driver.QueryerContext: named arguments are
// converted to positional values, then delegated to query.
func (c *connection) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
	values, err := namedValuesToValues(args)
	if err != nil {
		return nil, err
	}
	return c.query(ctx, query, values)
}

// ExecContext implements driver.ExecerContext: named arguments are
// converted to positional values, then delegated to exec.
func (c *connection) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
	values, err := namedValuesToValues(args)
	if err != nil {
		return nil, err
	}
	return c.exec(ctx, query, values)
}

// Exec implements driver.Execer by delegating with a background context.
func (c *connection) Exec(query string, args []driver.Value) (driver.Result, error) {
	return c.exec(context.Background(), query, args)
}

// Query implements driver.Queryer by delegating with a background context.
func (c *connection) Query(query string, args []driver.Value) (driver.Rows, error) {
	return c.query(context.Background(), query, args)
}
// PrepareContext implements driver.ConnPrepareContext. It registers a
// prepared statement on the server and wraps the returned handle in a
// driver statement. Closed connections report driver.ErrBadConn.
func (c *connection) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
	if c.isClosed {
		errorLogger.Print(ErrClosed)
		return nil, driver.ErrBadConn
	}
	response := &CreatePreparedStatementResponse{}
	if err := c.createPreparedStatement(ctx, query, response); err != nil {
		return nil, err
	}
	return c.createStatement(response), nil
}
// createPreparedStatement issues the createPreparedStatement command for
// query and decodes the server reply into response.
func (c *connection) createPreparedStatement(ctx context.Context, query string, response *CreatePreparedStatementResponse) error {
	return c.send(ctx, &CreatePreparedStatementCommand{
		Command: Command{"createPreparedStatement"},
		SQLText: query,
	}, response)
}

// createStatement wraps a server-side prepared-statement handle in a driver
// statement carrying the parameter metadata.
func (c *connection) createStatement(result *CreatePreparedStatementResponse) *statement {
	return &statement{
		connection:      c,
		statementHandle: result.StatementHandle,
		numInput:        result.ParameterData.NumColumns,
		columns:         result.ParameterData.Columns,
	}
}
// Prepare implements driver.Conn by delegating to PrepareContext with a
// background context.
func (c *connection) Prepare(query string) (driver.Stmt, error) {
	return c.PrepareContext(context.Background(), query)
}

// Close implements driver.Conn by delegating to close with a background
// context.
func (c *connection) Close() error {
	return c.close(context.Background())
}
// Begin implements driver.Conn. Explicit transactions are only available
// when autocommit is disabled in the connection config; closed connections
// report driver.ErrBadConn.
func (c *connection) Begin() (driver.Tx, error) {
	switch {
	case c.isClosed:
		errorLogger.Print(ErrClosed)
		return nil, driver.ErrBadConn
	case c.config.Autocommit:
		return nil, ErrAutocommitEnabled
	}
	return &transaction{connection: c}, nil
}
// query runs a SQL query, using a server-side prepared statement when
// positional arguments are supplied and a plain execute otherwise.
func (c *connection) query(ctx context.Context, query string, args []driver.Value) (driver.Rows, error) {
	if c.isClosed {
		errorLogger.Print(ErrClosed)
		return nil, driver.ErrBadConn
	}
	// Without bind values a single execute round-trip is sufficient.
	if len(args) == 0 {
		return c.executeSimpleWithRows(ctx, query)
	}
	prepared := &CreatePreparedStatementResponse{}
	if err := c.createPreparedStatement(ctx, query, prepared); err != nil {
		return nil, err
	}
	queries, err := c.executePreparedStatement(ctx, prepared, args)
	if err != nil {
		return nil, err
	}
	return toRow(queries, c)
}
// executeSimpleWithRows executes query without bind parameters and adapts
// the server response to driver.Rows.
func (c *connection) executeSimpleWithRows(ctx context.Context, query string) (driver.Rows, error) {
	response, err := c.simpleExec(ctx, query)
	if err != nil {
		return nil, err
	}
	return toRow(response, c)
}
// executePreparedStatement binds args to the prepared statement s and
// executes it. args is laid out row-major and must contain a whole number
// of rows, i.e. its length must be a multiple of the statement's parameter
// column count; the values are transposed into the column-major Data layout
// the protocol expects. The statement handle is released server-side after
// a successful execution.
func (c *connection) executePreparedStatement(ctx context.Context, s *CreatePreparedStatementResponse, args []driver.Value) (*SQLQueriesResponse, error) {
	columns := s.ParameterData.Columns
	// Guard: a statement without parameter columns cannot accept bind
	// values. The original code divided by len(columns) unconditionally and
	// panicked with a division by zero here.
	if len(columns) == 0 || len(args)%len(columns) != 0 {
		return nil, ErrInvalidValuesCount
	}

	// Transpose the flat, row-major args into one slice per column.
	data := make([][]interface{}, len(columns))
	for i, arg := range args {
		col := i % len(columns)
		data[col] = append(data[col], arg)
	}

	command := &ExecutePreparedStatementCommand{
		Command:         Command{"executePreparedStatement"},
		StatementHandle: s.StatementHandle,
		Columns:         columns,
		NumColumns:      len(columns),
		NumRows:         len(data[0]),
		Data:            data,
		Attributes: Attributes{
			ResultSetMaxRows: c.config.ResultSetMaxRows,
		},
	}
	result := &SQLQueriesResponse{}
	if err := c.send(ctx, command, result); err != nil {
		return nil, err
	}
	if result.NumResults == 0 {
		return nil, ErrMalformedData
	}
	return result, c.closePreparedStatement(ctx, s)
}
// closePreparedStatement releases the server-side handle of s.
func (c *connection) closePreparedStatement(ctx context.Context, s *CreatePreparedStatementResponse) error {
	return c.send(ctx, &ClosePreparedStatementCommand{
		Command:         Command{"closePreparedStatement"},
		StatementHandle: s.StatementHandle,
	}, nil)
}
// exec runs a statement for its result (row counts rather than rows), using
// a server-side prepared statement when bind values are present and a plain
// execute otherwise.
func (c *connection) exec(ctx context.Context, query string, args []driver.Value) (driver.Result, error) {
	if c.isClosed {
		errorLogger.Print(ErrClosed)
		return nil, driver.ErrBadConn
	}
	// No values provided, simple execute is enough
	if len(args) == 0 {
		return c.executeSimpleWithResult(ctx, query)
	}
	// Consistency fix: reuse the shared createPreparedStatement helper
	// instead of duplicating the command construction inline (query already
	// does this).
	prepResponse := &CreatePreparedStatementResponse{}
	if err := c.createPreparedStatement(ctx, query, prepResponse); err != nil {
		return nil, err
	}
	result, err := c.executePreparedStatement(ctx, prepResponse, args)
	if err != nil {
		return nil, err
	}
	return toResult(result)
}
// executeSimpleWithResult executes query without bind parameters and adapts
// the server response to driver.Result.
func (c *connection) executeSimpleWithResult(ctx context.Context, query string) (driver.Result, error) {
	response, err := c.simpleExec(ctx, query)
	if err != nil {
		return nil, err
	}
	return toResult(response)
}
// simpleExec sends a plain "execute" command (no bind parameters) and
// returns the parsed response. A reply without results is treated as
// malformed.
func (c *connection) simpleExec(ctx context.Context, query string) (*SQLQueriesResponse, error) {
	request := &SQLCommand{
		Command: Command{"execute"},
		SQLText: query,
		Attributes: Attributes{
			ResultSetMaxRows: c.config.ResultSetMaxRows,
		},
	}
	response := &SQLQueriesResponse{}
	if err := c.send(ctx, request, response); err != nil {
		return nil, err
	}
	if response.NumResults == 0 {
		return nil, ErrMalformedData
	}
	return response, nil
}
// close marks the connection closed, sends a best-effort disconnect to the
// server, and tears down the websocket. It is safe to call more than once:
// the original dereferenced c.websocket unconditionally, so a second Close
// panicked on the nil websocket.
func (c *connection) close(ctx context.Context) error {
	c.isClosed = true
	if c.websocket == nil {
		// Already closed (or never connected): nothing to tear down.
		return nil
	}
	err := c.send(ctx, &Command{Command: "disconnect"}, nil)
	c.websocket.Close()
	c.websocket = nil
	return err
}
// login performs the RSA-password login handshake: it requests the server's
// public key, encrypts the configured password with it, and authenticates.
// Compression is disabled for the handshake itself and restored afterwards.
func (c *connection) login(ctx context.Context) error {
	hasCompression := c.config.Compression
	c.config.Compression = false

	loginCommand := &LoginCommand{
		Command:         Command{"login"},
		ProtocolVersion: c.config.ApiVersion,
	}
	loginResponse := &PublicKeyResponse{}
	err := c.send(ctx, loginCommand, loginResponse)
	if err != nil {
		return err
	}

	// Build the server's RSA public key from the hex-encoded modulus and
	// exponent. NOTE(review): decode/parse errors are still ignored here,
	// as in the original; a malformed response yields a bogus key and the
	// encryption below simply produces garbage the server rejects.
	pubKeyMod, _ := hex.DecodeString(loginResponse.PublicKeyModulus)
	var modulus big.Int
	modulus.SetBytes(pubKeyMod)
	pubKeyExp, _ := strconv.ParseUint(loginResponse.PublicKeyExponent, 16, 32)
	pubKey := rsa.PublicKey{
		N: &modulus,
		E: int(pubKeyExp),
	}

	// The password is RSA-encrypted with the server key, then base64-encoded.
	password := []byte(c.config.Password)
	encPass, err := rsa.EncryptPKCS1v15(rand.Reader, &pubKey, password)
	if err != nil {
		errorLogger.Printf("password encryption error: %s", err)
		return driver.ErrBadConn
	}
	b64Pass := base64.StdEncoding.EncodeToString(encPass)

	authRequest := AuthCommand{
		Username:       c.config.User,
		Password:       b64Pass,
		UseCompression: false,
		ClientName:     c.config.ClientName,
		DriverName:     fmt.Sprintf("exasol-driver-go %s", driverVersion),
		ClientOs:       runtime.GOOS,
		// NOTE(review): ClientVersion is populated from ClientName, which
		// looks like a copy/paste slip — confirm against the config fields.
		ClientVersion: c.config.ClientName,
		ClientRuntime: runtime.Version(),
		Attributes: Attributes{
			Autocommit:         c.config.Autocommit,
			CurrentSchema:      c.config.Schema,
			CompressionEnabled: hasCompression,
		},
	}
	// BUG FIX: the original condition was `err != nil`, which attached the
	// OS username only when user.Current FAILED — and then dereferenced the
	// nil osUser, panicking. Attach it only on success.
	if osUser, err := user.Current(); err == nil {
		authRequest.ClientOsUsername = osUser.Username
	}

	authResponse := &AuthResponse{}
	err = c.send(ctx, authRequest, authResponse)
	if err != nil {
		return err
	}

	c.isClosed = false
	c.config.Compression = hasCompression
	return nil
}
|
package cli
import (
"bufio"
"fmt"
"os"
"strings"
"github.com/evan-buss/openbooks/core"
"github.com/evan-buss/openbooks/irc"
)
// terminalMenu prints the interactive prompt, reads a single selection from
// stdin, and dispatches the chosen action. Invalid input re-prompts via
// recursion.
func terminalMenu(irc *irc.Conn) {
	fmt.Print("\ns)search\ng)et book\nd)one\n~> ")
	// Trim user input so we don't send 2 messages
	trim := func(message string) string { return strings.Trim(message, "\r\n") }
	reader := bufio.NewReader(os.Stdin)
	choice, _ := reader.ReadString('\n')
	switch trim(choice) {
	case "s":
		fmt.Print("@search ")
		query, _ := reader.ReadString('\n')
		core.SearchBook(irc, trim(query))
		fmt.Println("\nSent search request.")
	case "g":
		fmt.Print("Download String: ")
		target, _ := reader.ReadString('\n')
		core.DownloadBook(irc, trim(target))
		fmt.Println("\nSent download request.")
	case "d":
		fmt.Println("Disconnecting.")
		irc.Disconnect()
		os.Exit(0)
	default:
		fmt.Println("Invalid Selection.")
		terminalMenu(irc)
	}
}
// fullHandler builds the event-handler table for CLI mode. Events that end
// an interaction (bad server, download, search results, no results) run
// their configured handler and then return the user to the terminal menu;
// passive events are forwarded directly to the config's handlers.
func fullHandler(config Config) core.EventHandler {
	backToMenu := func(handle func(string)) func(string) {
		return func(text string) {
			handle(text)
			terminalMenu(config.irc)
		}
	}
	handler := core.EventHandler{}
	handler[core.BadServer] = backToMenu(config.badServerHandler)
	handler[core.BookResult] = backToMenu(config.downloadHandler)
	handler[core.SearchResult] = backToMenu(config.searchHandler)
	handler[core.NoResults] = backToMenu(config.noResultsHandler)
	handler[core.SearchAccepted] = config.searchAcceptedHandler
	handler[core.MatchesFound] = config.matchesFoundHandler
	handler[core.Ping] = config.pingHandler
	return handler
}
|
package bus
import (
"fmt"
"time"
"github.com/pkg/errors"
)
// Action names understood by bus consumers. A Message carries exactly one
// of these in its Action field.
const (
	CreateConnection = "CreateConnection"
	UpdateConnection = "UpdateConnection"
	DeleteConnection = "DeleteConnection"

	CreateRelay = "CreateRelay"
	UpdateRelay = "UpdateRelay"
	DeleteRelay = "DeleteRelay"
	StopRelay   = "StopRelay"
	ResumeRelay = "ResumeRelay"

	CreateTunnel = "CreateTunnel"
	UpdateTunnel = "UpdateTunnel"
	DeleteTunnel = "DeleteTunnel"
	StopTunnel   = "StopTunnel"
	ResumeTunnel = "ResumeTunnel"

	UpdateConfig = "UpdateConfig"
)

var (
	// ValidActions enumerates every action Validate accepts.
	ValidActions = []Action{
		CreateConnection, UpdateConnection, DeleteConnection,
		CreateRelay, UpdateRelay, DeleteRelay, StopRelay, ResumeRelay,
		CreateTunnel, UpdateTunnel, DeleteTunnel, StopTunnel, ResumeTunnel,
		UpdateConfig,
	}
)

// Action identifies the kind of operation a bus Message describes.
type Action string

// Message is the envelope exchanged over the bus.
type Message struct {
	Action    Action
	Data      []byte // <- consumer decides what's in here based on action
	Metadata  map[string]string
	EmittedBy string
	EmittedAt time.Time // UTC
}

// Validate reports an error if the message is nil, carries an unknown
// action, or is missing its emitter or emission timestamp.
// TODO: implement, this isn't being used anywhere at the moment
func (m *Message) Validate() error {
	if m == nil {
		return errors.New("message cannot be nil")
	}

	var found bool
	for _, v := range ValidActions {
		if m.Action == v {
			found = true
			// fix: stop scanning once matched (the original kept looping
			// over the remaining actions).
			break
		}
	}
	if !found {
		return fmt.Errorf("unrecognized action '%s'", m.Action)
	}

	if m.EmittedBy == "" {
		return errors.New("EmittedBy cannot be empty")
	}
	if m.EmittedAt.IsZero() {
		return errors.New("EmittedAt cannot be unset")
	}

	return nil
}
|
package list
/*import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"strings"
"testing"
"time"
cloudpkg "github.com/devspace-cloud/devspace/pkg/devspace/cloud"
cloudconfig "github.com/devspace-cloud/devspace/pkg/devspace/cloud/config"
cloudlatest "github.com/devspace-cloud/devspace/pkg/devspace/cloud/config/versions/latest"
"github.com/devspace-cloud/devspace/pkg/devspace/cloud/token"
"github.com/devspace-cloud/devspace/pkg/util/log"
"github.com/devspace-cloud/devspace/pkg/util/ptr"
homedir "github.com/mitchellh/go-homedir"
"gotest.tools/assert"
)
type customGraphqlClient struct {
responses []interface{}
}
func (q *customGraphqlClient) GrapqhlRequest(p *cloudpkg.Provider, request string, vars map[string]interface{}, response interface{}) error {
if len(q.responses) == 0 {
panic("Not enough responses. Need response for: " + request)
}
currentResponse := q.responses[0]
q.responses = q.responses[1:]
errorResponse, isError := currentResponse.(error)
if isError {
return errorResponse
}
buf, err := json.Marshal(currentResponse)
if err != nil {
panic(fmt.Sprintf("Cannot encode response. %d responses left", len(q.responses)))
}
json.NewDecoder(bytes.NewReader(buf)).Decode(&response)
return nil
}
type listClustersTestCase struct {
name string
providerFlag string
allFlag bool
graphQLResponses []interface{}
providerList []*cloudlatest.Provider
expectTablePrint bool
expectedHeader []string
expectedValues [][]string
expectedErr string
}
func TestListClusters(t *testing.T) {
claimAsJSON, _ := json.Marshal(token.ClaimSet{
Expiration: time.Now().Add(time.Hour).Unix(),
})
validEncodedClaim := base64.URLEncoding.EncodeToString(claimAsJSON)
for strings.HasSuffix(string(validEncodedClaim), "=") {
validEncodedClaim = strings.TrimSuffix(validEncodedClaim, "=")
}
testCases := []listClustersTestCase{
listClustersTestCase{
name: "No clusters",
providerFlag: "app.devspace.com",
providerList: []*cloudlatest.Provider{
&cloudlatest.Provider{
Name: "app.devspace.com",
Key: "someKey",
},
},
graphQLResponses: []interface{}{
struct {
Clusters []*cloudlatest.Cluster `json:"cluster"`
}{
Clusters: []*cloudlatest.Cluster{},
},
},
},
listClustersTestCase{
name: "One cluster",
providerFlag: "app.devspace.com",
providerList: []*cloudlatest.Provider{
&cloudlatest.Provider{
Name: "app.devspace.com",
Key: "someKey",
Token: "." + validEncodedClaim + ".",
},
},
graphQLResponses: []interface{}{
struct {
Clusters []*cloudlatest.Cluster `json:"cluster"`
}{
Clusters: []*cloudlatest.Cluster{
&cloudlatest.Cluster{
ClusterID: 1,
Server: ptr.String("someServer"),
Name: "someName",
EncryptToken: true,
Owner: &cloudlatest.Owner{
OwnerID: 1,
Name: "someOwner",
},
},
&cloudlatest.Cluster{
ClusterID: 2,
Server: ptr.String("someServer2"),
Name: "someName2",
EncryptToken: true,
},
},
},
},
expectedHeader: []string{"ID", "Name", "Owner", "Created"},
expectedValues: [][]string{
[]string{"1", "someOwner:someName", "someOwner", ""},
},
},
}
dir, err := ioutil.TempDir("", "test")
if err != nil {
t.Fatalf("Error creating temporary directory: %v", err)
}
wdBackup, err := os.Getwd()
if err != nil {
t.Fatalf("Error getting current working directory: %v", err)
}
err = os.Chdir(dir)
if err != nil {
t.Fatalf("Error changing working directory: %v", err)
}
homedir, err := homedir.Dir()
assert.NilError(t, err, "Error getting homedir")
relDir, err := filepath.Rel(homedir, dir)
assert.NilError(t, err, "Error getting relative dir path")
cloudconfig.DevSpaceProvidersConfigPath = filepath.Join(relDir, "Doesn'tExist")
cloudconfig.LegacyDevSpaceCloudConfigPath = filepath.Join(relDir, "Doesn'tExist")
defer func() {
//Delete temp folder
err = os.Chdir(wdBackup)
if err != nil {
t.Fatalf("Error changing dir back: %v", err)
}
err = os.RemoveAll(dir)
if err != nil {
t.Fatalf("Error removing dir: %v", err)
}
}()
log.SetInstance(log.Discard)
for _, testCase := range testCases {
testListClusters(t, testCase)
}
}
func testListClusters(t *testing.T, testCase listClustersTestCase) {
log.SetFakePrintTable(func(s log.Logger, header []string, values [][]string) {
assert.Assert(t, testCase.expectTablePrint || len(testCase.expectedHeader)+len(testCase.expectedValues) > 0, "PrintTable unexpectedly called in testCase %s", testCase.name)
assert.Equal(t, reflect.DeepEqual(header, testCase.expectedHeader), true, "Unexpected header in testCase %s. Expected:%v\nActual:%v", testCase.name, testCase.expectedHeader, header)
assert.Equal(t, reflect.DeepEqual(values, testCase.expectedValues), true, "Unexpected values in testCase %s. Expected:%v\nActual:%v", testCase.name, testCase.expectedValues, values)
})
cloudpkg.DefaultGraphqlClient = &customGraphqlClient{
responses: testCase.graphQLResponses,
}
providerConfig, err := cloudconfig.Load()
assert.NilError(t, err, "Error getting provider config in testCase %s", testCase.name)
providerConfig.Providers = testCase.providerList
err = (&clustersCmd{
All: testCase.allFlag,
Provider: testCase.providerFlag,
}).RunListClusters(nil, []string{})
if testCase.expectedErr == "" {
assert.NilError(t, err, "Unexpected error in testCase %s.", testCase.name)
} else {
assert.Error(t, err, testCase.expectedErr, "Wrong or no error in testCase %s.", testCase.name)
}
}*/
|
package main
import "fmt"
// IF is a minimal getter/setter interface over a string value.
type IF interface {
	get() string
	set(string)
}
// Doc is a simple text holder implementing IF (via *Doc).
type Doc struct {
	text string
}

// get returns the current text.
func (d *Doc) get() string {
	return d.text
}

// set stores s.
//
// BUG FIX: the original used a value receiver, so the assignment mutated a
// copy and was silently lost. Both methods now use pointer receivers (and
// receivers should be consistent per type); note IF is therefore satisfied
// by *Doc rather than a Doc value.
func (d *Doc) set(s string) {
	d.text = s
}
|
package structs
import "time"
// Book describes a library book record as exposed over the JSON API; the
// binding tags carry the request-validation rules.
type Book struct {
	ID          int    `json:"id"`
	Title       string `json:"title" binding:"required,min=2,max=200"`
	Description string `json:"description"`
	ISBN        string `json:"isbn" binding:"required,min=8,max=10"`
	Authors     string `json:"authors" binding:"required,min=2,max=200"`
	// Available has no json tag and therefore serializes as "Available".
	// NOTE(review): likely inconsistent with the lowercase keys above, but
	// adding a tag changes the wire format — confirm with API consumers.
	Available bool
}

// History records an availability status change and when it happened.
type History struct {
	Status bool      `json:"status"`
	When   time.Time `json:"when"`
}
|
package main
import (
"flag"
"fmt"
"github.com/macaron/go-mh-z19b"
)
// main reads a CO2 concentration (ppm) from an MH-Z19B sensor on the given
// serial device and prints it.
func main() {
	device := flag.String("device", "/dev/serial0", "specific MH-Z19B")
	flag.Parse()
	ppm, err := mhz19b.Read(*device)
	if err != nil {
		// Fix: the error was previously discarded, silently printing 0
		// whenever the sensor read failed.
		panic(err)
	}
	fmt.Printf("%d\n", ppm)
}
|
package main
import (
"fmt"
"runtime"
"runtime/debug"
"time"
)
// main demonstrates how different allocations (an empty struct, a bool, a
// fresh map, a million map entries, and their deletion) show up in
// runtime.MemStats deltas, printing a snapshot comparison after each step.
//
// NOTE(review): time.Sleep(1) sleeps one NANOSECOND, not one second —
// presumably time.Second was intended; confirm before trusting that the
// runtime has settled between snapshots.
func main() {
	debug.FreeOSMemory()
	var (
		m1 runtime.MemStats
		m2 runtime.MemStats
		m3 runtime.MemStats
		m4 runtime.MemStats
		m5 runtime.MemStats
		m6 runtime.MemStats
		m7 runtime.MemStats
	)
	runtime.ReadMemStats(&m1)
	// Zero-size value: expected to cause no heap allocation.
	t := struct{}{}
	time.Sleep(1)
	runtime.ReadMemStats(&m2)
	fmt.Printf("%#v\n", t)
	memUsage(&m1, &m2)
	P()
	fmt.Println()
	b := false
	time.Sleep(1)
	runtime.ReadMemStats(&m3)
	fmt.Printf("%#v\n", b)
	memUsage(&m2, &m3)
	P()
	fmt.Println()
	// debug.FreeOSMemory()
	// Force a collection to observe its effect on the counters.
	runtime.GC()
	time.Sleep(1)
	fmt.Printf("%#v\n", "GC")
	runtime.ReadMemStats(&m4)
	memUsage(&m3, &m4)
	P()
	fmt.Println()
	m := make(map[int]interface{})
	time.Sleep(1)
	fmt.Printf("%#v\n", m)
	runtime.ReadMemStats(&m5)
	memUsage(&m4, &m5)
	P()
	fmt.Println()
	// Grow the map to one million entries to make the heap delta visible.
	for i := 0; i < 1000000; i++ {
		m[i] = true
	}
	time.Sleep(1)
	runtime.ReadMemStats(&m6)
	memUsage(&m5, &m6)
	P()
	fmt.Println()
	// Deleting entries does not return map buckets to the OS immediately.
	for i := 0; i < 1000000; i++ {
		delete(m, i)
	}
	time.Sleep(1)
	runtime.ReadMemStats(&m7)
	memUsage(&m6, &m7)
	P()
	fmt.Println()
}
var p = fmt.Println
// memUsage prints the Alloc, TotalAlloc and HeapAlloc deltas between
// two memory snapshots (prev taken before curr).
func memUsage(prev, curr *runtime.MemStats) {
	allocDelta := curr.Alloc - prev.Alloc
	totalDelta := curr.TotalAlloc - prev.TotalAlloc
	heapDelta := curr.HeapAlloc - prev.HeapAlloc
	p("Alloc:", allocDelta,
		"TotalAlloc:", totalDelta,
		"HeapAlloc:", heapDelta)
}
var mem runtime.MemStats
// P samples the current memory statistics into the package-level mem
// variable and prints HeapSys, HeapAlloc, Alloc and Frees.
func P() {
	runtime.ReadMemStats(&mem)
	fmt.Println(mem.HeapSys, mem.HeapAlloc, mem.Alloc, mem.Frees)
}
|
// This file contains functions transpiling unary and binary operator
// expressions.
package transpiler
import (
"fmt"
"go/token"
goast "go/ast"
"github.com/elliotchance/c2go/ast"
"github.com/elliotchance/c2go/program"
"github.com/elliotchance/c2go/types"
"github.com/elliotchance/c2go/util"
)
// transpileBinaryOperator converts a C binary expression into a Go
// BinaryExpr, returning the expression, its resolved C type and any
// error from transpiling the operands.
func transpileBinaryOperator(n *ast.BinaryOperator, p *program.Program) (*goast.BinaryExpr, string, error) {
	left, leftType, err := transpileToExpr(n.Children[0], p)
	if err != nil {
		return nil, "", err
	}
	right, rightType, err := transpileToExpr(n.Children[1], p)
	if err != nil {
		return nil, "", err
	}
	operator := getTokenForOperator(n.Operator)
	returnType := types.ResolveTypeForBinaryOperator(p, n.Operator, leftType, rightType)

	// Go's && only operates on bools, so both operands are cast.
	if operator == token.LAND {
		left = types.CastExpr(p, left, leftType, "bool")
		right = types.CastExpr(p, right, rightType, "bool")
		return &goast.BinaryExpr{
			X:  left,
			Op: operator,
			Y:  right,
		}, "bool", nil
	}

	// Convert "(0)" to "nil" when we are dealing with equality.
	if (operator == token.NEQ || operator == token.EQL) &&
		types.IsNullExpr(right) {
		right = goast.NewIdent("nil")
	}

	// Assignments cast the right side to the resolved result type.
	if operator == token.ASSIGN {
		right = types.CastExpr(p, right, rightType, returnType)
	}

	// Fix: the original re-resolved the binary-operator type here even
	// though it was already computed above; reuse returnType.
	return &goast.BinaryExpr{
		X:  left,
		Op: operator,
		Y:  right,
	}, returnType, nil
}
// transpileUnaryOperator converts a C unary expression (++, --, !, *,
// &, ...) into a Go expression, returning the expression, its C type
// and any transpile error.
func transpileUnaryOperator(n *ast.UnaryOperator, p *program.Program) (goast.Expr, string, error) {
	operator := getTokenForOperator(n.Operator)

	// Unfortunately we cannot use the Go increment operators because we are not
	// providing any position information for tokens. This means that the ++/--
	// would be placed before the expression and would be invalid in Go.
	//
	// Until it can be properly fixed (can we trick Go into to placing it after
	// the expression with a magic position?) we will have to return a
	// BinaryExpr with the same functionality.
	if operator == token.INC || operator == token.DEC {
		// Rewrite "x++" as "x += 1" (and "x--" as "x -= 1").
		binaryOperator := "+="
		if operator == token.DEC {
			binaryOperator = "-="
		}
		return transpileBinaryOperator(&ast.BinaryOperator{
			Type:     n.Type,
			Operator: binaryOperator,
			Children: []ast.Node{
				n.Children[0], &ast.IntegerLiteral{
					Type:     "int",
					Value:    "1",
					Children: []ast.Node{},
				},
			},
		}, p)
	}

	// Otherwise handle like a unary operator.
	e, eType, err := transpileToExpr(n.Children[0], p)
	if err != nil {
		return nil, "", err
	}
	if operator == token.NOT {
		// Go's "!" only applies to bool; any other operand type goes
		// through a noarch.NotXxx helper for the resolved type.
		if eType == "bool" || eType == "_Bool" {
			return &goast.UnaryExpr{
				X:  e,
				Op: operator,
			}, "bool", nil
		}
		p.AddImport("github.com/elliotchance/c2go/noarch")
		t := types.ResolveType(p, eType)
		functionName := fmt.Sprintf("noarch.Not%s", util.Ucfirst(t))
		return &goast.CallExpr{
			Fun:  goast.NewIdent(functionName),
			Args: []goast.Expr{e},
		}, eType, nil
	}
	if operator == token.MUL {
		// Dereference: "*s" on a C string becomes an index of the first
		// element; other pointer types become a Go StarExpr.
		if eType == "const char *" {
			return &goast.IndexExpr{
				X: e,
				Index: &goast.BasicLit{
					Kind:  token.INT,
					Value: "0",
				},
			}, "char", nil
		}
		t, err := types.GetDereferenceType(eType)
		if err != nil {
			return nil, "", err
		}
		return &goast.StarExpr{
			X: e,
		}, t, nil
	}
	if operator == token.AND {
		// Address-of adds one level of pointer to the C type string.
		eType += " *"
	}
	return &goast.UnaryExpr{
		X:  e,
		Op: operator,
	}, eType, nil
}
// transpileConditionalOperator transpiles a conditional (also known as a
// ternary) operator:
//
// a ? b : c
//
// We cannot simply convert these to an "if" statement becuase they by inside
// another expression.
//
// Since Go does not support the ternary operator or inline "if" statements we
// use a function, noarch.Ternary() to work the same way.
//
// It is also important to note that C only evaulates the "b" or "c" condition
// based on the result of "a" (from the above example). So we wrap the "b" and
// "c" in closures so that the Ternary function will only evaluate one of them.
// transpileConditionalOperator transpiles a conditional (also known as
// a ternary) operator:
//
//	a ? b : c
//
// We cannot simply convert these to an "if" statement because they may
// be inside another expression.
//
// Since Go does not support the ternary operator or inline "if"
// statements we use a function, noarch.Ternary(), to work the same way.
//
// It is also important to note that C only evaluates the "b" or "c"
// condition based on the result of "a" (from the above example). So we
// wrap the "b" and "c" in closures so that the Ternary function will
// only evaluate one of them.
//
// Children[0] is the condition, Children[1] the true branch and
// Children[2] the false branch; the returned C type is the node's type.
func transpileConditionalOperator(n *ast.ConditionalOperator, p *program.Program) (*goast.CallExpr, string, error) {
	a, _, err := transpileToExpr(n.Children[0], p)
	if err != nil {
		return nil, "", err
	}
	b, _, err := transpileToExpr(n.Children[1], p)
	if err != nil {
		return nil, "", err
	}
	c, _, err := transpileToExpr(n.Children[2], p)
	if err != nil {
		return nil, "", err
	}
	p.AddImport("github.com/elliotchance/c2go/noarch")
	// The following code will generate the Go AST that will simulate a
	// conditional (ternary) operator, in the form of:
	//
	//     noarch.Ternary(
	//         $1,
	//         func () interface{} {
	//             return $2
	//         },
	//         func () interface{} {
	//             return $3
	//         },
	//     )
	//
	// $2 and $3 (the true and false condition respectively) must be wrapped in
	// a closure so that they are not both executed.
	return &goast.CallExpr{
		Fun: goast.NewIdent("noarch.Ternary"),
		Args: []goast.Expr{
			a,
			newTernaryWrapper(b),
			newTernaryWrapper(c),
		},
	}, n.Type, nil
}
// newTernaryWrapper is a helper method used by transpileConditionalOperator().
// It will wrap an expression in a closure.
// newTernaryWrapper is a helper method used by transpileConditionalOperator().
// It wraps an expression in a parameterless closure that returns the
// expression as a single interface{} result.
func newTernaryWrapper(e goast.Expr) *goast.FuncLit {
	// func () interface{} { ... }
	results := &goast.FieldList{
		List: []*goast.Field{
			{
				Type: &goast.InterfaceType{
					Methods: &goast.FieldList{},
				},
			},
		},
	}
	// { return e }
	body := &goast.BlockStmt{
		List: []goast.Stmt{
			&goast.ReturnStmt{Results: []goast.Expr{e}},
		},
	}
	return &goast.FuncLit{
		Type: &goast.FuncType{
			Params:  &goast.FieldList{},
			Results: results,
		},
		Body: body,
	}
}
// transpileParenExpr transpiles an expression that is wrapped in parentheses.
func transpileParenExpr(n *ast.ParenExpr, p *program.Program) (*goast.ParenExpr, string, error) {
e, eType, err := transpileToExpr(n.Children[0], p)
if err != nil {
return nil, "", err
}
return &goast.ParenExpr{
Lparen: token.NoPos,
X: e,
Rparen: token.NoPos,
}, eType, nil
}
// transpileCompoundAssignOperator transpiles compound assignments such
// as "+=" and "<<=". The resolved C type is currently left empty.
func transpileCompoundAssignOperator(n *ast.CompoundAssignOperator, p *program.Program) (*goast.BinaryExpr, string, error) {
	lhs, _, err := transpileToExpr(n.Children[0], p)
	if err != nil {
		return nil, "", err
	}
	rhs, _, err := transpileToExpr(n.Children[1], p)
	if err != nil {
		return nil, "", err
	}
	expr := &goast.BinaryExpr{
		X:  lhs,
		Op: getTokenForOperator(n.Opcode),
		Y:  rhs,
	}
	return expr, "", nil
}
// getTokenForOperator returns the Go operator token for the provided C
// operator.
func getTokenForOperator(operator string) token.Token {
switch operator {
// Arithmetic
case "--":
return token.DEC
case "++":
return token.INC
case "+":
return token.ADD
case "-":
return token.SUB
case "*":
return token.MUL
case "/":
return token.QUO
case "%":
return token.REM
// Assignment
case "=":
return token.ASSIGN
case "+=":
return token.ADD_ASSIGN
case "-=":
return token.SUB_ASSIGN
case "*=":
return token.MUL_ASSIGN
case "/=":
return token.QUO_ASSIGN
case "%=":
return token.REM_ASSIGN
case "&=":
return token.AND_ASSIGN
case "|=":
return token.OR_ASSIGN
case "^=":
return token.XOR_ASSIGN
case "<<=":
return token.SHL_ASSIGN
case ">>=":
return token.SHR_ASSIGN
// Bitwise
case "&":
return token.AND
case "|":
return token.OR
case "~":
return token.XOR
case ">>":
return token.SHR
case "<<":
return token.SHL
// Comparison
case ">=":
return token.GEQ
case "<=":
return token.LEQ
case "<":
return token.LSS
case ">":
return token.GTR
case "!=":
return token.NEQ
case "==":
return token.EQL
// Logical
case "!":
return token.NOT
case "&&":
return token.LAND
case "||":
return token.LOR
}
panic(fmt.Sprintf("unknown operator: %s", operator))
}
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package appfile
import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
commontypes "github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/appfile"
"github.com/oam-dev/kubevela/pkg/cue/process"
util2 "github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/pkg/utils/common"
"github.com/oam-dev/kubevela/pkg/utils/util"
)
const (
// TerraformBaseLocation is the base directory to store all Terraform JSON files
TerraformBaseLocation = ".vela/terraform/"
// TerraformLog is the logfile name for terraform
TerraformLog = "terraform.log"
)
// ApplyTerraform deploys addon resources
// ApplyTerraform deploys addon resources.
//
// For every Terraform-category component it renders the component's
// Terraform JSON, writes it under .vela/terraform/<name>/main.tf.json,
// runs terraform (init/apply/output via callTerraform) and stores the
// outputs in a secret named after the component. All non-Terraform
// components are returned unchanged so the caller can continue the
// normal apply flow with them.
func ApplyTerraform(app *v1beta1.Application, k8sClient client.Client, ioStream util.IOStreams, namespace string, args common.Args) ([]commontypes.ApplicationComponent, error) {
	pd, err := args.GetPackageDiscover()
	if err != nil {
		return nil, err
	}
	// TODO(zzxwill) Need to check whether authentication credentials of a specific cloud provider are exported as environment variables, like `ALICLOUD_ACCESS_KEY`
	var nativeVelaComponents []commontypes.ApplicationComponent

	// parse template
	appParser := appfile.NewApplicationParser(k8sClient, pd)
	ctx := util2.SetNamespaceInCtx(context.Background(), namespace)
	appFile, err := appParser.GenerateAppFile(ctx, app)
	if err != nil {
		return nil, fmt.Errorf("failed to parse appfile: %w", err)
	}
	if appFile == nil {
		return nil, fmt.Errorf("failed to parse appfile")
	}
	// Remember the working directory: callTerraform chdirs into each
	// component's JSON directory and we must return here afterwards.
	cwd, err := os.Getwd()
	if err != nil {
		return nil, err
	}
	for i, wl := range appFile.ParsedComponents {
		switch wl.CapabilityCategory {
		case types.TerraformCategory:
			name := wl.Name
			ioStream.Infof("\nApplying cloud resources %s\n", name)
			tf, err := getTerraformJSONFiles(wl, appfile.GenerateContextDataFromAppFile(appFile, wl.Name))
			if err != nil {
				return nil, fmt.Errorf("failed to get Terraform JSON files from workload %s: %w", name, err)
			}
			tfJSONDir := filepath.Join(TerraformBaseLocation, name)
			if _, err = os.Stat(tfJSONDir); err != nil && os.IsNotExist(err) {
				if err = os.MkdirAll(tfJSONDir, 0750); err != nil {
					return nil, fmt.Errorf("failed to create directory for %s: %w", tfJSONDir, err)
				}
			}
			if err := os.WriteFile(filepath.Join(tfJSONDir, "main.tf.json"), tf, 0600); err != nil {
				return nil, fmt.Errorf("failed to convert Terraform template: %w", err)
			}
			outputs, err := callTerraform(tfJSONDir)
			if err != nil {
				return nil, err
			}
			// Restore the working directory changed by callTerraform.
			if err := os.Chdir(cwd); err != nil {
				return nil, err
			}
			// Parse "key=value" lines; drop the trailing empty line.
			outputList := strings.Split(strings.ReplaceAll(string(outputs), " ", ""), "\n")
			if outputList[len(outputList)-1] == "" {
				outputList = outputList[:len(outputList)-1]
			}
			if err := generateSecretFromTerraformOutput(k8sClient, outputList, name, namespace); err != nil {
				return nil, err
			}
		default:
			nativeVelaComponents = append(nativeVelaComponents, app.Spec.Components[i])
		}
	}
	return nativeVelaComponents, nil
}
// callTerraform changes into tfJSONDir and runs "terraform init" and
// "terraform apply --auto-approve", streaming their output to the
// terraform logfile, then returns the raw bytes of "terraform output".
//
// NOTE: the working-directory change is NOT undone here; the caller is
// responsible for restoring it (ApplyTerraform chdirs back).
func callTerraform(tfJSONDir string) ([]byte, error) {
	if err := os.Chdir(tfJSONDir); err != nil {
		return nil, err
	}
	// Idiom fix: build each command where it is used instead of
	// reassigning a pre-declared *exec.Cmd variable.
	initCmd := exec.Command("bash", "-c", "terraform init")
	if err := common.RealtimePrintCommandOutput(initCmd, TerraformLog); err != nil {
		return nil, err
	}
	applyCmd := exec.Command("bash", "-c", "terraform apply --auto-approve")
	if err := common.RealtimePrintCommandOutput(applyCmd, TerraformLog); err != nil {
		return nil, err
	}
	// Get output from Terraform.
	outputs, err := exec.Command("bash", "-c", "terraform output").Output()
	if err != nil {
		return nil, err
	}
	return outputs, nil
}
// generateSecretFromTerraformOutput generates secret from Terraform output
func generateSecretFromTerraformOutput(k8sClient client.Client, outputList []string, name, namespace string) error {
ctx := context.TODO()
err := k8sClient.Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}})
if err == nil {
return fmt.Errorf("namespace %s doesn't exist", namespace)
}
var cmData = make(map[string]string, len(outputList))
for _, i := range outputList {
line := strings.Split(i, "=")
if len(line) != 2 {
return fmt.Errorf("terraform output isn't in the right format")
}
k := strings.TrimSpace(line[0])
v := strings.TrimSpace(line[1])
if k != "" && v != "" {
cmData[k] = v
}
}
objectKey := client.ObjectKey{
Namespace: namespace,
Name: name,
}
var secret v1.Secret
if err := k8sClient.Get(ctx, objectKey, &secret); err != nil && !errors.IsNotFound(err) {
return fmt.Errorf("retrieving the secret from cloud resource %s hit an issue: %w", name, err)
} else if err == nil {
if err := k8sClient.Delete(ctx, &secret); err != nil {
return fmt.Errorf("failed to store cloud resource %s output to secret: %w", name, err)
}
}
secret = v1.Secret{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Secret",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
},
StringData: cmData,
}
if err := k8sClient.Create(ctx, &secret); err != nil {
return fmt.Errorf("failed to store cloud resource %s output to secret: %w", name, err)
}
return nil
}
// getTerraformJSONFiles gets Terraform JSON files or modules from workload
func getTerraformJSONFiles(comp *appfile.Component, ctxData process.ContextData) ([]byte, error) {
pCtx, err := appfile.PrepareProcessContext(comp, ctxData)
if err != nil {
return nil, err
}
base, _ := pCtx.Output()
tf, err := base.Compile()
if err != nil {
return nil, err
}
return tf, nil
}
|
// Copyright 2016 by caixw, All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.
package syntax
import (
"testing"
"github.com/tanxiaolong/apidoc/types"
"github.com/issue9/assert"
)
// go1.9 BenchmarkDoc_Parse-4 50000 24044 ns/op
// BenchmarkDoc_Parse measures parsing a representative api doc-comment
// block into a types.Doc.
//
// go1.9 BenchmarkDoc_Parse-4 50000 24044 ns/op
func BenchmarkDoc_Parse(b *testing.B) {
	code := `
@api get /baseurl/api/login api summary
api description 1
api description 2
@apiGroup users
@apiQuery q1 int q1 summary
@apiQuery q2 int q2 summary
@apiParam p1 int p1 summary
@apiParam p2 int p2 summary
@apiSuccess 200 json
@apiHeader h1 v1
@apiHeader h2 v2
@apiParam p1 int optional p1 summary
@apiParam p2 int p2 summary
@apiExample json
{
p1:v1,
p2:v2
}
@apiExample xml
<root>
<p1>v1</p1>
<p2>v2</p2>
</root>
@apiError 200 json
@apiHeader h1 v1
@apiHeader h2 v2
`
	d := types.NewDoc()
	input := &Input{
		Data: []rune(code),
	}
	for i := 0; i < b.N; i++ {
		Parse(input, d)
	}
}

// BenchmarkTag_readWord measures reading a single word from tag data;
// the position is rewound after every iteration.
//
// go1.9 BenchmarkTag_readWord-4 20000000 109 ns/op
func BenchmarkTag_readWord(b *testing.B) {
	a := assert.New(b)
	t := &tag{data: []rune("line1\n @delimiter line2 \n")}
	a.NotNil(t)
	for i := 0; i < b.N; i++ {
		_ = t.readWord()
		t.pos = 0
	}
}

// BenchmarkTag_readLine measures reading a single line from tag data.
//
// go1.9 BenchmarkTag_readLine-4 20000000 93.6 ns/op
func BenchmarkTag_readLine(b *testing.B) {
	a := assert.New(b)
	t := &tag{data: []rune("line1\n @delimiter line2 \n")}
	a.NotNil(t)
	for i := 0; i < b.N; i++ {
		_ = t.readLine()
		t.pos = 0
	}
}

// BenchmarkTag_readEnd measures reading to the end of the tag data.
//
// go1.9 BenchmarkTag_readEnd-4 10000000 172 ns/op
func BenchmarkTag_readEnd(b *testing.B) {
	a := assert.New(b)
	t := &tag{data: []rune("line1\n line2 \n")}
	a.NotNil(t)
	for i := 0; i < b.N; i++ {
		_ = t.readEnd()
		t.pos = 0
	}
}

// BenchmarkNewLexer measures lexer construction; the empty atEOF branch
// only keeps the result from being optimized away.
//
// go1.9 BenchmarkNewLexer-4 1000000000 2.71 ns/op
func BenchmarkNewLexer(b *testing.B) {
	input := &Input{
		Data: []rune("line"),
	}
	for i := 0; i < b.N; i++ {
		l := newLexer(input)
		if l.atEOF() {
		}
	}
}
|
package main
import (
"github.com/ActiveState/log"
"github.com/ActiveState/logyard-apps/sieve"
"github.com/ActiveState/stackato-go/server"
)
type Config struct {
Events map[string]map[string]sieve.EventParserSpec `json:"events"`
}
var c *server.Config
// getConfig returns the currently loaded sieve configuration. It
// panics if LoadConfig has not run yet or if the stored value is not a
// *Config (unchecked type assertion).
func getConfig() *Config {
	return c.GetConfig().(*Config)
}
// LoadConfig initializes the package-level config handle for the
// "logyard_sieve" group and logs the configured events. It aborts the
// process on failure.
func LoadConfig() {
	var err error
	c, err = server.NewConfig("logyard_sieve", Config{})
	if err != nil {
		log.Fatalf("Unable to load logyard_sieve config; %v", err)
	}
	log.Info(getConfig().Events)
}
|
package API
import (
"Work_5/Service"
"Work_5/object"
"github.com/gin-gonic/gin"
"net/http"
)
// Comment handles the "comment on an article" endpoint: it binds the
// user, article and comment from the request, calls the service layer
// and returns either the service's error object or the stored comment
// as JSON (both with HTTP 200).
func Comment(ctx *gin.Context) {
	// Bind the user, article and comment request parameters.
	var user object.User
	var article object.Article
	var comment object.Comment
	// NOTE(review): the ShouldBind errors are discarded, so malformed
	// requests reach the service with zero-valued structs. Also, gin
	// can only repeatedly re-bind form data; JSON bodies would need
	// ShouldBindBodyWith for three consecutive binds — confirm the
	// expected content type before tightening this.
	ctx.ShouldBind(&user)
	ctx.ShouldBind(&article)
	ctx.ShouldBind(&comment)
	// Invoke the comment service.
	err := Service.CommentArticle(&user, &article, &comment)
	// Reply according to the service result.
	if err.IsErr {
		ctx.JSON(http.StatusOK, err)
	} else {
		ctx.JSON(http.StatusOK, comment)
	}
}
|
package main
import "fmt"
// main demonstrates boolean variable declarations and prints a line
// when the first flag is set.
func main() {
	// Fix: the original split the type keyword and the assignment
	// across two statements ("bool" on its own line followed by
	// "benar = true"), which is not valid Go and does not compile.
	benar := true
	salah := false
	_ = salah // declared in the original; kept, but otherwise unused
	if benar {
		fmt.Println("benar", true)
	}
}
|
package utils
// TODO this struct only exists because go sucks so hard it can not
// resolve cyclic dependencies. Therefore operations in the models
// package can not call anything from the database package (because
// database is also importing models, obviously).
// Why on gods green earth is go not being able to resolve cyclic deps??
// Furthermore, it also "dirties" the API here, because I am not even
// allowed to send a WorkItem instance to the methods here (because that
// would again require importing the models package, creating a cyclic dep).
// My head explodes.
import (
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
)
// DatabaseMetaService is the central registry for meta queries.
var DatabaseMetaService *DatabaseMeta

// DatabaseMeta provides all meta access to the database.
type DatabaseMeta struct {
	database *mgo.Database
}

// NewDatabaseMeta creates a new storage backend for WorkItems. As a
// side effect it also (re)assigns the global DatabaseMetaService to
// the newly created instance.
func NewDatabaseMeta(database *mgo.Database) *DatabaseMeta {
	DatabaseMetaService = &DatabaseMeta{database: database}
	return DatabaseMetaService
}
// HasChildren returns true when the work item identified with the ID has children.
func (d *DatabaseMeta) HasChildren(collectionName string, workItemID bson.ObjectId) (bool, error) {
coll := d.database.C(collectionName)
count, err := coll.Find(bson.M{"parent_workitem_id": workItemID}).Count()
if err != nil {
ErrorLog.Printf("Error while retrieving child count from database: %s", err.Error())
return false, err
}
DebugLog.Printf("Retrieved WorkItem child count from database for parent WorkItem %s.", workItemID)
return (count>0), nil
}
// getIterationMeta returns iteration meta data.
func (d *DatabaseMeta) GetIterationMeta(collectionName string, iterationID bson.ObjectId) (int, int, error) {
coll := d.database.C(collectionName)
countAll, err := coll.Find(bson.M{"iteration_id": iterationID}).Count()
// TODO this uses a fixed "closed" state, that may change in the future.
countClosed, err := coll.Find(bson.M{"iteration_id": iterationID, "attributes.system$state": "closed"}).Count()
if err != nil {
ErrorLog.Printf("Error while retrieving Iteration/WorkItem meta counts from database: %s", err.Error())
return -1, -1, err
}
DebugLog.Printf("Retrieved Iteration/WorkItem meta counts from database for Iteration %s.", iterationID)
return countAll, countClosed, nil
}
|
package datastore
import "fmt"
// DeleteAll deletes all records from a table.
func DeleteAll(table string) (err error) {
q := fmt.Sprintf("delete from %s", table)
_, err = Db.ExecContext(ctx, q)
if err != nil {
return
}
return
}
|
// Copyright (c) 2020 Blockwatch Data Inc.
// Author: alex@blockwatch.cc
package index
// import logpkg "github.com/echa/log"
//
// var log logpkg.Logger = logpkg.Log
//
// func init() {
// DisableLog()
// }
//
// func DisableLog() {
// log = logpkg.Disabled
// }
//
// func UseLogger(logger logpkg.Logger) {
// log = logger
// }
|
package main
// serveHttpTmplModel is the template model for generating a ServeHTTP
// dispatcher over one struct's handlers.
type serveHttpTmplModel struct {
	StructName string
	Handlers   []handlerTmplModel
}

// handlerTmplModel describes one generated HTTP handler method.
type handlerTmplModel struct {
	HandlerName  string
	ReceiverType string
	URL          string
	Method       string
	IsProtected  bool
}

// minMaxIntTmplModel is the template model for integer min/max
// validation of a single field.
type minMaxIntTmplModel struct {
	FieldName string
	MinValue  string
	MaxValue  string
}

// enumTmplModel is the template model for enum validation of a field.
type enumTmplModel struct {
	FieldName string
	Enum      []string
}

// createObjModel is the template model for building an object of
// StructName from its fields.
type createObjModel struct {
	StructName string
	Fields     []Field
}

// ApiValidatorTags holds the validation options of one struct field —
// presumably parsed from an apivalidator-style struct tag; confirm
// against the tag parser.
type ApiValidatorTags struct {
	Required      bool
	ParamName     string
	Min           string
	Max           string
	DefaultString string
	DefaultInt    string
	Enum          []string
}

// Fields groups the fields of one struct.
type Fields struct {
	Fields []Field
}

// Field is a single struct field: its name, type and raw tag text.
type Field struct {
	Name string
	Type string
	Tag  string
}

// ApigenComment is the JSON payload of an apigen comment directive.
type ApigenComment struct {
	URL    string `json:"url"`
	Auth   bool   `json:"auth"`
	Method string `json:"method"`
}
|
// Copyright 2018 Diego Bernardes. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flare
import (
"fmt"
"net/http"
"github.com/go-chi/chi"
"github.com/go-kit/kit/log"
"github.com/pkg/errors"
document "github.com/diegobernardes/flare/domain/document/http"
resource "github.com/diegobernardes/flare/domain/resource/http"
subscription "github.com/diegobernardes/flare/domain/subscription/http"
"github.com/diegobernardes/flare/infra/config"
infraHTTP "github.com/diegobernardes/flare/infra/http"
repositoryHook "github.com/diegobernardes/flare/provider/hook/repository"
)
// domain aggregates the HTTP handlers of the three API domains
// (resource, subscription, document) together with their shared
// dependencies (logger, repositories, worker, config, hooks).
type domain struct {
	resource     *resource.Handler
	subscription *subscription.Handler
	document     *document.Handler
	logger       log.Logger
	repository   *repository
	worker       *worker
	cfg          *config.Client
	hook         *hook
}

// init wires up the resource, subscription and document handlers, in
// that order, stopping at the first failure.
func (d *domain) init() error {
	r, err := d.initResource()
	if err != nil {
		return err
	}
	d.resource = r

	s, err := d.initSubscription()
	if err != nil {
		return err
	}
	d.subscription = s

	doc, err := d.initDocument()
	if err != nil {
		return err
	}
	d.document = doc
	return nil
}

// initResource builds the resource handler: a hook-aware repository
// plus ID/URI extraction, pagination and response-writer plumbing.
func (d *domain) initResource() (*resource.Handler, error) {
	writer, err := infraHTTP.NewWriter(d.logger)
	if err != nil {
		return nil, errors.Wrap(err, "error during http.Writer initialization")
	}
	repository := &repositoryHook.Resource{
		Repository: d.repository.base.Resource(),
		Hook:       d.hook.resource,
	}
	if err = repository.Init(); err != nil {
		return nil, errors.Wrap(err, "error during initialize resource hook repository")
	}
	handler, err := resource.NewHandler(
		resource.HandlerGetResourceID(func(r *http.Request) string { return chi.URLParam(r, "id") }),
		resource.HandlerGetResourceURI(func(id string) string {
			return fmt.Sprintf("/resources/%s", id)
		}),
		resource.HandlerParsePagination(
			infraHTTP.ParsePagination(d.cfg.GetInt("domain.pagination.default-limit")),
		),
		resource.HandlerWriter(writer),
		resource.HandlerRepository(repository),
	)
	if err != nil {
		return nil, errors.Wrap(err, "error during resource.Handler initialization")
	}
	return handler, nil
}

// initSubscription builds the subscription handler, including the
// resource/subscription ID extractors and the URI builder.
func (d *domain) initSubscription() (*subscription.Handler, error) {
	writer, err := infraHTTP.NewWriter(d.logger)
	if err != nil {
		return nil, errors.Wrap(err, "error during http.Writer initialization")
	}
	subscriptionService, err := subscription.NewHandler(
		subscription.HandlerParsePagination(
			infraHTTP.ParsePagination(d.cfg.GetInt("domain.pagination.default-limit")),
		),
		subscription.HandlerWriter(writer),
		subscription.HandlerGetResourceID(func(r *http.Request) string {
			return chi.URLParam(r, "resourceID")
		}),
		subscription.HandlerGetSubscriptionID(func(r *http.Request) string {
			return chi.URLParam(r, "id")
		}),
		subscription.HandlerGetSubscriptionURI(func(resourceId, id string) string {
			return fmt.Sprintf("/resources/%s/subscriptions/%s", resourceId, id)
		}),
		subscription.HandlerResourceRepository(d.repository.base.Resource()),
		subscription.HandlerSubscriptionRepository(d.repository.base.Subscription()),
	)
	if err != nil {
		return nil, errors.Wrap(err, "error during subscription.Handler initialization")
	}
	return subscriptionService, nil
}

// initDocument builds the document handler; document IDs come from the
// wildcard URL parameter and triggers go to the subscription worker.
func (d *domain) initDocument() (*document.Handler, error) {
	writer, err := infraHTTP.NewWriter(d.logger)
	if err != nil {
		return nil, errors.Wrap(err, "error during http.Writer initialization")
	}
	documentHandler, err := document.NewHandler(
		document.HandlerDocumentRepository(d.repository.base.Document()),
		document.HandlerSubscriptionRepository(d.repository.base.Subscription()),
		document.HandlerResourceRepository(d.repository.base.Resource()),
		document.HandlerGetDocumentID(func(r *http.Request) string { return chi.URLParam(r, "*") }),
		document.HandlerSubscriptionTrigger(d.worker.subscriptionPartition),
		document.HandlerWriter(writer),
	)
	if err != nil {
		return nil, errors.Wrap(err, "error during document.Handler initialization")
	}
	return documentHandler, nil
}
|
package main
import (
"encoding/json"
"flag"
"fmt"
"net"
"net/http"
"os"
"path/filepath"
"github.com/Sirupsen/logrus"
"github.com/Stratoscale/logserver/cache"
"github.com/Stratoscale/logserver/debug"
"github.com/Stratoscale/logserver/download"
"github.com/Stratoscale/logserver/dynamic"
"github.com/Stratoscale/logserver/engine"
"github.com/Stratoscale/logserver/parse"
"github.com/Stratoscale/logserver/route"
"github.com/Stratoscale/logserver/source"
"github.com/bakins/logrus-middleware"
"github.com/gorilla/mux"
)
var log = logrus.WithField("pkg", "main")
const (
defaultConfig = "logserver.json"
defaultAddr = "localhost:8888"
)
var options struct {
addr string
config string
debug bool
dynamic bool
}
// init registers the command-line flags into the package-level options.
func init() {
	flag.StringVar(&options.addr, "addr", defaultAddr, "Serving address")
	flag.StringVar(&options.config, "config", defaultConfig, "Path to a config file")
	flag.BoolVar(&options.debug, "debug", false, "Show debug logs")
	flag.BoolVar(&options.dynamic, "dynamic", false, "Run in dynamic mode")
}
// config is the top-level layout of the logserver JSON config file.
type config struct {
	Global  engine.Config   `json:"global"`
	Sources []source.Config `json:"sources"`
	Parsers []parse.Config  `json:"parsers"`
	Dynamic dynamic.Config  `json:"dynamic"`
	Cache   cache.Config    `json:"cache"`
	Route   route.Config    `json:"route"`
}

// journal returns the journal name to open: the dynamic section wins,
// otherwise the first source that sets OpenJournal; "" when none do.
func (c config) journal() string {
	if name := c.Dynamic.OpenJournal; name != "" {
		return name
	}
	for _, src := range c.Sources {
		if name := src.OpenJournal; name != "" {
			return name
		}
	}
	return ""
}
// main wires the whole logserver: flags, parsers, cache and routes,
// then serves HTTP with either the static-sources engine or the
// dynamic handler. Any setup failure aborts via failOnErr.
func main() {
	flag.Parse()
	// apply debug logs
	if options.debug {
		logrus.SetLevel(logrus.DebugLevel)
	}
	// validate address
	_, _, err := net.SplitHostPort(options.addr)
	failOnErr(err, "Bad address value: %s", options.addr)
	cfg := loadConfig(options.config)
	log.Infof("Loading parsers...")
	parser, err := parse.New(cfg.Parsers)
	failOnErr(err, "Creating parsers")
	// add journal parser if necessary
	if journalName := cfg.journal(); journalName != "" {
		log.Infof("Adding a journalctl parser")
		err := parser.AppendJournal(journalName)
		if err != nil {
			// Non-fatal: continue without the journal parser.
			log.WithError(err).Warn("Failed adding a journalctl parser")
		}
	}
	log.Printf("Loaded with %d parsers", len(parser))
	cache := cache.New(cfg.Cache)
	r := mux.NewRouter()
	route.Static(r)
	if !options.dynamic {
		s, err := source.New(cfg.Sources, cache)
		failOnErr(err, "Creating config")
		defer s.CloseSources()
		dl := download.New(filepath.Join(cfg.Route.RootPath, "_dl"), s, cache)
		eng := engine.New(cfg.Global, s, parser, cache)
		// put websocket handler behind the root and behind the proxy path
		// it must be before the redirect handlers because it is on the proxy path
		route.Engine(r, "/", eng)
		route.Download(r, "/", dl)
		if cfg.Route.RootPath != "" && cfg.Route.RootPath != "/" {
			route.Engine(r, cfg.Route.RootPath, eng)
			route.Download(r, cfg.Route.RootPath, dl)
		}
		// add redirect of request that are sent to a proxy path with the same URL without the proxy prefix
		route.Redirect(r, cfg.Route)
		// handle with index on any route that does not match anything else
		failOnErr(route.Index(r, "/", cfg.Route), "Creating index")
	} else {
		var err error
		h, err := dynamic.New(cfg.Dynamic, cfg.Global, parser, cache)
		failOnErr(err, "Creating dynamic handler")
		logMW := logrusmiddleware.Middleware{Logger: log.Logger}
		h = logMW.Handler(h, "")
		r.PathPrefix("/").Handler(h)
	}
	// add debug handlers
	if options.debug {
		debug.PProfHandle(r)
	}
	log.Infof("Serving on http://%s", options.addr)
	err = http.ListenAndServe(options.addr, r)
	failOnErr(err, "Serving")
}
// loadConfig opens and decodes the JSON config file, aborting the
// process on any failure.
func loadConfig(fileName string) config {
	f, openErr := os.Open(fileName)
	failOnErr(openErr, fmt.Sprintf("open file %s", fileName))
	defer f.Close()
	cfg := config{}
	decodeErr := json.NewDecoder(f).Decode(&cfg)
	failOnErr(decodeErr, "Decode config file")
	return cfg
}
// failOnErr logs the formatted message together with err and exits the
// process when err is non-nil; it is a no-op otherwise.
func failOnErr(err error, msg string, args ...interface{}) {
	if err != nil {
		log.Fatalf("%s: %s", fmt.Sprintf(msg, args...), err)
	}
}
|
package main
import "fmt"
// main slices a fixed-size array over its full range and prints the
// resulting slice.
func main() {
	arr := [5]int{1, 2, 3, 4, 5}
	view := arr[:]
	fmt.Println(view)
}
|
package sweetiebot
import "fmt"
import "time"
// Logger is the logging contract used by the bot: free-form logging,
// error logging and user-facing error reporting.
//
// NOTE(review): the interface names its Error parameters
// (message, channelID) while the *Log implementation below treats the
// FIRST argument as the channel ID. Both are strings, so the type
// system is satisfied either way — confirm which order callers using
// the interface actually rely on.
type Logger interface {
	Log(args ...interface{})
	LogError(msg string, err error)
	Error(message string, channelID string)
}

// Log implements Logger; it rate-limits user-facing errors and mirrors
// log lines to the database and to a configured log channel.
type Log struct {
	lasterr int64      // last-error timestamp consumed by RateLimit (presumably unix seconds — confirm)
	info    *GuildInfo // guild this log belongs to; may be nil
}

// Log prints the arguments to stdout with a timestamp, audits them in
// the database for the main guild when the DB is up, and mirrors them
// to the guild's configured log channel.
func (l *Log) Log(args ...interface{}) {
	s := fmt.Sprint(args...)
	fmt.Printf("[%s] %s\n", time.Now().Format(time.Stamp), s)
	if sb.db != nil && l.info != nil && sb.IsMainGuild(l.info) && sb.db.status.get() {
		sb.db.Audit(AUDIT_TYPE_LOG, nil, s, SBatoi(l.info.ID))
	}
	if l.info != nil && l.info.config.Log.Channel > 0 {
		l.info.SendMessage(SBitoa(l.info.config.Log.Channel), "```\n"+s+"```")
	}
}

// LogError logs msg together with the error text, but only when err is
// non-nil.
func (l *Log) LogError(msg string, err error) {
	if err != nil {
		l.Log(msg, err.Error())
	}
}

// Error posts message to channelID, rate-limited to at most one error
// per configured cooldown period.
func (l *Log) Error(channelID string, message string) {
	if l.info != nil && RateLimit(&l.lasterr, l.info.config.Log.Cooldown) { // Don't print more than one error message every n seconds.
		l.info.SendMessage(channelID, "```\n"+message+"```")
	}
	//l.Log(message); // Always log it to the debug log. TODO: This is really annoying, maybe we shouldn't do this
}
|
package peach
const (
	// PeachDriverApollo is the driver name identifying the Apollo
	// configuration backend.
	PeachDriverApollo = "apollo"
)
|
package redis_api
import (
// "os"
//"net/http"
)
var connections redisConnections
var handler *Message
// init opens a redis connection for every configured host, initializes
// the keys on each healthy connection and prepares the package-level
// message handler.
func init() {
	// Bug fix: the original used ":=", creating a local variable that
	// shadowed the package-level "connections", which therefore stayed
	// nil for the rest of the package. Assign with "=" instead.
	connections = make(redisConnections)
	for _, conf := range RedisHosts {
		hval := conf.GetHval()
		connections[hval] = GetRedis(hval)
		conn := connections[hval]
		if conn.Err == nil {
			conn.initKeys()
		}
	}
	//root := os.Getenv("GOPATH")+"/src/github.com/go-libraries/redis-api"
	handler = &Message{
		//Url:"http://127.0.0.1:10003",
		//Root:root,
		//FileHandler:http.FileServer(http.Dir(root+"/resources/app")),
	}
}
|
package main
import (
"fmt"
)
// main reverses a sample UTF-8 string and prints the original literal
// followed by the reversed result.
func main() {
	original := "我是测试字符串"
	reversed := Reverse(original)
	fmt.Println("我是测试字符串")
	fmt.Println(reversed)
}
// Reverse returns s with its runes in reverse order, operating on
// runes (not bytes) so multi-byte UTF-8 characters stay intact. The
// rune slice is printed before and after reversal, as in the original.
func Reverse(s string) string {
	runes := []rune(s)
	fmt.Println(runes)
	n := len(runes)
	for i := 0; i < n/2; i++ {
		runes[i], runes[n-1-i] = runes[n-1-i], runes[i]
	}
	fmt.Println(runes)
	return string(runes)
}
|
/*
Copyright 2020 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package testlog provides custom loggers for use in tests.
package testlog
import (
"fmt"
"os"
"testing"
"github.com/gravitational/teleport/lib/utils"
"github.com/sirupsen/logrus"
)
// FailureOnly returns a logger that only prints the logs to STDERR when the
// test fails.
// FailureOnly returns a logger that only prints the logs to STDERR when the
// test fails.
func FailureOnly(t *testing.T) *logrus.Entry {
	// Collect all the output in buf. NewSyncBuffer is presumably safe
	// for concurrent writes from test goroutines — confirm in utils.
	buf := utils.NewSyncBuffer()
	log := logrus.New()
	log.Out = buf
	// Register a cleanup callback which prints buf iff t has failed.
	t.Cleanup(func() {
		if !t.Failed() {
			return
		}
		fmt.Fprintln(os.Stderr, buf.String())
	})
	return logrus.NewEntry(log)
}
|
package main
import "fmt"
// ListNode is a node of a singly linked list.
type ListNode struct {
	Val  int
	Next *ListNode
}
// main builds the list 4->5->6->7 and prints it after rotating right
// by 8 places; 8 is a multiple of the length 4, so rotateRight returns
// the list unchanged.
func main() {
	head := ListNode{4, &ListNode{5, &ListNode{6, &ListNode{7, nil}}}}
	//printList(&head)
	printList(rotateRight(&head, 8))
}
// printList dumps each node of the list (its contents and address) to
// stdout, one node per line.
func printList(l *ListNode) {
	for node := l; node != nil; node = node.Next {
		fmt.Printf("%+v %p\n", node, node)
	}
}
// rotateRight rotates the list to the right by k places and returns
// the new head.
//
// Strategy: measure the length n while walking to the tail, join the
// tail back to the head to form a ring, then cut the ring n-(k%n)
// steps past the old tail.
func rotateRight(head *ListNode, k int) *ListNode {
	if head == nil || head.Next == nil || k == 0 {
		return head
	}
	cur := head
	n := 1
	for cur.Next != nil {
		n++
		cur = cur.Next
	}
	// cur is now the tail; close the ring.
	cur.Next = head
	mod := k % n
	if mod == 0 {
		// Rotating by a multiple of n is a no-op; reopen the ring.
		cur.Next = nil
		return head
	}
	// Advance to the node that becomes the new tail.
	step := n - mod
	for step > 0 {
		cur = cur.Next
		step--
	}
	// Cut the ring right after the new tail.
	newHead := cur.Next
	cur.Next = nil
	return newHead
}
|
package table_column
import (
tableColumn "yj-app/app/model/tool/table_column"
"yj-app/app/yjgframe/utils/convert"
)
// Insert persists a new table-column record and returns the generated column ID.
func Insert(entity *tableColumn.Entity) (int64, error) {
	if _, err := entity.Insert(); err != nil {
		return 0, err
	}
	return entity.ColumnId, nil
}
// Update writes the modified table-column record back to storage and returns
// the number of affected rows.
func Update(entity *tableColumn.Entity) (int64, error) {
	return entity.Update()
}
// SelectRecordById looks up a single table-column record by its primary key.
// The entity is returned even on error, mirroring the underlying FindOne call.
func SelectRecordById(id int64) (*tableColumn.Entity, error) {
	record := &tableColumn.Entity{ColumnId: id}
	if _, err := record.FindOne(); err != nil {
		return record, err
	}
	return record, nil
}
// DeleteRecordById removes the record with the given primary key and reports
// whether a row was actually deleted.
func DeleteRecordById(id int64) bool {
	entity := &tableColumn.Entity{ColumnId: id}
	affected, err := entity.Delete()
	return err == nil && affected > 0
}
// DeleteRecordByIds deletes all records whose IDs appear in the comma-separated
// list and returns the number of rows removed (0 when the delete fails).
func DeleteRecordByIds(ids string) int64 {
	idList := convert.ToInt64Array(ids, ",")
	deleted, err := tableColumn.DeleteBatch(idList...)
	if err != nil {
		return 0
	}
	return deleted
}
// SelectGenTableColumnListByTableId returns all column records that belong to
// the given table. Pure delegation to the model layer.
func SelectGenTableColumnListByTableId(tableId int64) (*[]tableColumn.Entity, error) {
	return tableColumn.SelectGenTableColumnListByTableId(tableId)
}
// SelectDbTableColumnsByName returns the column metadata of the database table
// with the given name. Pure delegation to the model layer.
func SelectDbTableColumnsByName(tableName string) (*[]tableColumn.Entity, error) {
	return tableColumn.SelectDbTableColumnsByName(tableName)
}
|
package producer
import (
"reflect"
"testing"
)
// getFieldString reads the named (possibly unexported) struct field from obj —
// dereferencing a pointer if necessary — and returns its string value.
func getFieldString(obj interface{}, field string) string {
	value := reflect.Indirect(reflect.ValueOf(obj))
	match := func(name string) bool {
		return name == field
	}
	return value.FieldByNameFunc(match).String()
}
// TestWithUnitName verifies that the WithUnitName option stores the given unit
// name on the producer options.
func TestWithUnitName(t *testing.T) {
	opt := defaultProducerOptions()
	unitName := "unsh"
	WithUnitName(unitName)(&opt)
	if opt.UnitName != unitName {
		t.Errorf("producer option WithUnitName. want:%s, got=%s", unitName, opt.UnitName)
	}
}
// TestWithNameServerDomain verifies that WithNameServerDomain stores the given
// address in the resolver's private "domain" field (read back via reflection).
func TestWithNameServerDomain(t *testing.T) {
	opt := defaultProducerOptions()
	nameServerAddr := "http://127.0.0.1:8080/nameserver/addr"
	WithNameServerDomain(nameServerAddr)(&opt)
	domainStr := getFieldString(opt.Resolver, "domain")
	if domainStr != nameServerAddr {
		t.Errorf("producer option WithUnitName. want:%s, got=%s", nameServerAddr, domainStr)
	}
}
// TestWithNameServerDomainAndUnitName verifies that WithNameServerDomain and
// WithUnitName compose to the same resolver domain regardless of the order in
// which the two options are applied, both with and without a pre-existing
// query string on the name-server address.
func TestWithNameServerDomainAndUnitName(t *testing.T) {
	unitName := "unsh"
	// test with two different orders
	t.Run("WithNameServerDomain & WithUnitName", func(t *testing.T) {
		addr := "http://127.0.0.1:8080/nameserver/addr"
		opt := defaultProducerOptions()
		WithNameServerDomain(addr)(&opt)
		WithUnitName(unitName)(&opt)
		domainStr := getFieldString(opt.Resolver, "domain")
		// Expected: unit name appended to the path plus a nofix=1 query parameter.
		expectedAddr := "http://127.0.0.1:8080/nameserver/addr-unsh?nofix=1"
		if domainStr != expectedAddr {
			t.Errorf("producer option WithNameServerDomain & WithUnitName. want:%s, got=%s", expectedAddr, domainStr)
		}
	})
	t.Run("WithUnitName & WithNameServerDomain", func(t *testing.T) {
		addr := "http://127.0.0.1:8080/nameserver/addr"
		opt := defaultProducerOptions()
		WithUnitName(unitName)(&opt)
		WithNameServerDomain(addr)(&opt)
		domainStr := getFieldString(opt.Resolver, "domain")
		expectedAddr := "http://127.0.0.1:8080/nameserver/addr-unsh?nofix=1"
		if domainStr != expectedAddr {
			t.Errorf("producer option WithUnitName & WithNameServerDomain. want:%s, got=%s", expectedAddr, domainStr)
		}
	})
	// test with two different orders - name server with query string
	t.Run("WithNameServerDomain & WithUnitName", func(t *testing.T) {
		addr := "http://127.0.0.1:8080/nameserver/addr?labels=abc"
		opt := defaultProducerOptions()
		WithNameServerDomain(addr)(&opt)
		WithUnitName(unitName)(&opt)
		domainStr := getFieldString(opt.Resolver, "domain")
		// Expected: existing query parameters are preserved after nofix=1.
		expectedAddr := "http://127.0.0.1:8080/nameserver/addr-unsh?nofix=1&labels=abc"
		if domainStr != expectedAddr {
			t.Errorf("producer option WithNameServerDomain & WithUnitName. want:%s, got=%s", expectedAddr, domainStr)
		}
	})
	t.Run("WithUnitName & WithNameServerDomain", func(t *testing.T) {
		addr := "http://127.0.0.1:8080/nameserver/addr?labels=abc"
		opt := defaultProducerOptions()
		WithUnitName(unitName)(&opt)
		WithNameServerDomain(addr)(&opt)
		domainStr := getFieldString(opt.Resolver, "domain")
		expectedAddr := "http://127.0.0.1:8080/nameserver/addr-unsh?nofix=1&labels=abc"
		if domainStr != expectedAddr {
			t.Errorf("producer option WithUnitName & WithNameServerDomain. want:%s, got=%s", expectedAddr, domainStr)
		}
	})
}
|
package operators
import (
"context"
"reflect"
"github.com/go-logr/logr"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
operatorsv2 "github.com/operator-framework/api/pkg/operators/v2"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/ownerutil"
"github.com/operator-framework/operator-lifecycle-manager/pkg/metrics"
)
// OperatorConditionGeneratorReconciler reconciles a ClusterServiceVersion object and creates an OperatorCondition.
type OperatorConditionGeneratorReconciler struct {
	// Client reads and writes cluster objects (CSVs and OperatorConditions).
	Client client.Client
	// log is the base logger; Reconcile derives a per-request logger from it.
	log logr.Logger
}
// +kubebuilder:rbac:groups=operators.coreos.com,resources=operatorconditions,verbs=get;list;update;patch;delete
// +kubebuilder:rbac:groups=operators.coreos.com,resources=operatorconditions/status,verbs=update;patch
// SetupWithManager adds the OperatorCondition Reconciler reconciler to the given controller manager.
// It reconciles ClusterServiceVersions (excluding copied CSVs) and also
// enqueues the owning CSV whenever one of its OperatorConditions changes.
func (r *OperatorConditionGeneratorReconciler) SetupWithManager(mgr ctrl.Manager) error {
	// Use a name that does not shadow the imported "handler" package.
	ownerHandler := handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &operatorsv1alpha1.ClusterServiceVersion{}, handler.OnlyControllerOwner())
	// Copied CSVs (those carrying the copied label) must never generate
	// OperatorConditions, so filter them out for every event type.
	notCopied := func(obj client.Object) bool {
		_, ok := obj.GetLabels()[operatorsv1alpha1.CopiedLabelKey]
		return !ok
	}
	p := predicate.Funcs{
		CreateFunc:  func(e event.CreateEvent) bool { return notCopied(e.Object) },
		DeleteFunc:  func(e event.DeleteEvent) bool { return notCopied(e.Object) },
		UpdateFunc:  func(e event.UpdateEvent) bool { return notCopied(e.ObjectOld) },
		GenericFunc: func(e event.GenericEvent) bool { return notCopied(e.Object) },
	}
	return ctrl.NewControllerManagedBy(mgr).
		For(&operatorsv1alpha1.ClusterServiceVersion{}, builder.WithPredicates(p)).
		Watches(&operatorsv2.OperatorCondition{}, ownerHandler).
		Complete(r)
}
// NewOperatorConditionGeneratorReconciler constructs and returns an OperatorConditionGeneratorReconciler.
// As a side effect, the given scheme has operator discovery types added to it.
func NewOperatorConditionGeneratorReconciler(cli client.Client, log logr.Logger, scheme *runtime.Scheme) (*OperatorConditionGeneratorReconciler, error) {
	// Add watched types to scheme.
	if err := AddToScheme(scheme); err != nil {
		return nil, err
	}
	return &OperatorConditionGeneratorReconciler{
		Client: cli,
		log: log,
	}, nil
}
// Implement reconcile.Reconciler so the controller can reconcile objects
var _ reconcile.Reconciler = &OperatorConditionGeneratorReconciler{}
// Reconcile fetches the requested ClusterServiceVersion and ensures an
// OperatorCondition with the same name/namespace exists, owned by the CSV and
// carrying the CSV's service account and deployment names.
func (r *OperatorConditionGeneratorReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	// Set up a convenient log object so we don't have to type request over and over again
	log := r.log.WithValues("request", req).V(1)
	metrics.EmitOperatorConditionGeneratorReconcile(req.Namespace, req.Name)
	in := &operatorsv1alpha1.ClusterServiceVersion{}
	if err := r.Client.Get(ctx, req.NamespacedName, in); err != nil {
		// A CSV that no longer exists is not an error (it may have been
		// deleted); only other Get failures are propagated for requeue.
		log.Info("Unable to find ClusterServiceVersion")
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}
	operatorCondition := &operatorsv2.OperatorCondition{
		ObjectMeta: metav1.ObjectMeta{
			// For now, only generate an OperatorCondition with the same name as the csv.
			Name: in.GetName(),
			Namespace: in.GetNamespace(),
		},
		Spec: operatorsv2.OperatorConditionSpec{
			ServiceAccounts: getServiceAccountNames(*in),
			Deployments: getDeploymentNames(*in),
		},
	}
	// Record the CSV as owner of the condition (flag semantics per ownerutil.AddOwner).
	ownerutil.AddOwner(operatorCondition, in, false, true)
	if err := r.ensureOperatorCondition(*operatorCondition); err != nil {
		log.Info("Error ensuring OperatorCondition")
		return ctrl.Result{}, err
	}
	return ctrl.Result{}, nil
}
// getServiceAccountNames collects the non-empty service account names from the
// CSV's cluster-scoped and namespaced permission blocks. When none are
// declared it falls back to the "default" service account.
func getServiceAccountNames(csv operatorsv1alpha1.ClusterServiceVersion) []string {
	names := []string{}
	strategy := csv.Spec.InstallStrategy.StrategySpec
	for _, perm := range strategy.ClusterPermissions {
		if name := perm.ServiceAccountName; name != "" {
			names = append(names, name)
		}
	}
	for _, perm := range strategy.Permissions {
		if name := perm.ServiceAccountName; name != "" {
			names = append(names, name)
		}
	}
	if len(names) == 0 {
		return []string{"default"}
	}
	return names
}
// getDeploymentNames collects the non-empty deployment names declared in the
// CSV's install strategy.
func getDeploymentNames(csv operatorsv1alpha1.ClusterServiceVersion) []string {
	names := []string{}
	for _, spec := range csv.Spec.InstallStrategy.StrategySpec.DeploymentSpecs {
		if spec.Name == "" {
			continue
		}
		names = append(names, spec.Name)
	}
	return names
}
// ensureOperatorCondition creates the OperatorCondition if it is absent, or
// updates the existing one when its owner references, deployments, or service
// accounts have drifted from the desired state.
func (r *OperatorConditionGeneratorReconciler) ensureOperatorCondition(operatorCondition operatorsv2.OperatorCondition) error {
	existingOperatorCondition := &operatorsv2.OperatorCondition{}
	err := r.Client.Get(context.TODO(), client.ObjectKey{Name: operatorCondition.GetName(), Namespace: operatorCondition.GetNamespace()}, existingOperatorCondition)
	if err != nil {
		if !apierrors.IsNotFound(err) {
			return err
		}
		// Not found: create the condition from scratch.
		return r.Client.Create(context.TODO(), &operatorCondition)
	}
	// Only the fields this controller manages are compared; everything else on
	// the existing object (e.g. status) is left untouched.
	if reflect.DeepEqual(operatorCondition.OwnerReferences, existingOperatorCondition.OwnerReferences) &&
		reflect.DeepEqual(operatorCondition.Spec.Deployments, existingOperatorCondition.Spec.Deployments) &&
		reflect.DeepEqual(operatorCondition.Spec.ServiceAccounts, existingOperatorCondition.Spec.ServiceAccounts) {
		r.log.V(5).Info("Existing OperatorCondition does not need to be updated")
		return nil
	}
	r.log.V(5).Info("Existing OperatorCondition needs to be updated")
	existingOperatorCondition.OwnerReferences = operatorCondition.OwnerReferences
	existingOperatorCondition.Spec.Deployments = operatorCondition.Spec.Deployments
	existingOperatorCondition.Spec.ServiceAccounts = operatorCondition.Spec.ServiceAccounts
	return r.Client.Update(context.TODO(), existingOperatorCondition)
}
|
package main
import "fmt"
// abs returns the absolute value of a.
func abs(a int) int {
	if a < 0 {
		return -a
	}
	return a
}
// findDisappearedNumbers returns every value in [1, len(nums)] that does not
// appear in nums (LeetCode 448). It marks presence in place by negating the
// entry at index v-1 for every value v seen, so nums is mutated.
func findDisappearedNumbers(nums []int) []int {
	var missing []int
	// Marking pass: flip the sign at index |v|-1 for each value v.
	for _, v := range nums {
		idx := v
		if idx < 0 {
			idx = -idx
		}
		if nums[idx-1] >= 0 {
			nums[idx-1] = -nums[idx-1]
		}
	}
	// Collection pass: indices still positive were never marked, so the
	// corresponding value i+1 is absent from the input.
	for i, v := range nums {
		if v > 0 {
			missing = append(missing, i+1)
		}
	}
	return missing
}
// main runs findDisappearedNumbers on the LeetCode 448 sample input;
// expected output: [5 6].
func main() {
	fmt.Println(findDisappearedNumbers([]int{4, 3, 2, 7, 8, 2, 3, 1}))
}
|
/*
Copyright 2017 Eliott Teissonniere
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package repo
import (
"encoding/json"
)
// About holds a repository owner's public profile information.
type About struct {
	Pseudo      string
	Image       string
	ETHAddress  string
	Description string
}

// Export serializes the profile to its JSON representation.
func (a *About) Export() ([]byte, error) {
	return json.Marshal(a)
}

// Import decodes a JSON payload (as produced by Export) into an About value.
func Import(b []byte) (About, error) {
	var about About
	err := json.Unmarshal(b, &about)
	return about, err
}
|
package main
import "fmt"
// main is a playground entry point for the array demos below: uncomment the
// call you want to run.
func main() {
	arrayDeclaration()
	//arrayisValueType()
	//arrayIteration()
	//arraySlicing()
	//variadicArrays()
	//arrayBoundCheck([5]int{1, 2, 3, 4, 5}, 4)
	//multiDimensionArray()
}
// arrayDeclaration demonstrates ways to declare and initialize arrays: var +
// zero value, array literals, length inference, array comparison, and a
// pointer to an array.
func arrayDeclaration() {
	var x [5]int // An array of 5 integers with var declaration
	x[0] = 100
	x[1] = 101
	x[3] = 103
	x[4] = 105
	// x[2] was never assigned, so it still holds the zero value 0.
	fmt.Printf("x[0] = %d, x[1] = %d, x[2] = %d\n", x[0], x[1], x[2])
	fmt.Println("x = ", x)
	x = [5]int{2, 4, 6, 8, 10} // short declaration with fixed length { array literal}
	//infer the length of the array
	x1 := [...]int{3, 5, 7, 9, 11, 13, 17}
	fmt.Println("x1 = ", x1)
	// array is comparable if length and type is same
	x2 := [...]int{9, 89, 7, 9, 11, 13, 9}
	fmt.Println(x1 == x2)
	c := new([10]int) // pointer to array
	(*c)[0] = 1
	c[0] = 1 // indexing through the pointer auto-dereferences: same as (*c)[0]
	fmt.Println(c[0])
}
// arrayisValueType demonstrates that assigning an array copies it: mutating
// the copy leaves the original untouched.
func arrayisValueType() {
	original := [5]string{"English", "Japanese", "Spanish", "French", "Hindi"}
	copied := original
	copied[1] = "German"
	fmt.Println("a1 = ", original)
	fmt.Println("a2 = ", copied)
}
// arrayIteration sums an array and iterates a second one with range,
// printing each element.
func arrayIteration() {
	values := [4]float64{3.5, 7.2, 4.8, 9.5}
	total := 0.0
	for _, v := range values {
		total += v
	}
	fmt.Printf("Sum of all the elements in array %v = %f\n", values, total)
	daysOfWeek := [7]string{"Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"}
	for index, value := range daysOfWeek {
		fmt.Printf("Day %d of week = %s\n", index, value)
	}
}
// arraySlicing shows how slices are formed from an array and how the slicing
// bounds affect length and capacity. All slices below are views over the same
// backing array daysOfWeek, so the printed %p addresses point into it.
func arraySlicing() {
	daysOfWeek := [7]string{"Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"}
	fmt.Printf("%T, %v, %p \n", daysOfWeek, daysOfWeek, &daysOfWeek)
	sliceDays1 := daysOfWeek[2:] // all from 2
	sliceDays2 := daysOfWeek[:3] // 0 to 3 => 3-0 = 3 items
	sliceDays3 := daysOfWeek[2:3] // 3-2 => 1 item
	sliceDays4 := daysOfWeek[:] // view over the whole array (no copy is made)
	sliceDays5 := daysOfWeek[0:1:4] //set the capacity of slice
	fmt.Printf("%T, %v,%d, %p\n", sliceDays1, sliceDays1, cap(sliceDays1), sliceDays1)
	fmt.Printf("%T, %v,%d, %p\n", sliceDays2, sliceDays2, cap(sliceDays2), sliceDays2)
	fmt.Printf("%T, %v,%d, %p\n", sliceDays3, sliceDays3, cap(sliceDays3), sliceDays3)
	fmt.Printf("%T, %v,%d, %p\n", sliceDays4, sliceDays4, cap(sliceDays4), sliceDays4)
	fmt.Printf("%T, %v,%d, %p\n", sliceDays5, sliceDays5, cap(sliceDays5), sliceDays5)
	// The commented-out variants below do not compile: a full slice expression
	// needs an explicit max, and an array cannot be assigned from a slice.
	//fmt.Println(daysOfWeek[0:3:])
	//var cArray [3]string
	//cArray = daysOfWeek[:3]
}
// variadicArrays prints all of its int arguments as one slice.
func variadicArrays(i ...int) {
	fmt.Println(i)
}
// callVariadicArrays expands a slice into variadic arguments using the
// ... operator.
func callVariadicArrays() {
	numbers := []int{1, 2, 3, 4}
	variadicArrays(numbers...)
}
// multiDimensionArray iterates a 5x2 array (rows of {n, 2n}) and prints every
// element with its indices.
func multiDimensionArray() {
	grid := [5][2]int{{0, 0}, {1, 2}, {2, 4}, {3, 6}, {4, 8}}
	for row := range grid {
		for col := range grid[row] {
			fmt.Printf("a[%d][%d] = %d\n", row, col, grid[row][col])
		}
	}
}
//go build -gcflags="-d=ssa/check_bce/debug=1" example1.go
//compiler optimization called Bounds Check Elimination or BCE.
// The idea behind BCE is to give the compiler hints that index-based memory access is guaranteed to be safe and therefore
// the compiler didn’t have to add extra code to check the memory access at runtime. The safe elimination of these integrity checks can help improve performance
// arrayBoundCheck is a BCE demo; inspect with the gcflags above.
// NOTE(review): for a fixed-size [5]int parameter, constant indices are
// provably in range at compile time, so only s[i] can require a runtime
// check — confirm against the check_bce debug output.
func arrayBoundCheck(s [5]int, i int) {
	_ = s[1] // bounds check
	_ = s[i] // bounds check
	_ = s[1] // bounds check eliminated!
	_ = s[0] // bounds check eliminated!
}
|
package 排列组合问题
// Backtracking with a package-level accumulator: all distinct permutations.

// permuteSequence accumulates the permutations produced by permuteUniqueExec.
var permuteSequence [][]int

// permuteUnique returns every distinct permutation of nums, which may contain
// duplicate values.
func permuteUnique(nums []int) [][]int {
	permuteSequence = make([][]int, 0)
	permuteUniqueExec(nums, 0, len(nums)-1)
	return permuteSequence
}

// permuteUniqueExec keeps positions [0, l) fixed and permutes nums[l..r] in
// place by swapping each candidate value into position l.
func permuteUniqueExec(nums []int, l int, r int) {
	if l == r {
		// Record a deep copy: nums keeps being mutated by later swaps, which
		// would otherwise corrupt the slices already stored.
		permuteSequence = append(permuteSequence, newSlice(nums))
		return
	}
	seen := make(map[int]bool)
	for i := l; i <= r; i++ {
		// Place each distinct value at position l exactly once; duplicates
		// would otherwise generate identical permutations.
		if seen[nums[i]] {
			continue
		}
		seen[nums[i]] = true
		nums[l], nums[i] = nums[i], nums[l]
		permuteUniqueExec(nums, l+1, r)
		// Undo the swap: the slice is shared with the caller, so its ordering
		// must be restored before trying the next candidate.
		nums[l], nums[i] = nums[i], nums[l]
	}
}

// newSlice returns a deep copy of oldSlice.
func newSlice(oldSlice []int) []int {
	dup := make([]int, len(oldSlice))
	copy(dup, oldSlice)
	return dup
}
/*
Problem link: https://leetcode-cn.com/problems/permutations-ii/submissions/
*/
/*
Summary
1. The essence of this problem: given an array (which may contain duplicate
   elements), produce all distinct permutations of that array.
*/
|
package ionic
import (
"bytes"
"encoding/json"
"fmt"
"net/url"
"strings"
"github.com/ion-channel/ionic/pagination"
"github.com/ion-channel/ionic/teams"
)
// CreateTeamOptions represents all the values that can be provided for a team
// at the time of creation
type CreateTeamOptions struct {
	// Name is required; CreateTeam rejects empty or whitespace-only names.
	Name           string `json:"name"`
	OrganizationID string `json:"organization_id"`
	POCName        string `json:"poc_name"`
	POCEmail       string `json:"poc_email"`
}
// CreateTeam takes a create team options, validates the minimum info is
// present, and makes the calls to create the team. It returns the team created
// and any errors it encounters with the API.
func (ic *IonClient) CreateTeam(opts CreateTeamOptions, token string) (*teams.Team, error) {
	// Reject empty or whitespace-only team names up front.
	if strings.TrimSpace(opts.Name) == "" {
		return nil, fmt.Errorf("name cannot be empty or whitespace")
	}
	payload, err := json.Marshal(opts)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request body: %v", err.Error())
	}
	respBody, err := ic.Post(teams.TeamsCreateTeamEndpoint, token, nil, *bytes.NewBuffer(payload), nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create team: %v", err.Error())
	}
	var team teams.Team
	if err := json.Unmarshal(respBody, &team); err != nil {
		return nil, fmt.Errorf("failed to parse team from response: %v", err.Error())
	}
	return &team, nil
}
// GetTeam takes a team id and returns the Ion Channel representation of that
// team. An error is returned for client communications and unmarshalling
// errors.
func (ic *IonClient) GetTeam(id, token string) (*teams.Team, error) {
	params := url.Values{}
	// NOTE(review): was params.Set("someid", id) — "someid" looks like a typo;
	// UpdateTeam uses "id" for the same identifier. Confirm against the teams API.
	params.Set("id", id)
	b, _, err := ic.Get(teams.TeamsGetTeamEndpoint, token, params, nil, pagination.Pagination{})
	if err != nil {
		return nil, fmt.Errorf("failed to get team: %v", err.Error())
	}
	var team teams.Team
	err = json.Unmarshal(b, &team)
	if err != nil {
		return nil, fmt.Errorf("cannot parse team: %v", err.Error())
	}
	return &team, nil
}
// GetTeams returns all teams visible to the caller in their Ion Channel
// representation. An error is returned for client communications and
// unmarshalling errors.
func (ic *IonClient) GetTeams(token string) ([]teams.Team, error) {
	raw, _, err := ic.Get(teams.TeamsGetTeamsEndpoint, token, nil, nil, pagination.Pagination{})
	if err != nil {
		return nil, fmt.Errorf("failed to get teams: %v", err.Error())
	}
	var result []teams.Team
	if err := json.Unmarshal(raw, &result); err != nil {
		return nil, fmt.Errorf("cannot parse teams: %v", err.Error())
	}
	return result, nil
}
// UpdateTeam takes a team ID and updates fields related to that team.
func (ic *IonClient) UpdateTeam(id, name, contactName, contactEmail, defaultDeployKey, token string) (*teams.Team, error) {
	params := url.Values{}
	params.Set("id", id)
	payload, err := json.Marshal(teams.Team{
		Name:             name,
		POCName:          contactName,
		POCEmail:         contactEmail,
		DefaultDeployKey: defaultDeployKey,
	})
	if err != nil {
		return nil, err
	}
	response, err := ic.Put(teams.TeamsUpdateTeamEndpoint, token, params, *bytes.NewBuffer(payload), nil)
	if err != nil {
		return nil, fmt.Errorf("failed to update team: %w", err)
	}
	var team teams.Team
	if err := json.Unmarshal(response, &team); err != nil {
		return nil, fmt.Errorf("cannot parse team: %w", err)
	}
	return &team, nil
}
|
/**
* (C) Copyright IBM Corp. 2021.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-ab7e866a-20210428-100631
*/
// Package ibmcloudobjectstorages3apiv2 : Operations and models for the IbmCloudObjectStorageS3ApiV2 service
package ibmcloudobjectstorages3apiv2
import (
"context"
"encoding/json"
"fmt"
common "github.com/IBM/experimental-go-sdk/common"
"github.com/IBM/go-sdk-core/v5/core"
"github.com/go-openapi/strfmt"
"net/http"
"reflect"
"time"
)
// IbmCloudObjectStorageS3ApiV2 : <p/>
//
// Version: 2.5
// See: https://cloud.ibm.com/docs/services/cloud-object-storage/
type IbmCloudObjectStorageS3ApiV2 struct {
	// Service holds endpoint/authentication configuration and performs the
	// underlying HTTP requests.
	Service *core.BaseService
}
// DefaultServiceName is the default key used to find external configuration information.
const DefaultServiceName = "ibm_cloud_object_storage_s3_api"
// IbmCloudObjectStorageS3ApiV2Options : Service options
type IbmCloudObjectStorageS3ApiV2Options struct {
	// ServiceName is the configuration lookup key; empty means DefaultServiceName.
	ServiceName string
	// URL optionally overrides the service endpoint.
	URL string
	// Authenticator supplies request credentials; when nil,
	// NewIbmCloudObjectStorageS3ApiV2UsingExternalConfig resolves one from
	// external configuration.
	Authenticator core.Authenticator
}
// NewIbmCloudObjectStorageS3ApiV2UsingExternalConfig : constructs an instance of IbmCloudObjectStorageS3ApiV2 with passed in options and external configuration.
func NewIbmCloudObjectStorageS3ApiV2UsingExternalConfig(options *IbmCloudObjectStorageS3ApiV2Options) (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2, err error) {
	if options.ServiceName == "" {
		options.ServiceName = DefaultServiceName
	}
	// Resolve an authenticator from external configuration when the caller
	// did not supply one.
	if options.Authenticator == nil {
		options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName)
		if err != nil {
			return
		}
	}
	ibmCloudObjectStorageS3Api, err = NewIbmCloudObjectStorageS3ApiV2(options)
	if err != nil {
		return
	}
	// Apply service-level settings found under the service name in external config.
	err = ibmCloudObjectStorageS3Api.Service.ConfigureService(options.ServiceName)
	if err != nil {
		return
	}
	// An explicit URL in the options takes precedence over the configured one.
	if options.URL != "" {
		err = ibmCloudObjectStorageS3Api.Service.SetServiceURL(options.URL)
	}
	return
}
// NewIbmCloudObjectStorageS3ApiV2 : constructs an instance of IbmCloudObjectStorageS3ApiV2 with passed in options.
func NewIbmCloudObjectStorageS3ApiV2(options *IbmCloudObjectStorageS3ApiV2Options) (service *IbmCloudObjectStorageS3ApiV2, err error) {
	serviceOptions := &core.ServiceOptions{
		Authenticator: options.Authenticator,
	}
	baseService, err := core.NewBaseService(serviceOptions)
	if err != nil {
		return
	}
	// Apply the endpoint override, if any, before handing out the client.
	if options.URL != "" {
		err = baseService.SetServiceURL(options.URL)
		if err != nil {
			return
		}
	}
	service = &IbmCloudObjectStorageS3ApiV2{
		Service: baseService,
	}
	return
}
// GetServiceURLForRegion returns the service URL to be used for the specified region.
// This service publishes no regional endpoints, so the call always fails.
func GetServiceURLForRegion(region string) (string, error) {
	err := fmt.Errorf("service does not support regional URLs")
	return "", err
}
// Clone makes a copy of "ibmCloudObjectStorageS3Api" suitable for processing requests.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) Clone() *IbmCloudObjectStorageS3ApiV2 {
	// A nil receiver clones to nil rather than panicking.
	if core.IsNil(ibmCloudObjectStorageS3Api) {
		return nil
	}
	// Shallow-copy the wrapper, then clone the underlying base service so the
	// copy can be reconfigured independently.
	clone := *ibmCloudObjectStorageS3Api
	clone.Service = ibmCloudObjectStorageS3Api.Service.Clone()
	return &clone
}
// SetServiceURL sets the service URL
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) SetServiceURL(url string) error {
	// All of the accessors below delegate to the underlying core.BaseService.
	return ibmCloudObjectStorageS3Api.Service.SetServiceURL(url)
}
// GetServiceURL returns the service URL
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) GetServiceURL() string {
	return ibmCloudObjectStorageS3Api.Service.GetServiceURL()
}
// SetDefaultHeaders sets HTTP headers to be sent in every request
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) SetDefaultHeaders(headers http.Header) {
	ibmCloudObjectStorageS3Api.Service.SetDefaultHeaders(headers)
}
// SetEnableGzipCompression sets the service's EnableGzipCompression field
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) SetEnableGzipCompression(enableGzip bool) {
	ibmCloudObjectStorageS3Api.Service.SetEnableGzipCompression(enableGzip)
}
// GetEnableGzipCompression returns the service's EnableGzipCompression field
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) GetEnableGzipCompression() bool {
	return ibmCloudObjectStorageS3Api.Service.GetEnableGzipCompression()
}
// EnableRetries enables automatic retries for requests invoked for this service instance.
// If either parameter is specified as 0, then a default value is used instead.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) EnableRetries(maxRetries int, maxRetryInterval time.Duration) {
	ibmCloudObjectStorageS3Api.Service.EnableRetries(maxRetries, maxRetryInterval)
}
// DisableRetries disables automatic retries for requests invoked for this service instance.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) DisableRetries() {
	ibmCloudObjectStorageS3Api.Service.DisableRetries()
}
// HeadBucket : Read a bucket's headers
// This request is useful for checking whether a bucket has Key Protect enabled.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) HeadBucket(headBucketOptions *HeadBucketOptions) (response *core.DetailedResponse, err error) {
	// Convenience wrapper around HeadBucketWithContext using a background context.
	return ibmCloudObjectStorageS3Api.HeadBucketWithContext(context.Background(), headBucketOptions)
}
// HeadBucketWithContext is an alternate form of the HeadBucket method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) HeadBucketWithContext(ctx context.Context, headBucketOptions *HeadBucketOptions) (response *core.DetailedResponse, err error) {
	err = core.ValidateNotNil(headBucketOptions, "headBucketOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(headBucketOptions, "headBucketOptions")
	if err != nil {
		return
	}
	pathParamsMap := map[string]string{
		"Bucket": *headBucketOptions.Bucket,
	}
	// HEAD /{Bucket}
	builder := core.NewRequestBuilder(core.HEAD)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then the SDK analytics headers.
	for headerName, headerValue := range headBucketOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "HeadBucket")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	request, err := builder.Build()
	if err != nil {
		return
	}
	// No result object is decoded: a HEAD response carries only status and headers.
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, nil)
	return
}
// HeadObject : Read object metadata
// The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if
// you're only interested in an object's metadata or it's existence.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) HeadObject(headObjectOptions *HeadObjectOptions) (response *core.DetailedResponse, err error) {
	// Convenience wrapper around HeadObjectWithContext using a background context.
	return ibmCloudObjectStorageS3Api.HeadObjectWithContext(context.Background(), headObjectOptions)
}
// HeadObjectWithContext is an alternate form of the HeadObject method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) HeadObjectWithContext(ctx context.Context, headObjectOptions *HeadObjectOptions) (response *core.DetailedResponse, err error) {
	err = core.ValidateNotNil(headObjectOptions, "headObjectOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(headObjectOptions, "headObjectOptions")
	if err != nil {
		return
	}
	pathParamsMap := map[string]string{
		"Bucket": *headObjectOptions.Bucket,
		"Key": *headObjectOptions.Key,
	}
	// HEAD /{Bucket}/{Key}
	builder := core.NewRequestBuilder(core.HEAD)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}/{Key}`, pathParamsMap)
	if err != nil {
		return
	}
	for headerName, headerValue := range headObjectOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "HeadObject")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	// Optional conditional-request and range headers: set only when provided.
	if headObjectOptions.IfMatch != nil {
		builder.AddHeader("If-Match", fmt.Sprint(*headObjectOptions.IfMatch))
	}
	if headObjectOptions.IfModifiedSince != nil {
		builder.AddHeader("If-Modified-Since", fmt.Sprint(*headObjectOptions.IfModifiedSince))
	}
	if headObjectOptions.IfNoneMatch != nil {
		builder.AddHeader("If-None-Match", fmt.Sprint(*headObjectOptions.IfNoneMatch))
	}
	if headObjectOptions.IfUnmodifiedSince != nil {
		builder.AddHeader("If-Unmodified-Since", fmt.Sprint(*headObjectOptions.IfUnmodifiedSince))
	}
	if headObjectOptions.Range != nil {
		builder.AddHeader("Range", fmt.Sprint(*headObjectOptions.Range))
	}
	// SSE-C headers for objects encrypted with customer-provided keys.
	if headObjectOptions.XAmzServerSideEncryptionCustomerAlgorithm != nil {
		builder.AddHeader("x-amz-server-side-encryption-customer-algorithm", fmt.Sprint(*headObjectOptions.XAmzServerSideEncryptionCustomerAlgorithm))
	}
	if headObjectOptions.XAmzServerSideEncryptionCustomerKey != nil {
		builder.AddHeader("x-amz-server-side-encryption-customer-key", fmt.Sprint(*headObjectOptions.XAmzServerSideEncryptionCustomerKey))
	}
	if headObjectOptions.XAmzServerSideEncryptionCustomerKeyMD5 != nil {
		builder.AddHeader("x-amz-server-side-encryption-customer-key-MD5", fmt.Sprint(*headObjectOptions.XAmzServerSideEncryptionCustomerKeyMD5))
	}
	if headObjectOptions.PartNumber != nil {
		builder.AddQuery("partNumber", fmt.Sprint(*headObjectOptions.PartNumber))
	}
	request, err := builder.Build()
	if err != nil {
		return
	}
	// No result object is decoded: a HEAD response carries only status and headers.
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, nil)
	return
}
// ListBuckets : List buckets in a service instance
// This operation lists all buckets within the specified service instance, regardless of location. Note that while any
// endpoint may be used to list all buckets, any operations that target a specific bucket must use the appropriate
// endpoint for that bucket's location.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) ListBuckets(listBucketsOptions *ListBucketsOptions) (result *BucketListing, response *core.DetailedResponse, err error) {
	// Convenience wrapper around ListBucketsWithContext using a background context.
	return ibmCloudObjectStorageS3Api.ListBucketsWithContext(context.Background(), listBucketsOptions)
}
// ListBucketsWithContext is an alternate form of the ListBuckets method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) ListBucketsWithContext(ctx context.Context, listBucketsOptions *ListBucketsOptions) (result *BucketListing, response *core.DetailedResponse, err error) {
	err = core.ValidateNotNil(listBucketsOptions, "listBucketsOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(listBucketsOptions, "listBucketsOptions")
	if err != nil {
		return
	}
	// GET / — the listing endpoint has no path parameters.
	builder := core.NewRequestBuilder(core.GET)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/`, nil)
	if err != nil {
		return
	}
	for headerName, headerValue := range listBucketsOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "ListBuckets")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	// The listing is returned as XML.
	builder.AddHeader("Accept", "text/xml")
	if listBucketsOptions.IbmServiceInstanceID != nil {
		builder.AddHeader("ibm-service-instance-id", fmt.Sprint(*listBucketsOptions.IbmServiceInstanceID))
	}
	if listBucketsOptions.Extended != nil {
		builder.AddQuery("extended", fmt.Sprint(*listBucketsOptions.Extended))
	}
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Decode the raw response into a BucketListing model.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalBucketListing)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// CreateBucket : Create a new bucket
// To create a bucket, you must create a COS service instance, and create an API key or HMAC credentials to authenticate
// requests.
//
// Bucket names must be globally unique and DNS-compliant; names between 3 and 63 characters long must be made of
// lowercase letters, numbers, and dashes. Bucket names must begin and end with a lowercase letter or number. Bucket
// names resembling IP addresses are not allowed. Bucket names must be unique because all buckets in the public cloud
// share a global namespace, allowing access to buckets without the need to provide any service instance or account
// information. It is not possible to create a bucket with a name beginning with `cosv1-` or `account-` as these
// prefixes are reserved by the system.
//
// Buckets are created in the location specified in the endpoint used to make the request. Once a bucket is created, it
// can be accessed at that location using any of the three networks: public, private, or direct. Any requests targeting
// an existing bucket using an endpoint with an incorrect location will result in a `404 NoSuchKey` error.
//
// All data in IBM Cloud Object Storage is encrypted at rest. This technology individually encrypts each object by using
// per-object generated keys. These keys are secured and reliably stored by using the same Information Dispersal
// Algorithms that protect object data by using an All-or-Nothing Transform (AONT). Key data is impossible to recover,
// even if individual nodes or hard disks are compromised. If it is necessary to control the encryption keys used, an
// IBM Key Protect or Hyper Protect Crypto Services root key CRN can be provided during bucket creation.
//
// It is possible to create a bucket with a "storage class" that alters the way storage charges are incurred based on
// frequency of access. This can be helpful when dealing with "cool" data that might need to be accessed without the
// delay of restoring from an archive, but is unlikely to be accessed frequently. A provisioning code can be passed in
// the S3 API `LocationConstraint` parameter to specify the storage class of a new bucket. A storage class can not be
// altered after a bucket is created.
//
// The S3 API concept of a "bucket owner" is not an individual user, but instead is considered to be the Service
// Instance associated with the bucket.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) CreateBucket(createBucketOptions *CreateBucketOptions) (response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background (non-cancellable) context.
	return ibmCloudObjectStorageS3Api.CreateBucketWithContext(context.Background(), createBucketOptions)
}
// CreateBucketWithContext is an alternate form of the CreateBucket method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) CreateBucketWithContext(ctx context.Context, createBucketOptions *CreateBucketOptions) (response *core.DetailedResponse, err error) {
	// Reject a nil or structurally invalid options object before doing any work.
	err = core.ValidateNotNil(createBucketOptions, "createBucketOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(createBucketOptions, "createBucketOptions")
	if err != nil {
		return
	}
	// The bucket name is substituted into the `/{Bucket}` path template below.
	pathParamsMap := map[string]string{
		"Bucket": *createBucketOptions.Bucket,
	}
	builder := core.NewRequestBuilder(core.PUT)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied custom headers are added first, followed by the
	// SDK-standard headers that identify this operation.
	for headerName, headerValue := range createBucketOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "CreateBucket")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Content-Type", "text/xml")
	// Optional creation parameters (service instance, Key Protect settings,
	// canned ACL) are only transmitted when set by the caller.
	if createBucketOptions.IbmServiceInstanceID != nil {
		builder.AddHeader("ibm-service-instance-id", fmt.Sprint(*createBucketOptions.IbmServiceInstanceID))
	}
	if createBucketOptions.IbmSseKpEncryptionAlgorithm != nil {
		builder.AddHeader("ibm-sse-kp-encryption-algorithm", fmt.Sprint(*createBucketOptions.IbmSseKpEncryptionAlgorithm))
	}
	if createBucketOptions.IbmSseKpCustomerRootKeyCrn != nil {
		builder.AddHeader("ibm-sse-kp-customer-root-key-crn", fmt.Sprint(*createBucketOptions.IbmSseKpCustomerRootKeyCrn))
	}
	if createBucketOptions.XAmzAcl != nil {
		builder.AddHeader("x-amz-acl", fmt.Sprint(*createBucketOptions.XAmzAcl))
	}
	// The request body (e.g. a CreateBucketConfiguration document) is sent as XML.
	_, err = builder.SetBodyContent("text/xml", nil, nil, createBucketOptions.Body)
	if err != nil {
		return
	}
	request, err := builder.Build()
	if err != nil {
		return
	}
	// No response model: the service result is discarded (nil), only the
	// DetailedResponse is returned.
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, nil)
	return
}
// DeleteBucket : Delete a bucket
// Only empty buckets may be deleted. A bucket name is returned to the available namespace approximately 10 minutes
// after deletion.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) DeleteBucket(deleteBucketOptions *DeleteBucketOptions) (response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background (non-cancellable) context.
	return ibmCloudObjectStorageS3Api.DeleteBucketWithContext(context.Background(), deleteBucketOptions)
}
// DeleteBucketWithContext is an alternate form of the DeleteBucket method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) DeleteBucketWithContext(ctx context.Context, deleteBucketOptions *DeleteBucketOptions) (response *core.DetailedResponse, err error) {
	// Reject a nil or structurally invalid options object before doing any work.
	err = core.ValidateNotNil(deleteBucketOptions, "deleteBucketOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(deleteBucketOptions, "deleteBucketOptions")
	if err != nil {
		return
	}
	// The bucket name is substituted into the `/{Bucket}` path template below.
	pathParamsMap := map[string]string{
		"Bucket": *deleteBucketOptions.Bucket,
	}
	builder := core.NewRequestBuilder(core.DELETE)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied custom headers are added first, followed by the
	// SDK-standard headers that identify this operation.
	for headerName, headerValue := range deleteBucketOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "DeleteBucket")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	request, err := builder.Build()
	if err != nil {
		return
	}
	// No response model: the service result is discarded (nil), only the
	// DetailedResponse is returned.
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, nil)
	return
}
// ListObjects : List objects in a bucket (v1)
// Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection
// criteria to return a subset of the objects in a bucket. A `200 OK` response can contain valid or invalid XML. Make
// sure to design your application to parse the contents of the response and handle it appropriately. This version (v1)
// uses a `marker` parameter to list objects starting with a given object. Version 2 of this API provides a continuation
// token instead, making it a bit more straightforward to chain listing requests for buckets with large numbers of
// objects.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) ListObjects(listObjectsOptions *ListObjectsOptions) (result *ListObjectsOutput, response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background (non-cancellable) context.
	return ibmCloudObjectStorageS3Api.ListObjectsWithContext(context.Background(), listObjectsOptions)
}
// ListObjectsWithContext is an alternate form of the ListObjects method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) ListObjectsWithContext(ctx context.Context, listObjectsOptions *ListObjectsOptions) (result *ListObjectsOutput, response *core.DetailedResponse, err error) {
	// Reject a nil or structurally invalid options object before doing any work.
	err = core.ValidateNotNil(listObjectsOptions, "listObjectsOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(listObjectsOptions, "listObjectsOptions")
	if err != nil {
		return
	}
	// The bucket name is substituted into the `/{Bucket}` path template below.
	pathParamsMap := map[string]string{
		"Bucket": *listObjectsOptions.Bucket,
	}
	builder := core.NewRequestBuilder(core.GET)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied custom headers are added first, followed by the
	// SDK-standard headers that identify this operation.
	for headerName, headerValue := range listObjectsOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "ListObjects")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "text/xml")
	// Optional v1 listing selectors (delimiter, encoding, marker-based
	// pagination, page size, prefix) are only sent when set by the caller.
	if listObjectsOptions.Delimiter != nil {
		builder.AddQuery("delimiter", fmt.Sprint(*listObjectsOptions.Delimiter))
	}
	if listObjectsOptions.EncodingType != nil {
		builder.AddQuery("encoding-type", fmt.Sprint(*listObjectsOptions.EncodingType))
	}
	if listObjectsOptions.Marker != nil {
		builder.AddQuery("marker", fmt.Sprint(*listObjectsOptions.Marker))
	}
	if listObjectsOptions.MaxKeys != nil {
		builder.AddQuery("max-keys", fmt.Sprint(*listObjectsOptions.MaxKeys))
	}
	if listObjectsOptions.Prefix != nil {
		builder.AddQuery("prefix", fmt.Sprint(*listObjectsOptions.Prefix))
	}
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Execute the request, then decode the raw payload into the ListObjectsOutput model.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListObjectsOutput)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// ListObjectsV2 : List objects in a bucket (v2)
// Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection
// criteria to return a subset of the objects in a bucket. A `200 OK` response can contain valid or invalid XML. Make
// sure to design your application to parse the contents of the response and handle it appropriately.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) ListObjectsV2(listObjectsV2Options *ListObjectsV2Options) (result *ListObjectsV2Output, response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background (non-cancellable) context.
	return ibmCloudObjectStorageS3Api.ListObjectsV2WithContext(context.Background(), listObjectsV2Options)
}
// ListObjectsV2WithContext is an alternate form of the ListObjectsV2 method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) ListObjectsV2WithContext(ctx context.Context, listObjectsV2Options *ListObjectsV2Options) (result *ListObjectsV2Output, response *core.DetailedResponse, err error) {
	// Reject a nil or structurally invalid options object before doing any work.
	err = core.ValidateNotNil(listObjectsV2Options, "listObjectsV2Options cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(listObjectsV2Options, "listObjectsV2Options")
	if err != nil {
		return
	}
	// The bucket name is substituted into the path template below; the
	// template itself pins the v2 listing protocol via `list-type=2`.
	pathParamsMap := map[string]string{
		"Bucket": *listObjectsV2Options.Bucket,
	}
	builder := core.NewRequestBuilder(core.GET)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}?list-type=2`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied custom headers are added first, followed by the
	// SDK-standard headers that identify this operation.
	for headerName, headerValue := range listObjectsV2Options.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "ListObjectsV2")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "text/xml")
	// Guard against a nil ListType before dereferencing: every other pointer
	// field in the options is nil-checked, and the request URL template above
	// already carries `list-type=2`, so omitting the query when unset is safe.
	if listObjectsV2Options.ListType != nil {
		builder.AddQuery("list-type", fmt.Sprint(*listObjectsV2Options.ListType))
	}
	// Optional v2 listing selectors (delimiter, encoding, page size, prefix,
	// token-based pagination, owner info, start-after) are only sent when set.
	if listObjectsV2Options.Delimiter != nil {
		builder.AddQuery("delimiter", fmt.Sprint(*listObjectsV2Options.Delimiter))
	}
	if listObjectsV2Options.EncodingType != nil {
		builder.AddQuery("encoding-type", fmt.Sprint(*listObjectsV2Options.EncodingType))
	}
	if listObjectsV2Options.MaxKeys != nil {
		builder.AddQuery("max-keys", fmt.Sprint(*listObjectsV2Options.MaxKeys))
	}
	if listObjectsV2Options.Prefix != nil {
		builder.AddQuery("prefix", fmt.Sprint(*listObjectsV2Options.Prefix))
	}
	if listObjectsV2Options.ContinuationToken != nil {
		builder.AddQuery("continuation-token", fmt.Sprint(*listObjectsV2Options.ContinuationToken))
	}
	if listObjectsV2Options.FetchOwner != nil {
		builder.AddQuery("fetch-owner", fmt.Sprint(*listObjectsV2Options.FetchOwner))
	}
	if listObjectsV2Options.StartAfter != nil {
		builder.AddQuery("start-after", fmt.Sprint(*listObjectsV2Options.StartAfter))
	}
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Execute the request, then decode the raw payload into the ListObjectsV2Output model.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListObjectsV2Output)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// PutObject : Create (upload) an object
// Adds an object to a bucket using a single request. IBM COS never adds partial objects; if you receive a success
// response, IBM COS added the entire object to the bucket. IBM COS is a distributed system. If it receives multiple
// write requests for the same object simultaneously, it overwrites all but the last object written. IBM COS does not
// provide object locking; if you need this, make sure to build it into your application layer.
//
// All objects written to IBM COS are encrypted by default using SecureSlice. If you require possession of encryption
// keys, you can use Key Protect or SSE-C.
//
// To ensure that data is not corrupted traversing the network, use the `Content-MD5` header. When you use this header,
// IBM COS checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally,
// you can calculate the MD5 while putting an object to IBM COS and compare the returned ETag to the calculated MD5
// value. The `Content-MD5` header is required for any request to upload an object with a retention period configured
// using Immutable Object Storage.
//
// Larger objects (greater than 100 MiB) may benefit from breaking the object into multiple parts and uploading the
// parts in parallel. For more information, see the **Multipart uploads** methods.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) PutObject(putObjectOptions *PutObjectOptions) (response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background (non-cancellable) context.
	return ibmCloudObjectStorageS3Api.PutObjectWithContext(context.Background(), putObjectOptions)
}
// PutObjectWithContext is an alternate form of the PutObject method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) PutObjectWithContext(ctx context.Context, putObjectOptions *PutObjectOptions) (response *core.DetailedResponse, err error) {
	// Reject a nil or structurally invalid options object before doing any work.
	err = core.ValidateNotNil(putObjectOptions, "putObjectOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(putObjectOptions, "putObjectOptions")
	if err != nil {
		return
	}
	// Bucket and object key are substituted into the `/{Bucket}/{Key}` path template.
	pathParamsMap := map[string]string{
		"Bucket": *putObjectOptions.Bucket,
		"Key": *putObjectOptions.Key,
	}
	builder := core.NewRequestBuilder(core.PUT)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}/{Key}`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied custom headers are added first, followed by the
	// SDK-standard headers that identify this operation.
	for headerName, headerValue := range putObjectOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "PutObject")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "text/xml")
	builder.AddHeader("Content-Type", "text/xml")
	// Each optional field below maps to one HTTP header and is only sent
	// when the caller set it: ACL, conditional-write preconditions, standard
	// entity headers, server-side encryption (including SSE-C), redirect
	// location, and object tagging.
	if putObjectOptions.XAmzAcl != nil {
		builder.AddHeader("x-amz-acl", fmt.Sprint(*putObjectOptions.XAmzAcl))
	}
	if putObjectOptions.IfMatch != nil {
		builder.AddHeader("If-Match", fmt.Sprint(*putObjectOptions.IfMatch))
	}
	if putObjectOptions.IfNoneMatch != nil {
		builder.AddHeader("If-None-Match", fmt.Sprint(*putObjectOptions.IfNoneMatch))
	}
	if putObjectOptions.IfUnmodifiedSince != nil {
		builder.AddHeader("If-Unmodified-Since", fmt.Sprint(*putObjectOptions.IfUnmodifiedSince))
	}
	if putObjectOptions.CacheControl != nil {
		builder.AddHeader("Cache-Control", fmt.Sprint(*putObjectOptions.CacheControl))
	}
	if putObjectOptions.ContentDisposition != nil {
		builder.AddHeader("Content-Disposition", fmt.Sprint(*putObjectOptions.ContentDisposition))
	}
	if putObjectOptions.ContentEncoding != nil {
		builder.AddHeader("Content-Encoding", fmt.Sprint(*putObjectOptions.ContentEncoding))
	}
	if putObjectOptions.ContentLanguage != nil {
		builder.AddHeader("Content-Language", fmt.Sprint(*putObjectOptions.ContentLanguage))
	}
	if putObjectOptions.ContentLength != nil {
		builder.AddHeader("Content-Length", fmt.Sprint(*putObjectOptions.ContentLength))
	}
	if putObjectOptions.ContentMD5 != nil {
		builder.AddHeader("Content-MD5", fmt.Sprint(*putObjectOptions.ContentMD5))
	}
	if putObjectOptions.Expires != nil {
		builder.AddHeader("Expires", fmt.Sprint(*putObjectOptions.Expires))
	}
	if putObjectOptions.XAmzServerSideEncryption != nil {
		builder.AddHeader("x-amz-server-side-encryption", fmt.Sprint(*putObjectOptions.XAmzServerSideEncryption))
	}
	if putObjectOptions.XAmzWebsiteRedirectLocation != nil {
		builder.AddHeader("x-amz-website-redirect-location", fmt.Sprint(*putObjectOptions.XAmzWebsiteRedirectLocation))
	}
	if putObjectOptions.XAmzServerSideEncryptionCustomerAlgorithm != nil {
		builder.AddHeader("x-amz-server-side-encryption-customer-algorithm", fmt.Sprint(*putObjectOptions.XAmzServerSideEncryptionCustomerAlgorithm))
	}
	if putObjectOptions.XAmzServerSideEncryptionCustomerKey != nil {
		builder.AddHeader("x-amz-server-side-encryption-customer-key", fmt.Sprint(*putObjectOptions.XAmzServerSideEncryptionCustomerKey))
	}
	if putObjectOptions.XAmzServerSideEncryptionCustomerKeyMD5 != nil {
		builder.AddHeader("x-amz-server-side-encryption-customer-key-MD5", fmt.Sprint(*putObjectOptions.XAmzServerSideEncryptionCustomerKeyMD5))
	}
	if putObjectOptions.XAmzTagging != nil {
		builder.AddHeader("x-amz-tagging", fmt.Sprint(*putObjectOptions.XAmzTagging))
	}
	// The object payload is supplied via the options Body.
	_, err = builder.SetBodyContent("text/xml", nil, nil, putObjectOptions.Body)
	if err != nil {
		return
	}
	request, err := builder.Build()
	if err != nil {
		return
	}
	// No response model: the service result is discarded (nil), only the
	// DetailedResponse is returned.
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, nil)
	return
}
// GetObject : Read (download) an object
// Retrieves objects from IBM COS. If the object you are retrieving has been archived, before you can retrieve the
// object you must first restore a copy. Otherwise, this operation returns an `InvalidObjectStateError` error. If you
// encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the
// object in IBM COS, then when you GET the object, you must use the following headers:
// * `x-amz-server-side-encryption-customer-algorithm`
// * `x-amz-server-side-encryption-customer-key`
// * `x-amz-server-side-encryption-customer-key-MD5`.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) GetObject(getObjectOptions *GetObjectOptions) (result *GetObjectOutput, response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background (non-cancellable) context.
	return ibmCloudObjectStorageS3Api.GetObjectWithContext(context.Background(), getObjectOptions)
}
// GetObjectWithContext is an alternate form of the GetObject method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) GetObjectWithContext(ctx context.Context, getObjectOptions *GetObjectOptions) (result *GetObjectOutput, response *core.DetailedResponse, err error) {
	// Reject a nil or structurally invalid options object before doing any work.
	err = core.ValidateNotNil(getObjectOptions, "getObjectOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(getObjectOptions, "getObjectOptions")
	if err != nil {
		return
	}
	// Bucket and object key are substituted into the `/{Bucket}/{Key}` path template.
	pathParamsMap := map[string]string{
		"Bucket": *getObjectOptions.Bucket,
		"Key": *getObjectOptions.Key,
	}
	builder := core.NewRequestBuilder(core.GET)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}/{Key}`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied custom headers are added first, followed by the
	// SDK-standard headers that identify this operation.
	for headerName, headerValue := range getObjectOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "GetObject")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "text/xml")
	// Optional conditional-read, range, and SSE-C headers — sent only when set.
	if getObjectOptions.IfMatch != nil {
		builder.AddHeader("If-Match", fmt.Sprint(*getObjectOptions.IfMatch))
	}
	if getObjectOptions.IfModifiedSince != nil {
		builder.AddHeader("If-Modified-Since", fmt.Sprint(*getObjectOptions.IfModifiedSince))
	}
	if getObjectOptions.IfNoneMatch != nil {
		builder.AddHeader("If-None-Match", fmt.Sprint(*getObjectOptions.IfNoneMatch))
	}
	if getObjectOptions.IfUnmodifiedSince != nil {
		builder.AddHeader("If-Unmodified-Since", fmt.Sprint(*getObjectOptions.IfUnmodifiedSince))
	}
	if getObjectOptions.Range != nil {
		builder.AddHeader("Range", fmt.Sprint(*getObjectOptions.Range))
	}
	if getObjectOptions.XAmzServerSideEncryptionCustomerAlgorithm != nil {
		builder.AddHeader("x-amz-server-side-encryption-customer-algorithm", fmt.Sprint(*getObjectOptions.XAmzServerSideEncryptionCustomerAlgorithm))
	}
	if getObjectOptions.XAmzServerSideEncryptionCustomerKey != nil {
		builder.AddHeader("x-amz-server-side-encryption-customer-key", fmt.Sprint(*getObjectOptions.XAmzServerSideEncryptionCustomerKey))
	}
	if getObjectOptions.XAmzServerSideEncryptionCustomerKeyMD5 != nil {
		builder.AddHeader("x-amz-server-side-encryption-customer-key-MD5", fmt.Sprint(*getObjectOptions.XAmzServerSideEncryptionCustomerKeyMD5))
	}
	// Optional `response-*` query parameters override response headers the
	// service sends back; partNumber selects a single multipart part.
	if getObjectOptions.ResponseCacheControl != nil {
		builder.AddQuery("response-cache-control", fmt.Sprint(*getObjectOptions.ResponseCacheControl))
	}
	if getObjectOptions.ResponseContentDisposition != nil {
		builder.AddQuery("response-content-disposition", fmt.Sprint(*getObjectOptions.ResponseContentDisposition))
	}
	if getObjectOptions.ResponseContentEncoding != nil {
		builder.AddQuery("response-content-encoding", fmt.Sprint(*getObjectOptions.ResponseContentEncoding))
	}
	if getObjectOptions.ResponseContentLanguage != nil {
		builder.AddQuery("response-content-language", fmt.Sprint(*getObjectOptions.ResponseContentLanguage))
	}
	if getObjectOptions.ResponseContentType != nil {
		builder.AddQuery("response-content-type", fmt.Sprint(*getObjectOptions.ResponseContentType))
	}
	if getObjectOptions.ResponseExpires != nil {
		builder.AddQuery("response-expires", fmt.Sprint(*getObjectOptions.ResponseExpires))
	}
	if getObjectOptions.PartNumber != nil {
		builder.AddQuery("partNumber", fmt.Sprint(*getObjectOptions.PartNumber))
	}
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Execute the request, then decode the raw payload into the GetObjectOutput model.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGetObjectOutput)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// DeleteObject : Delete an object
// Permantently deletes an object. This operation is final - there is no way to recover a deleted object. Data stored in
// IBM COS is erasure coded and distributed to multiple individual storage devices in multiple data centers. When data
// is deleted, various mechanisms exist which prevent recovery or reconstruction of the deleted objects.
//
// Deletion of an object undergoes various stages. First, the metadata is marked to indicate the object is deleted,
// then, the data is removed. Eventually, deleted metadata is overwritten by a process of compaction and the deleted
// data blocks are overwritten with new data in the course of normal operations. As soon as the metadata is marked
// deleted, it is not possible to read an object remotely. IBM's provider-managed encryption and erasure coding prevents
// data (both before and after deletion) from being accessible from within individual data centers.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) DeleteObject(deleteObjectOptions *DeleteObjectOptions) (response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background (non-cancellable) context.
	return ibmCloudObjectStorageS3Api.DeleteObjectWithContext(context.Background(), deleteObjectOptions)
}
// DeleteObjectWithContext is an alternate form of the DeleteObject method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) DeleteObjectWithContext(ctx context.Context, deleteObjectOptions *DeleteObjectOptions) (response *core.DetailedResponse, err error) {
	// Reject a nil or structurally invalid options object before doing any work.
	err = core.ValidateNotNil(deleteObjectOptions, "deleteObjectOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(deleteObjectOptions, "deleteObjectOptions")
	if err != nil {
		return
	}
	// Bucket and object key are substituted into the `/{Bucket}/{Key}` path template.
	pathParamsMap := map[string]string{
		"Bucket": *deleteObjectOptions.Bucket,
		"Key": *deleteObjectOptions.Key,
	}
	builder := core.NewRequestBuilder(core.DELETE)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}/{Key}`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied custom headers are added first, followed by the
	// SDK-standard headers that identify this operation.
	for headerName, headerValue := range deleteObjectOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "DeleteObject")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "text/xml")
	request, err := builder.Build()
	if err != nil {
		return
	}
	// No response model: the service result is discarded (nil), only the
	// DetailedResponse is returned.
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, nil)
	return
}
// CopyObject : Copy an object
// Copies an object.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) CopyObject(copyObjectOptions *CopyObjectOptions) (result *CopyObjectOutput, response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background (non-cancellable) context.
	return ibmCloudObjectStorageS3Api.CopyObjectWithContext(context.Background(), copyObjectOptions)
}
// CopyObjectWithContext is an alternate form of the CopyObject method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) CopyObjectWithContext(ctx context.Context, copyObjectOptions *CopyObjectOptions) (result *CopyObjectOutput, response *core.DetailedResponse, err error) {
	// Reject a nil or structurally invalid options object before doing any work.
	err = core.ValidateNotNil(copyObjectOptions, "copyObjectOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(copyObjectOptions, "copyObjectOptions")
	if err != nil {
		return
	}
	// The destination bucket and key form the request path; the copy source
	// is carried in the x-amz-copy-source header below.
	pathParamsMap := map[string]string{
		"Bucket": *copyObjectOptions.Bucket,
		"TargetKey": *copyObjectOptions.TargetKey,
	}
	builder := core.NewRequestBuilder(core.PUT)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}/{TargetKey}`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied custom headers are added first, followed by the
	// SDK-standard headers that identify this operation.
	for headerName, headerValue := range copyObjectOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "CopyObject")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "text/xml")
	builder.AddHeader("Content-Type", "text/xml")
	// Each optional field below maps to one HTTP header and is only sent when
	// the caller set it: copy source, ACL, entity headers, copy-source
	// preconditions, metadata/tagging directives, server-side encryption for
	// both destination (SSE/SSE-C) and copy source (SSE-C), and tagging.
	if copyObjectOptions.XAmzCopySource != nil {
		builder.AddHeader("x-amz-copy-source", fmt.Sprint(*copyObjectOptions.XAmzCopySource))
	}
	if copyObjectOptions.XAmzAcl != nil {
		builder.AddHeader("x-amz-acl", fmt.Sprint(*copyObjectOptions.XAmzAcl))
	}
	if copyObjectOptions.CacheControl != nil {
		builder.AddHeader("Cache-Control", fmt.Sprint(*copyObjectOptions.CacheControl))
	}
	if copyObjectOptions.ContentDisposition != nil {
		builder.AddHeader("Content-Disposition", fmt.Sprint(*copyObjectOptions.ContentDisposition))
	}
	if copyObjectOptions.ContentEncoding != nil {
		builder.AddHeader("Content-Encoding", fmt.Sprint(*copyObjectOptions.ContentEncoding))
	}
	if copyObjectOptions.ContentLanguage != nil {
		builder.AddHeader("Content-Language", fmt.Sprint(*copyObjectOptions.ContentLanguage))
	}
	if copyObjectOptions.XAmzCopySourceIfMatch != nil {
		builder.AddHeader("x-amz-copy-source-if-match", fmt.Sprint(*copyObjectOptions.XAmzCopySourceIfMatch))
	}
	if copyObjectOptions.XAmzCopySourceIfModifiedSince != nil {
		builder.AddHeader("x-amz-copy-source-if-modified-since", fmt.Sprint(*copyObjectOptions.XAmzCopySourceIfModifiedSince))
	}
	if copyObjectOptions.XAmzCopySourceIfNoneMatch != nil {
		builder.AddHeader("x-amz-copy-source-if-none-match", fmt.Sprint(*copyObjectOptions.XAmzCopySourceIfNoneMatch))
	}
	if copyObjectOptions.XAmzCopySourceIfUnmodifiedSince != nil {
		builder.AddHeader("x-amz-copy-source-if-unmodified-since", fmt.Sprint(*copyObjectOptions.XAmzCopySourceIfUnmodifiedSince))
	}
	if copyObjectOptions.Expires != nil {
		builder.AddHeader("Expires", fmt.Sprint(*copyObjectOptions.Expires))
	}
	if copyObjectOptions.XAmzMetadataDirective != nil {
		builder.AddHeader("x-amz-metadata-directive", fmt.Sprint(*copyObjectOptions.XAmzMetadataDirective))
	}
	if copyObjectOptions.XAmzTaggingDirective != nil {
		builder.AddHeader("x-amz-tagging-directive", fmt.Sprint(*copyObjectOptions.XAmzTaggingDirective))
	}
	if copyObjectOptions.XAmzServerSideEncryption != nil {
		builder.AddHeader("x-amz-server-side-encryption", fmt.Sprint(*copyObjectOptions.XAmzServerSideEncryption))
	}
	if copyObjectOptions.XAmzWebsiteRedirectLocation != nil {
		builder.AddHeader("x-amz-website-redirect-location", fmt.Sprint(*copyObjectOptions.XAmzWebsiteRedirectLocation))
	}
	if copyObjectOptions.XAmzServerSideEncryptionCustomerAlgorithm != nil {
		builder.AddHeader("x-amz-server-side-encryption-customer-algorithm", fmt.Sprint(*copyObjectOptions.XAmzServerSideEncryptionCustomerAlgorithm))
	}
	if copyObjectOptions.XAmzServerSideEncryptionCustomerKey != nil {
		builder.AddHeader("x-amz-server-side-encryption-customer-key", fmt.Sprint(*copyObjectOptions.XAmzServerSideEncryptionCustomerKey))
	}
	if copyObjectOptions.XAmzServerSideEncryptionCustomerKeyMD5 != nil {
		builder.AddHeader("x-amz-server-side-encryption-customer-key-MD5", fmt.Sprint(*copyObjectOptions.XAmzServerSideEncryptionCustomerKeyMD5))
	}
	if copyObjectOptions.XAmzCopySourceServerSideEncryptionCustomerAlgorithm != nil {
		builder.AddHeader("x-amz-copy-source-server-side-encryption-customer-algorithm", fmt.Sprint(*copyObjectOptions.XAmzCopySourceServerSideEncryptionCustomerAlgorithm))
	}
	if copyObjectOptions.XAmzCopySourceServerSideEncryptionCustomerKey != nil {
		builder.AddHeader("x-amz-copy-source-server-side-encryption-customer-key", fmt.Sprint(*copyObjectOptions.XAmzCopySourceServerSideEncryptionCustomerKey))
	}
	if copyObjectOptions.XAmzCopySourceServerSideEncryptionCustomerKeyMD5 != nil {
		builder.AddHeader("x-amz-copy-source-server-side-encryption-customer-key-MD5", fmt.Sprint(*copyObjectOptions.XAmzCopySourceServerSideEncryptionCustomerKeyMD5))
	}
	if copyObjectOptions.XAmzTagging != nil {
		builder.AddHeader("x-amz-tagging", fmt.Sprint(*copyObjectOptions.XAmzTagging))
	}
	_, err = builder.SetBodyContent("text/xml", nil, nil, copyObjectOptions.Body)
	if err != nil {
		return
	}
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Execute the request, then decode the raw payload into the CopyObjectOutput model.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCopyObjectOutput)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// DeleteObjects : Delete multiple objects
// This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the
// object keys that you want to delete, then this operation provides a suitable alternative to sending individual delete
// requests, reducing per-request overhead. The request contains a list of up to 1000 keys that you want to delete. In
// the XML, you provide the object key names. For each key, IBM COS performs a delete operation and returns the result
// of that delete, success, or failure, in the response. Note that if the object specified in the request is not found,
// IBM COS returns the result as deleted. The operation supports two modes for the response: verbose and quiet. By
// default, the operation uses verbose mode in which the response includes the result of deletion of each key in your
// request. In quiet mode the response includes only keys where the delete operation encountered an error. For a
// successful deletion, the operation does not return any information about the delete in the response body.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) DeleteObjects(deleteObjectsOptions *DeleteObjectsOptions) (result *DeleteObjectsOutput, response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background (non-cancellable) context.
	return ibmCloudObjectStorageS3Api.DeleteObjectsWithContext(context.Background(), deleteObjectsOptions)
}
// DeleteObjectsWithContext is an alternate form of the DeleteObjects method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) DeleteObjectsWithContext(ctx context.Context, deleteObjectsOptions *DeleteObjectsOptions) (result *DeleteObjectsOutput, response *core.DetailedResponse, err error) {
	// Reject a nil options pointer before any field is dereferenced.
	err = core.ValidateNotNil(deleteObjectsOptions, "deleteObjectsOptions cannot be nil")
	if err != nil {
		return
	}
	// Validate the options struct; Bucket and Delete are dereferenced below without nil checks.
	err = core.ValidateStruct(deleteObjectsOptions, "deleteObjectsOptions")
	if err != nil {
		return
	}
	// Path parameters substituted into the URL template below.
	pathParamsMap := map[string]string{
		"Bucket": *deleteObjectsOptions.Bucket,
	}
	// Build a POST request bound to the caller's context for cancellation/deadline propagation.
	builder := core.NewRequestBuilder(core.POST)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	// NOTE(review): the template already carries the `?delete` marker and the same
	// parameter is added again via AddQuery below — presumably the generator's
	// convention; confirm against core.RequestBuilder URL handling.
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}?delete`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers, then fixed headers.
	for headerName, headerValue := range deleteObjectsOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "DeleteObjects")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "text/xml")
	builder.AddHeader("Content-Type", "text/xml")
	builder.AddQuery("delete", fmt.Sprint(*deleteObjectsOptions.Delete))
	// The XML payload from the options is forwarded as-is as the request body.
	_, err = builder.SetBodyContent("text/xml", nil, nil, deleteObjectsOptions.Body)
	if err != nil {
		return
	}
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Execute the request; the service core fills rawResponse with the decoded body.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	// Convert the raw response into the typed output model.
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalDeleteObjectsOutput)
	if err != nil {
		return
	}
	// Also expose the typed result on the detailed response.
	response.Result = result
	return
}
// PutBucketProtectionConfiguration : Create a protection configuration
// Creates a new protection configuration (also known as a "retention policy") for the bucket, or replaces the
// bucket's existing one. The configuration is supplied as XML in the request body.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) PutBucketProtectionConfiguration(putBucketProtectionConfigurationOptions *PutBucketProtectionConfigurationOptions) (response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background context.
	response, err = ibmCloudObjectStorageS3Api.PutBucketProtectionConfigurationWithContext(context.Background(), putBucketProtectionConfigurationOptions)
	return
}
// PutBucketProtectionConfigurationWithContext is an alternate form of the PutBucketProtectionConfiguration method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) PutBucketProtectionConfigurationWithContext(ctx context.Context, putBucketProtectionConfigurationOptions *PutBucketProtectionConfigurationOptions) (response *core.DetailedResponse, err error) {
	// Reject a nil options pointer before any field is dereferenced.
	err = core.ValidateNotNil(putBucketProtectionConfigurationOptions, "putBucketProtectionConfigurationOptions cannot be nil")
	if err != nil {
		return
	}
	// Validate the options struct; Bucket and Protection are dereferenced below without nil checks.
	err = core.ValidateStruct(putBucketProtectionConfigurationOptions, "putBucketProtectionConfigurationOptions")
	if err != nil {
		return
	}
	// Path parameters substituted into the URL template below.
	pathParamsMap := map[string]string{
		"Bucket": *putBucketProtectionConfigurationOptions.Bucket,
	}
	// Build a PUT request bound to the caller's context.
	builder := core.NewRequestBuilder(core.PUT)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	// NOTE(review): the template carries `?protection` and the same marker is added
	// again via AddQuery below — presumably the generator's convention; verify.
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}?protection`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers, then fixed headers.
	for headerName, headerValue := range putBucketProtectionConfigurationOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "PutBucketProtectionConfiguration")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Content-Type", "text/xml")
	builder.AddQuery("protection", fmt.Sprint(*putBucketProtectionConfigurationOptions.Protection))
	// The XML protection configuration is forwarded as-is as the request body.
	_, err = builder.SetBodyContent("text/xml", nil, nil, putBucketProtectionConfigurationOptions.Body)
	if err != nil {
		return
	}
	request, err := builder.Build()
	if err != nil {
		return
	}
	// No response body is expected, so no unmarshal target is supplied.
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, nil)
	return
}
// PutBucketLifecycleConfiguration : Create a lifecycle configuration
// Creates a new lifecycle configuration for the bucket, or replaces the existing one. The configuration is supplied
// as XML in the request body and consists of one or more rules, where each rule has:
//
// * a Filter selecting the objects the rule applies to — by key name prefix, object tags, or both;
// * a Status indicating whether the rule is in effect;
// * one or more lifecycle transition and expiration actions that IBM COS performs on the selected objects. Note that
// only one `Transistion` rule is allowed, and `Transition` rules do not support filters.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) PutBucketLifecycleConfiguration(putBucketLifecycleConfigurationOptions *PutBucketLifecycleConfigurationOptions) (response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background context.
	response, err = ibmCloudObjectStorageS3Api.PutBucketLifecycleConfigurationWithContext(context.Background(), putBucketLifecycleConfigurationOptions)
	return
}
// PutBucketLifecycleConfigurationWithContext is an alternate form of the PutBucketLifecycleConfiguration method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) PutBucketLifecycleConfigurationWithContext(ctx context.Context, putBucketLifecycleConfigurationOptions *PutBucketLifecycleConfigurationOptions) (response *core.DetailedResponse, err error) {
	// Reject a nil options pointer before any field is dereferenced.
	err = core.ValidateNotNil(putBucketLifecycleConfigurationOptions, "putBucketLifecycleConfigurationOptions cannot be nil")
	if err != nil {
		return
	}
	// Validate the options struct; Bucket and Lifecycle are dereferenced below without nil checks.
	err = core.ValidateStruct(putBucketLifecycleConfigurationOptions, "putBucketLifecycleConfigurationOptions")
	if err != nil {
		return
	}
	// Path parameters substituted into the URL template below.
	pathParamsMap := map[string]string{
		"Bucket": *putBucketLifecycleConfigurationOptions.Bucket,
	}
	// Build a PUT request bound to the caller's context.
	builder := core.NewRequestBuilder(core.PUT)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	// NOTE(review): the template carries `?lifecycle` and the same marker is added
	// again via AddQuery below — presumably the generator's convention; verify.
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}?lifecycle`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers, then fixed headers.
	for headerName, headerValue := range putBucketLifecycleConfigurationOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "PutBucketLifecycleConfiguration")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Content-Type", "text/xml")
	builder.AddQuery("lifecycle", fmt.Sprint(*putBucketLifecycleConfigurationOptions.Lifecycle))
	// The XML lifecycle configuration is forwarded as-is as the request body.
	_, err = builder.SetBodyContent("text/xml", nil, nil, putBucketLifecycleConfigurationOptions.Body)
	if err != nil {
		return
	}
	request, err := builder.Build()
	if err != nil {
		return
	}
	// No response body is expected, so no unmarshal target is supplied.
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, nil)
	return
}
// GetBucketLifecycleConfiguration : Read a lifecycle configuration
// Retrieves the lifecycle configuration currently set on the bucket.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) GetBucketLifecycleConfiguration(getBucketLifecycleConfigurationOptions *GetBucketLifecycleConfigurationOptions) (result *GetBucketLifecycleConfigurationOutput, response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background context.
	result, response, err = ibmCloudObjectStorageS3Api.GetBucketLifecycleConfigurationWithContext(context.Background(), getBucketLifecycleConfigurationOptions)
	return
}
// GetBucketLifecycleConfigurationWithContext is an alternate form of the GetBucketLifecycleConfiguration method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) GetBucketLifecycleConfigurationWithContext(ctx context.Context, getBucketLifecycleConfigurationOptions *GetBucketLifecycleConfigurationOptions) (result *GetBucketLifecycleConfigurationOutput, response *core.DetailedResponse, err error) {
	// Reject a nil options pointer before any field is dereferenced.
	err = core.ValidateNotNil(getBucketLifecycleConfigurationOptions, "getBucketLifecycleConfigurationOptions cannot be nil")
	if err != nil {
		return
	}
	// Validate the options struct; Bucket and Lifecycle are dereferenced below without nil checks.
	err = core.ValidateStruct(getBucketLifecycleConfigurationOptions, "getBucketLifecycleConfigurationOptions")
	if err != nil {
		return
	}
	// Path parameters substituted into the URL template below.
	pathParamsMap := map[string]string{
		"Bucket": *getBucketLifecycleConfigurationOptions.Bucket,
	}
	// Build a GET request bound to the caller's context.
	builder := core.NewRequestBuilder(core.GET)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	// NOTE(review): the template carries `?lifecycle` and the same marker is added
	// again via AddQuery below — presumably the generator's convention; verify.
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}?lifecycle`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers, then fixed headers.
	for headerName, headerValue := range getBucketLifecycleConfigurationOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "GetBucketLifecycleConfiguration")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "text/xml")
	builder.AddQuery("lifecycle", fmt.Sprint(*getBucketLifecycleConfigurationOptions.Lifecycle))
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Execute the request; the service core fills rawResponse with the decoded body.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	// Convert the raw response into the typed output model.
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGetBucketLifecycleConfigurationOutput)
	if err != nil {
		return
	}
	// Also expose the typed result on the detailed response.
	response.Result = result
	return
}
// DeleteBucketLifecycle : Delete a lifecycle configuration
// Removes the lifecycle configuration from the specified bucket: IBM COS deletes all rules in the bucket's lifecycle
// subresource, so objects no longer expire and IBM COS stops deleting objects on the basis of the removed rules.
// Deletion typically takes some time to fully propagate across the IBM COS system.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) DeleteBucketLifecycle(deleteBucketLifecycleOptions *DeleteBucketLifecycleOptions) (response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background context.
	response, err = ibmCloudObjectStorageS3Api.DeleteBucketLifecycleWithContext(context.Background(), deleteBucketLifecycleOptions)
	return
}
// DeleteBucketLifecycleWithContext is an alternate form of the DeleteBucketLifecycle method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) DeleteBucketLifecycleWithContext(ctx context.Context, deleteBucketLifecycleOptions *DeleteBucketLifecycleOptions) (response *core.DetailedResponse, err error) {
	// Reject a nil options pointer before any field is dereferenced.
	err = core.ValidateNotNil(deleteBucketLifecycleOptions, "deleteBucketLifecycleOptions cannot be nil")
	if err != nil {
		return
	}
	// Validate the options struct; Bucket and Lifecycle are dereferenced below without nil checks.
	err = core.ValidateStruct(deleteBucketLifecycleOptions, "deleteBucketLifecycleOptions")
	if err != nil {
		return
	}
	// Path parameters substituted into the URL template below.
	pathParamsMap := map[string]string{
		"Bucket": *deleteBucketLifecycleOptions.Bucket,
	}
	// Build a DELETE request bound to the caller's context.
	builder := core.NewRequestBuilder(core.DELETE)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	// NOTE(review): the template carries `?lifecycle` and the same marker is added
	// again via AddQuery below — presumably the generator's convention; verify.
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}?lifecycle`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range deleteBucketLifecycleOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "DeleteBucketLifecycle")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddQuery("lifecycle", fmt.Sprint(*deleteBucketLifecycleOptions.Lifecycle))
	request, err := builder.Build()
	if err != nil {
		return
	}
	// No response body is expected, so no unmarshal target is supplied.
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, nil)
	return
}
// RestoreObject : Temporarily restore an archived object
// Restores a temporary copy of an archived object back into IBM COS. Accessing an archived object requires first
// initiating a restore request, which specifies the number of days the restored copy should exist; once that period
// elapses, IBM COS removes the temporary copy while the object itself remains archived.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) RestoreObject(restoreObjectOptions *RestoreObjectOptions) (response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background context.
	response, err = ibmCloudObjectStorageS3Api.RestoreObjectWithContext(context.Background(), restoreObjectOptions)
	return
}
// RestoreObjectWithContext is an alternate form of the RestoreObject method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) RestoreObjectWithContext(ctx context.Context, restoreObjectOptions *RestoreObjectOptions) (response *core.DetailedResponse, err error) {
	// Reject a nil options pointer before any field is dereferenced.
	err = core.ValidateNotNil(restoreObjectOptions, "restoreObjectOptions cannot be nil")
	if err != nil {
		return
	}
	// Validate the options struct; Bucket, Key and Restore are dereferenced below without nil checks.
	err = core.ValidateStruct(restoreObjectOptions, "restoreObjectOptions")
	if err != nil {
		return
	}
	// Path parameters substituted into the URL template below.
	pathParamsMap := map[string]string{
		"Bucket": *restoreObjectOptions.Bucket,
		"Key": *restoreObjectOptions.Key,
	}
	// Build a POST request bound to the caller's context.
	builder := core.NewRequestBuilder(core.POST)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	// NOTE(review): the template carries `?restore` and the same marker is added
	// again via AddQuery below — presumably the generator's convention; verify.
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}/{Key}?restore`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers, then fixed headers.
	for headerName, headerValue := range restoreObjectOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "RestoreObject")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "text/xml")
	builder.AddHeader("Content-Type", "text/xml")
	builder.AddQuery("restore", fmt.Sprint(*restoreObjectOptions.Restore))
	// The XML restore request is forwarded as-is as the request body.
	_, err = builder.SetBodyContent("text/xml", nil, nil, restoreObjectOptions.Body)
	if err != nil {
		return
	}
	request, err := builder.Build()
	if err != nil {
		return
	}
	// No response body is expected, so no unmarshal target is supplied.
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, nil)
	return
}
// InitiateMultipartUpload : Initiate a multipart upload
// Starts a multipart upload and returns an upload ID that ties together all parts of that upload. The upload ID must
// be included in every subsequent upload-part request and in the final request that completes or aborts the upload.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) InitiateMultipartUpload(initiateMultipartUploadOptions *InitiateMultipartUploadOptions) (result *CreateMultipartUploadOutput, response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background context.
	result, response, err = ibmCloudObjectStorageS3Api.InitiateMultipartUploadWithContext(context.Background(), initiateMultipartUploadOptions)
	return
}
// InitiateMultipartUploadWithContext is an alternate form of the InitiateMultipartUpload method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) InitiateMultipartUploadWithContext(ctx context.Context, initiateMultipartUploadOptions *InitiateMultipartUploadOptions) (result *CreateMultipartUploadOutput, response *core.DetailedResponse, err error) {
	// Reject a nil options pointer before any field is dereferenced.
	err = core.ValidateNotNil(initiateMultipartUploadOptions, "initiateMultipartUploadOptions cannot be nil")
	if err != nil {
		return
	}
	// Validate the options struct; Bucket, Key and Uploads are dereferenced below without nil checks.
	err = core.ValidateStruct(initiateMultipartUploadOptions, "initiateMultipartUploadOptions")
	if err != nil {
		return
	}
	// Path parameters substituted into the URL template below.
	pathParamsMap := map[string]string{
		"Bucket": *initiateMultipartUploadOptions.Bucket,
		"Key": *initiateMultipartUploadOptions.Key,
	}
	// Build a POST request bound to the caller's context.
	builder := core.NewRequestBuilder(core.POST)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	// NOTE(review): the template carries `?uploads` and the same marker is added
	// again via AddQuery below — presumably the generator's convention; verify.
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}/{Key}?uploads`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers, then fixed headers.
	for headerName, headerValue := range initiateMultipartUploadOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "InitiateMultipartUpload")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "text/xml")
	builder.AddHeader("Content-Type", "text/xml")
	// Optional request headers: each is set only when the corresponding option field is non-nil.
	if initiateMultipartUploadOptions.IfMatch != nil {
		builder.AddHeader("If-Match", fmt.Sprint(*initiateMultipartUploadOptions.IfMatch))
	}
	if initiateMultipartUploadOptions.IfNoneMatch != nil {
		builder.AddHeader("If-None-Match", fmt.Sprint(*initiateMultipartUploadOptions.IfNoneMatch))
	}
	if initiateMultipartUploadOptions.IfUnmodifiedSince != nil {
		builder.AddHeader("If-Unmodified-Since", fmt.Sprint(*initiateMultipartUploadOptions.IfUnmodifiedSince))
	}
	if initiateMultipartUploadOptions.CacheControl != nil {
		builder.AddHeader("Cache-Control", fmt.Sprint(*initiateMultipartUploadOptions.CacheControl))
	}
	if initiateMultipartUploadOptions.ContentDisposition != nil {
		builder.AddHeader("Content-Disposition", fmt.Sprint(*initiateMultipartUploadOptions.ContentDisposition))
	}
	if initiateMultipartUploadOptions.ContentEncoding != nil {
		builder.AddHeader("Content-Encoding", fmt.Sprint(*initiateMultipartUploadOptions.ContentEncoding))
	}
	if initiateMultipartUploadOptions.ContentLanguage != nil {
		builder.AddHeader("Content-Language", fmt.Sprint(*initiateMultipartUploadOptions.ContentLanguage))
	}
	if initiateMultipartUploadOptions.Expires != nil {
		builder.AddHeader("Expires", fmt.Sprint(*initiateMultipartUploadOptions.Expires))
	}
	if initiateMultipartUploadOptions.XAmzServerSideEncryption != nil {
		builder.AddHeader("x-amz-server-side-encryption", fmt.Sprint(*initiateMultipartUploadOptions.XAmzServerSideEncryption))
	}
	if initiateMultipartUploadOptions.XAmzWebsiteRedirectLocation != nil {
		builder.AddHeader("x-amz-website-redirect-location", fmt.Sprint(*initiateMultipartUploadOptions.XAmzWebsiteRedirectLocation))
	}
	if initiateMultipartUploadOptions.XAmzServerSideEncryptionCustomerAlgorithm != nil {
		builder.AddHeader("x-amz-server-side-encryption-customer-algorithm", fmt.Sprint(*initiateMultipartUploadOptions.XAmzServerSideEncryptionCustomerAlgorithm))
	}
	if initiateMultipartUploadOptions.XAmzServerSideEncryptionCustomerKey != nil {
		builder.AddHeader("x-amz-server-side-encryption-customer-key", fmt.Sprint(*initiateMultipartUploadOptions.XAmzServerSideEncryptionCustomerKey))
	}
	if initiateMultipartUploadOptions.XAmzServerSideEncryptionCustomerKeyMD5 != nil {
		builder.AddHeader("x-amz-server-side-encryption-customer-key-MD5", fmt.Sprint(*initiateMultipartUploadOptions.XAmzServerSideEncryptionCustomerKeyMD5))
	}
	if initiateMultipartUploadOptions.XAmzTagging != nil {
		builder.AddHeader("x-amz-tagging", fmt.Sprint(*initiateMultipartUploadOptions.XAmzTagging))
	}
	if initiateMultipartUploadOptions.XAmzAcl != nil {
		builder.AddHeader("x-amz-acl", fmt.Sprint(*initiateMultipartUploadOptions.XAmzAcl))
	}
	builder.AddQuery("uploads", fmt.Sprint(*initiateMultipartUploadOptions.Uploads))
	// The XML payload from the options is forwarded as-is as the request body.
	_, err = builder.SetBodyContent("text/xml", nil, nil, initiateMultipartUploadOptions.Body)
	if err != nil {
		return
	}
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Execute the request; the service core fills rawResponse with the decoded body.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	// Convert the raw response into the typed output model.
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCreateMultipartUploadOutput)
	if err != nil {
		return
	}
	// Also expose the typed result on the detailed response.
	response.Result = result
	return
}
// CompleteMultipartUpload : Complete a multipart upload
// Assembles previously uploaded parts into the final object.
//
// Call this operation after all parts of an upload have been uploaded successfully. On receiving the request, IBM COS
// concatenates the parts in ascending part-number order into a new object. The request must carry the complete parts
// list: for every part it must include the part number and the `ETag` value that was returned when that part was
// uploaded, as the operation concatenates exactly the parts named in the list.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) CompleteMultipartUpload(completeMultipartUploadOptions *CompleteMultipartUploadOptions) (result *CompleteMultipartUploadOutput, response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background context.
	result, response, err = ibmCloudObjectStorageS3Api.CompleteMultipartUploadWithContext(context.Background(), completeMultipartUploadOptions)
	return
}
// CompleteMultipartUploadWithContext is an alternate form of the CompleteMultipartUpload method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) CompleteMultipartUploadWithContext(ctx context.Context, completeMultipartUploadOptions *CompleteMultipartUploadOptions) (result *CompleteMultipartUploadOutput, response *core.DetailedResponse, err error) {
	// Reject a nil options pointer before any field is dereferenced.
	err = core.ValidateNotNil(completeMultipartUploadOptions, "completeMultipartUploadOptions cannot be nil")
	if err != nil {
		return
	}
	// Validate the options struct; Bucket, Key and UploadID are dereferenced below without nil checks.
	err = core.ValidateStruct(completeMultipartUploadOptions, "completeMultipartUploadOptions")
	if err != nil {
		return
	}
	// Path parameters substituted into the URL template below.
	pathParamsMap := map[string]string{
		"Bucket": *completeMultipartUploadOptions.Bucket,
		"Key": *completeMultipartUploadOptions.Key,
	}
	// Build a POST request bound to the caller's context.
	builder := core.NewRequestBuilder(core.POST)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	// NOTE(review): the `{uploadId}` placeholder in the template has no entry in
	// pathParamsMap — confirm core.RequestBuilder tolerates the unresolved
	// placeholder; the uploadId value is supplied via AddQuery below.
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}/{Key}?uploadId={uploadId}`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers, then fixed headers.
	for headerName, headerValue := range completeMultipartUploadOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "CompleteMultipartUpload")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "text/xml")
	builder.AddHeader("Content-Type", "text/xml")
	builder.AddQuery("uploadId", fmt.Sprint(*completeMultipartUploadOptions.UploadID))
	// The XML parts list from the options is forwarded as-is as the request body.
	_, err = builder.SetBodyContent("text/xml", nil, nil, completeMultipartUploadOptions.Body)
	if err != nil {
		return
	}
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Execute the request; the service core fills rawResponse with the decoded body.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	// Convert the raw response into the typed output model.
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalCompleteMultipartUploadOutput)
	if err != nil {
		return
	}
	// Also expose the typed result on the detailed response.
	response.Result = result
	return
}
// ListParts : List parts of a multipart upload
// Lists the parts uploaded so far for a given multipart upload, identified by the upload ID obtained from the
// initiate-multipart-upload request. At most 1,000 parts are returned by default; the `max-parts` request parameter
// can lower that limit. When an upload has more parts than fit in one response, the response carries `IsTruncated`
// set to true together with a `NextPartNumberMarker` element, whose value should be passed as the
// `part-number-marker` query parameter of the next `ListParts` request.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) ListParts(listPartsOptions *ListPartsOptions) (result *ListPartsOutput, response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background context.
	result, response, err = ibmCloudObjectStorageS3Api.ListPartsWithContext(context.Background(), listPartsOptions)
	return
}
// ListPartsWithContext is an alternate form of the ListParts method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) ListPartsWithContext(ctx context.Context, listPartsOptions *ListPartsOptions) (result *ListPartsOutput, response *core.DetailedResponse, err error) {
	// Reject a nil options pointer before any field is dereferenced.
	err = core.ValidateNotNil(listPartsOptions, "listPartsOptions cannot be nil")
	if err != nil {
		return
	}
	// Validate the options struct; Bucket, Key and UploadID are dereferenced below without nil checks.
	err = core.ValidateStruct(listPartsOptions, "listPartsOptions")
	if err != nil {
		return
	}
	// Path parameters substituted into the URL template below.
	pathParamsMap := map[string]string{
		"Bucket": *listPartsOptions.Bucket,
		"Key": *listPartsOptions.Key,
	}
	// Build a GET request bound to the caller's context.
	builder := core.NewRequestBuilder(core.GET)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	// NOTE(review): the `{uploadId}` placeholder in the template has no entry in
	// pathParamsMap — confirm core.RequestBuilder tolerates the unresolved
	// placeholder; the uploadId value is supplied via AddQuery below.
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}/{Key}?uploadId={uploadId}`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers, then fixed headers.
	for headerName, headerValue := range listPartsOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "ListParts")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "text/xml")
	builder.AddQuery("uploadId", fmt.Sprint(*listPartsOptions.UploadID))
	// Optional pagination parameters: each is set only when the corresponding option field is non-nil.
	if listPartsOptions.MaxParts != nil {
		builder.AddQuery("max-parts", fmt.Sprint(*listPartsOptions.MaxParts))
	}
	if listPartsOptions.PartNumberMarker != nil {
		builder.AddQuery("part-number-marker", fmt.Sprint(*listPartsOptions.PartNumberMarker))
	}
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Execute the request; the service core fills rawResponse with the decoded body.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	// Convert the raw response into the typed output model.
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListPartsOutput)
	if err != nil {
		return
	}
	// Also expose the typed result on the detailed response.
	response.Result = result
	return
}
// AbortMultipartUpload : Abort a multipart upload
// Stops an in-progress multipart upload and removes any parts of the incomplete upload, which would otherwise
// continue to incur storage costs.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) AbortMultipartUpload(abortMultipartUploadOptions *AbortMultipartUploadOptions) (response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background context.
	response, err = ibmCloudObjectStorageS3Api.AbortMultipartUploadWithContext(context.Background(), abortMultipartUploadOptions)
	return
}
// AbortMultipartUploadWithContext is an alternate form of the AbortMultipartUpload method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) AbortMultipartUploadWithContext(ctx context.Context, abortMultipartUploadOptions *AbortMultipartUploadOptions) (response *core.DetailedResponse, err error) {
	// Reject a nil options pointer before any field is dereferenced.
	err = core.ValidateNotNil(abortMultipartUploadOptions, "abortMultipartUploadOptions cannot be nil")
	if err != nil {
		return
	}
	// Validate the options struct; Bucket, Key and UploadID are dereferenced below without nil checks.
	err = core.ValidateStruct(abortMultipartUploadOptions, "abortMultipartUploadOptions")
	if err != nil {
		return
	}
	// Path parameters substituted into the URL template below.
	pathParamsMap := map[string]string{
		"Bucket": *abortMultipartUploadOptions.Bucket,
		"Key": *abortMultipartUploadOptions.Key,
	}
	// Build a DELETE request bound to the caller's context.
	builder := core.NewRequestBuilder(core.DELETE)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	// NOTE(review): the `{uploadId}` placeholder in the template has no entry in
	// pathParamsMap — confirm core.RequestBuilder tolerates the unresolved
	// placeholder; the uploadId value is supplied via AddQuery below.
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}/{Key}?uploadId={uploadId}`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers, then fixed headers.
	for headerName, headerValue := range abortMultipartUploadOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "AbortMultipartUpload")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "text/xml")
	builder.AddQuery("uploadId", fmt.Sprint(*abortMultipartUploadOptions.UploadID))
	request, err := builder.Build()
	if err != nil {
		return
	}
	// No response body is expected, so no unmarshal target is supplied.
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, nil)
	return
}
// ListMultipartUploads : List active multipart uploads
// Lists in-progress multipart uploads — uploads that have been started with an Initiate Multipart Upload request but
// not yet completed or aborted. A response contains at most 1,000 multipart uploads, which is both the hard maximum
// and the default; the `max-uploads` parameter can reduce that count further. If more uploads match the list
// criteria, the response contains an `IsTruncated` element with the value true, and the remaining uploads can be
// listed using the `key-marker` and `upload-id-marker` request parameters. Results are sorted by key; when several
// uploads share an object key, those uploads are additionally ordered within the key by upload initiation time,
// ascending.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) ListMultipartUploads(listMultipartUploadsOptions *ListMultipartUploadsOptions) (result *ListMultipartUploadsOutput, response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background context.
	result, response, err = ibmCloudObjectStorageS3Api.ListMultipartUploadsWithContext(context.Background(), listMultipartUploadsOptions)
	return
}
// ListMultipartUploadsWithContext is an alternate form of the ListMultipartUploads method which supports a Context parameter.
//
// It validates the options, builds a GET request against `/{Bucket}?uploads`, copies caller-supplied
// and SDK analytics headers onto the request, attaches the optional filtering/pagination query
// parameters only when set, and unmarshals the XML response into a ListMultipartUploadsOutput.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) ListMultipartUploadsWithContext(ctx context.Context, listMultipartUploadsOptions *ListMultipartUploadsOptions) (result *ListMultipartUploadsOutput, response *core.DetailedResponse, err error) {
	err = core.ValidateNotNil(listMultipartUploadsOptions, "listMultipartUploadsOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(listMultipartUploadsOptions, "listMultipartUploadsOptions")
	if err != nil {
		return
	}

	pathParamsMap := map[string]string{
		"Bucket": *listMultipartUploadsOptions.Bucket,
	}

	builder := core.NewRequestBuilder(core.GET)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}?uploads`, pathParamsMap)
	if err != nil {
		return
	}

	// Caller-supplied headers first, then the SDK analytics headers.
	for headerName, headerValue := range listMultipartUploadsOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "ListMultipartUploads")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "text/xml")

	builder.AddQuery("uploads", fmt.Sprint(*listMultipartUploadsOptions.Uploads))
	// Optional filtering / pagination parameters are only sent when set by the caller.
	if listMultipartUploadsOptions.Delimiter != nil {
		builder.AddQuery("delimiter", fmt.Sprint(*listMultipartUploadsOptions.Delimiter))
	}
	if listMultipartUploadsOptions.EncodingType != nil {
		builder.AddQuery("encoding-type", fmt.Sprint(*listMultipartUploadsOptions.EncodingType))
	}
	if listMultipartUploadsOptions.KeyMarker != nil {
		builder.AddQuery("key-marker", fmt.Sprint(*listMultipartUploadsOptions.KeyMarker))
	}
	if listMultipartUploadsOptions.MaxUploads != nil {
		builder.AddQuery("max-uploads", fmt.Sprint(*listMultipartUploadsOptions.MaxUploads))
	}
	if listMultipartUploadsOptions.Prefix != nil {
		builder.AddQuery("prefix", fmt.Sprint(*listMultipartUploadsOptions.Prefix))
	}
	if listMultipartUploadsOptions.UploadIdMarker != nil {
		builder.AddQuery("upload-id-marker", fmt.Sprint(*listMultipartUploadsOptions.UploadIdMarker))
	}
	if listMultipartUploadsOptions.PaginationLimit != nil {
		builder.AddQuery("PaginationLimit", fmt.Sprint(*listMultipartUploadsOptions.PaginationLimit))
	}
	// NOTE(review): the generated code previously added PaginationToken twice, which
	// duplicated the query parameter on the wire; it is now added exactly once.
	if listMultipartUploadsOptions.PaginationToken != nil {
		builder.AddQuery("PaginationToken", fmt.Sprint(*listMultipartUploadsOptions.PaginationToken))
	}

	request, err := builder.Build()
	if err != nil {
		return
	}

	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalListMultipartUploadsOutput)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// UploadPart : Upload a part of an object
// Uploads a part in a multipart upload. In this operation, you provide part data in your request. However, you have an
// option to specify your existing IBM COS object as a data source for the part you are uploading. To upload a part from
// an existing object, you use the `UploadPartCopy` operation. You must initiate a multipart upload (see
// `CreateMultipartUpload`) before you can upload any part. In response to your initiate request, IBM COS returns an
// upload ID (a unique identifier) that you must include in your upload part requests.Part numbers can be any number
// from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object
// being created. If you upload a new part using the same part number that was used with a previous part, the previously
// uploaded part is overwritten. Each part must be at least 5 MB in size, except the last part. There is no size limit
// on the last part of your multipart upload.
//
// To ensure that data is not corrupted when traversing the network, specify the `Content-MD5` header in the upload part
// request. IBM COS checks the part data against the provided MD5 value. If they do not match, IBM COS returns an error.
// If the upload request uses HMAC authentication (AWS Signature Version 4), then IBM COS uses the
// `x-amz-content-sha256` header as a checksum instead of `Content-MD5`.
//
// After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload
// in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart
// upload, IBM COS frees up the parts storage and stops charging you for the parts storage.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) UploadPart(uploadPartOptions *UploadPartOptions) (response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background context.
	response, err = ibmCloudObjectStorageS3Api.UploadPartWithContext(context.Background(), uploadPartOptions)
	return
}
// UploadPartWithContext is an alternate form of the UploadPart method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) UploadPartWithContext(ctx context.Context, uploadPartOptions *UploadPartOptions) (response *core.DetailedResponse, err error) {
	// Reject nil or structurally invalid options before building the request.
	if err = core.ValidateNotNil(uploadPartOptions, "uploadPartOptions cannot be nil"); err != nil {
		return
	}
	if err = core.ValidateStruct(uploadPartOptions, "uploadPartOptions"); err != nil {
		return
	}

	builder := core.NewRequestBuilder(core.PUT).WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()

	pathParams := map[string]string{
		"Bucket": *uploadPartOptions.Bucket,
		"Key":    *uploadPartOptions.Key,
	}
	if _, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}/{Key}?partNumber={partNumber}&uploadId={uploadId}`, pathParams); err != nil {
		return
	}

	// Caller-supplied headers first, then the SDK analytics headers.
	for name, value := range uploadPartOptions.Headers {
		builder.AddHeader(name, value)
	}
	for name, value := range common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "UploadPart") {
		builder.AddHeader(name, value)
	}
	builder.AddHeader("Accept", "text/xml")
	builder.AddHeader("Content-Type", "text/xml")

	// Optional request headers are only sent when the caller set them.
	if uploadPartOptions.ContentLength != nil {
		builder.AddHeader("Content-Length", fmt.Sprint(*uploadPartOptions.ContentLength))
	}
	if uploadPartOptions.ContentMD5 != nil {
		builder.AddHeader("Content-MD5", fmt.Sprint(*uploadPartOptions.ContentMD5))
	}
	if uploadPartOptions.XAmzServerSideEncryptionCustomerAlgorithm != nil {
		builder.AddHeader("x-amz-server-side-encryption-customer-algorithm", fmt.Sprint(*uploadPartOptions.XAmzServerSideEncryptionCustomerAlgorithm))
	}
	if uploadPartOptions.XAmzServerSideEncryptionCustomerKey != nil {
		builder.AddHeader("x-amz-server-side-encryption-customer-key", fmt.Sprint(*uploadPartOptions.XAmzServerSideEncryptionCustomerKey))
	}
	if uploadPartOptions.XAmzServerSideEncryptionCustomerKeyMD5 != nil {
		builder.AddHeader("x-amz-server-side-encryption-customer-key-MD5", fmt.Sprint(*uploadPartOptions.XAmzServerSideEncryptionCustomerKeyMD5))
	}
	if uploadPartOptions.XAmzRequestPayer != nil {
		builder.AddHeader("x-amz-request-payer", fmt.Sprint(*uploadPartOptions.XAmzRequestPayer))
	}
	if uploadPartOptions.XAmzExpectedBucketOwner != nil {
		builder.AddHeader("x-amz-expected-bucket-owner", fmt.Sprint(*uploadPartOptions.XAmzExpectedBucketOwner))
	}

	builder.AddQuery("partNumber", fmt.Sprint(*uploadPartOptions.PartNumber))
	builder.AddQuery("uploadId", fmt.Sprint(*uploadPartOptions.UploadID))

	if _, err = builder.SetBodyContent("text/xml", nil, nil, uploadPartOptions.Body); err != nil {
		return
	}

	request, buildErr := builder.Build()
	if buildErr != nil {
		err = buildErr
		return
	}

	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, nil)
	return
}
// UploadPartCopy : Use an existing object as a part of a new object
// Uploads a part by copying data from an existing object as data source. You specify the data source by adding the
// request header `x-amz-copy-source` in your request and a byte range by adding the request header
// `x-amz-copy-source-range` in your request. The minimum allowable part size for a multipart upload is 5 MB. Instead of
// using an existing object as part data, you might use the `UploadPart`operation and provide data in your request.
//
// You must initiate a multipart upload before you can upload any part. In response to your initiate request. IBM COS
// returns a unique identifier, the upload ID, that you must include in your upload part request.
//
// Note the following additional considerations about the request headers `x-amz-copy-source-if-match`,
// `x-amz-copy-source-if-none-match`, `x-amz-copy-source-if-unmodified-since`, and
// `x-amz-copy-source-if-modified-since`:
// * If both of the `x-amz-copy-source-if-match` and `x-amz-copy-source-if-unmodified-since` headers are present in the
// request as follows: `x-amz-copy-source-if-match` condition evaluates to `true`, and
// `x-amz-copy-source-if-unmodified-since` condition evaluates to `false`, then IBM COS returns `200 OK` and copies the
// data.
// * If both of the `x-amz-copy-source-if-none-match` and `x-amz-copy-source-if-modified-since` headers are present in
// the request as follows: `x-amz-copy-source-if-none-match` condition evaluates to `false`, and
// `x-amz-copy-source-if-modified-since` condition evaluates to `true` IBM COS returns `412 Precondition Failed`
// response code.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) UploadPartCopy(uploadPartCopyOptions *UploadPartCopyOptions) (result *UploadPartCopyOutput, response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background context.
	result, response, err = ibmCloudObjectStorageS3Api.UploadPartCopyWithContext(context.Background(), uploadPartCopyOptions)
	return
}
// UploadPartCopyWithContext is an alternate form of the UploadPartCopy method which supports a Context parameter.
//
// It validates the options, builds a PUT request against `/{Bucket}/{Key}`, forwards the copy-source
// and SSE-C headers when set, attaches the partNumber/uploadId query parameters, and unmarshals the
// XML response into an UploadPartCopyOutput.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) UploadPartCopyWithContext(ctx context.Context, uploadPartCopyOptions *UploadPartCopyOptions) (result *UploadPartCopyOutput, response *core.DetailedResponse, err error) {
	err = core.ValidateNotNil(uploadPartCopyOptions, "uploadPartCopyOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(uploadPartCopyOptions, "uploadPartCopyOptions")
	if err != nil {
		return
	}

	pathParamsMap := map[string]string{
		"Bucket": *uploadPartCopyOptions.Bucket,
		"Key":    *uploadPartCopyOptions.Key,
	}

	builder := core.NewRequestBuilder(core.PUT)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	// BUG FIX: the template previously used `{TargetKey}`, which is not present in
	// pathParamsMap (only "Bucket" and "Key" are), so the object key was never
	// substituted into the request path. `{Key}` matches the map and the sibling
	// object-level operations (UploadPart, GetObjectAcl, PutObjectAcl).
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}/{Key}?partNumber={partNumber}&uploadId={uploadId}`, pathParamsMap)
	if err != nil {
		return
	}

	// Caller-supplied headers first, then the SDK analytics headers.
	for headerName, headerValue := range uploadPartCopyOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "UploadPartCopy")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "text/xml")

	// Optional copy-source, conditional-copy, and SSE-C headers are only sent when set.
	if uploadPartCopyOptions.XAmzCopySource != nil {
		builder.AddHeader("x-amz-copy-source", fmt.Sprint(*uploadPartCopyOptions.XAmzCopySource))
	}
	if uploadPartCopyOptions.XAmzCopySourceIfMatch != nil {
		builder.AddHeader("x-amz-copy-source-if-match", fmt.Sprint(*uploadPartCopyOptions.XAmzCopySourceIfMatch))
	}
	if uploadPartCopyOptions.XAmzCopySourceIfModifiedSince != nil {
		builder.AddHeader("x-amz-copy-source-if-modified-since", fmt.Sprint(*uploadPartCopyOptions.XAmzCopySourceIfModifiedSince))
	}
	if uploadPartCopyOptions.XAmzCopySourceIfNoneMatch != nil {
		builder.AddHeader("x-amz-copy-source-if-none-match", fmt.Sprint(*uploadPartCopyOptions.XAmzCopySourceIfNoneMatch))
	}
	if uploadPartCopyOptions.XAmzCopySourceIfUnmodifiedSince != nil {
		builder.AddHeader("x-amz-copy-source-if-unmodified-since", fmt.Sprint(*uploadPartCopyOptions.XAmzCopySourceIfUnmodifiedSince))
	}
	if uploadPartCopyOptions.XAmzCopySourceRange != nil {
		builder.AddHeader("x-amz-copy-source-range", fmt.Sprint(*uploadPartCopyOptions.XAmzCopySourceRange))
	}
	if uploadPartCopyOptions.XAmzServerSideEncryptionCustomerAlgorithm != nil {
		builder.AddHeader("x-amz-server-side-encryption-customer-algorithm", fmt.Sprint(*uploadPartCopyOptions.XAmzServerSideEncryptionCustomerAlgorithm))
	}
	if uploadPartCopyOptions.XAmzServerSideEncryptionCustomerKey != nil {
		builder.AddHeader("x-amz-server-side-encryption-customer-key", fmt.Sprint(*uploadPartCopyOptions.XAmzServerSideEncryptionCustomerKey))
	}
	if uploadPartCopyOptions.XAmzServerSideEncryptionCustomerKeyMD5 != nil {
		builder.AddHeader("x-amz-server-side-encryption-customer-key-MD5", fmt.Sprint(*uploadPartCopyOptions.XAmzServerSideEncryptionCustomerKeyMD5))
	}
	if uploadPartCopyOptions.XAmzCopySourceServerSideEncryptionCustomerAlgorithm != nil {
		builder.AddHeader("x-amz-copy-source-server-side-encryption-customer-algorithm", fmt.Sprint(*uploadPartCopyOptions.XAmzCopySourceServerSideEncryptionCustomerAlgorithm))
	}
	if uploadPartCopyOptions.XAmzCopySourceServerSideEncryptionCustomerKey != nil {
		builder.AddHeader("x-amz-copy-source-server-side-encryption-customer-key", fmt.Sprint(*uploadPartCopyOptions.XAmzCopySourceServerSideEncryptionCustomerKey))
	}
	if uploadPartCopyOptions.XAmzCopySourceServerSideEncryptionCustomerKeyMD5 != nil {
		builder.AddHeader("x-amz-copy-source-server-side-encryption-customer-key-MD5", fmt.Sprint(*uploadPartCopyOptions.XAmzCopySourceServerSideEncryptionCustomerKeyMD5))
	}

	builder.AddQuery("partNumber", fmt.Sprint(*uploadPartCopyOptions.PartNumber))
	builder.AddQuery("uploadId", fmt.Sprint(*uploadPartCopyOptions.UploadID))

	request, err := builder.Build()
	if err != nil {
		return
	}

	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalUploadPartCopyOutput)
	if err != nil {
		return
	}
	response.Result = result
	return
}
// PutPublicAccessBlock : Create a public ACL block configuration
// Creates or modifies the `PublicAccessBlock` configuration for an IBM COS bucket.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) PutPublicAccessBlock(putPublicAccessBlockOptions *PutPublicAccessBlockOptions) (response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background context.
	response, err = ibmCloudObjectStorageS3Api.PutPublicAccessBlockWithContext(context.Background(), putPublicAccessBlockOptions)
	return
}
// PutPublicAccessBlockWithContext is an alternate form of the PutPublicAccessBlock method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) PutPublicAccessBlockWithContext(ctx context.Context, putPublicAccessBlockOptions *PutPublicAccessBlockOptions) (response *core.DetailedResponse, err error) {
	// Reject nil or structurally invalid options before building the request.
	if err = core.ValidateNotNil(putPublicAccessBlockOptions, "putPublicAccessBlockOptions cannot be nil"); err != nil {
		return
	}
	if err = core.ValidateStruct(putPublicAccessBlockOptions, "putPublicAccessBlockOptions"); err != nil {
		return
	}

	builder := core.NewRequestBuilder(core.PUT).WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()

	pathParams := map[string]string{
		"Bucket": *putPublicAccessBlockOptions.Bucket,
	}
	if _, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}?publicAccessBlock`, pathParams); err != nil {
		return
	}

	// Caller-supplied headers first, then the SDK analytics headers.
	for name, value := range putPublicAccessBlockOptions.Headers {
		builder.AddHeader(name, value)
	}
	for name, value := range common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "PutPublicAccessBlock") {
		builder.AddHeader(name, value)
	}
	builder.AddHeader("Content-Type", "text/xml")
	if putPublicAccessBlockOptions.ContentMD5 != nil {
		builder.AddHeader("Content-MD5", fmt.Sprint(*putPublicAccessBlockOptions.ContentMD5))
	}

	builder.AddQuery("publicAccessBlock", fmt.Sprint(*putPublicAccessBlockOptions.PublicAccessBlock))

	if _, err = builder.SetBodyContent("text/xml", nil, nil, putPublicAccessBlockOptions.Body); err != nil {
		return
	}

	request, buildErr := builder.Build()
	if buildErr != nil {
		err = buildErr
		return
	}

	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, nil)
	return
}
// GetPublicAccessBlock : Read a public ACL block configuration
// Retrieves the `PublicAccessBlock` configuration for an IBM COS bucket.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) GetPublicAccessBlock(getPublicAccessBlockOptions *GetPublicAccessBlockOptions) (result *GetPublicAccessBlockOutput, response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background context.
	result, response, err = ibmCloudObjectStorageS3Api.GetPublicAccessBlockWithContext(context.Background(), getPublicAccessBlockOptions)
	return
}
// GetPublicAccessBlockWithContext is an alternate form of the GetPublicAccessBlock method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) GetPublicAccessBlockWithContext(ctx context.Context, getPublicAccessBlockOptions *GetPublicAccessBlockOptions) (result *GetPublicAccessBlockOutput, response *core.DetailedResponse, err error) {
	// Reject nil or structurally invalid options before building the request.
	if err = core.ValidateNotNil(getPublicAccessBlockOptions, "getPublicAccessBlockOptions cannot be nil"); err != nil {
		return
	}
	if err = core.ValidateStruct(getPublicAccessBlockOptions, "getPublicAccessBlockOptions"); err != nil {
		return
	}

	builder := core.NewRequestBuilder(core.GET).WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()

	pathParams := map[string]string{
		"Bucket": *getPublicAccessBlockOptions.Bucket,
	}
	if _, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}?publicAccessBlock`, pathParams); err != nil {
		return
	}

	// Caller-supplied headers first, then the SDK analytics headers.
	for name, value := range getPublicAccessBlockOptions.Headers {
		builder.AddHeader(name, value)
	}
	for name, value := range common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "GetPublicAccessBlock") {
		builder.AddHeader(name, value)
	}
	builder.AddHeader("Accept", "text/xml")

	builder.AddQuery("publicAccessBlock", fmt.Sprint(*getPublicAccessBlockOptions.PublicAccessBlock))

	request, buildErr := builder.Build()
	if buildErr != nil {
		err = buildErr
		return
	}

	// Decode the XML payload (delivered as raw JSON by the core transport) into the model.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	if err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGetPublicAccessBlockOutput); err != nil {
		return
	}
	response.Result = result
	return
}
// DeletePublicAccessBlock : Delete a public ACL block configuration
// Removes the `PublicAccessBlock` configuration for an IBM COS bucket.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) DeletePublicAccessBlock(deletePublicAccessBlockOptions *DeletePublicAccessBlockOptions) (response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background context.
	response, err = ibmCloudObjectStorageS3Api.DeletePublicAccessBlockWithContext(context.Background(), deletePublicAccessBlockOptions)
	return
}
// DeletePublicAccessBlockWithContext is an alternate form of the DeletePublicAccessBlock method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) DeletePublicAccessBlockWithContext(ctx context.Context, deletePublicAccessBlockOptions *DeletePublicAccessBlockOptions) (response *core.DetailedResponse, err error) {
	// Reject nil or structurally invalid options before building the request.
	if err = core.ValidateNotNil(deletePublicAccessBlockOptions, "deletePublicAccessBlockOptions cannot be nil"); err != nil {
		return
	}
	if err = core.ValidateStruct(deletePublicAccessBlockOptions, "deletePublicAccessBlockOptions"); err != nil {
		return
	}

	builder := core.NewRequestBuilder(core.DELETE).WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()

	pathParams := map[string]string{
		"Bucket": *deletePublicAccessBlockOptions.Bucket,
	}
	if _, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}?publicAccessBlock`, pathParams); err != nil {
		return
	}

	// Caller-supplied headers first, then the SDK analytics headers.
	for name, value := range deletePublicAccessBlockOptions.Headers {
		builder.AddHeader(name, value)
	}
	for name, value := range common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "DeletePublicAccessBlock") {
		builder.AddHeader(name, value)
	}

	builder.AddQuery("publicAccessBlock", fmt.Sprint(*deletePublicAccessBlockOptions.PublicAccessBlock))

	request, buildErr := builder.Build()
	if buildErr != nil {
		err = buildErr
		return
	}

	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, nil)
	return
}
// GetBucketAcl : Read a bucket ACL
// This implementation of the `GET` operation uses the `acl` subresource to return the access control list (ACL) of a
// bucket.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) GetBucketAcl(getBucketAclOptions *GetBucketAclOptions) (response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background context.
	response, err = ibmCloudObjectStorageS3Api.GetBucketAclWithContext(context.Background(), getBucketAclOptions)
	return
}
// GetBucketAclWithContext is an alternate form of the GetBucketAcl method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) GetBucketAclWithContext(ctx context.Context, getBucketAclOptions *GetBucketAclOptions) (response *core.DetailedResponse, err error) {
	// Reject nil or structurally invalid options before building the request.
	if err = core.ValidateNotNil(getBucketAclOptions, "getBucketAclOptions cannot be nil"); err != nil {
		return
	}
	if err = core.ValidateStruct(getBucketAclOptions, "getBucketAclOptions"); err != nil {
		return
	}

	builder := core.NewRequestBuilder(core.GET).WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()

	pathParams := map[string]string{
		"Bucket": *getBucketAclOptions.Bucket,
	}
	if _, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}?acl`, pathParams); err != nil {
		return
	}

	// Caller-supplied headers first, then the SDK analytics headers.
	for name, value := range getBucketAclOptions.Headers {
		builder.AddHeader(name, value)
	}
	for name, value := range common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "GetBucketAcl") {
		builder.AddHeader(name, value)
	}

	builder.AddQuery("acl", fmt.Sprint(*getBucketAclOptions.Acl))

	request, buildErr := builder.Build()
	if buildErr != nil {
		err = buildErr
		return
	}

	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, nil)
	return
}
// PutBucketAcl : Create a bucket ACL
// This operation should not be used. Instead, use IAM policies to grant public access.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) PutBucketAcl(putBucketAclOptions *PutBucketAclOptions) (response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background context.
	response, err = ibmCloudObjectStorageS3Api.PutBucketAclWithContext(context.Background(), putBucketAclOptions)
	return
}
// PutBucketAclWithContext is an alternate form of the PutBucketAcl method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) PutBucketAclWithContext(ctx context.Context, putBucketAclOptions *PutBucketAclOptions) (response *core.DetailedResponse, err error) {
	// Reject nil or structurally invalid options before building the request.
	if err = core.ValidateNotNil(putBucketAclOptions, "putBucketAclOptions cannot be nil"); err != nil {
		return
	}
	if err = core.ValidateStruct(putBucketAclOptions, "putBucketAclOptions"); err != nil {
		return
	}

	builder := core.NewRequestBuilder(core.PUT).WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()

	pathParams := map[string]string{
		"Bucket": *putBucketAclOptions.Bucket,
	}
	if _, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}?acl`, pathParams); err != nil {
		return
	}

	// Caller-supplied headers first, then the SDK analytics headers.
	for name, value := range putBucketAclOptions.Headers {
		builder.AddHeader(name, value)
	}
	for name, value := range common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "PutBucketAcl") {
		builder.AddHeader(name, value)
	}
	// The canned-ACL header is only sent when the caller set it.
	if putBucketAclOptions.XAmzAcl != nil {
		builder.AddHeader("x-amz-acl", fmt.Sprint(*putBucketAclOptions.XAmzAcl))
	}

	builder.AddQuery("acl", fmt.Sprint(*putBucketAclOptions.Acl))

	request, buildErr := builder.Build()
	if buildErr != nil {
		err = buildErr
		return
	}

	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, nil)
	return
}
// GetObjectAcl : Read an object ACL
// This operation should not be used. Instead, use IAM policies to grant public access.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) GetObjectAcl(getObjectAclOptions *GetObjectAclOptions) (result *GetObjectAclOutput, response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background context.
	result, response, err = ibmCloudObjectStorageS3Api.GetObjectAclWithContext(context.Background(), getObjectAclOptions)
	return
}
// GetObjectAclWithContext is an alternate form of the GetObjectAcl method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) GetObjectAclWithContext(ctx context.Context, getObjectAclOptions *GetObjectAclOptions) (result *GetObjectAclOutput, response *core.DetailedResponse, err error) {
	// Reject nil or structurally invalid options before building the request.
	if err = core.ValidateNotNil(getObjectAclOptions, "getObjectAclOptions cannot be nil"); err != nil {
		return
	}
	if err = core.ValidateStruct(getObjectAclOptions, "getObjectAclOptions"); err != nil {
		return
	}

	builder := core.NewRequestBuilder(core.GET).WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()

	pathParams := map[string]string{
		"Bucket": *getObjectAclOptions.Bucket,
		"Key":    *getObjectAclOptions.Key,
	}
	if _, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}/{Key}?acl`, pathParams); err != nil {
		return
	}

	// Caller-supplied headers first, then the SDK analytics headers.
	for name, value := range getObjectAclOptions.Headers {
		builder.AddHeader(name, value)
	}
	for name, value := range common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "GetObjectAcl") {
		builder.AddHeader(name, value)
	}
	builder.AddHeader("Accept", "text/xml")

	builder.AddQuery("acl", fmt.Sprint(*getObjectAclOptions.Acl))

	request, buildErr := builder.Build()
	if buildErr != nil {
		err = buildErr
		return
	}

	// Decode the XML payload (delivered as raw JSON by the core transport) into the model.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	if err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGetObjectAclOutput); err != nil {
		return
	}
	response.Result = result
	return
}
// PutObjectAcl : Make an object publicly accessible
// This operation should not be used. Instead, use IAM policies to grant public access. This operation can be used to
// make a single object in a bucket publicly accessible, but it is discouraged. Instead, create a new bucket with a
// Public Access policy and copy the object to that bucket.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) PutObjectAcl(putObjectAclOptions *PutObjectAclOptions) (response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background context.
	response, err = ibmCloudObjectStorageS3Api.PutObjectAclWithContext(context.Background(), putObjectAclOptions)
	return
}
// PutObjectAclWithContext is an alternate form of the PutObjectAcl method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) PutObjectAclWithContext(ctx context.Context, putObjectAclOptions *PutObjectAclOptions) (response *core.DetailedResponse, err error) {
	// Reject nil or structurally invalid options before building the request.
	if err = core.ValidateNotNil(putObjectAclOptions, "putObjectAclOptions cannot be nil"); err != nil {
		return
	}
	if err = core.ValidateStruct(putObjectAclOptions, "putObjectAclOptions"); err != nil {
		return
	}

	builder := core.NewRequestBuilder(core.PUT).WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()

	pathParams := map[string]string{
		"Bucket": *putObjectAclOptions.Bucket,
		"Key":    *putObjectAclOptions.Key,
	}
	if _, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}/{Key}?acl`, pathParams); err != nil {
		return
	}

	// Caller-supplied headers first, then the SDK analytics headers.
	for name, value := range putObjectAclOptions.Headers {
		builder.AddHeader(name, value)
	}
	for name, value := range common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "PutObjectAcl") {
		builder.AddHeader(name, value)
	}
	builder.AddHeader("Accept", "text/xml")
	builder.AddHeader("Content-Type", "text/xml")

	// Optional canned-ACL, checksum, and grant headers are only sent when set.
	if putObjectAclOptions.XAmzAcl != nil {
		builder.AddHeader("x-amz-acl", fmt.Sprint(*putObjectAclOptions.XAmzAcl))
	}
	if putObjectAclOptions.ContentMD5 != nil {
		builder.AddHeader("Content-MD5", fmt.Sprint(*putObjectAclOptions.ContentMD5))
	}
	if putObjectAclOptions.XAmzGrantFullControl != nil {
		builder.AddHeader("x-amz-grant-full-control", fmt.Sprint(*putObjectAclOptions.XAmzGrantFullControl))
	}
	if putObjectAclOptions.XAmzGrantRead != nil {
		builder.AddHeader("x-amz-grant-read", fmt.Sprint(*putObjectAclOptions.XAmzGrantRead))
	}
	if putObjectAclOptions.XAmzGrantReadAcp != nil {
		builder.AddHeader("x-amz-grant-read-acp", fmt.Sprint(*putObjectAclOptions.XAmzGrantReadAcp))
	}
	if putObjectAclOptions.XAmzGrantWrite != nil {
		builder.AddHeader("x-amz-grant-write", fmt.Sprint(*putObjectAclOptions.XAmzGrantWrite))
	}
	if putObjectAclOptions.XAmzGrantWriteAcp != nil {
		builder.AddHeader("x-amz-grant-write-acp", fmt.Sprint(*putObjectAclOptions.XAmzGrantWriteAcp))
	}

	builder.AddQuery("acl", fmt.Sprint(*putObjectAclOptions.Acl))

	if _, err = builder.SetBodyContent("text/xml", nil, nil, putObjectAclOptions.Body); err != nil {
		return
	}

	request, buildErr := builder.Build()
	if buildErr != nil {
		err = buildErr
		return
	}

	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, nil)
	return
}
// PutObjectTagging : Add a set of tags to an object
// Sets tags on an object that already exists. A tag is a key-value pair.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) PutObjectTagging(putObjectTaggingOptions *PutObjectTaggingOptions) (response *core.DetailedResponse, err error) {
	// Delegate to the context-aware variant using a background context.
	response, err = ibmCloudObjectStorageS3Api.PutObjectTaggingWithContext(context.Background(), putObjectTaggingOptions)
	return
}
// PutObjectTaggingWithContext is an alternate form of the PutObjectTagging method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) PutObjectTaggingWithContext(ctx context.Context, putObjectTaggingOptions *PutObjectTaggingOptions) (response *core.DetailedResponse, err error) {
	// Reject nil or structurally invalid options before building the request.
	if err = core.ValidateNotNil(putObjectTaggingOptions, "putObjectTaggingOptions cannot be nil"); err != nil {
		return
	}
	if err = core.ValidateStruct(putObjectTaggingOptions, "putObjectTaggingOptions"); err != nil {
		return
	}

	builder := core.NewRequestBuilder(core.PUT).WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()

	pathParams := map[string]string{
		"Bucket": *putObjectTaggingOptions.Bucket,
		"Key":    *putObjectTaggingOptions.Key,
	}
	if _, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}/{Key}?tagging`, pathParams); err != nil {
		return
	}

	// Caller-supplied headers first, then the SDK analytics headers.
	for name, value := range putObjectTaggingOptions.Headers {
		builder.AddHeader(name, value)
	}
	for name, value := range common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "PutObjectTagging") {
		builder.AddHeader(name, value)
	}
	builder.AddHeader("Accept", "text/xml")
	builder.AddHeader("Content-Type", "text/xml")
	if putObjectTaggingOptions.ContentMD5 != nil {
		builder.AddHeader("Content-MD5", fmt.Sprint(*putObjectTaggingOptions.ContentMD5))
	}

	builder.AddQuery("tagging", fmt.Sprint(*putObjectTaggingOptions.Tagging))

	if _, err = builder.SetBodyContent("text/xml", nil, nil, putObjectTaggingOptions.Body); err != nil {
		return
	}

	request, buildErr := builder.Build()
	if buildErr != nil {
		err = buildErr
		return
	}

	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, nil)
	return
}
// GetObjectTagging : Read a set of object tags
// Returns the tags of an object.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) GetObjectTagging(getObjectTaggingOptions *GetObjectTaggingOptions) (result *GetObjectTaggingOutput, response *core.DetailedResponse, err error) {
	// Convenience wrapper: delegates to the context-aware variant with a background context.
	return ibmCloudObjectStorageS3Api.GetObjectTaggingWithContext(context.Background(), getObjectTaggingOptions)
}
// GetObjectTaggingWithContext is an alternate form of the GetObjectTagging method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) GetObjectTaggingWithContext(ctx context.Context, getObjectTaggingOptions *GetObjectTaggingOptions) (result *GetObjectTaggingOutput, response *core.DetailedResponse, err error) {
	// Reject a nil options struct, then enforce the struct's `validate` tags.
	err = core.ValidateNotNil(getObjectTaggingOptions, "getObjectTaggingOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(getObjectTaggingOptions, "getObjectTaggingOptions")
	if err != nil {
		return
	}
	// Path parameters substituted into the URL template below.
	pathParamsMap := map[string]string{
		"Bucket": *getObjectTaggingOptions.Bucket,
		"Key": *getObjectTaggingOptions.Key,
	}
	builder := core.NewRequestBuilder(core.GET)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}/{Key}?tagging`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range getObjectTaggingOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "GetObjectTagging")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "text/xml")
	builder.AddQuery("tagging", fmt.Sprint(*getObjectTaggingOptions.Tagging))
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Decode the raw payload into the typed GetObjectTaggingOutput model.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGetObjectTaggingOutput)
	if err != nil {
		return
	}
	// Replace the raw payload on the response envelope with the typed model.
	response.Result = result
	return
}
// DeleteObjectTagging : Delete a set of object tags
// Removes the entire tag set from the specified object.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) DeleteObjectTagging(deleteObjectTaggingOptions *DeleteObjectTaggingOptions) (response *core.DetailedResponse, err error) {
	// Convenience wrapper: delegates to the context-aware variant with a background context.
	return ibmCloudObjectStorageS3Api.DeleteObjectTaggingWithContext(context.Background(), deleteObjectTaggingOptions)
}
// DeleteObjectTaggingWithContext is an alternate form of the DeleteObjectTagging method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) DeleteObjectTaggingWithContext(ctx context.Context, deleteObjectTaggingOptions *DeleteObjectTaggingOptions) (response *core.DetailedResponse, err error) {
	// Reject a nil options struct, then enforce the struct's `validate` tags.
	err = core.ValidateNotNil(deleteObjectTaggingOptions, "deleteObjectTaggingOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(deleteObjectTaggingOptions, "deleteObjectTaggingOptions")
	if err != nil {
		return
	}
	// Path parameters substituted into the URL template below.
	pathParamsMap := map[string]string{
		"Bucket": *deleteObjectTaggingOptions.Bucket,
		"Key": *deleteObjectTaggingOptions.Key,
	}
	builder := core.NewRequestBuilder(core.DELETE)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}/{Key}?tagging`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range deleteObjectTaggingOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "DeleteObjectTagging")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "text/xml")
	builder.AddQuery("tagging", fmt.Sprint(*deleteObjectTaggingOptions.Tagging))
	request, err := builder.Build()
	if err != nil {
		return
	}
	// No response model to decode: only the DetailedResponse envelope is returned.
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, nil)
	return
}
// PutBucketWebsite : Create a website configuration
// Sets the configuration of the website that is specified in the `website` subresource. To configure a bucket as a
// website, you can add this subresource on the bucket with website configuration information such as the file name of
// the index document and any redirect rules.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) PutBucketWebsite(putBucketWebsiteOptions *PutBucketWebsiteOptions) (response *core.DetailedResponse, err error) {
	// Convenience wrapper: delegates to the context-aware variant with a background context.
	return ibmCloudObjectStorageS3Api.PutBucketWebsiteWithContext(context.Background(), putBucketWebsiteOptions)
}
// PutBucketWebsiteWithContext is an alternate form of the PutBucketWebsite method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) PutBucketWebsiteWithContext(ctx context.Context, putBucketWebsiteOptions *PutBucketWebsiteOptions) (response *core.DetailedResponse, err error) {
	// Reject a nil options struct, then enforce the struct's `validate` tags.
	err = core.ValidateNotNil(putBucketWebsiteOptions, "putBucketWebsiteOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(putBucketWebsiteOptions, "putBucketWebsiteOptions")
	if err != nil {
		return
	}
	// Path parameter substituted into the URL template below.
	pathParamsMap := map[string]string{
		"Bucket": *putBucketWebsiteOptions.Bucket,
	}
	builder := core.NewRequestBuilder(core.PUT)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}?website`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range putBucketWebsiteOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "PutBucketWebsite")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Content-Type", "text/xml")
	if putBucketWebsiteOptions.ContentMD5 != nil {
		builder.AddHeader("Content-MD5", fmt.Sprint(*putBucketWebsiteOptions.ContentMD5))
	}
	builder.AddQuery("website", fmt.Sprint(*putBucketWebsiteOptions.Website))
	// The website configuration document is sent verbatim as the XML request body.
	_, err = builder.SetBodyContent("text/xml", nil, nil, putBucketWebsiteOptions.Body)
	if err != nil {
		return
	}
	request, err := builder.Build()
	if err != nil {
		return
	}
	// No response model to decode: only the DetailedResponse envelope is returned.
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, nil)
	return
}
// GetBucketWebsite : Read a website configuration
// Returns the website configuration for a bucket.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) GetBucketWebsite(getBucketWebsiteOptions *GetBucketWebsiteOptions) (result *GetBucketWebsiteOutput, response *core.DetailedResponse, err error) {
	// Convenience wrapper: delegates to the context-aware variant with a background context.
	return ibmCloudObjectStorageS3Api.GetBucketWebsiteWithContext(context.Background(), getBucketWebsiteOptions)
}
// GetBucketWebsiteWithContext is an alternate form of the GetBucketWebsite method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) GetBucketWebsiteWithContext(ctx context.Context, getBucketWebsiteOptions *GetBucketWebsiteOptions) (result *GetBucketWebsiteOutput, response *core.DetailedResponse, err error) {
	// Reject a nil options struct, then enforce the struct's `validate` tags.
	err = core.ValidateNotNil(getBucketWebsiteOptions, "getBucketWebsiteOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(getBucketWebsiteOptions, "getBucketWebsiteOptions")
	if err != nil {
		return
	}
	// Path parameter substituted into the URL template below.
	pathParamsMap := map[string]string{
		"Bucket": *getBucketWebsiteOptions.Bucket,
	}
	builder := core.NewRequestBuilder(core.GET)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}?website`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range getBucketWebsiteOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "GetBucketWebsite")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "text/xml")
	builder.AddQuery("website", fmt.Sprint(*getBucketWebsiteOptions.Website))
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Decode the raw payload into the typed GetBucketWebsiteOutput model.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGetBucketWebsiteOutput)
	if err != nil {
		return
	}
	// Replace the raw payload on the response envelope with the typed model.
	response.Result = result
	return
}
// DeleteBucketWebsite : Delete a website configuration
// This operation removes the website configuration for a bucket. IBM COS returns a `200 OK` response upon successfully
// deleting a website configuration on the specified bucket. You will get a `200 OK` response if the website
// configuration you are trying to delete does not exist on the bucket. IBM COS returns a `404` response if the bucket
// specified in the request does not exist.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) DeleteBucketWebsite(deleteBucketWebsiteOptions *DeleteBucketWebsiteOptions) (response *core.DetailedResponse, err error) {
	// Convenience wrapper: delegates to the context-aware variant with a background context.
	return ibmCloudObjectStorageS3Api.DeleteBucketWebsiteWithContext(context.Background(), deleteBucketWebsiteOptions)
}
// DeleteBucketWebsiteWithContext is an alternate form of the DeleteBucketWebsite method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) DeleteBucketWebsiteWithContext(ctx context.Context, deleteBucketWebsiteOptions *DeleteBucketWebsiteOptions) (response *core.DetailedResponse, err error) {
	// Reject a nil options struct, then enforce the struct's `validate` tags.
	err = core.ValidateNotNil(deleteBucketWebsiteOptions, "deleteBucketWebsiteOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(deleteBucketWebsiteOptions, "deleteBucketWebsiteOptions")
	if err != nil {
		return
	}
	// Path parameter substituted into the URL template below.
	pathParamsMap := map[string]string{
		"Bucket": *deleteBucketWebsiteOptions.Bucket,
	}
	builder := core.NewRequestBuilder(core.DELETE)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}?website`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range deleteBucketWebsiteOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "DeleteBucketWebsite")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddQuery("website", fmt.Sprint(*deleteBucketWebsiteOptions.Website))
	request, err := builder.Build()
	if err != nil {
		return
	}
	// No response model to decode: only the DetailedResponse envelope is returned.
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, nil)
	return
}
// PutBucketCors : Configure CORS
// Sets the CORS configuration for your bucket. If the configuration exists, it will be overwritten and replaced.
//
// You set this configuration on a bucket so that the bucket can service cross-origin requests. For example, you might
// want to enable a request whose origin is `http://www.example.com` to access your bucket at `my.example.bucket.com` by
// using `XMLHttpRequest` in a browser.
//
// To enable cross-origin resource sharing (CORS) on a bucket, you create a XML configuration in which you configure
// rules that identify origins and the HTTP methods that can be executed on your bucket. The document is limited to 64
// KB in size. When IBM COS receives a cross-origin request (or a pre-flight OPTIONS request) against a bucket, it
// evaluates the CORS configuration on the bucket and uses the first `CORSRule` rule that matches the incoming browser
// request to enable a cross-origin request. For a rule to match, the following conditions must be met:
//
// * The request's `Origin` header must match `AllowedOrigin` elements.
// * The request method (for example, GET, PUT, HEAD, and so on) or the `Access-Control-Request-Method` header in case
// of a pre-flight `OPTIONS` request must be one of the `AllowedMethod` elements.
// * Every header specified in the `Access-Control-Request-Headers` request header of a pre-flight request must match an
// `AllowedHeader` element.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) PutBucketCors(putBucketCorsOptions *PutBucketCorsOptions) (response *core.DetailedResponse, err error) {
	// Convenience wrapper: delegates to the context-aware variant with a background context.
	return ibmCloudObjectStorageS3Api.PutBucketCorsWithContext(context.Background(), putBucketCorsOptions)
}
// PutBucketCorsWithContext is an alternate form of the PutBucketCors method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) PutBucketCorsWithContext(ctx context.Context, putBucketCorsOptions *PutBucketCorsOptions) (response *core.DetailedResponse, err error) {
	// Reject a nil options struct, then enforce the struct's `validate` tags.
	err = core.ValidateNotNil(putBucketCorsOptions, "putBucketCorsOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(putBucketCorsOptions, "putBucketCorsOptions")
	if err != nil {
		return
	}
	// Path parameter substituted into the URL template below.
	pathParamsMap := map[string]string{
		"Bucket": *putBucketCorsOptions.Bucket,
	}
	builder := core.NewRequestBuilder(core.PUT)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}?cors`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range putBucketCorsOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "PutBucketCors")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Content-Type", "text/xml")
	if putBucketCorsOptions.ContentMD5 != nil {
		builder.AddHeader("Content-MD5", fmt.Sprint(*putBucketCorsOptions.ContentMD5))
	}
	builder.AddQuery("cors", fmt.Sprint(*putBucketCorsOptions.Cors))
	// The CORS configuration document is sent verbatim as the XML request body.
	_, err = builder.SetBodyContent("text/xml", nil, nil, putBucketCorsOptions.Body)
	if err != nil {
		return
	}
	request, err := builder.Build()
	if err != nil {
		return
	}
	// No response model to decode: only the DetailedResponse envelope is returned.
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, nil)
	return
}
// GetBucketCors : Read a CORS configuration
// Returns the CORS configuration information set for the bucket.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) GetBucketCors(getBucketCorsOptions *GetBucketCorsOptions) (result *GetBucketCorsOutput, response *core.DetailedResponse, err error) {
	// Convenience wrapper: delegates to the context-aware variant with a background context.
	return ibmCloudObjectStorageS3Api.GetBucketCorsWithContext(context.Background(), getBucketCorsOptions)
}
// GetBucketCorsWithContext is an alternate form of the GetBucketCors method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) GetBucketCorsWithContext(ctx context.Context, getBucketCorsOptions *GetBucketCorsOptions) (result *GetBucketCorsOutput, response *core.DetailedResponse, err error) {
	// Reject a nil options struct, then enforce the struct's `validate` tags.
	err = core.ValidateNotNil(getBucketCorsOptions, "getBucketCorsOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(getBucketCorsOptions, "getBucketCorsOptions")
	if err != nil {
		return
	}
	// Path parameter substituted into the URL template below.
	pathParamsMap := map[string]string{
		"Bucket": *getBucketCorsOptions.Bucket,
	}
	builder := core.NewRequestBuilder(core.GET)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}?cors`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range getBucketCorsOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "GetBucketCors")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddHeader("Accept", "text/xml")
	builder.AddQuery("cors", fmt.Sprint(*getBucketCorsOptions.Cors))
	request, err := builder.Build()
	if err != nil {
		return
	}
	// Decode the raw payload into the typed GetBucketCorsOutput model.
	var rawResponse map[string]json.RawMessage
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, &rawResponse)
	if err != nil {
		return
	}
	err = core.UnmarshalModel(rawResponse, "", &result, UnmarshalGetBucketCorsOutput)
	if err != nil {
		return
	}
	// Replace the raw payload on the response envelope with the typed model.
	response.Result = result
	return
}
// DeleteBucketCors : Delete a CORS configuration
// Deletes the CORS configuration information set for the bucket.
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) DeleteBucketCors(deleteBucketCorsOptions *DeleteBucketCorsOptions) (response *core.DetailedResponse, err error) {
	// Convenience wrapper: delegates to the context-aware variant with a background context.
	return ibmCloudObjectStorageS3Api.DeleteBucketCorsWithContext(context.Background(), deleteBucketCorsOptions)
}
// DeleteBucketCorsWithContext is an alternate form of the DeleteBucketCors method which supports a Context parameter
func (ibmCloudObjectStorageS3Api *IbmCloudObjectStorageS3ApiV2) DeleteBucketCorsWithContext(ctx context.Context, deleteBucketCorsOptions *DeleteBucketCorsOptions) (response *core.DetailedResponse, err error) {
	// Reject a nil options struct, then enforce the struct's `validate` tags.
	err = core.ValidateNotNil(deleteBucketCorsOptions, "deleteBucketCorsOptions cannot be nil")
	if err != nil {
		return
	}
	err = core.ValidateStruct(deleteBucketCorsOptions, "deleteBucketCorsOptions")
	if err != nil {
		return
	}
	// Path parameter substituted into the URL template below.
	pathParamsMap := map[string]string{
		"Bucket": *deleteBucketCorsOptions.Bucket,
	}
	builder := core.NewRequestBuilder(core.DELETE)
	builder = builder.WithContext(ctx)
	builder.EnableGzipCompression = ibmCloudObjectStorageS3Api.GetEnableGzipCompression()
	_, err = builder.ResolveRequestURL(ibmCloudObjectStorageS3Api.Service.Options.URL, `/{Bucket}?cors`, pathParamsMap)
	if err != nil {
		return
	}
	// Caller-supplied headers first, then SDK analytics headers.
	for headerName, headerValue := range deleteBucketCorsOptions.Headers {
		builder.AddHeader(headerName, headerValue)
	}
	sdkHeaders := common.GetSdkHeaders("ibm_cloud_object_storage_s3_api", "V2", "DeleteBucketCors")
	for headerName, headerValue := range sdkHeaders {
		builder.AddHeader(headerName, headerValue)
	}
	builder.AddQuery("cors", fmt.Sprint(*deleteBucketCorsOptions.Cors))
	request, err := builder.Build()
	if err != nil {
		return
	}
	// No response model to decode: only the DetailedResponse envelope is returned.
	response, err = ibmCloudObjectStorageS3Api.Service.Request(request, nil)
	return
}
// AbortMultipartUploadOptions : The AbortMultipartUpload options.
type AbortMultipartUploadOptions struct {
	// The destination bucket for the upload. Required and must be non-empty per the `ne=` validate tag.
	Bucket *string `validate:"required,ne="`
	// Key of the object for which the multipart upload was initiated. Required and must be non-empty.
	Key *string `validate:"required,ne="`
	// Upload ID that identifies the multipart upload. Required.
	UploadID *string `validate:"required"`
	// Allows users to set headers on API requests.
	Headers map[string]string
}
// NewAbortMultipartUploadOptions : Instantiate AbortMultipartUploadOptions
func (*IbmCloudObjectStorageS3ApiV2) NewAbortMultipartUploadOptions(bucket string, key string, uploadID string) *AbortMultipartUploadOptions {
	// Populate the required fields; optional Headers is left nil.
	opts := new(AbortMultipartUploadOptions)
	opts.Bucket = core.StringPtr(bucket)
	opts.Key = core.StringPtr(key)
	opts.UploadID = core.StringPtr(uploadID)
	return opts
}
// SetBucket : Allow user to set Bucket
func (options *AbortMultipartUploadOptions) SetBucket(bucket string) *AbortMultipartUploadOptions {
	// Store the address of a local copy so the field is independent of the caller's variable.
	bucketCopy := bucket
	options.Bucket = &bucketCopy
	return options
}
// SetKey : Allow user to set Key
func (options *AbortMultipartUploadOptions) SetKey(key string) *AbortMultipartUploadOptions {
	// Store the address of a local copy so the field is independent of the caller's variable.
	keyCopy := key
	options.Key = &keyCopy
	return options
}
// SetUploadID : Allow user to set UploadID
func (options *AbortMultipartUploadOptions) SetUploadID(uploadID string) *AbortMultipartUploadOptions {
	// Store the address of a local copy so the field is independent of the caller's variable.
	uploadIDCopy := uploadID
	options.UploadID = &uploadIDCopy
	return options
}
// SetHeaders : Allow user to set Headers
func (options *AbortMultipartUploadOptions) SetHeaders(param map[string]string) *AbortMultipartUploadOptions {
	// The map is stored by reference: later mutations by the caller are visible to the request.
	options.Headers = param
	return options
}
// BucketListing : This operation returns a list of all buckets within a service instance.
type BucketListing struct {
	// Owner of the listed buckets; omitted from JSON when nil.
	Owner *BucketListingOwner `json:"owner,omitempty"`
	// Buckets contained in the service instance; omitted from JSON when nil.
	Buckets []BucketListingBucketsItem `json:"buckets,omitempty"`
}
// UnmarshalBucketListing unmarshals an instance of BucketListing from the specified map of raw messages.
func UnmarshalBucketListing(m map[string]json.RawMessage, result interface{}) (err error) {
	listing := &BucketListing{}
	if err = core.UnmarshalModel(m, "owner", &listing.Owner, UnmarshalBucketListingOwner); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "buckets", &listing.Buckets, UnmarshalBucketListingBucketsItem); err != nil {
		return
	}
	// Copy the populated model into the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(listing))
	return
}
// BucketListingBucketsItem : A single bucket entry in a BucketListing.
type BucketListingBucketsItem struct {
	// Bucket name.
	Name *string `json:"name,omitempty"`
	// Timestamp of bucket creation.
	CreationDate *string `json:"creationDate,omitempty"`
}
// UnmarshalBucketListingBucketsItem unmarshals an instance of BucketListingBucketsItem from the specified map of raw messages.
func UnmarshalBucketListingBucketsItem(m map[string]json.RawMessage, result interface{}) (err error) {
	item := &BucketListingBucketsItem{}
	if err = core.UnmarshalPrimitive(m, "name", &item.Name); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "creationDate", &item.CreationDate); err != nil {
		return
	}
	// Copy the populated model into the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(item))
	return
}
// BucketListingOwner : Owner information for a BucketListing.
type BucketListingOwner struct {
	// Service instance ID.
	ID *string `json:"id,omitempty"`
	// Display name of the owner. (Original comment duplicated "Service instance ID."; name taken from the json tag — confirm against the service response.)
	DisplayName *string `json:"displayName,omitempty"`
}
// UnmarshalBucketListingOwner unmarshals an instance of BucketListingOwner from the specified map of raw messages.
func UnmarshalBucketListingOwner(m map[string]json.RawMessage, result interface{}) (err error) {
	owner := &BucketListingOwner{}
	if err = core.UnmarshalPrimitive(m, "id", &owner.ID); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "displayName", &owner.DisplayName); err != nil {
		return
	}
	// Copy the populated model into the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(owner))
	return
}
// CORSRule : Specifies a cross-origin access rule for an IBM COS bucket.
type CORSRule struct {
	// Headers that are specified in the `Access-Control-Request-Headers` header. These headers are allowed in a preflight
	// OPTIONS request. In response to any preflight OPTIONS request, IBM COS returns any requested headers that are
	// allowed. Optional.
	AllowedHeaders *CORSRuleAllowedHeaders `json:"AllowedHeaders,omitempty"`
	// An HTTP method that you allow the origin to execute. Valid values are `GET`, `PUT`, `HEAD`, `POST`, and `DELETE`.
	// Required per the validate tag.
	AllowedMethods *CORSRuleAllowedMethods `json:"AllowedMethods" validate:"required"`
	// One or more origins you want customers to be able to access the bucket from. Required per the validate tag.
	AllowedOrigins *CORSRuleAllowedOrigins `json:"AllowedOrigins" validate:"required"`
	// One or more headers in the response that you want customers to be able to access from their applications (for
	// example, from a JavaScript `XMLHttpRequest` object). Optional.
	ExposeHeaders *CORSRuleExposeHeaders `json:"ExposeHeaders,omitempty"`
	// The time in seconds that your browser is to cache the preflight response for the specified resource. Optional.
	MaxAgeSeconds *int64 `json:"MaxAgeSeconds,omitempty"`
}
// UnmarshalCORSRule unmarshals an instance of CORSRule from the specified map of raw messages.
func UnmarshalCORSRule(m map[string]json.RawMessage, result interface{}) (err error) {
	rule := &CORSRule{}
	if err = core.UnmarshalModel(m, "AllowedHeaders", &rule.AllowedHeaders, UnmarshalCORSRuleAllowedHeaders); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "AllowedMethods", &rule.AllowedMethods, UnmarshalCORSRuleAllowedMethods); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "AllowedOrigins", &rule.AllowedOrigins, UnmarshalCORSRuleAllowedOrigins); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "ExposeHeaders", &rule.ExposeHeaders, UnmarshalCORSRuleExposeHeaders); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "MaxAgeSeconds", &rule.MaxAgeSeconds); err != nil {
		return
	}
	// Copy the populated model into the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(rule))
	return
}
// CommonPrefix : Container for all (if there are any) keys between Prefix and the next occurrence of the string specified by a
// delimiter. CommonPrefixes lists keys that act like subdirectories in the directory specified by Prefix. For example,
// if the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, the common prefix is notes/summer/.
type CommonPrefix struct {
	// Container for the specified common prefix. Omitted from JSON when nil.
	Prefix *string `json:"Prefix,omitempty"`
}
// UnmarshalCommonPrefix unmarshals an instance of CommonPrefix from the specified map of raw messages.
func UnmarshalCommonPrefix(m map[string]json.RawMessage, result interface{}) (err error) {
	prefix := &CommonPrefix{}
	if err = core.UnmarshalPrimitive(m, "Prefix", &prefix.Prefix); err != nil {
		return
	}
	// Copy the populated model into the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(prefix))
	return
}
// CompleteMultipartUploadOptions : The CompleteMultipartUpload options.
type CompleteMultipartUploadOptions struct {
	// Name of the bucket to which the multipart upload was initiated. Required and must be non-empty per the `ne=` validate tag.
	Bucket *string `validate:"required,ne="`
	// Object key for which the multipart upload was initiated. Required and must be non-empty.
	Key *string `validate:"required,ne="`
	// ID for the initiated multipart upload. Required.
	UploadID *string `validate:"required"`
	// Request body for the operation. Required. (NOTE(review): presumably the XML CompleteMultipartUpload part list — confirm against the service API.)
	Body *string `validate:"required"`
	// Allows users to set headers on API requests.
	Headers map[string]string
}
// NewCompleteMultipartUploadOptions : Instantiate CompleteMultipartUploadOptions
func (*IbmCloudObjectStorageS3ApiV2) NewCompleteMultipartUploadOptions(bucket string, key string, uploadID string, body string) *CompleteMultipartUploadOptions {
	// Populate the required fields; optional Headers is left nil.
	opts := new(CompleteMultipartUploadOptions)
	opts.Bucket = core.StringPtr(bucket)
	opts.Key = core.StringPtr(key)
	opts.UploadID = core.StringPtr(uploadID)
	opts.Body = core.StringPtr(body)
	return opts
}
// SetBucket : Allow user to set Bucket
func (options *CompleteMultipartUploadOptions) SetBucket(bucket string) *CompleteMultipartUploadOptions {
	// Store the address of a local copy so the field is independent of the caller's variable.
	bucketCopy := bucket
	options.Bucket = &bucketCopy
	return options
}
// SetKey : Allow user to set Key
func (options *CompleteMultipartUploadOptions) SetKey(key string) *CompleteMultipartUploadOptions {
	// Store the address of a local copy so the field is independent of the caller's variable.
	keyCopy := key
	options.Key = &keyCopy
	return options
}
// SetUploadID : Allow user to set UploadID
func (options *CompleteMultipartUploadOptions) SetUploadID(uploadID string) *CompleteMultipartUploadOptions {
	// Store the address of a local copy so the field is independent of the caller's variable.
	uploadIDCopy := uploadID
	options.UploadID = &uploadIDCopy
	return options
}
// SetBody : Allow user to set Body
func (options *CompleteMultipartUploadOptions) SetBody(body string) *CompleteMultipartUploadOptions {
	// Store the address of a local copy so the field is independent of the caller's variable.
	bodyCopy := body
	options.Body = &bodyCopy
	return options
}
// SetHeaders : Allow user to set Headers
func (options *CompleteMultipartUploadOptions) SetHeaders(param map[string]string) *CompleteMultipartUploadOptions {
	// The map is stored by reference: later mutations by the caller are visible to the request.
	options.Headers = param
	return options
}
// CompleteMultipartUploadOutput : Response model for the CompleteMultipartUpload operation.
type CompleteMultipartUploadOutput struct {
	// The URI that identifies the newly created object.
	Location *string `json:"Location,omitempty"`
	// The name of the bucket that contains the newly created object.
	Bucket *string `json:"Bucket,omitempty"`
	// The object key of the newly created object.
	Key *string `json:"Key,omitempty"`
	// Entity tag that identifies the newly created object data. Objects with different object data will have different
	// entity tags. The entity tag is an opaque string. The entity tag is an MD5 digest of the object data, unless it was
	// uploaded using SSE-C (and is randomly generated).
	ETag *string `json:"ETag,omitempty"`
}
// UnmarshalCompleteMultipartUploadOutput unmarshals an instance of CompleteMultipartUploadOutput from the specified map of raw messages.
func UnmarshalCompleteMultipartUploadOutput(m map[string]json.RawMessage, result interface{}) (err error) {
	output := &CompleteMultipartUploadOutput{}
	if err = core.UnmarshalPrimitive(m, "Location", &output.Location); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Bucket", &output.Bucket); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Key", &output.Key); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "ETag", &output.ETag); err != nil {
		return
	}
	// Copy the populated model into the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(output))
	return
}
// Condition : A container for describing a condition that must be met for the specified redirect to apply. For example, 1. If
// request is for pages in the `/docs` folder, redirect to the `/documents` folder. 2. If request results in HTTP error
// 4xx, redirect request to another host where you might process the error.
type Condition struct {
	// The HTTP error code when the redirect is applied. In the event of an error, if the error code equals this value,
	// then the specified redirect is applied. Required when parent element `Condition` is specified and sibling
	// `KeyPrefixEquals` is not specified. If both are specified, then both must be true for the redirect to be applied.
	HttpErrorCodeReturnedEquals *string `json:"HttpErrorCodeReturnedEquals,omitempty"`
	// The object key name prefix when the redirect is applied. For example, to redirect requests for `ExamplePage.html`,
	// the key prefix will be `ExamplePage.html`. To redirect request for all pages with the prefix `docs/`, the key prefix
	// will be `/docs`, which identifies all objects in the `docs/` folder. Required when the parent element `Condition` is
	// specified and sibling `HttpErrorCodeReturnedEquals` is not specified. If both conditions are specified, both must be
	// true for the redirect to be applied.
	KeyPrefixEquals *string `json:"KeyPrefixEquals,omitempty"`
}
// UnmarshalCondition unmarshals an instance of Condition from the specified map of raw messages.
func UnmarshalCondition(m map[string]json.RawMessage, result interface{}) (err error) {
	condition := &Condition{}
	if err = core.UnmarshalPrimitive(m, "HttpErrorCodeReturnedEquals", &condition.HttpErrorCodeReturnedEquals); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "KeyPrefixEquals", &condition.KeyPrefixEquals); err != nil {
		return
	}
	// Copy the populated model into the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(condition))
	return
}
// CopyObjectOptions : The CopyObject options.
type CopyObjectOptions struct {
	// The name of the destination bucket. Required and must be non-empty per the `ne=` validate tag.
	Bucket *string `validate:"required,ne="`
	// Specifies the source object for the copy operation. Required.
	XAmzCopySource *string `validate:"required"`
	// The key of the destination object. Required and must be non-empty.
	TargetKey *string `validate:"required,ne="`
	// Request body for the operation. Required. (NOTE(review): for a server-side copy the body is typically empty — confirm expected contents against the service API.)
	Body *string `validate:"required"`
	// The canned ACL to apply to the object.
	XAmzAcl *string
	// Specifies caching behavior along the request/reply chain.
	CacheControl *string
	// Specifies presentational information for the object.
	ContentDisposition *string
	// Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied
	// to obtain the media-type referenced by the Content-Type header field. For more information, see [RFC
	// 2616](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11).
	ContentEncoding *string
	// The language the content is in.
	ContentLanguage *string
	// Copies the object if its entity tag (ETag) matches the specified tag.
	XAmzCopySourceIfMatch *string
	// Copies the object if it has been modified since the specified time.
	XAmzCopySourceIfModifiedSince *strfmt.DateTime
	// Copies the object if its entity tag (ETag) is different than the specified ETag.
	XAmzCopySourceIfNoneMatch *string
	// Copies the object if it hasn't been modified since the specified time.
	XAmzCopySourceIfUnmodifiedSince *strfmt.DateTime
	// The date and time at which the object is no longer cacheable. For more information, [RFC
	// 2616](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21).
	Expires *strfmt.DateTime
	// Specifies whether the metadata is copied from the source object or replaced with metadata provided in the request.
	XAmzMetadataDirective *string
	// Specifies whether the object tag-set are copied from the source object or replaced with tag-set provided in the
	// request.
	XAmzTaggingDirective *string
	// The server-side encryption algorithm used when storing this object.
	XAmzServerSideEncryption *string
	// If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or
	// to an external URL.
	XAmzWebsiteRedirectLocation *string
	// Specifies the algorithm to use to when encrypting the object (for example, AES256).
	XAmzServerSideEncryptionCustomerAlgorithm *string
	// Specifies the customer-provided encryption key for use in encrypting data.
	XAmzServerSideEncryptionCustomerKey *string
	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Used as a message integrity check to
	// ensure that the encryption key was transmitted without error.
	XAmzServerSideEncryptionCustomerKeyMD5 *string
	// Specifies the algorithm to use when decrypting the source object (for example, AES256).
	XAmzCopySourceServerSideEncryptionCustomerAlgorithm *string
	// Specifies the customer-provided encryption key for IBM COS to use to decrypt the source object. The encryption key
	// provided in this header must be one that was used when the source object was created.
	XAmzCopySourceServerSideEncryptionCustomerKey *string
	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Used as a message integrity check to
	// ensure that the encryption key was transmitted without error.
	XAmzCopySourceServerSideEncryptionCustomerKeyMD5 *string
	// The tag-set for the object destination object this value must be used in conjunction with the `TaggingDirective`.
	// The tag-set must be encoded as URL Query parameters.
	XAmzTagging *string
	// Allows users to set headers on API requests.
	Headers map[string]string
}
// Constants associated with the CopyObjectOptions.XAmzAcl property.
// The canned ACL to apply to the object.
const (
CopyObjectOptions_XAmzAcl_Private = "private"
CopyObjectOptions_XAmzAcl_PublicRead = "public-read"
)
// Constants associated with the CopyObjectOptions.XAmzMetadataDirective property.
// Specifies whether the metadata is copied from the source object or replaced with metadata provided in the request.
const (
CopyObjectOptions_XAmzMetadataDirective_Copy = "COPY"
CopyObjectOptions_XAmzMetadataDirective_Replace = "REPLACE"
)
// Constants associated with the CopyObjectOptions.XAmzTaggingDirective property.
// Specifies whether the object tag-set are copied from the source object or replaced with tag-set provided in the
// request.
const (
CopyObjectOptions_XAmzTaggingDirective_Copy = "COPY"
CopyObjectOptions_XAmzTaggingDirective_Replace = "REPLACE"
)
// Constants associated with the CopyObjectOptions.XAmzServerSideEncryption property.
// The server-side encryption algorithm used when storing this object.
const (
CopyObjectOptions_XAmzServerSideEncryption_Aes256 = "AES256"
)
// NewCopyObjectOptions constructs a CopyObjectOptions with every required property populated.
func (*IbmCloudObjectStorageS3ApiV2) NewCopyObjectOptions(bucket string, xAmzCopySource string, targetKey string, body string) *CopyObjectOptions {
	o := new(CopyObjectOptions)
	o.Bucket = core.StringPtr(bucket)
	o.XAmzCopySource = core.StringPtr(xAmzCopySource)
	o.TargetKey = core.StringPtr(targetKey)
	o.Body = core.StringPtr(body)
	return o
}
// SetBucket sets the destination bucket name; returns the receiver for chaining.
func (o *CopyObjectOptions) SetBucket(bucket string) *CopyObjectOptions {
	o.Bucket = core.StringPtr(bucket)
	return o
}
// SetXAmzCopySource sets the copy source; returns the receiver for chaining.
func (o *CopyObjectOptions) SetXAmzCopySource(xAmzCopySource string) *CopyObjectOptions {
	o.XAmzCopySource = core.StringPtr(xAmzCopySource)
	return o
}
// SetTargetKey sets the destination object key; returns the receiver for chaining.
func (o *CopyObjectOptions) SetTargetKey(targetKey string) *CopyObjectOptions {
	o.TargetKey = core.StringPtr(targetKey)
	return o
}
// SetBody sets the request body; returns the receiver for chaining.
func (o *CopyObjectOptions) SetBody(body string) *CopyObjectOptions {
	o.Body = core.StringPtr(body)
	return o
}
// SetXAmzAcl sets the canned ACL; returns the receiver for chaining.
func (o *CopyObjectOptions) SetXAmzAcl(xAmzAcl string) *CopyObjectOptions {
	o.XAmzAcl = core.StringPtr(xAmzAcl)
	return o
}
// SetCacheControl sets the Cache-Control directive; returns the receiver for chaining.
func (o *CopyObjectOptions) SetCacheControl(cacheControl string) *CopyObjectOptions {
	o.CacheControl = core.StringPtr(cacheControl)
	return o
}
// SetContentDisposition sets the Content-Disposition value; returns the receiver for chaining.
func (o *CopyObjectOptions) SetContentDisposition(contentDisposition string) *CopyObjectOptions {
	o.ContentDisposition = core.StringPtr(contentDisposition)
	return o
}
// SetContentEncoding sets the Content-Encoding value; returns the receiver for chaining.
func (o *CopyObjectOptions) SetContentEncoding(contentEncoding string) *CopyObjectOptions {
	o.ContentEncoding = core.StringPtr(contentEncoding)
	return o
}
// SetContentLanguage sets the Content-Language value; returns the receiver for chaining.
func (o *CopyObjectOptions) SetContentLanguage(contentLanguage string) *CopyObjectOptions {
	o.ContentLanguage = core.StringPtr(contentLanguage)
	return o
}
// SetXAmzCopySourceIfMatch sets the source ETag match condition; returns the receiver for chaining.
func (o *CopyObjectOptions) SetXAmzCopySourceIfMatch(xAmzCopySourceIfMatch string) *CopyObjectOptions {
	o.XAmzCopySourceIfMatch = core.StringPtr(xAmzCopySourceIfMatch)
	return o
}
// SetXAmzCopySourceIfModifiedSince sets the modified-since condition; returns the receiver for chaining.
func (o *CopyObjectOptions) SetXAmzCopySourceIfModifiedSince(xAmzCopySourceIfModifiedSince *strfmt.DateTime) *CopyObjectOptions {
	o.XAmzCopySourceIfModifiedSince = xAmzCopySourceIfModifiedSince
	return o
}
// SetXAmzCopySourceIfNoneMatch sets the source ETag none-match condition; returns the receiver for chaining.
func (o *CopyObjectOptions) SetXAmzCopySourceIfNoneMatch(xAmzCopySourceIfNoneMatch string) *CopyObjectOptions {
	o.XAmzCopySourceIfNoneMatch = core.StringPtr(xAmzCopySourceIfNoneMatch)
	return o
}
// SetXAmzCopySourceIfUnmodifiedSince sets the unmodified-since condition; returns the receiver for chaining.
func (o *CopyObjectOptions) SetXAmzCopySourceIfUnmodifiedSince(xAmzCopySourceIfUnmodifiedSince *strfmt.DateTime) *CopyObjectOptions {
	o.XAmzCopySourceIfUnmodifiedSince = xAmzCopySourceIfUnmodifiedSince
	return o
}
// SetExpires sets the cache expiration time; returns the receiver for chaining.
func (o *CopyObjectOptions) SetExpires(expires *strfmt.DateTime) *CopyObjectOptions {
	o.Expires = expires
	return o
}
// SetXAmzMetadataDirective sets the metadata directive; returns the receiver for chaining.
func (o *CopyObjectOptions) SetXAmzMetadataDirective(xAmzMetadataDirective string) *CopyObjectOptions {
	o.XAmzMetadataDirective = core.StringPtr(xAmzMetadataDirective)
	return o
}
// SetXAmzTaggingDirective sets the tagging directive; returns the receiver for chaining.
func (o *CopyObjectOptions) SetXAmzTaggingDirective(xAmzTaggingDirective string) *CopyObjectOptions {
	o.XAmzTaggingDirective = core.StringPtr(xAmzTaggingDirective)
	return o
}
// SetXAmzServerSideEncryption sets the server-side encryption algorithm; returns the receiver for chaining.
func (o *CopyObjectOptions) SetXAmzServerSideEncryption(xAmzServerSideEncryption string) *CopyObjectOptions {
	o.XAmzServerSideEncryption = core.StringPtr(xAmzServerSideEncryption)
	return o
}
// SetXAmzWebsiteRedirectLocation sets the website redirect location; returns the receiver for chaining.
func (o *CopyObjectOptions) SetXAmzWebsiteRedirectLocation(xAmzWebsiteRedirectLocation string) *CopyObjectOptions {
	o.XAmzWebsiteRedirectLocation = core.StringPtr(xAmzWebsiteRedirectLocation)
	return o
}
// SetXAmzServerSideEncryptionCustomerAlgorithm sets the customer encryption algorithm; returns the receiver for chaining.
func (o *CopyObjectOptions) SetXAmzServerSideEncryptionCustomerAlgorithm(xAmzServerSideEncryptionCustomerAlgorithm string) *CopyObjectOptions {
	o.XAmzServerSideEncryptionCustomerAlgorithm = core.StringPtr(xAmzServerSideEncryptionCustomerAlgorithm)
	return o
}
// SetXAmzServerSideEncryptionCustomerKey sets the customer encryption key; returns the receiver for chaining.
func (o *CopyObjectOptions) SetXAmzServerSideEncryptionCustomerKey(xAmzServerSideEncryptionCustomerKey string) *CopyObjectOptions {
	o.XAmzServerSideEncryptionCustomerKey = core.StringPtr(xAmzServerSideEncryptionCustomerKey)
	return o
}
// SetXAmzServerSideEncryptionCustomerKeyMD5 sets the MD5 digest of the customer key; returns the receiver for chaining.
func (o *CopyObjectOptions) SetXAmzServerSideEncryptionCustomerKeyMD5(xAmzServerSideEncryptionCustomerKeyMD5 string) *CopyObjectOptions {
	o.XAmzServerSideEncryptionCustomerKeyMD5 = core.StringPtr(xAmzServerSideEncryptionCustomerKeyMD5)
	return o
}
// SetXAmzCopySourceServerSideEncryptionCustomerAlgorithm sets the source decryption algorithm; returns the receiver for chaining.
func (o *CopyObjectOptions) SetXAmzCopySourceServerSideEncryptionCustomerAlgorithm(xAmzCopySourceServerSideEncryptionCustomerAlgorithm string) *CopyObjectOptions {
	o.XAmzCopySourceServerSideEncryptionCustomerAlgorithm = core.StringPtr(xAmzCopySourceServerSideEncryptionCustomerAlgorithm)
	return o
}
// SetXAmzCopySourceServerSideEncryptionCustomerKey sets the source decryption key; returns the receiver for chaining.
func (o *CopyObjectOptions) SetXAmzCopySourceServerSideEncryptionCustomerKey(xAmzCopySourceServerSideEncryptionCustomerKey string) *CopyObjectOptions {
	o.XAmzCopySourceServerSideEncryptionCustomerKey = core.StringPtr(xAmzCopySourceServerSideEncryptionCustomerKey)
	return o
}
// SetXAmzCopySourceServerSideEncryptionCustomerKeyMD5 sets the MD5 digest of the source decryption key; returns the receiver for chaining.
func (o *CopyObjectOptions) SetXAmzCopySourceServerSideEncryptionCustomerKeyMD5(xAmzCopySourceServerSideEncryptionCustomerKeyMD5 string) *CopyObjectOptions {
	o.XAmzCopySourceServerSideEncryptionCustomerKeyMD5 = core.StringPtr(xAmzCopySourceServerSideEncryptionCustomerKeyMD5)
	return o
}
// SetXAmzTagging sets the destination tag-set; returns the receiver for chaining.
func (o *CopyObjectOptions) SetXAmzTagging(xAmzTagging string) *CopyObjectOptions {
	o.XAmzTagging = core.StringPtr(xAmzTagging)
	return o
}
// SetHeaders replaces the custom request headers map; returns the receiver for chaining.
func (o *CopyObjectOptions) SetHeaders(param map[string]string) *CopyObjectOptions {
	o.Headers = param
	return o
}
// CopyObjectOutput : CopyObjectOutput struct
type CopyObjectOutput struct {
	// Container for all response elements.
	CopyObjectResult *CopyObjectResult `json:"CopyObjectResult,omitempty"`
}
// UnmarshalCopyObjectOutput unmarshals an instance of CopyObjectOutput from the specified map of raw messages.
func UnmarshalCopyObjectOutput(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(CopyObjectOutput)
	if err = core.UnmarshalModel(m, "CopyObjectResult", &model.CopyObjectResult, UnmarshalCopyObjectResult); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// CopyObjectResult : Container for all response elements.
type CopyObjectResult struct {
	// Returns the ETag of the new object. The ETag reflects only changes to the contents of an object, not its metadata.
	// The source and destination ETag is identical for a successfully copied object.
	ETag *string `json:"ETag,omitempty"`
	// Returns the date that the object was last modified.
	LastModified *strfmt.DateTime `json:"LastModified,omitempty"`
}
// UnmarshalCopyObjectResult unmarshals an instance of CopyObjectResult from the specified map of raw messages.
func UnmarshalCopyObjectResult(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(CopyObjectResult)
	if err = core.UnmarshalPrimitive(m, "ETag", &model.ETag); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "LastModified", &model.LastModified); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// CopyPartResult : Container for all response elements.
type CopyPartResult struct {
	// Entity tag of the object.
	ETag *string `json:"ETag,omitempty"`
	// Date and time at which the object was uploaded.
	LastModified *strfmt.DateTime `json:"LastModified,omitempty"`
}
// UnmarshalCopyPartResult unmarshals an instance of CopyPartResult from the specified map of raw messages.
func UnmarshalCopyPartResult(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(CopyPartResult)
	if err = core.UnmarshalPrimitive(m, "ETag", &model.ETag); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "LastModified", &model.LastModified); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// CreateBucketOptions : The CreateBucket options.
// Fields tagged `validate:"required"` must be set before the request is sent;
// `ne=` additionally forbids the empty string.
type CreateBucketOptions struct {
	// The name of the bucket to create.
	Bucket *string `validate:"required,ne="`
	// This header references the service instance where the bucket will be created and to which data usage will be billed.
	// This value can be either the full Cloud Resource Name (CRN) or just the GUID segment that identifies the service
	// instance.
	IbmServiceInstanceID *string `validate:"required"`
	// Optional request body. NOTE(review): presumably a CreateBucketConfiguration XML payload — confirm against the service API.
	Body *string
	// The algorithm and key size used for the managed encryption root key. Required if
	// `ibm-sse-kp-customer-root-key-crn` is also present.
	IbmSseKpEncryptionAlgorithm *string
	// The CRN of the root key used to encrypt the bucket. Required if `ibm-sse-kp-encryption-algorithm` is also present.
	IbmSseKpCustomerRootKeyCrn *string
	// The canned ACL to apply to the bucket. This header should not be used - instead create an IAM policy to grant public
	// access to a bucket.
	XAmzAcl *string
	// Allows users to set headers on API requests
	Headers map[string]string
}
// Constants associated with the CreateBucketOptions.IbmSseKpEncryptionAlgorithm property.
// The algorithm and key size used for the managed encryption root key. Required if
// `ibm-sse-kp-customer-root-key-crn` is also present.
const (
	CreateBucketOptions_IbmSseKpEncryptionAlgorithm_Aes256 = "AES256"
)
// Constants associated with the CreateBucketOptions.XAmzAcl property.
// The canned ACL to apply to the bucket. This header should not be used - instead create an IAM policy to grant public
// access to a bucket.
const (
	CreateBucketOptions_XAmzAcl_Private = "private"
	CreateBucketOptions_XAmzAcl_PublicRead = "public-read"
)
// NewCreateBucketOptions constructs a CreateBucketOptions with every required property populated.
func (*IbmCloudObjectStorageS3ApiV2) NewCreateBucketOptions(bucket string, ibmServiceInstanceID string) *CreateBucketOptions {
	o := new(CreateBucketOptions)
	o.Bucket = core.StringPtr(bucket)
	o.IbmServiceInstanceID = core.StringPtr(ibmServiceInstanceID)
	return o
}
// SetBucket sets the bucket name; returns the receiver for chaining.
func (o *CreateBucketOptions) SetBucket(bucket string) *CreateBucketOptions {
	o.Bucket = core.StringPtr(bucket)
	return o
}
// SetIbmServiceInstanceID sets the billing service instance; returns the receiver for chaining.
func (o *CreateBucketOptions) SetIbmServiceInstanceID(ibmServiceInstanceID string) *CreateBucketOptions {
	o.IbmServiceInstanceID = core.StringPtr(ibmServiceInstanceID)
	return o
}
// SetBody sets the request body; returns the receiver for chaining.
func (o *CreateBucketOptions) SetBody(body string) *CreateBucketOptions {
	o.Body = core.StringPtr(body)
	return o
}
// SetIbmSseKpEncryptionAlgorithm sets the Key Protect encryption algorithm; returns the receiver for chaining.
func (o *CreateBucketOptions) SetIbmSseKpEncryptionAlgorithm(ibmSseKpEncryptionAlgorithm string) *CreateBucketOptions {
	o.IbmSseKpEncryptionAlgorithm = core.StringPtr(ibmSseKpEncryptionAlgorithm)
	return o
}
// SetIbmSseKpCustomerRootKeyCrn sets the root key CRN; returns the receiver for chaining.
func (o *CreateBucketOptions) SetIbmSseKpCustomerRootKeyCrn(ibmSseKpCustomerRootKeyCrn string) *CreateBucketOptions {
	o.IbmSseKpCustomerRootKeyCrn = core.StringPtr(ibmSseKpCustomerRootKeyCrn)
	return o
}
// SetXAmzAcl sets the canned ACL; returns the receiver for chaining.
func (o *CreateBucketOptions) SetXAmzAcl(xAmzAcl string) *CreateBucketOptions {
	o.XAmzAcl = core.StringPtr(xAmzAcl)
	return o
}
// SetHeaders replaces the custom request headers map; returns the receiver for chaining.
func (o *CreateBucketOptions) SetHeaders(param map[string]string) *CreateBucketOptions {
	o.Headers = param
	return o
}
// CreateMultipartUploadOutput : CreateMultipartUploadOutput struct
type CreateMultipartUploadOutput struct {
	// <p>The name of the bucket to which the multipart upload was initiated. </p> <p>When using this API with an access
	// point, you must direct requests to the access point hostname. The access point hostname takes the form
	// <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com. When using this operation with
	// an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more
	// information about access point ARNs, see <a
	// href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html">Using Access Points</a> in the
	// <i>Amazon Simple Storage Service Developer Guide</i>.</p> <p>When using this API with IBM COS on Outposts, you must
	// direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
	// <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com. When using this
	// operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket
	// name. For more information about S3 on Outposts ARNs, see <a
	// href="https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html">Using S3 on Outposts</a> in the <i>Amazon
	// Simple Storage Service Developer Guide</i>.</p>.
	Bucket *string `json:"Bucket,omitempty"`
	// Object key for which the multipart upload was initiated.
	Key *string `json:"Key,omitempty"`
	// ID for the initiated multipart upload.
	UploadID *string `json:"UploadId,omitempty"`
}
// UnmarshalCreateMultipartUploadOutput unmarshals an instance of CreateMultipartUploadOutput from the specified map of raw messages.
func UnmarshalCreateMultipartUploadOutput(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(CreateMultipartUploadOutput)
	if err = core.UnmarshalPrimitive(m, "Bucket", &model.Bucket); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Key", &model.Key); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "UploadId", &model.UploadID); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// DeleteBucketCorsOptions : The DeleteBucketCors options.
type DeleteBucketCorsOptions struct {
	// Specifies the bucket whose CORS configuration is being deleted.
	Bucket *string `validate:"required,ne="`
	Cors *bool `validate:"required"`
	// Allows users to set headers on API requests
	Headers map[string]string
}
// NewDeleteBucketCorsOptions constructs a DeleteBucketCorsOptions with every required property populated.
func (*IbmCloudObjectStorageS3ApiV2) NewDeleteBucketCorsOptions(bucket string, cors bool) *DeleteBucketCorsOptions {
	o := new(DeleteBucketCorsOptions)
	o.Bucket = core.StringPtr(bucket)
	o.Cors = core.BoolPtr(cors)
	return o
}
// SetBucket sets the bucket name; returns the receiver for chaining.
func (o *DeleteBucketCorsOptions) SetBucket(bucket string) *DeleteBucketCorsOptions {
	o.Bucket = core.StringPtr(bucket)
	return o
}
// SetCors sets the required Cors flag; returns the receiver for chaining.
func (o *DeleteBucketCorsOptions) SetCors(cors bool) *DeleteBucketCorsOptions {
	o.Cors = core.BoolPtr(cors)
	return o
}
// SetHeaders replaces the custom request headers map; returns the receiver for chaining.
func (o *DeleteBucketCorsOptions) SetHeaders(param map[string]string) *DeleteBucketCorsOptions {
	o.Headers = param
	return o
}
// DeleteBucketLifecycleOptions : The DeleteBucketLifecycle options.
type DeleteBucketLifecycleOptions struct {
	// The bucket name of the lifecycle to delete.
	Bucket *string `validate:"required,ne="`
	Lifecycle *bool `validate:"required"`
	// Allows users to set headers on API requests
	Headers map[string]string
}
// NewDeleteBucketLifecycleOptions constructs a DeleteBucketLifecycleOptions with every required property populated.
func (*IbmCloudObjectStorageS3ApiV2) NewDeleteBucketLifecycleOptions(bucket string, lifecycle bool) *DeleteBucketLifecycleOptions {
	o := new(DeleteBucketLifecycleOptions)
	o.Bucket = core.StringPtr(bucket)
	o.Lifecycle = core.BoolPtr(lifecycle)
	return o
}
// SetBucket sets the bucket name; returns the receiver for chaining.
func (o *DeleteBucketLifecycleOptions) SetBucket(bucket string) *DeleteBucketLifecycleOptions {
	o.Bucket = core.StringPtr(bucket)
	return o
}
// SetLifecycle sets the required Lifecycle flag; returns the receiver for chaining.
func (o *DeleteBucketLifecycleOptions) SetLifecycle(lifecycle bool) *DeleteBucketLifecycleOptions {
	o.Lifecycle = core.BoolPtr(lifecycle)
	return o
}
// SetHeaders replaces the custom request headers map; returns the receiver for chaining.
func (o *DeleteBucketLifecycleOptions) SetHeaders(param map[string]string) *DeleteBucketLifecycleOptions {
	o.Headers = param
	return o
}
// DeleteBucketOptions : The DeleteBucket options.
type DeleteBucketOptions struct {
	// The name of the bucket to delete.
	Bucket *string `validate:"required,ne="`
	// Allows users to set headers on API requests
	Headers map[string]string
}
// NewDeleteBucketOptions constructs a DeleteBucketOptions with every required property populated.
func (*IbmCloudObjectStorageS3ApiV2) NewDeleteBucketOptions(bucket string) *DeleteBucketOptions {
	o := new(DeleteBucketOptions)
	o.Bucket = core.StringPtr(bucket)
	return o
}
// SetBucket sets the bucket name; returns the receiver for chaining.
func (o *DeleteBucketOptions) SetBucket(bucket string) *DeleteBucketOptions {
	o.Bucket = core.StringPtr(bucket)
	return o
}
// SetHeaders replaces the custom request headers map; returns the receiver for chaining.
func (o *DeleteBucketOptions) SetHeaders(param map[string]string) *DeleteBucketOptions {
	o.Headers = param
	return o
}
// DeleteBucketWebsiteOptions : The DeleteBucketWebsite options.
type DeleteBucketWebsiteOptions struct {
	// The bucket for which you want to remove the website configuration.
	Bucket *string `validate:"required,ne="`
	Website *bool `validate:"required"`
	// Allows users to set headers on API requests
	Headers map[string]string
}
// NewDeleteBucketWebsiteOptions constructs a DeleteBucketWebsiteOptions with every required property populated.
func (*IbmCloudObjectStorageS3ApiV2) NewDeleteBucketWebsiteOptions(bucket string, website bool) *DeleteBucketWebsiteOptions {
	o := new(DeleteBucketWebsiteOptions)
	o.Bucket = core.StringPtr(bucket)
	o.Website = core.BoolPtr(website)
	return o
}
// SetBucket sets the bucket name; returns the receiver for chaining.
func (o *DeleteBucketWebsiteOptions) SetBucket(bucket string) *DeleteBucketWebsiteOptions {
	o.Bucket = core.StringPtr(bucket)
	return o
}
// SetWebsite sets the required Website flag; returns the receiver for chaining.
func (o *DeleteBucketWebsiteOptions) SetWebsite(website bool) *DeleteBucketWebsiteOptions {
	o.Website = core.BoolPtr(website)
	return o
}
// SetHeaders replaces the custom request headers map; returns the receiver for chaining.
func (o *DeleteBucketWebsiteOptions) SetHeaders(param map[string]string) *DeleteBucketWebsiteOptions {
	o.Headers = param
	return o
}
// DeleteObjectOptions : The DeleteObject options.
type DeleteObjectOptions struct {
	// The bucket containing the object.
	Bucket *string `validate:"required,ne="`
	// Key name of the object to delete.
	Key *string `validate:"required,ne="`
	// Allows users to set headers on API requests
	Headers map[string]string
}
// NewDeleteObjectOptions constructs a DeleteObjectOptions with every required property populated.
func (*IbmCloudObjectStorageS3ApiV2) NewDeleteObjectOptions(bucket string, key string) *DeleteObjectOptions {
	o := new(DeleteObjectOptions)
	o.Bucket = core.StringPtr(bucket)
	o.Key = core.StringPtr(key)
	return o
}
// SetBucket sets the bucket name; returns the receiver for chaining.
func (o *DeleteObjectOptions) SetBucket(bucket string) *DeleteObjectOptions {
	o.Bucket = core.StringPtr(bucket)
	return o
}
// SetKey sets the object key; returns the receiver for chaining.
func (o *DeleteObjectOptions) SetKey(key string) *DeleteObjectOptions {
	o.Key = core.StringPtr(key)
	return o
}
// SetHeaders replaces the custom request headers map; returns the receiver for chaining.
func (o *DeleteObjectOptions) SetHeaders(param map[string]string) *DeleteObjectOptions {
	o.Headers = param
	return o
}
// DeleteObjectTaggingOptions : The DeleteObjectTagging options.
type DeleteObjectTaggingOptions struct {
	// The bucket that contains the object.
	Bucket *string `validate:"required,ne="`
	// Name of the object key.
	Key *string `validate:"required,ne="`
	Tagging *bool `validate:"required"`
	// Allows users to set headers on API requests
	Headers map[string]string
}
// NewDeleteObjectTaggingOptions constructs a DeleteObjectTaggingOptions with every required property populated.
func (*IbmCloudObjectStorageS3ApiV2) NewDeleteObjectTaggingOptions(bucket string, key string, tagging bool) *DeleteObjectTaggingOptions {
	o := new(DeleteObjectTaggingOptions)
	o.Bucket = core.StringPtr(bucket)
	o.Key = core.StringPtr(key)
	o.Tagging = core.BoolPtr(tagging)
	return o
}
// SetBucket sets the bucket name; returns the receiver for chaining.
func (o *DeleteObjectTaggingOptions) SetBucket(bucket string) *DeleteObjectTaggingOptions {
	o.Bucket = core.StringPtr(bucket)
	return o
}
// SetKey sets the object key; returns the receiver for chaining.
func (o *DeleteObjectTaggingOptions) SetKey(key string) *DeleteObjectTaggingOptions {
	o.Key = core.StringPtr(key)
	return o
}
// SetTagging sets the required Tagging flag; returns the receiver for chaining.
func (o *DeleteObjectTaggingOptions) SetTagging(tagging bool) *DeleteObjectTaggingOptions {
	o.Tagging = core.BoolPtr(tagging)
	return o
}
// SetHeaders replaces the custom request headers map; returns the receiver for chaining.
func (o *DeleteObjectTaggingOptions) SetHeaders(param map[string]string) *DeleteObjectTaggingOptions {
	o.Headers = param
	return o
}
// DeleteObjectsOptions : The DeleteObjects options.
type DeleteObjectsOptions struct {
	// The bucket name containing the objects to delete.
	Bucket *string `validate:"required,ne="`
	Delete *bool `validate:"required"`
	Body *string `validate:"required"`
	// Allows users to set headers on API requests
	Headers map[string]string
}
// NewDeleteObjectsOptions constructs a DeleteObjectsOptions with every required property populated.
// (deleteVar avoids shadowing the builtin delete.)
func (*IbmCloudObjectStorageS3ApiV2) NewDeleteObjectsOptions(bucket string, deleteVar bool, body string) *DeleteObjectsOptions {
	o := new(DeleteObjectsOptions)
	o.Bucket = core.StringPtr(bucket)
	o.Delete = core.BoolPtr(deleteVar)
	o.Body = core.StringPtr(body)
	return o
}
// SetBucket sets the bucket name; returns the receiver for chaining.
func (o *DeleteObjectsOptions) SetBucket(bucket string) *DeleteObjectsOptions {
	o.Bucket = core.StringPtr(bucket)
	return o
}
// SetDelete sets the required Delete flag; returns the receiver for chaining.
func (o *DeleteObjectsOptions) SetDelete(deleteVar bool) *DeleteObjectsOptions {
	o.Delete = core.BoolPtr(deleteVar)
	return o
}
// SetBody sets the request body; returns the receiver for chaining.
func (o *DeleteObjectsOptions) SetBody(body string) *DeleteObjectsOptions {
	o.Body = core.StringPtr(body)
	return o
}
// SetHeaders replaces the custom request headers map; returns the receiver for chaining.
func (o *DeleteObjectsOptions) SetHeaders(param map[string]string) *DeleteObjectsOptions {
	o.Headers = param
	return o
}
// DeleteObjectsOutput : DeleteObjectsOutput struct
type DeleteObjectsOutput struct {
	// Container for a failed delete operation that describes the object that IBM COS attempted to delete and the error it
	// encountered.
	Errors *DeleteObjectsOutputErrors `json:"Errors,omitempty"`
}
// UnmarshalDeleteObjectsOutput unmarshals an instance of DeleteObjectsOutput from the specified map of raw messages.
func UnmarshalDeleteObjectsOutput(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(DeleteObjectsOutput)
	if err = core.UnmarshalModel(m, "Errors", &model.Errors, UnmarshalDeleteObjectsOutputErrors); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// DeletePublicAccessBlockOptions : The DeletePublicAccessBlock options.
type DeletePublicAccessBlockOptions struct {
	// The IBM COS bucket whose `PublicAccessBlock` configuration you want to delete.
	Bucket *string `validate:"required,ne="`
	PublicAccessBlock *bool `validate:"required"`
	// Allows users to set headers on API requests
	Headers map[string]string
}
// NewDeletePublicAccessBlockOptions constructs a DeletePublicAccessBlockOptions with every required property populated.
func (*IbmCloudObjectStorageS3ApiV2) NewDeletePublicAccessBlockOptions(bucket string, publicAccessBlock bool) *DeletePublicAccessBlockOptions {
	o := new(DeletePublicAccessBlockOptions)
	o.Bucket = core.StringPtr(bucket)
	o.PublicAccessBlock = core.BoolPtr(publicAccessBlock)
	return o
}
// SetBucket sets the bucket name; returns the receiver for chaining.
func (o *DeletePublicAccessBlockOptions) SetBucket(bucket string) *DeletePublicAccessBlockOptions {
	o.Bucket = core.StringPtr(bucket)
	return o
}
// SetPublicAccessBlock sets the required PublicAccessBlock flag; returns the receiver for chaining.
func (o *DeletePublicAccessBlockOptions) SetPublicAccessBlock(publicAccessBlock bool) *DeletePublicAccessBlockOptions {
	o.PublicAccessBlock = core.BoolPtr(publicAccessBlock)
	return o
}
// SetHeaders replaces the custom request headers map; returns the receiver for chaining.
func (o *DeletePublicAccessBlockOptions) SetHeaders(param map[string]string) *DeletePublicAccessBlockOptions {
	o.Headers = param
	return o
}
// Error : Container for all error elements.
type Error struct {
	// The error key.
	Key *string `json:"Key,omitempty"`
	Code *string `json:"Code,omitempty"`
	// The error message contains a generic description of the error condition in English. It is intended for a human
	// audience. Simple programs display the message directly to the end user if they encounter an error condition they
	// don't know how or don't care to handle. Sophisticated programs with more exhaustive error handling and proper
	// internationalization are more likely to ignore the error message.
	Message *string `json:"Message,omitempty"`
}
// UnmarshalError unmarshals an instance of Error from the specified map of raw messages.
func UnmarshalError(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(Error)
	if err = core.UnmarshalPrimitive(m, "Key", &model.Key); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Code", &model.Code); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Message", &model.Message); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// ErrorDocument : The error information.
type ErrorDocument struct {
	// The object key name to use when a 4XX class error occurs.
	Key *string `json:"Key" validate:"required"`
}
// UnmarshalErrorDocument unmarshals an instance of ErrorDocument from the specified map of raw messages.
func UnmarshalErrorDocument(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(ErrorDocument)
	if err = core.UnmarshalPrimitive(m, "Key", &model.Key); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// GetBucketAclOptions : The GetBucketAcl options.
type GetBucketAclOptions struct {
	// Specifies the bucket whose ACL is being requested.
	Bucket *string `validate:"required,ne="`
	Acl *bool `validate:"required"`
	// Allows users to set headers on API requests
	Headers map[string]string
}
// NewGetBucketAclOptions constructs a GetBucketAclOptions with every required property populated.
func (*IbmCloudObjectStorageS3ApiV2) NewGetBucketAclOptions(bucket string, acl bool) *GetBucketAclOptions {
	o := new(GetBucketAclOptions)
	o.Bucket = core.StringPtr(bucket)
	o.Acl = core.BoolPtr(acl)
	return o
}
// SetBucket sets the bucket name; returns the receiver for chaining.
func (o *GetBucketAclOptions) SetBucket(bucket string) *GetBucketAclOptions {
	o.Bucket = core.StringPtr(bucket)
	return o
}
// SetAcl sets the required Acl flag; returns the receiver for chaining.
func (o *GetBucketAclOptions) SetAcl(acl bool) *GetBucketAclOptions {
	o.Acl = core.BoolPtr(acl)
	return o
}
// SetHeaders replaces the custom request headers map; returns the receiver for chaining.
func (o *GetBucketAclOptions) SetHeaders(param map[string]string) *GetBucketAclOptions {
	o.Headers = param
	return o
}
// GetBucketCorsOptions : The GetBucketCors options.
type GetBucketCorsOptions struct {
	// The bucket name for which to get the CORS configuration.
	Bucket *string `validate:"required,ne="`
	Cors *bool `validate:"required"`
	// Allows users to set headers on API requests
	Headers map[string]string
}
// NewGetBucketCorsOptions constructs a GetBucketCorsOptions with every required property populated.
func (*IbmCloudObjectStorageS3ApiV2) NewGetBucketCorsOptions(bucket string, cors bool) *GetBucketCorsOptions {
	o := new(GetBucketCorsOptions)
	o.Bucket = core.StringPtr(bucket)
	o.Cors = core.BoolPtr(cors)
	return o
}
// SetBucket sets the bucket name; returns the receiver for chaining.
func (o *GetBucketCorsOptions) SetBucket(bucket string) *GetBucketCorsOptions {
	o.Bucket = core.StringPtr(bucket)
	return o
}
// SetCors sets the required Cors flag; returns the receiver for chaining.
func (o *GetBucketCorsOptions) SetCors(cors bool) *GetBucketCorsOptions {
	o.Cors = core.BoolPtr(cors)
	return o
}
// SetHeaders replaces the custom request headers map; returns the receiver for chaining.
func (o *GetBucketCorsOptions) SetHeaders(param map[string]string) *GetBucketCorsOptions {
	o.Headers = param
	return o
}
// GetBucketCorsOutput : GetBucketCorsOutput struct
type GetBucketCorsOutput struct {
	// A set of origins and methods (cross-origin access that you want to allow). You can add up to 100 rules to the
	// configuration.
	CORSRules *GetBucketCorsOutputCORSRules `json:"CORSRules,omitempty"`
}

// UnmarshalGetBucketCorsOutput unmarshals an instance of GetBucketCorsOutput from the specified map of raw messages.
func UnmarshalGetBucketCorsOutput(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := &GetBucketCorsOutput{}
	if err = core.UnmarshalModel(m, "CORSRules", &obj.CORSRules, UnmarshalGetBucketCorsOutputCORSRules); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// GetBucketLifecycleConfigurationOptions : The GetBucketLifecycleConfiguration options.
type GetBucketLifecycleConfigurationOptions struct {
	// The name of the bucket for which to get the lifecycle information.
	Bucket *string `validate:"required,ne="`

	Lifecycle *bool `validate:"required"`

	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewGetBucketLifecycleConfigurationOptions : Instantiate GetBucketLifecycleConfigurationOptions
func (*IbmCloudObjectStorageS3ApiV2) NewGetBucketLifecycleConfigurationOptions(bucket string, lifecycle bool) *GetBucketLifecycleConfigurationOptions {
	options := &GetBucketLifecycleConfigurationOptions{
		Bucket:    core.StringPtr(bucket),
		Lifecycle: core.BoolPtr(lifecycle),
	}
	return options
}

// SetBucket : Allow user to set Bucket
func (o *GetBucketLifecycleConfigurationOptions) SetBucket(bucket string) *GetBucketLifecycleConfigurationOptions {
	o.Bucket = core.StringPtr(bucket)
	return o
}

// SetLifecycle : Allow user to set Lifecycle
func (o *GetBucketLifecycleConfigurationOptions) SetLifecycle(lifecycle bool) *GetBucketLifecycleConfigurationOptions {
	o.Lifecycle = core.BoolPtr(lifecycle)
	return o
}

// SetHeaders : Allow user to set Headers
func (o *GetBucketLifecycleConfigurationOptions) SetHeaders(param map[string]string) *GetBucketLifecycleConfigurationOptions {
	o.Headers = param
	return o
}
// GetBucketLifecycleConfigurationOutput : GetBucketLifecycleConfigurationOutput struct
type GetBucketLifecycleConfigurationOutput struct {
	// Container for a lifecycle rule.
	Rules *GetBucketLifecycleConfigurationOutputRules `json:"Rules,omitempty"`
}

// UnmarshalGetBucketLifecycleConfigurationOutput unmarshals an instance of GetBucketLifecycleConfigurationOutput from the specified map of raw messages.
func UnmarshalGetBucketLifecycleConfigurationOutput(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := &GetBucketLifecycleConfigurationOutput{}
	if err = core.UnmarshalModel(m, "Rules", &obj.Rules, UnmarshalGetBucketLifecycleConfigurationOutputRules); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// GetBucketWebsiteOptions : The GetBucketWebsite options.
type GetBucketWebsiteOptions struct {
	// The bucket name for which to get the website configuration.
	Bucket *string `validate:"required,ne="`

	Website *bool `validate:"required"`

	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewGetBucketWebsiteOptions : Instantiate GetBucketWebsiteOptions
func (*IbmCloudObjectStorageS3ApiV2) NewGetBucketWebsiteOptions(bucket string, website bool) *GetBucketWebsiteOptions {
	options := &GetBucketWebsiteOptions{
		Bucket:  core.StringPtr(bucket),
		Website: core.BoolPtr(website),
	}
	return options
}

// SetBucket : Allow user to set Bucket
func (o *GetBucketWebsiteOptions) SetBucket(bucket string) *GetBucketWebsiteOptions {
	o.Bucket = core.StringPtr(bucket)
	return o
}

// SetWebsite : Allow user to set Website
func (o *GetBucketWebsiteOptions) SetWebsite(website bool) *GetBucketWebsiteOptions {
	o.Website = core.BoolPtr(website)
	return o
}

// SetHeaders : Allow user to set Headers
func (o *GetBucketWebsiteOptions) SetHeaders(param map[string]string) *GetBucketWebsiteOptions {
	o.Headers = param
	return o
}
// GetBucketWebsiteOutput : GetBucketWebsiteOutput struct
type GetBucketWebsiteOutput struct {
	// Specifies the redirect behavior of all requests to a website endpoint of an IBM COS bucket.
	RedirectAllRequestsTo *RedirectAllRequestsTo `json:"RedirectAllRequestsTo,omitempty"`

	// The name of the index document for the website (for example `index.html`).
	IndexDocument *IndexDocument `json:"IndexDocument,omitempty"`

	// The object key name of the website error document to use for 4XX class errors.
	ErrorDocument *ErrorDocument `json:"ErrorDocument,omitempty"`

	// Rules that define when a redirect is applied and the redirect behavior.
	RoutingRules *RoutingRules `json:"RoutingRules,omitempty"`
}

// UnmarshalGetBucketWebsiteOutput unmarshals an instance of GetBucketWebsiteOutput from the specified map of raw messages.
func UnmarshalGetBucketWebsiteOutput(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := &GetBucketWebsiteOutput{}
	if err = core.UnmarshalModel(m, "RedirectAllRequestsTo", &obj.RedirectAllRequestsTo, UnmarshalRedirectAllRequestsTo); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "IndexDocument", &obj.IndexDocument, UnmarshalIndexDocument); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "ErrorDocument", &obj.ErrorDocument, UnmarshalErrorDocument); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "RoutingRules", &obj.RoutingRules, UnmarshalRoutingRules); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// GetObjectAclOptions : The GetObjectAcl options.
type GetObjectAclOptions struct {
	// The name of the bucket containing the object being requested.
	Bucket *string `validate:"required,ne="`

	// The key of the object for which to get the ACL information.
	Key *string `validate:"required,ne="`

	Acl *bool `validate:"required"`

	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewGetObjectAclOptions : Instantiate GetObjectAclOptions
func (*IbmCloudObjectStorageS3ApiV2) NewGetObjectAclOptions(bucket string, key string, acl bool) *GetObjectAclOptions {
	options := &GetObjectAclOptions{
		Bucket: core.StringPtr(bucket),
		Key:    core.StringPtr(key),
		Acl:    core.BoolPtr(acl),
	}
	return options
}

// SetBucket : Allow user to set Bucket
func (o *GetObjectAclOptions) SetBucket(bucket string) *GetObjectAclOptions {
	o.Bucket = core.StringPtr(bucket)
	return o
}

// SetKey : Allow user to set Key
func (o *GetObjectAclOptions) SetKey(key string) *GetObjectAclOptions {
	o.Key = core.StringPtr(key)
	return o
}

// SetAcl : Allow user to set Acl
func (o *GetObjectAclOptions) SetAcl(acl bool) *GetObjectAclOptions {
	o.Acl = core.BoolPtr(acl)
	return o
}

// SetHeaders : Allow user to set Headers
func (o *GetObjectAclOptions) SetHeaders(param map[string]string) *GetObjectAclOptions {
	o.Headers = param
	return o
}
// GetObjectAclOutput : GetObjectAclOutput struct
type GetObjectAclOutput struct {
	// Container for the bucket owner's display name and ID.
	Owner *Owner `json:"Owner,omitempty"`

	// A list of grants.
	Grants *GetObjectAclOutputGrants `json:"Grants,omitempty"`
}

// UnmarshalGetObjectAclOutput unmarshals an instance of GetObjectAclOutput from the specified map of raw messages.
func UnmarshalGetObjectAclOutput(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := &GetObjectAclOutput{}
	if err = core.UnmarshalModel(m, "Owner", &obj.Owner, UnmarshalOwner); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "Grants", &obj.Grants, UnmarshalGetObjectAclOutputGrants); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// GetObjectOptions : The GetObject options.
type GetObjectOptions struct {
	// The bucket containing the object.
	Bucket *string `validate:"required,ne="`

	// Key of the object to get.
	Key *string `validate:"required,ne="`

	// Returns the object only if its entity tag (ETag) is the same as the one specified, otherwise returns a 412
	// (precondition failed).
	IfMatch *string

	// Returns the object only if it has been modified since the specified time, otherwise returns a 304 (not modified).
	IfModifiedSince *strfmt.DateTime

	// Returns the object only if its entity tag (ETag) is different from the one specified, otherwise returns a 304 (not
	// modified).
	IfNoneMatch *string

	// Returns the object only if it has not been modified since the specified time, otherwise return a 412 (precondition
	// failed).
	IfUnmodifiedSince *strfmt.DateTime

	// Downloads the specified range bytes of an object. For more information about the HTTP Range header, see [RFC
	// 2616](https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35).
	Range *string

	// Sets the `Cache-Control` header of the response.
	ResponseCacheControl *string

	// Sets the `Content-Disposition` header of the response.
	ResponseContentDisposition *string

	// Sets the `Content-Encoding` header of the response.
	ResponseContentEncoding *string

	// Sets the `Content-Language` header of the response.
	ResponseContentLanguage *string

	// Sets the `Content-Type` header of the response.
	ResponseContentType *string

	// Sets the `Expires` header of the response.
	ResponseExpires *strfmt.DateTime

	// Specifies the algorithm to use to when encrypting the object (for example, AES256).
	XAmzServerSideEncryptionCustomerAlgorithm *string

	// Specifies the customer-provided encryption key for IBM COS to use in encrypting data. This value is used to store
	// the object and then it is discarded; IBM COS does not store the encryption key. The key must be appropriate for use
	// with the algorithm specified in the `x-amz-server-side-encryption-customer-algorithm` header.
	XAmzServerSideEncryptionCustomerKey *string

	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. IBM COS uses this header for a message
	// integrity check to ensure that the encryption key was transmitted without error.
	XAmzServerSideEncryptionCustomerKeyMD5 *string

	// Part number of the object being read. This is a positive integer between 1 and 10,000. Effectively performs a
	// "ranged" GET request for the part specified. Useful for downloading just a part of an object.
	PartNumber *int64

	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewGetObjectOptions : Instantiate GetObjectOptions
func (*IbmCloudObjectStorageS3ApiV2) NewGetObjectOptions(bucket string, key string) *GetObjectOptions {
	options := &GetObjectOptions{
		Bucket: core.StringPtr(bucket),
		Key:    core.StringPtr(key),
	}
	return options
}

// SetBucket : Allow user to set Bucket
func (o *GetObjectOptions) SetBucket(bucket string) *GetObjectOptions {
	o.Bucket = core.StringPtr(bucket)
	return o
}

// SetKey : Allow user to set Key
func (o *GetObjectOptions) SetKey(key string) *GetObjectOptions {
	o.Key = core.StringPtr(key)
	return o
}

// SetIfMatch : Allow user to set IfMatch
func (o *GetObjectOptions) SetIfMatch(ifMatch string) *GetObjectOptions {
	o.IfMatch = core.StringPtr(ifMatch)
	return o
}

// SetIfModifiedSince : Allow user to set IfModifiedSince
func (o *GetObjectOptions) SetIfModifiedSince(ifModifiedSince *strfmt.DateTime) *GetObjectOptions {
	o.IfModifiedSince = ifModifiedSince
	return o
}

// SetIfNoneMatch : Allow user to set IfNoneMatch
func (o *GetObjectOptions) SetIfNoneMatch(ifNoneMatch string) *GetObjectOptions {
	o.IfNoneMatch = core.StringPtr(ifNoneMatch)
	return o
}

// SetIfUnmodifiedSince : Allow user to set IfUnmodifiedSince
func (o *GetObjectOptions) SetIfUnmodifiedSince(ifUnmodifiedSince *strfmt.DateTime) *GetObjectOptions {
	o.IfUnmodifiedSince = ifUnmodifiedSince
	return o
}

// SetRange : Allow user to set Range
func (o *GetObjectOptions) SetRange(rangeVar string) *GetObjectOptions {
	o.Range = core.StringPtr(rangeVar)
	return o
}

// SetResponseCacheControl : Allow user to set ResponseCacheControl
func (o *GetObjectOptions) SetResponseCacheControl(responseCacheControl string) *GetObjectOptions {
	o.ResponseCacheControl = core.StringPtr(responseCacheControl)
	return o
}

// SetResponseContentDisposition : Allow user to set ResponseContentDisposition
func (o *GetObjectOptions) SetResponseContentDisposition(responseContentDisposition string) *GetObjectOptions {
	o.ResponseContentDisposition = core.StringPtr(responseContentDisposition)
	return o
}

// SetResponseContentEncoding : Allow user to set ResponseContentEncoding
func (o *GetObjectOptions) SetResponseContentEncoding(responseContentEncoding string) *GetObjectOptions {
	o.ResponseContentEncoding = core.StringPtr(responseContentEncoding)
	return o
}

// SetResponseContentLanguage : Allow user to set ResponseContentLanguage
func (o *GetObjectOptions) SetResponseContentLanguage(responseContentLanguage string) *GetObjectOptions {
	o.ResponseContentLanguage = core.StringPtr(responseContentLanguage)
	return o
}

// SetResponseContentType : Allow user to set ResponseContentType
func (o *GetObjectOptions) SetResponseContentType(responseContentType string) *GetObjectOptions {
	o.ResponseContentType = core.StringPtr(responseContentType)
	return o
}

// SetResponseExpires : Allow user to set ResponseExpires
func (o *GetObjectOptions) SetResponseExpires(responseExpires *strfmt.DateTime) *GetObjectOptions {
	o.ResponseExpires = responseExpires
	return o
}

// SetXAmzServerSideEncryptionCustomerAlgorithm : Allow user to set XAmzServerSideEncryptionCustomerAlgorithm
func (o *GetObjectOptions) SetXAmzServerSideEncryptionCustomerAlgorithm(xAmzServerSideEncryptionCustomerAlgorithm string) *GetObjectOptions {
	o.XAmzServerSideEncryptionCustomerAlgorithm = core.StringPtr(xAmzServerSideEncryptionCustomerAlgorithm)
	return o
}

// SetXAmzServerSideEncryptionCustomerKey : Allow user to set XAmzServerSideEncryptionCustomerKey
func (o *GetObjectOptions) SetXAmzServerSideEncryptionCustomerKey(xAmzServerSideEncryptionCustomerKey string) *GetObjectOptions {
	o.XAmzServerSideEncryptionCustomerKey = core.StringPtr(xAmzServerSideEncryptionCustomerKey)
	return o
}

// SetXAmzServerSideEncryptionCustomerKeyMD5 : Allow user to set XAmzServerSideEncryptionCustomerKeyMD5
func (o *GetObjectOptions) SetXAmzServerSideEncryptionCustomerKeyMD5(xAmzServerSideEncryptionCustomerKeyMD5 string) *GetObjectOptions {
	o.XAmzServerSideEncryptionCustomerKeyMD5 = core.StringPtr(xAmzServerSideEncryptionCustomerKeyMD5)
	return o
}

// SetPartNumber : Allow user to set PartNumber
func (o *GetObjectOptions) SetPartNumber(partNumber int64) *GetObjectOptions {
	o.PartNumber = core.Int64Ptr(partNumber)
	return o
}

// SetHeaders : Allow user to set Headers
func (o *GetObjectOptions) SetHeaders(param map[string]string) *GetObjectOptions {
	o.Headers = param
	return o
}
// GetObjectOutput : GetObjectOutput struct
type GetObjectOutput struct {
	// Object data.
	Body *string `json:"Body,omitempty"`

	// A map of metadata to store with the object in S3.
	Metadata *GetObjectOutputMetadata `json:"Metadata,omitempty"`
}

// UnmarshalGetObjectOutput unmarshals an instance of GetObjectOutput from the specified map of raw messages.
func UnmarshalGetObjectOutput(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := &GetObjectOutput{}
	if err = core.UnmarshalPrimitive(m, "Body", &obj.Body); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "Metadata", &obj.Metadata, UnmarshalGetObjectOutputMetadata); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// GetObjectOutputMetadata : A map of metadata to store with the object in S3.
type GetObjectOutputMetadata struct {
	// Allows users to set arbitrary properties
	additionalProperties map[string]*string
}

// SetProperty allows the user to set an arbitrary property on an instance of GetObjectOutputMetadata
func (o *GetObjectOutputMetadata) SetProperty(key string, value *string) {
	// Lazily initialize the backing map on first use.
	if o.additionalProperties == nil {
		o.additionalProperties = map[string]*string{}
	}
	o.additionalProperties[key] = value
}

// GetProperty allows the user to retrieve an arbitrary property from an instance of GetObjectOutputMetadata
func (o *GetObjectOutputMetadata) GetProperty(key string) *string {
	// Reading from a nil map is safe and yields the zero value (nil).
	return o.additionalProperties[key]
}

// GetProperties allows the user to retrieve the map of arbitrary properties from an instance of GetObjectOutputMetadata
func (o *GetObjectOutputMetadata) GetProperties() map[string]*string {
	return o.additionalProperties
}

// MarshalJSON performs custom serialization for instances of GetObjectOutputMetadata
func (o *GetObjectOutputMetadata) MarshalJSON() (buffer []byte, err error) {
	// Copy the properties into a generic map; an empty map serializes to "{}",
	// matching the behavior when no properties are set.
	m := make(map[string]interface{}, len(o.additionalProperties))
	for k, v := range o.additionalProperties {
		m[k] = v
	}
	return json.Marshal(m)
}
// UnmarshalGetObjectOutputMetadata unmarshals an instance of GetObjectOutputMetadata from the specified map of raw messages.
func UnmarshalGetObjectOutputMetadata(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := &GetObjectOutputMetadata{}
	// Every key in the raw message becomes an arbitrary string property.
	for k := range m {
		var v *string
		if e := core.UnmarshalPrimitive(m, k, &v); e != nil {
			err = e
			return
		}
		obj.SetProperty(k, v)
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// GetObjectTaggingOptions : The GetObjectTagging options.
type GetObjectTaggingOptions struct {
	// The bucket containing the object.
	Bucket *string `validate:"required,ne="`

	// Object key for which to get the tagging information.
	Key *string `validate:"required,ne="`

	Tagging *bool `validate:"required"`

	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewGetObjectTaggingOptions : Instantiate GetObjectTaggingOptions
func (*IbmCloudObjectStorageS3ApiV2) NewGetObjectTaggingOptions(bucket string, key string, tagging bool) *GetObjectTaggingOptions {
	options := &GetObjectTaggingOptions{
		Bucket:  core.StringPtr(bucket),
		Key:     core.StringPtr(key),
		Tagging: core.BoolPtr(tagging),
	}
	return options
}

// SetBucket : Allow user to set Bucket
func (o *GetObjectTaggingOptions) SetBucket(bucket string) *GetObjectTaggingOptions {
	o.Bucket = core.StringPtr(bucket)
	return o
}

// SetKey : Allow user to set Key
func (o *GetObjectTaggingOptions) SetKey(key string) *GetObjectTaggingOptions {
	o.Key = core.StringPtr(key)
	return o
}

// SetTagging : Allow user to set Tagging
func (o *GetObjectTaggingOptions) SetTagging(tagging bool) *GetObjectTaggingOptions {
	o.Tagging = core.BoolPtr(tagging)
	return o
}

// SetHeaders : Allow user to set Headers
func (o *GetObjectTaggingOptions) SetHeaders(param map[string]string) *GetObjectTaggingOptions {
	o.Headers = param
	return o
}
// GetObjectTaggingOutput : GetObjectTaggingOutput struct
type GetObjectTaggingOutput struct {
	// Contains the tag set.
	TagSet *TagSet `json:"TagSet" validate:"required"`
}

// UnmarshalGetObjectTaggingOutput unmarshals an instance of GetObjectTaggingOutput from the specified map of raw messages.
func UnmarshalGetObjectTaggingOutput(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := &GetObjectTaggingOutput{}
	if err = core.UnmarshalModel(m, "TagSet", &obj.TagSet, UnmarshalTagSet); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// GetPublicAccessBlockOptions : The GetPublicAccessBlock options.
type GetPublicAccessBlockOptions struct {
	// The name of the IBM COS bucket whose `PublicAccessBlock` configuration you want to retrieve.
	Bucket *string `validate:"required,ne="`

	PublicAccessBlock *bool `validate:"required"`

	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewGetPublicAccessBlockOptions : Instantiate GetPublicAccessBlockOptions
func (*IbmCloudObjectStorageS3ApiV2) NewGetPublicAccessBlockOptions(bucket string, publicAccessBlock bool) *GetPublicAccessBlockOptions {
	options := &GetPublicAccessBlockOptions{
		Bucket:            core.StringPtr(bucket),
		PublicAccessBlock: core.BoolPtr(publicAccessBlock),
	}
	return options
}

// SetBucket : Allow user to set Bucket
func (o *GetPublicAccessBlockOptions) SetBucket(bucket string) *GetPublicAccessBlockOptions {
	o.Bucket = core.StringPtr(bucket)
	return o
}

// SetPublicAccessBlock : Allow user to set PublicAccessBlock
func (o *GetPublicAccessBlockOptions) SetPublicAccessBlock(publicAccessBlock bool) *GetPublicAccessBlockOptions {
	o.PublicAccessBlock = core.BoolPtr(publicAccessBlock)
	return o
}

// SetHeaders : Allow user to set Headers
func (o *GetPublicAccessBlockOptions) SetHeaders(param map[string]string) *GetPublicAccessBlockOptions {
	o.Headers = param
	return o
}
// GetPublicAccessBlockOutput : GetPublicAccessBlockOutput struct
type GetPublicAccessBlockOutput struct {
	// The `PublicAccessBlock` configuration currently in effect for this IBM COS bucket.
	PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `json:"PublicAccessBlockConfiguration,omitempty"`
}

// UnmarshalGetPublicAccessBlockOutput unmarshals an instance of GetPublicAccessBlockOutput from the specified map of raw messages.
func UnmarshalGetPublicAccessBlockOutput(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := &GetPublicAccessBlockOutput{}
	if err = core.UnmarshalModel(m, "PublicAccessBlockConfiguration", &obj.PublicAccessBlockConfiguration, UnmarshalPublicAccessBlockConfiguration); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// Grantee : Container for the person being granted permissions.
type Grantee struct {
	// Screen name of the grantee.
	DisplayName *string `json:"DisplayName,omitempty"`

	// <p>Email address of the grantee.</p> <note> <p>Using email addresses to specify a grantee is only supported in the
	// following AWS Regions: </p> <ul> <li> <p>US East (N. Virginia)</p> </li> <li> <p>US West (N. California)</p> </li>
	// <li> <p> US West (Oregon)</p> </li> <li> <p> Asia Pacific (Singapore)</p> </li> <li> <p>Asia Pacific (Sydney)</p>
	// </li> <li> <p>Asia Pacific (Tokyo)</p> </li> <li> <p>Europe (Ireland)</p> </li> <li> <p>South America (São
	// Paulo)</p> </li> </ul> <p>For a list of all the IBM COS supported Regions and endpoints, see <a
	// href="https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">Regions and Endpoints</a> in the AWS
	// General Reference.</p> </note>.
	EmailAddress *string `json:"EmailAddress,omitempty"`

	// The canonical user ID of the grantee.
	ID *string `json:"ID,omitempty"`

	// Type of grantee.
	Type *string `json:"Type" validate:"required"`

	// URI of the grantee group.
	URI *string `json:"URI,omitempty"`
}

// Constants associated with the Grantee.Type property.
// Type of grantee.
const (
	Grantee_Type_Amazoncustomerbyemail = "AmazonCustomerByEmail"
	Grantee_Type_Canonicaluser         = "CanonicalUser"
	Grantee_Type_Group                 = "Group"
)

// UnmarshalGrantee unmarshals an instance of Grantee from the specified map of raw messages.
func UnmarshalGrantee(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := &Grantee{}
	if err = core.UnmarshalPrimitive(m, "DisplayName", &obj.DisplayName); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "EmailAddress", &obj.EmailAddress); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "ID", &obj.ID); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Type", &obj.Type); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "URI", &obj.URI); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// GrantsItem : Container for grant information.
type GrantsItem struct {
	// The person being granted permissions.
	Grantee *Grantee `json:"Grantee,omitempty"`

	// Specifies the permission given to the grantee.
	Permission *string `json:"Permission,omitempty"`
}

// Constants associated with the GrantsItem.Permission property.
// Specifies the permission given to the grantee.
const (
	GrantsItem_Permission_FullControl = "FULL_CONTROL"
	GrantsItem_Permission_Read        = "READ"
	GrantsItem_Permission_ReadAcp     = "READ_ACP"
	GrantsItem_Permission_Write       = "WRITE"
	GrantsItem_Permission_WriteAcp    = "WRITE_ACP"
)

// UnmarshalGrantsItem unmarshals an instance of GrantsItem from the specified map of raw messages.
func UnmarshalGrantsItem(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := &GrantsItem{}
	if err = core.UnmarshalModel(m, "Grantee", &obj.Grantee, UnmarshalGrantee); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Permission", &obj.Permission); err != nil {
		return
	}
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// HeadBucketOptions : The HeadBucket options.
type HeadBucketOptions struct {
	// The name of the bucket being checked. The bucket must exist in the location specified by the endpoint for the
	// request to succeed.
	Bucket *string `validate:"required,ne="`

	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewHeadBucketOptions : Instantiate HeadBucketOptions
func (*IbmCloudObjectStorageS3ApiV2) NewHeadBucketOptions(bucket string) *HeadBucketOptions {
	options := &HeadBucketOptions{
		Bucket: core.StringPtr(bucket),
	}
	return options
}

// SetBucket : Allow user to set Bucket
func (o *HeadBucketOptions) SetBucket(bucket string) *HeadBucketOptions {
	o.Bucket = core.StringPtr(bucket)
	return o
}

// SetHeaders : Allow user to set Headers
func (o *HeadBucketOptions) SetHeaders(param map[string]string) *HeadBucketOptions {
	o.Headers = param
	return o
}
// HeadObjectOptions : The HeadObject options.
type HeadObjectOptions struct {
	// The bucket containing the object.
	Bucket *string `validate:"required,ne="`

	// The object key.
	Key *string `validate:"required,ne="`

	// Return the object only if its entity tag (ETag) is the same as the one specified, otherwise return a 412
	// (precondition failed).
	IfMatch *string

	// Return the object only if it has been modified since the specified time, otherwise return a 304 (not modified).
	IfModifiedSince *strfmt.DateTime

	// Return the object only if its entity tag (ETag) is different from the one specified, otherwise return a 304 (not
	// modified).
	IfNoneMatch *string

	// Return the object only if it has not been modified since the specified time, otherwise return a 412 (precondition
	// failed).
	IfUnmodifiedSince *strfmt.DateTime

	// Downloads the specified range bytes of an object. For more information about the HTTP Range header, see [RFC
	// 2616](https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35).
	Range *string

	// Specifies the algorithm to use to when encrypting the object (for example, AES256).
	XAmzServerSideEncryptionCustomerAlgorithm *string

	// Specifies the customer-provided encryption key for IBM COS to use in encrypting data. This value is used to store
	// the object and then it is discarded; IBM COS does not store the encryption key. The key must be appropriate for use
	// with the algorithm specified in the `x-amz-server-side-encryption-customer-algorithm` header.
	XAmzServerSideEncryptionCustomerKey *string

	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. IBM COS uses this header for a message
	// integrity check to ensure that the encryption key was transmitted without error.
	XAmzServerSideEncryptionCustomerKeyMD5 *string

	// Part number of the object being read. This is a positive integer between 1 and 10,000. Effectively performs a
	// "ranged" HEAD request for the part specified. Useful querying about the size of the part and the number of parts in
	// this object.
	PartNumber *int64

	// Allows users to set headers on API requests
	Headers map[string]string
}

// NewHeadObjectOptions : Instantiate HeadObjectOptions
func (*IbmCloudObjectStorageS3ApiV2) NewHeadObjectOptions(bucket string, key string) *HeadObjectOptions {
	options := &HeadObjectOptions{
		Bucket: core.StringPtr(bucket),
		Key:    core.StringPtr(key),
	}
	return options
}

// SetBucket : Allow user to set Bucket
func (o *HeadObjectOptions) SetBucket(bucket string) *HeadObjectOptions {
	o.Bucket = core.StringPtr(bucket)
	return o
}

// SetKey : Allow user to set Key
func (o *HeadObjectOptions) SetKey(key string) *HeadObjectOptions {
	o.Key = core.StringPtr(key)
	return o
}

// SetIfMatch : Allow user to set IfMatch
func (o *HeadObjectOptions) SetIfMatch(ifMatch string) *HeadObjectOptions {
	o.IfMatch = core.StringPtr(ifMatch)
	return o
}

// SetIfModifiedSince : Allow user to set IfModifiedSince
func (o *HeadObjectOptions) SetIfModifiedSince(ifModifiedSince *strfmt.DateTime) *HeadObjectOptions {
	o.IfModifiedSince = ifModifiedSince
	return o
}

// SetIfNoneMatch : Allow user to set IfNoneMatch
func (o *HeadObjectOptions) SetIfNoneMatch(ifNoneMatch string) *HeadObjectOptions {
	o.IfNoneMatch = core.StringPtr(ifNoneMatch)
	return o
}

// SetIfUnmodifiedSince : Allow user to set IfUnmodifiedSince
func (o *HeadObjectOptions) SetIfUnmodifiedSince(ifUnmodifiedSince *strfmt.DateTime) *HeadObjectOptions {
	o.IfUnmodifiedSince = ifUnmodifiedSince
	return o
}

// SetRange : Allow user to set Range
func (o *HeadObjectOptions) SetRange(rangeVar string) *HeadObjectOptions {
	o.Range = core.StringPtr(rangeVar)
	return o
}

// SetXAmzServerSideEncryptionCustomerAlgorithm : Allow user to set XAmzServerSideEncryptionCustomerAlgorithm
func (o *HeadObjectOptions) SetXAmzServerSideEncryptionCustomerAlgorithm(xAmzServerSideEncryptionCustomerAlgorithm string) *HeadObjectOptions {
	o.XAmzServerSideEncryptionCustomerAlgorithm = core.StringPtr(xAmzServerSideEncryptionCustomerAlgorithm)
	return o
}

// SetXAmzServerSideEncryptionCustomerKey : Allow user to set XAmzServerSideEncryptionCustomerKey
func (o *HeadObjectOptions) SetXAmzServerSideEncryptionCustomerKey(xAmzServerSideEncryptionCustomerKey string) *HeadObjectOptions {
	o.XAmzServerSideEncryptionCustomerKey = core.StringPtr(xAmzServerSideEncryptionCustomerKey)
	return o
}

// SetXAmzServerSideEncryptionCustomerKeyMD5 : Allow user to set XAmzServerSideEncryptionCustomerKeyMD5
func (o *HeadObjectOptions) SetXAmzServerSideEncryptionCustomerKeyMD5(xAmzServerSideEncryptionCustomerKeyMD5 string) *HeadObjectOptions {
	o.XAmzServerSideEncryptionCustomerKeyMD5 = core.StringPtr(xAmzServerSideEncryptionCustomerKeyMD5)
	return o
}

// SetPartNumber : Allow user to set PartNumber
func (o *HeadObjectOptions) SetPartNumber(partNumber int64) *HeadObjectOptions {
	o.PartNumber = core.Int64Ptr(partNumber)
	return o
}

// SetHeaders : Allow user to set Headers
func (o *HeadObjectOptions) SetHeaders(param map[string]string) *HeadObjectOptions {
	o.Headers = param
	return o
}
// IndexDocument : Container for the `Suffix` element.
type IndexDocument struct {
// A suffix that is appended to a request that is for a directory on the website endpoint (for example, if the suffix
// is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with
// the key name images/index.html). The suffix must not be empty and must not include a slash character.
Suffix *string `json:"Suffix" validate:"required"`
}
// UnmarshalIndexDocument unmarshals an instance of IndexDocument from the specified map of raw messages.
func UnmarshalIndexDocument(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(IndexDocument)
	if err = core.UnmarshalPrimitive(m, "Suffix", &obj.Suffix); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// InitiateMultipartUploadOptions : The InitiateMultipartUpload options.
type InitiateMultipartUploadOptions struct {
// The name of the bucket to which to initiate the upload.
Bucket *string `validate:"required,ne="`
// Required flag marking this request as a multipart-upload initiation (presumably the `?uploads` query
// parameter — verify against the request builder).
Uploads *bool `validate:"required"`
// Object key for which the multipart upload is to be initiated.
Key *string `validate:"required,ne="`
// The request body payload.
Body *string `validate:"required"`
// Upload the object only if its entity tag (ETag) is the same as the one specified, otherwise return a 412
// (precondition failed).
IfMatch *string
// Upload the object only if its entity tag (ETag) is different from the one specified, otherwise return a 304 (not
// modified).
IfNoneMatch *string
// Upload the object only if it has not been modified since the specified time, otherwise return a 412 (precondition
// failed).
IfUnmodifiedSince *strfmt.DateTime
// Specifies caching behavior along the request/reply chain.
CacheControl *string
// Specifies presentational information for the object.
ContentDisposition *string
// Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied
// to obtain the media-type referenced by the Content-Type header field. For more information, see [RFC
// 2616](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11).
ContentEncoding *string
// The language the content is in.
ContentLanguage *string
// The date and time at which the object is no longer cacheable. For more information, [RFC
// 2616](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21).
Expires *strfmt.DateTime
// The server-side encryption algorithm used when storing this object (for example, AES256).
XAmzServerSideEncryption *string
// If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or
// to an external URL.
XAmzWebsiteRedirectLocation *string
// Specifies the algorithm to use when encrypting the object (for example, AES256).
XAmzServerSideEncryptionCustomerAlgorithm *string
// Specifies the customer-provided encryption key for encrypting data. IBM COS does not store the encryption key - it
// is discarded after use. The key must be appropriate for use with the algorithm specified in the
// `x-amz-server-side-encryption-customer-algorithm` header.
XAmzServerSideEncryptionCustomerKey *string
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321, ensuring the encryption key is
// transmitted without error.
XAmzServerSideEncryptionCustomerKeyMD5 *string
// A set of key-value pairs, encoded as URL query parameters.
XAmzTagging *string
// The canned ACL to apply to the object.
XAmzAcl *string
// Allows users to set headers on API requests
Headers map[string]string
}
// Constants associated with the InitiateMultipartUploadOptions.XAmzServerSideEncryption property.
// The server-side encryption algorithm used when storing this object (for example, AES256).
const (
// AES256 is the only server-side encryption algorithm value defined by this SDK.
InitiateMultipartUploadOptions_XAmzServerSideEncryption_Aes256 = "AES256"
)
// Constants associated with the InitiateMultipartUploadOptions.XAmzAcl property.
// The canned ACL to apply to the object.
const (
InitiateMultipartUploadOptions_XAmzAcl_Private = "private"
InitiateMultipartUploadOptions_XAmzAcl_PublicRead = "public-read"
)
// NewInitiateMultipartUploadOptions : Instantiate InitiateMultipartUploadOptions
func (*IbmCloudObjectStorageS3ApiV2) NewInitiateMultipartUploadOptions(bucket string, uploads bool, key string, body string) *InitiateMultipartUploadOptions {
	options := new(InitiateMultipartUploadOptions)
	options.Bucket = core.StringPtr(bucket)
	options.Uploads = core.BoolPtr(uploads)
	options.Key = core.StringPtr(key)
	options.Body = core.StringPtr(body)
	return options
}

// SetBucket : Allow user to set Bucket
func (options *InitiateMultipartUploadOptions) SetBucket(bucket string) *InitiateMultipartUploadOptions {
	v := bucket
	options.Bucket = &v
	return options
}

// SetUploads : Allow user to set Uploads
func (options *InitiateMultipartUploadOptions) SetUploads(uploads bool) *InitiateMultipartUploadOptions {
	v := uploads
	options.Uploads = &v
	return options
}

// SetKey : Allow user to set Key
func (options *InitiateMultipartUploadOptions) SetKey(key string) *InitiateMultipartUploadOptions {
	v := key
	options.Key = &v
	return options
}

// SetBody : Allow user to set Body
func (options *InitiateMultipartUploadOptions) SetBody(body string) *InitiateMultipartUploadOptions {
	v := body
	options.Body = &v
	return options
}

// SetIfMatch : Allow user to set IfMatch
func (options *InitiateMultipartUploadOptions) SetIfMatch(etag string) *InitiateMultipartUploadOptions {
	v := etag
	options.IfMatch = &v
	return options
}

// SetIfNoneMatch : Allow user to set IfNoneMatch
func (options *InitiateMultipartUploadOptions) SetIfNoneMatch(etag string) *InitiateMultipartUploadOptions {
	v := etag
	options.IfNoneMatch = &v
	return options
}

// SetIfUnmodifiedSince : Allow user to set IfUnmodifiedSince
func (options *InitiateMultipartUploadOptions) SetIfUnmodifiedSince(t *strfmt.DateTime) *InitiateMultipartUploadOptions {
	options.IfUnmodifiedSince = t
	return options
}

// SetCacheControl : Allow user to set CacheControl
func (options *InitiateMultipartUploadOptions) SetCacheControl(cacheControl string) *InitiateMultipartUploadOptions {
	v := cacheControl
	options.CacheControl = &v
	return options
}

// SetContentDisposition : Allow user to set ContentDisposition
func (options *InitiateMultipartUploadOptions) SetContentDisposition(disposition string) *InitiateMultipartUploadOptions {
	v := disposition
	options.ContentDisposition = &v
	return options
}

// SetContentEncoding : Allow user to set ContentEncoding
func (options *InitiateMultipartUploadOptions) SetContentEncoding(encoding string) *InitiateMultipartUploadOptions {
	v := encoding
	options.ContentEncoding = &v
	return options
}

// SetContentLanguage : Allow user to set ContentLanguage
func (options *InitiateMultipartUploadOptions) SetContentLanguage(language string) *InitiateMultipartUploadOptions {
	v := language
	options.ContentLanguage = &v
	return options
}

// SetExpires : Allow user to set Expires
func (options *InitiateMultipartUploadOptions) SetExpires(t *strfmt.DateTime) *InitiateMultipartUploadOptions {
	options.Expires = t
	return options
}

// SetXAmzServerSideEncryption : Allow user to set XAmzServerSideEncryption
func (options *InitiateMultipartUploadOptions) SetXAmzServerSideEncryption(algorithm string) *InitiateMultipartUploadOptions {
	v := algorithm
	options.XAmzServerSideEncryption = &v
	return options
}

// SetXAmzWebsiteRedirectLocation : Allow user to set XAmzWebsiteRedirectLocation
func (options *InitiateMultipartUploadOptions) SetXAmzWebsiteRedirectLocation(location string) *InitiateMultipartUploadOptions {
	v := location
	options.XAmzWebsiteRedirectLocation = &v
	return options
}

// SetXAmzServerSideEncryptionCustomerAlgorithm : Allow user to set XAmzServerSideEncryptionCustomerAlgorithm
func (options *InitiateMultipartUploadOptions) SetXAmzServerSideEncryptionCustomerAlgorithm(algorithm string) *InitiateMultipartUploadOptions {
	v := algorithm
	options.XAmzServerSideEncryptionCustomerAlgorithm = &v
	return options
}

// SetXAmzServerSideEncryptionCustomerKey : Allow user to set XAmzServerSideEncryptionCustomerKey
func (options *InitiateMultipartUploadOptions) SetXAmzServerSideEncryptionCustomerKey(key string) *InitiateMultipartUploadOptions {
	v := key
	options.XAmzServerSideEncryptionCustomerKey = &v
	return options
}

// SetXAmzServerSideEncryptionCustomerKeyMD5 : Allow user to set XAmzServerSideEncryptionCustomerKeyMD5
func (options *InitiateMultipartUploadOptions) SetXAmzServerSideEncryptionCustomerKeyMD5(keyMD5 string) *InitiateMultipartUploadOptions {
	v := keyMD5
	options.XAmzServerSideEncryptionCustomerKeyMD5 = &v
	return options
}

// SetXAmzTagging : Allow user to set XAmzTagging
func (options *InitiateMultipartUploadOptions) SetXAmzTagging(tagging string) *InitiateMultipartUploadOptions {
	v := tagging
	options.XAmzTagging = &v
	return options
}

// SetXAmzAcl : Allow user to set XAmzAcl
func (options *InitiateMultipartUploadOptions) SetXAmzAcl(acl string) *InitiateMultipartUploadOptions {
	v := acl
	options.XAmzAcl = &v
	return options
}

// SetHeaders : Allow user to set Headers
// Note: the map is stored by reference, not copied.
func (options *InitiateMultipartUploadOptions) SetHeaders(headers map[string]string) *InitiateMultipartUploadOptions {
	options.Headers = headers
	return options
}
// Initiator : Container element that identifies who initiated the multipart upload.
type Initiator struct {
// If the principal is an AWS account, it provides the Canonical User ID. If the principal is an IAM User, it provides
// a user ARN value.
ID *string `json:"ID,omitempty"`
// Display name of the principal.
DisplayName *string `json:"DisplayName,omitempty"`
}
// UnmarshalInitiator unmarshals an instance of Initiator from the specified map of raw messages.
func UnmarshalInitiator(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(Initiator)
	if err = core.UnmarshalPrimitive(m, "ID", &obj.ID); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "DisplayName", &obj.DisplayName); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// LifecycleExpiration : Container for the expiration for the lifecycle of the object.
type LifecycleExpiration struct {
// Indicates at what date the object is to be moved or deleted. The date should be in GMT ISO 8601 format.
Date *strfmt.DateTime `json:"Date,omitempty"`
// Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive
// integer.
Days *int64 `json:"Days,omitempty"`
// Indicates whether IBM COS will remove a delete marker with no noncurrent versions. If set to true, the delete marker
// will be expired; if set to false the policy takes no action. This cannot be specified with Days or Date in a
// Lifecycle Expiration Policy.
ExpiredObjectDeleteMarker *bool `json:"ExpiredObjectDeleteMarker,omitempty"`
}
// UnmarshalLifecycleExpiration unmarshals an instance of LifecycleExpiration from the specified map of raw messages.
func UnmarshalLifecycleExpiration(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(LifecycleExpiration)
	if err = core.UnmarshalPrimitive(m, "Date", &obj.Date); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Days", &obj.Days); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "ExpiredObjectDeleteMarker", &obj.ExpiredObjectDeleteMarker); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// LifecycleRule : A lifecycle rule for individual objects in an IBM COS bucket.
type LifecycleRule struct {
// Specifies the expiration for the lifecycle of the object in the form of date, days and, whether the object has a
// delete marker.
Expiration *LifecycleExpiration `json:"Expiration,omitempty"`
// Unique identifier for the rule. The value cannot be longer than 255 characters.
ID *string `json:"ID,omitempty"`
// Prefix identifying one or more objects to which the rule applies. This is no longer used; use `Filter` instead.
Prefix *string `json:"Prefix,omitempty"`
// The `Filter` is used to identify objects that a Lifecycle Rule applies to. A `Filter` must have exactly one of
// `Prefix`, `Tag`, or `And` specified.
Filter *LifecycleRuleFilter `json:"Filter,omitempty"`
// If 'Enabled', the rule is currently being applied. If 'Disabled', the rule is not currently being applied.
Status *string `json:"Status" validate:"required"`
// Specifies when an IBM COS object transitions to a specified storage class.
Transitions *LifecycleRuleTransitions `json:"Transitions,omitempty"`
}
// Constants associated with the LifecycleRule.Status property.
// If 'Enabled', the rule is currently being applied. If 'Disabled', the rule is not currently being applied.
const (
LifecycleRule_Status_Disabled = "Disabled"
LifecycleRule_Status_Enabled = "Enabled"
)
// UnmarshalLifecycleRule unmarshals an instance of LifecycleRule from the specified map of raw messages.
func UnmarshalLifecycleRule(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(LifecycleRule)
	if err = core.UnmarshalModel(m, "Expiration", &obj.Expiration, UnmarshalLifecycleExpiration); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "ID", &obj.ID); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Prefix", &obj.Prefix); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "Filter", &obj.Filter, UnmarshalLifecycleRuleFilter); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Status", &obj.Status); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "Transitions", &obj.Transitions, UnmarshalLifecycleRuleTransitions); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// LifecycleRuleAndOperator : This is used in a Lifecycle Rule Filter to apply a logical AND to two or more predicates. The Lifecycle Rule will
// apply to any object matching all of the predicates configured inside the And operator.
type LifecycleRuleAndOperator struct {
// Prefix identifying one or more objects to which the rule applies.
Prefix *string `json:"Prefix,omitempty"`
// All of these tags must exist in the object's tag set in order for the rule to apply.
Tags *LifecycleRuleAndOperatorTags `json:"Tags,omitempty"`
}
// UnmarshalLifecycleRuleAndOperator unmarshals an instance of LifecycleRuleAndOperator from the specified map of raw messages.
func UnmarshalLifecycleRuleAndOperator(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(LifecycleRuleAndOperator)
	if err = core.UnmarshalPrimitive(m, "Prefix", &obj.Prefix); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "Tags", &obj.Tags, UnmarshalLifecycleRuleAndOperatorTags); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// LifecycleRuleFilter : The `Filter` is used to identify objects that a Lifecycle Rule applies to. A `Filter` must have exactly one of
// `Prefix`, `Tag`, or `And` specified.
type LifecycleRuleFilter struct {
// Prefix identifying one or more objects to which the rule applies.
Prefix *string `json:"Prefix,omitempty"`
// This tag must exist in the object's tag set in order for the rule to apply.
Tag *Tag `json:"Tag,omitempty"`
// This is used in a Lifecycle Rule Filter to apply a logical AND to two or more predicates. The Lifecycle Rule will
// apply to any object matching all of the predicates configured inside the And operator.
And *LifecycleRuleAndOperator `json:"And,omitempty"`
}
// UnmarshalLifecycleRuleFilter unmarshals an instance of LifecycleRuleFilter from the specified map of raw messages.
func UnmarshalLifecycleRuleFilter(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(LifecycleRuleFilter)
	if err = core.UnmarshalPrimitive(m, "Prefix", &obj.Prefix); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "Tag", &obj.Tag, UnmarshalTag); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "And", &obj.And, UnmarshalLifecycleRuleAndOperator); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// ListBucketsOptions : The ListBuckets options.
type ListBucketsOptions struct {
// This header references the service instance that contains the buckets to list. This value can be either the full
// Cloud Resource Name (CRN) or just the GUID segment that identifies the service instance.
IbmServiceInstanceID *string `validate:"required"`
// If supplied, the returned listing will also include the provisioning code (`LocationConstraint`) for each bucket.
// This allows for inferring a bucket's location and associated endpoint.
Extended *bool
// Allows users to set headers on API requests
Headers map[string]string
}
// NewListBucketsOptions : Instantiate ListBucketsOptions
func (*IbmCloudObjectStorageS3ApiV2) NewListBucketsOptions(ibmServiceInstanceID string) *ListBucketsOptions {
	options := new(ListBucketsOptions)
	options.IbmServiceInstanceID = core.StringPtr(ibmServiceInstanceID)
	return options
}

// SetIbmServiceInstanceID : Allow user to set IbmServiceInstanceID
func (options *ListBucketsOptions) SetIbmServiceInstanceID(instanceID string) *ListBucketsOptions {
	v := instanceID
	options.IbmServiceInstanceID = &v
	return options
}

// SetExtended : Allow user to set Extended
func (options *ListBucketsOptions) SetExtended(extended bool) *ListBucketsOptions {
	v := extended
	options.Extended = &v
	return options
}

// SetHeaders : Allow user to set Headers
// Note: the map is stored by reference, not copied.
func (options *ListBucketsOptions) SetHeaders(headers map[string]string) *ListBucketsOptions {
	options.Headers = headers
	return options
}
// ListMultipartUploadsOptions : The ListMultipartUploads options.
type ListMultipartUploadsOptions struct {
// The name of the bucket to which the multipart upload was initiated.
Bucket *string `validate:"required,ne="`
// Required flag marking this request as a multipart-upload listing (presumably the `?uploads` query
// parameter — verify against the request builder).
Uploads *bool `validate:"required"`
// Character you use to group keys. All keys that contain the same string between the prefix, if specified, and the
// first occurrence of the delimiter after the prefix are grouped under a single result element, `CommonPrefixes`. If
// you don't specify the prefix parameter, then the substring starts at the beginning of the key. The keys that are
// grouped under `CommonPrefixes` result element are not returned elsewhere in the response.
Delimiter *string
// Requests IBM COS to encode the object keys in the response and specifies the encoding method to use. An object key
// may contain any Unicode character; however, XML 1.0 parser cannot parse some characters, such as characters with an
// ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you can add this parameter to request
// that IBM COS encode the keys in the response.
EncodingType *string
// Together with upload-id-marker, this parameter specifies the multipart upload after which listing should begin. If
// `upload-id-marker` is not specified, only the keys lexicographically greater than the specified `key-marker` will be
// included in the list. If `upload-id-marker` is specified, any multipart uploads for a key equal to the `key-marker`
// might also be included, provided those multipart uploads have upload IDs lexicographically greater than the
// specified `upload-id-marker`.
KeyMarker *string
// Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the response body. 1,000 is the maximum
// number of uploads that can be returned in a response.
MaxUploads *int64
// Lists in-progress uploads only for those keys that begin with the specified prefix. You can use prefixes to separate
// a bucket into different grouping of keys. (You can think of using prefix to make groups in the same way you'd use a
// folder in a file system.).
Prefix *string
// Together with key-marker, specifies the multipart upload after which listing should begin. If key-marker is not
// specified, the upload-id-marker parameter is ignored. Otherwise, any multipart uploads for a key equal to the
// key-marker might be included in the list only if they have an upload ID lexicographically greater than the specified
// `upload-id-marker`.
UploadIdMarker *string
// Pagination limit. (Generator note: renamed from MaxUploads to PaginationLimit.)
PaginationLimit *string
// Pagination token. (Generator note: corresponds to UploadIdMarker.)
PaginationToken *string
// Allows users to set headers on API requests
Headers map[string]string
}
// Constants associated with the ListMultipartUploadsOptions.EncodingType property.
// Requests IBM COS to encode the object keys in the response and specifies the encoding method to use. An object key
// may contain any Unicode character; however, XML 1.0 parser cannot parse some characters, such as characters with an
// ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you can add this parameter to request
// that IBM COS encode the keys in the response.
const (
// URL encoding is the only encoding method value defined by this SDK.
ListMultipartUploadsOptions_EncodingType_URL = "url"
)
// NewListMultipartUploadsOptions : Instantiate ListMultipartUploadsOptions
func (*IbmCloudObjectStorageS3ApiV2) NewListMultipartUploadsOptions(bucket string, uploads bool) *ListMultipartUploadsOptions {
	options := new(ListMultipartUploadsOptions)
	options.Bucket = core.StringPtr(bucket)
	options.Uploads = core.BoolPtr(uploads)
	return options
}

// SetBucket : Allow user to set Bucket
func (options *ListMultipartUploadsOptions) SetBucket(bucket string) *ListMultipartUploadsOptions {
	v := bucket
	options.Bucket = &v
	return options
}

// SetUploads : Allow user to set Uploads
func (options *ListMultipartUploadsOptions) SetUploads(uploads bool) *ListMultipartUploadsOptions {
	v := uploads
	options.Uploads = &v
	return options
}

// SetDelimiter : Allow user to set Delimiter
func (options *ListMultipartUploadsOptions) SetDelimiter(delimiter string) *ListMultipartUploadsOptions {
	v := delimiter
	options.Delimiter = &v
	return options
}

// SetEncodingType : Allow user to set EncodingType
func (options *ListMultipartUploadsOptions) SetEncodingType(encodingType string) *ListMultipartUploadsOptions {
	v := encodingType
	options.EncodingType = &v
	return options
}

// SetKeyMarker : Allow user to set KeyMarker
func (options *ListMultipartUploadsOptions) SetKeyMarker(marker string) *ListMultipartUploadsOptions {
	v := marker
	options.KeyMarker = &v
	return options
}

// SetMaxUploads : Allow user to set MaxUploads
func (options *ListMultipartUploadsOptions) SetMaxUploads(maxUploads int64) *ListMultipartUploadsOptions {
	n := maxUploads
	options.MaxUploads = &n
	return options
}

// SetPrefix : Allow user to set Prefix
func (options *ListMultipartUploadsOptions) SetPrefix(prefix string) *ListMultipartUploadsOptions {
	v := prefix
	options.Prefix = &v
	return options
}

// SetUploadIdMarker : Allow user to set UploadIdMarker
func (options *ListMultipartUploadsOptions) SetUploadIdMarker(marker string) *ListMultipartUploadsOptions {
	v := marker
	options.UploadIdMarker = &v
	return options
}

// SetPaginationLimit : Allow user to set PaginationLimit
func (options *ListMultipartUploadsOptions) SetPaginationLimit(limit string) *ListMultipartUploadsOptions {
	v := limit
	options.PaginationLimit = &v
	return options
}

// SetPaginationToken : Allow user to set PaginationToken
func (options *ListMultipartUploadsOptions) SetPaginationToken(token string) *ListMultipartUploadsOptions {
	v := token
	options.PaginationToken = &v
	return options
}

// SetHeaders : Allow user to set Headers
// Note: the map is stored by reference, not copied.
func (options *ListMultipartUploadsOptions) SetHeaders(headers map[string]string) *ListMultipartUploadsOptions {
	options.Headers = headers
	return options
}
// ListMultipartUploadsOutput : ListMultipartUploadsOutput struct
type ListMultipartUploadsOutput struct {
// The name of the bucket to which the multipart upload was initiated.
Bucket *string `json:"Bucket,omitempty"`
// The key at or after which the listing began.
KeyMarker *string `json:"KeyMarker,omitempty"`
// Upload ID after which listing began.
UploadIdMarker *string `json:"UploadIdMarker,omitempty"`
// When a list is truncated, this element specifies the value that should be used for the `key-marker` request
// parameter in a subsequent request.
NextKeyMarker *string `json:"NextKeyMarker,omitempty"`
// When a prefix is provided in the request, this field contains the specified prefix. The result contains only keys
// starting with the specified prefix.
Prefix *string `json:"Prefix,omitempty"`
// Contains the delimiter you specified in the request. If you don't specify a delimiter in your request, this element
// is absent from the response.
Delimiter *string `json:"Delimiter,omitempty"`
// When a list is truncated, this element specifies the value that should be used for the `upload-id-marker` request
// parameter in a subsequent request.
NextUploadIdMarker *string `json:"NextUploadIdMarker,omitempty"`
// Maximum number of multipart uploads that could have been included in the response.
MaxUploads *int64 `json:"MaxUploads,omitempty"`
// Indicates whether the returned list of multipart uploads is truncated. A value of true indicates that the list was
// truncated. The list can be truncated if the number of multipart uploads exceeds the limit allowed or specified by
// `max-uploads`.
IsTruncated *bool `json:"IsTruncated,omitempty"`
// Container for elements related to a particular multipart upload. A response can contain zero or more `Upload`
// elements.
Uploads *ListMultipartUploadsOutputUploads `json:"Uploads,omitempty"`
// If you specify a delimiter in the request, then the result returns each distinct key prefix containing the delimiter
// in a `CommonPrefixes` element. The distinct key prefixes are returned in the `Prefix` child element.
CommonPrefixes *CommonPrefixList `json:"CommonPrefixes,omitempty"`
// Encoding type used by IBM COS to encode object keys in the response. If you specify `encoding-type` request
// parameter, IBM COS includes this element in the response, and returns encoded key name values in the following
// response elements: `Delimiter`, `KeyMarker`, `Prefix`, `NextKeyMarker`, `Key`.
EncodingType *string `json:"EncodingType,omitempty"`
}
// Constants associated with the ListMultipartUploadsOutput.EncodingType property.
// Encoding type used by IBM COS to encode object keys in the response. If you specify `encoding-type` request
// parameter, IBM COS includes this element in the response, and returns encoded key name values in the following
// response elements: `Delimiter`, `KeyMarker`, `Prefix`, `NextKeyMarker`, `Key`.
const (
ListMultipartUploadsOutput_EncodingType_URL = "url"
)
// UnmarshalListMultipartUploadsOutput unmarshals an instance of ListMultipartUploadsOutput from the specified map of raw messages.
func UnmarshalListMultipartUploadsOutput(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(ListMultipartUploadsOutput)
	if err = core.UnmarshalPrimitive(m, "Bucket", &obj.Bucket); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "KeyMarker", &obj.KeyMarker); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "UploadIdMarker", &obj.UploadIdMarker); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "NextKeyMarker", &obj.NextKeyMarker); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Prefix", &obj.Prefix); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Delimiter", &obj.Delimiter); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "NextUploadIdMarker", &obj.NextUploadIdMarker); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "MaxUploads", &obj.MaxUploads); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "IsTruncated", &obj.IsTruncated); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "Uploads", &obj.Uploads, UnmarshalListMultipartUploadsOutputUploads); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "CommonPrefixes", &obj.CommonPrefixes, UnmarshalCommonPrefixList); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "EncodingType", &obj.EncodingType); err != nil {
		return
	}
	// Write the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// ListObjectsOptions : The ListObjects options.
type ListObjectsOptions struct {
// The name of the bucket to be listed.
Bucket *string `validate:"required,ne="`
// A delimiter is a character you use to group keys.
Delimiter *string
// Requests COS to url-encode the object keys in the response. Object keys may contain any Unicode character; however,
// XML 1.0 parser cannot parse some characters, such as characters with an ASCII value from 0 to 10. For characters
// that are not supported in XML 1.0, you can add this parameter to request that COS encodes the keys in the response.
EncodingType *string
// Specifies the key to start with when listing objects in a bucket.
Marker *string
// Sets the maximum number of keys returned in the response. By default the API returns up to 1,000 key names. The
// response might contain fewer keys but will never contain more.
MaxKeys *int64
// Limits the response to keys that begin with the specified prefix.
Prefix *string
// Allows users to set headers on API requests
Headers map[string]string
}
// Constants associated with the ListObjectsOptions.EncodingType property.
// Requests COS to url-encode the object keys in the response. Object keys may contain any Unicode character; however,
// XML 1.0 parser cannot parse some characters, such as characters with an ASCII value from 0 to 10. For characters that
// are not supported in XML 1.0, you can add this parameter to request that COS encodes the keys in the response.
const (
// URL encoding is the only encoding method value defined by this SDK.
ListObjectsOptions_EncodingType_URL = "url"
)
// NewListObjectsOptions : Instantiate ListObjectsOptions
func (*IbmCloudObjectStorageS3ApiV2) NewListObjectsOptions(bucket string) *ListObjectsOptions {
	options := new(ListObjectsOptions)
	options.Bucket = core.StringPtr(bucket)
	return options
}

// SetBucket : Allow user to set Bucket
func (options *ListObjectsOptions) SetBucket(bucket string) *ListObjectsOptions {
	v := bucket
	options.Bucket = &v
	return options
}

// SetDelimiter : Allow user to set Delimiter
func (options *ListObjectsOptions) SetDelimiter(delimiter string) *ListObjectsOptions {
	v := delimiter
	options.Delimiter = &v
	return options
}

// SetEncodingType : Allow user to set EncodingType
func (options *ListObjectsOptions) SetEncodingType(encodingType string) *ListObjectsOptions {
	v := encodingType
	options.EncodingType = &v
	return options
}

// SetMarker : Allow user to set Marker
func (options *ListObjectsOptions) SetMarker(marker string) *ListObjectsOptions {
	v := marker
	options.Marker = &v
	return options
}

// SetMaxKeys : Allow user to set MaxKeys
func (options *ListObjectsOptions) SetMaxKeys(maxKeys int64) *ListObjectsOptions {
	n := maxKeys
	options.MaxKeys = &n
	return options
}

// SetPrefix : Allow user to set Prefix
func (options *ListObjectsOptions) SetPrefix(prefix string) *ListObjectsOptions {
	v := prefix
	options.Prefix = &v
	return options
}

// SetHeaders : Allow user to set Headers
// Note: the map is stored by reference, not copied.
func (options *ListObjectsOptions) SetHeaders(headers map[string]string) *ListObjectsOptions {
	options.Headers = headers
	return options
}
// ListObjectsOutput : ListObjectsOutput struct
type ListObjectsOutput struct {
// A flag that indicates whether IBM COS returned all of the results that satisfied the search criteria.
IsTruncated *bool `json:"IsTruncated,omitempty"`
// Indicates where in the bucket listing begins. Marker is included in the response if it was sent with the request.
Marker *string `json:"Marker,omitempty"`
// When response is truncated (the `IsTruncated` element value in the response is true), you can use the key name in
// this field as marker in the subsequent request to get next set of objects. IBM COS lists objects in alphabetical
// order.
//
// **Note:** This element is returned only if you have delimiter request parameter specified. If response does not
// include the `NextMarker` and it is truncated, you can use the value of the last key in the response as the marker in
// the subsequent request to get the next set of object keys.
NextMarker *string `json:"NextMarker,omitempty"`
// Metadata about each object returned.
Contents *ObjectList `json:"Contents,omitempty"`
// The bucket name.
Name *string `json:"Name,omitempty"`
// Keys that begin with the indicated prefix.
Prefix *string `json:"Prefix,omitempty"`
// Causes keys that contain the same string between the prefix and the first occurrence of the delimiter to be rolled
// up into a single result element in the `CommonPrefixes` collection. These rolled-up keys are not returned elsewhere
// in the response. Each rolled-up result counts as only one return against the `MaxKeys` value.
Delimiter *string `json:"Delimiter,omitempty"`
// The maximum number of keys returned in the response body.
MaxKeys *int64 `json:"MaxKeys,omitempty"`
// A response can contain `CommonPrefixes` only if you specify a delimiter. `CommonPrefixes` contains all (if there are
// any) keys between `Prefix` and the next occurrence of the string specified by the delimiter, and so lists keys that
// act like subdirectories in the directory specified by `Prefix`. For example, if the prefix is notes/ and the
// delimiter is a slash (/) as in notes/summer/july, the common prefix is notes/summer/. All of the keys that roll up
// into a common prefix count as a single return when calculating the number of returns.
CommonPrefixes *CommonPrefixList `json:"CommonPrefixes,omitempty"`
// Encoding type used by IBM COS to encode object keys in the response.
EncodingType *string `json:"EncodingType,omitempty"`
}
// Constants associated with the ListObjectsOutput.EncodingType property.
// Encoding type used by IBM COS to encode object keys in the response.
const (
ListObjectsOutput_EncodingType_URL = "url"
)
// UnmarshalListObjectsOutput unmarshals an instance of ListObjectsOutput from the specified map of raw messages.
// result must be a pointer to a *ListObjectsOutput; on success it is set to the populated object.
func UnmarshalListObjectsOutput(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(ListObjectsOutput)
	// Decode each known field in turn, stopping at the first decoding error.
	if err = core.UnmarshalPrimitive(m, "IsTruncated", &obj.IsTruncated); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Marker", &obj.Marker); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "NextMarker", &obj.NextMarker); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "Contents", &obj.Contents, UnmarshalObjectList); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Name", &obj.Name); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Prefix", &obj.Prefix); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Delimiter", &obj.Delimiter); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "MaxKeys", &obj.MaxKeys); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "CommonPrefixes", &obj.CommonPrefixes, UnmarshalCommonPrefixList); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "EncodingType", &obj.EncodingType); err != nil {
		return
	}
	// Store the populated object through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// ListObjectsV2Options : The ListObjectsV2 options.
// Request parameters for the ListObjectsV2 operation; only Bucket and
// ListType are required, all other fields are optional.
type ListObjectsV2Options struct {
	// Bucket name to list.
	Bucket *string `validate:"required,ne="`
	ListType *string `validate:"required"`
	// A delimiter is a character you use to group keys.
	Delimiter *string
	// Encoding type used by IBM COS to encode object keys in the response.
	EncodingType *string
	// Sets the maximum number of keys returned in the response. By default the API returns up to 1,000 key names. The
	// response might contain fewer keys but will never contain more.
	MaxKeys *int64
	// Limits the response to keys that begin with the specified prefix.
	Prefix *string
	// `ContinuationToken` indicates IBM COS that the list is being continued on this bucket with a token.
	// ContinuationToken is obfuscated and is not a real key.
	ContinuationToken *string
	// The owner field (Service Instance ID) is not present in listV2 by default, if you want to return the Service
	// Instance ID with each key in the result, then set the fetch owner field to true.
	FetchOwner *bool
	// `StartAfter` is where you want IBM COS to start listing from. IBM COS starts listing after this specified key.
	// `StartAfter` can be any key in the bucket.
	StartAfter *string
	// Allows users to set headers on API requests
	Headers map[string]string
}
// Constants associated with the ListObjectsV2Options.EncodingType property.
// Encoding type used by IBM COS to encode object keys in the response.
// "url" is currently the only supported encoding.
const (
	ListObjectsV2Options_EncodingType_URL = "url"
)
// NewListObjectsV2Options : Instantiate ListObjectsV2Options with the two required properties set.
func (*IbmCloudObjectStorageS3ApiV2) NewListObjectsV2Options(bucket string, listType string) *ListObjectsV2Options {
	// Build via the fluent setters rather than a struct literal.
	return (&ListObjectsV2Options{}).SetBucket(bucket).SetListType(listType)
}
// SetBucket : Allow user to set Bucket. Returns the receiver so calls can be chained.
func (options *ListObjectsV2Options) SetBucket(bucket string) *ListObjectsV2Options {
	// Taking the address of the parameter copy is equivalent to core.StringPtr.
	options.Bucket = &bucket
	return options
}
// SetListType : Allow user to set ListType. Returns the receiver so calls can be chained.
func (options *ListObjectsV2Options) SetListType(listType string) *ListObjectsV2Options {
	options.ListType = &listType
	return options
}
// SetDelimiter : Allow user to set Delimiter. Returns the receiver so calls can be chained.
func (options *ListObjectsV2Options) SetDelimiter(delimiter string) *ListObjectsV2Options {
	options.Delimiter = &delimiter
	return options
}
// SetEncodingType : Allow user to set EncodingType. Returns the receiver so calls can be chained.
func (options *ListObjectsV2Options) SetEncodingType(encodingType string) *ListObjectsV2Options {
	options.EncodingType = &encodingType
	return options
}
// SetMaxKeys : Allow user to set MaxKeys. Returns the receiver so calls can be chained.
func (options *ListObjectsV2Options) SetMaxKeys(maxKeys int64) *ListObjectsV2Options {
	options.MaxKeys = &maxKeys
	return options
}
// SetPrefix : Allow user to set Prefix. Returns the receiver so calls can be chained.
func (options *ListObjectsV2Options) SetPrefix(prefix string) *ListObjectsV2Options {
	options.Prefix = &prefix
	return options
}
// SetContinuationToken : Allow user to set ContinuationToken. Returns the receiver so calls can be chained.
func (options *ListObjectsV2Options) SetContinuationToken(continuationToken string) *ListObjectsV2Options {
	options.ContinuationToken = &continuationToken
	return options
}
// SetFetchOwner : Allow user to set FetchOwner. Returns the receiver so calls can be chained.
func (options *ListObjectsV2Options) SetFetchOwner(fetchOwner bool) *ListObjectsV2Options {
	options.FetchOwner = &fetchOwner
	return options
}
// SetStartAfter : Allow user to set StartAfter. Returns the receiver so calls can be chained.
func (options *ListObjectsV2Options) SetStartAfter(startAfter string) *ListObjectsV2Options {
	options.StartAfter = &startAfter
	return options
}
// SetHeaders : Allow user to set request Headers. Returns the receiver so calls can be chained.
func (options *ListObjectsV2Options) SetHeaders(headers map[string]string) *ListObjectsV2Options {
	options.Headers = headers
	return options
}
// ListObjectsV2Output : ListObjectsV2Output struct
// Response payload of the ListObjectsV2 operation. Every field is an
// optional pointer (`omitempty`) so that elements absent from the service
// response remain nil rather than defaulting to a zero value.
type ListObjectsV2Output struct {
	// Set to false if all of the results were returned. Set to true if more keys are available to return. If the number of
	// results exceeds that specified by MaxKeys, all of the results might not be returned.
	IsTruncated *bool `json:"IsTruncated,omitempty"`
	// Metadata about each object returned.
	Contents *ObjectList `json:"Contents,omitempty"`
	// <p>The bucket name.</p> <p>When using this API with an access point, you must direct requests to the access point
	// hostname. The access point hostname takes the form
	// <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com. When using this operation with
	// an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more
	// information about access point ARNs, see <a
	// href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html">Using Access Points</a> in the
	// <i>Amazon Simple Storage Service Developer Guide</i>.</p> <p>When using this API with IBM COS on Outposts, you must
	// direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
	// <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com. When using this
	// operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket
	// name. For more information about S3 on Outposts ARNs, see <a
	// href="https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html">Using S3 on Outposts</a> in the <i>Amazon
	// Simple Storage Service Developer Guide</i>.</p>.
	Name *string `json:"Name,omitempty"`
	// Keys that begin with the indicated prefix.
	Prefix *string `json:"Prefix,omitempty"`
	// Causes keys that contain the same string between the prefix and the first occurrence of the delimiter to be rolled
	// up into a single result element in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere in
	// the response. Each rolled-up result counts as only one return against the `MaxKeys` value.
	Delimiter *string `json:"Delimiter,omitempty"`
	// Sets the maximum number of keys returned in the response. By default the API returns up to 1,000 key names. The
	// response might contain fewer keys but will never contain more.
	MaxKeys *int64 `json:"MaxKeys,omitempty"`
	// <p>All of the keys rolled up into a common prefix count as a single return when calculating the number of
	// returns.</p> <p>A response can contain `CommonPrefixes` only if you specify a delimiter.</p> <p> `CommonPrefixes`
	// contains all (if there are any) keys between `Prefix` and the next occurrence of the string specified by a
	// delimiter.</p> <p> `CommonPrefixes` lists keys that act like subdirectories in the directory specified by
	// `Prefix`.</p> <p>For example, if the prefix is `notes/` and the delimiter is a slash (`/`) as in
	// `notes/summer/july`, the common prefix is `notes/summer/`. All of the keys that roll up into a common prefix count
	// as a single return when calculating the number of returns. </p>.
	CommonPrefixes *CommonPrefixList `json:"CommonPrefixes,omitempty"`
	// Encoding type used by IBM COS to encode object keys in the response. If you specify `encoding-type` request
	// parameter, IBM COS includes this element in the response, and returns encoded key name values in the following
	// response elements: `Delimiter`, `KeyMarker`, `Prefix`, `NextKeyMarker`, `Key`.
	EncodingType *string `json:"EncodingType,omitempty"`
	// KeyCount is the number of keys returned with this request. KeyCount will always be less than equals to MaxKeys
	// field. Say you ask for 50 keys, your result will include less than equals 50 keys.
	KeyCount *int64 `json:"KeyCount,omitempty"`
	// If ContinuationToken was sent with the request, it is included in the response.
	ContinuationToken *string `json:"ContinuationToken,omitempty"`
	// `NextContinuationToken` is sent when `isTruncated` is true, which means there are more keys in the bucket that can
	// be listed. The next list requests to IBM COS can be continued with this `NextContinuationToken`.
	// `NextContinuationToken` is obfuscated and is not a real key.
	NextContinuationToken *string `json:"NextContinuationToken,omitempty"`
	// If StartAfter was sent with the request, it is included in the response.
	StartAfter *string `json:"StartAfter,omitempty"`
}
// Constants associated with the ListObjectsV2Output.EncodingType property.
// Encoding type used by IBM COS to encode object keys in the response. If you specify `encoding-type` request
// parameter, IBM COS includes this element in the response, and returns encoded key name values in the following
// response elements: `Delimiter`, `KeyMarker`, `Prefix`, `NextKeyMarker`, `Key`.
const (
	ListObjectsV2Output_EncodingType_URL = "url"
)
// UnmarshalListObjectsV2Output unmarshals an instance of ListObjectsV2Output from the specified map of raw messages.
// result must be a pointer to a *ListObjectsV2Output; on success it is set to the populated object.
func UnmarshalListObjectsV2Output(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(ListObjectsV2Output)
	// Decode each known field in turn, stopping at the first decoding error.
	if err = core.UnmarshalPrimitive(m, "IsTruncated", &obj.IsTruncated); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "Contents", &obj.Contents, UnmarshalObjectList); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Name", &obj.Name); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Prefix", &obj.Prefix); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Delimiter", &obj.Delimiter); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "MaxKeys", &obj.MaxKeys); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "CommonPrefixes", &obj.CommonPrefixes, UnmarshalCommonPrefixList); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "EncodingType", &obj.EncodingType); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "KeyCount", &obj.KeyCount); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "ContinuationToken", &obj.ContinuationToken); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "NextContinuationToken", &obj.NextContinuationToken); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "StartAfter", &obj.StartAfter); err != nil {
		return
	}
	// Store the populated object through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// ListPartsOptions : The ListParts options.
// Request parameters for the ListParts operation; Bucket, Key, and UploadID
// are required, the remaining fields are optional.
type ListPartsOptions struct {
	// The bucket where the parts have been uploaded.
	Bucket *string `validate:"required,ne="`
	// Object key for which the multipart upload was initiated.
	Key *string `validate:"required,ne="`
	// Upload ID identifying the multipart upload whose parts are being listed.
	UploadID *string `validate:"required"`
	// Sets the maximum number of parts to return.
	MaxParts *int64
	// Specifies the part after which listing should begin. Only parts with higher part numbers will be listed.
	PartNumberMarker *int64
	// Allows users to set headers on API requests
	Headers map[string]string
}
// NewListPartsOptions : Instantiate ListPartsOptions with the three required properties set.
func (*IbmCloudObjectStorageS3ApiV2) NewListPartsOptions(bucket string, key string, uploadID string) *ListPartsOptions {
	// Build via the fluent setters rather than a struct literal.
	return (&ListPartsOptions{}).SetBucket(bucket).SetKey(key).SetUploadID(uploadID)
}
// SetBucket : Allow user to set Bucket. Returns the receiver so calls can be chained.
func (options *ListPartsOptions) SetBucket(bucket string) *ListPartsOptions {
	// Taking the address of the parameter copy is equivalent to core.StringPtr.
	options.Bucket = &bucket
	return options
}
// SetKey : Allow user to set Key. Returns the receiver so calls can be chained.
func (options *ListPartsOptions) SetKey(key string) *ListPartsOptions {
	options.Key = &key
	return options
}
// SetUploadID : Allow user to set UploadID. Returns the receiver so calls can be chained.
func (options *ListPartsOptions) SetUploadID(uploadID string) *ListPartsOptions {
	options.UploadID = &uploadID
	return options
}
// SetMaxParts : Allow user to set MaxParts. Returns the receiver so calls can be chained.
func (options *ListPartsOptions) SetMaxParts(maxParts int64) *ListPartsOptions {
	options.MaxParts = &maxParts
	return options
}
// SetPartNumberMarker : Allow user to set PartNumberMarker. Returns the receiver so calls can be chained.
func (options *ListPartsOptions) SetPartNumberMarker(partNumberMarker int64) *ListPartsOptions {
	options.PartNumberMarker = &partNumberMarker
	return options
}
// SetHeaders : Allow user to set request Headers. Returns the receiver so calls can be chained.
func (options *ListPartsOptions) SetHeaders(headers map[string]string) *ListPartsOptions {
	options.Headers = headers
	return options
}
// ListPartsOutput : ListPartsOutput struct
// Response payload of the ListParts operation. Every field is an optional
// pointer (`omitempty`) so that elements absent from the service response
// remain nil rather than defaulting to a zero value.
type ListPartsOutput struct {
	// The name of the bucket to which the multipart upload was initiated.
	Bucket *string `json:"Bucket,omitempty"`
	// Object key for which the multipart upload was initiated.
	Key *string `json:"Key,omitempty"`
	// Upload ID identifying the multipart upload whose parts are being listed.
	UploadID *string `json:"UploadId,omitempty"`
	// When a list is truncated, this element specifies the last part in the list, as well as the value to use for the
	// part-number-marker request parameter in a subsequent request.
	PartNumberMarker *int64 `json:"PartNumberMarker,omitempty"`
	// When a list is truncated, this element specifies the last part in the list, as well as the value to use for the
	// part-number-marker request parameter in a subsequent request.
	NextPartNumberMarker *int64 `json:"NextPartNumberMarker,omitempty"`
	// Maximum number of parts that were allowed in the response.
	MaxParts *int64 `json:"MaxParts,omitempty"`
	// Indicates whether the returned list of parts is truncated. A true value indicates that the list was truncated. A
	// list can be truncated if the number of parts exceeds the limit returned in the MaxParts element.
	IsTruncated *bool `json:"IsTruncated,omitempty"`
	// Container for elements related to a particular part. A response can contain zero or more `Part` elements.
	Parts *ListPartsOutputParts `json:"Parts,omitempty"`
	// Container element that identifies who initiated the multipart upload. If the initiator is an AWS account, this
	// element provides the same information as the `Owner` element. If the initiator is an IAM User, this element provides
	// the user ARN and display name.
	Initiator *Initiator `json:"Initiator,omitempty"`
	// Container element that identifies the object owner, after the object is created. If multipart upload is initiated by
	// an IAM user, this element provides the parent account ID and display name.
	Owner *Owner `json:"Owner,omitempty"`
	// Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded object.
	StorageClass *string `json:"StorageClass,omitempty"`
}
// Constants associated with the ListPartsOutput.StorageClass property.
// Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded object.
// NOTE(review): the enumerated values below (ACCELERATED/GLACIER/STANDARD) do not
// include REDUCED_REDUNDANCY mentioned above — confirm against the service spec.
const (
	ListPartsOutput_StorageClass_Accelerated = "ACCELERATED"
	ListPartsOutput_StorageClass_Glacier = "GLACIER"
	ListPartsOutput_StorageClass_Standard = "STANDARD"
)
// UnmarshalListPartsOutput unmarshals an instance of ListPartsOutput from the specified map of raw messages.
// result must be a pointer to a *ListPartsOutput; on success it is set to the populated object.
func UnmarshalListPartsOutput(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(ListPartsOutput)
	// Decode each known field in turn, stopping at the first decoding error.
	if err = core.UnmarshalPrimitive(m, "Bucket", &obj.Bucket); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Key", &obj.Key); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "UploadId", &obj.UploadID); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "PartNumberMarker", &obj.PartNumberMarker); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "NextPartNumberMarker", &obj.NextPartNumberMarker); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "MaxParts", &obj.MaxParts); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "IsTruncated", &obj.IsTruncated); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "Parts", &obj.Parts, UnmarshalListPartsOutputParts); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "Initiator", &obj.Initiator, UnmarshalInitiator); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "Owner", &obj.Owner, UnmarshalOwner); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "StorageClass", &obj.StorageClass); err != nil {
		return
	}
	// Store the populated object through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// MultipartUpload : Container for the `MultipartUpload` for the IBM COS object.
// Every field is an optional pointer (`omitempty`) so that elements absent
// from the service response remain nil.
type MultipartUpload struct {
	// Upload ID that identifies the multipart upload.
	UploadID *string `json:"UploadId,omitempty"`
	// Key of the object for which the multipart upload was initiated.
	Key *string `json:"Key,omitempty"`
	// Date and time at which the multipart upload was initiated.
	Initiated *strfmt.DateTime `json:"Initiated,omitempty"`
	// The class of storage used to store the object.
	StorageClass *string `json:"StorageClass,omitempty"`
	// Specifies the owner of the object that is part of the multipart upload.
	Owner *Owner `json:"Owner,omitempty"`
	// Identifies who initiated the multipart upload.
	Initiator *Initiator `json:"Initiator,omitempty"`
}
// Constants associated with the MultipartUpload.StorageClass property.
// The class of storage used to store the object.
const (
	MultipartUpload_StorageClass_Accelerated = "ACCELERATED"
	MultipartUpload_StorageClass_Glacier = "GLACIER"
	MultipartUpload_StorageClass_Standard = "STANDARD"
)
// UnmarshalMultipartUpload unmarshals an instance of MultipartUpload from the specified map of raw messages.
// result must be a pointer to a *MultipartUpload; on success it is set to the populated object.
func UnmarshalMultipartUpload(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(MultipartUpload)
	// Decode each known field in turn, stopping at the first decoding error.
	if err = core.UnmarshalPrimitive(m, "UploadId", &obj.UploadID); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Key", &obj.Key); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Initiated", &obj.Initiated); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "StorageClass", &obj.StorageClass); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "Owner", &obj.Owner, UnmarshalOwner); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "Initiator", &obj.Initiator, UnmarshalInitiator); err != nil {
		return
	}
	// Store the populated object through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// Object : An object consists of data and its descriptive metadata.
// Every field is an optional pointer (`omitempty`) so that elements absent
// from the service response remain nil.
type Object struct {
	// The name that you assign to an object. You use the object key to retrieve the object.
	Key *string `json:"Key,omitempty"`
	// The date the Object was Last Modified.
	LastModified *strfmt.DateTime `json:"LastModified,omitempty"`
	// <p>The entity tag is a hash of the object. The ETag reflects changes only to the contents of an object, not its
	// metadata. The ETag may or may not be an MD5 digest of the object data. Whether or not it is depends on how the
	// object was created and how it is encrypted as described below:</p> <ul> <li> <p>Objects created by the PUT Object,
	// POST Object, or Copy operation, or through the AWS Management Console, and are encrypted by SSE-S3 or plaintext,
	// have ETags that are an MD5 digest of their object data.</p> </li> <li> <p>Objects created by the PUT Object, POST
	// Object, or Copy operation, or through the AWS Management Console, and are encrypted by SSE-C or SSE-KMS, have ETags
	// that are not an MD5 digest of their object data.</p> </li> <li> <p>If an object is created by either the Multipart
	// Upload or Part Copy operation, the ETag is not an MD5 digest, regardless of the method of encryption.</p> </li>
	// </ul>.
	ETag *string `json:"ETag,omitempty"`
	// Size in bytes of the object.
	Size *int64 `json:"Size,omitempty"`
	// The class of storage used to store the object.
	StorageClass *string `json:"StorageClass,omitempty"`
	// The owner of the object.
	Owner *Owner `json:"Owner,omitempty"`
}
// Constants associated with the Object.StorageClass property.
// The class of storage used to store the object.
const (
	Object_StorageClass_Accelerated = "ACCELERATED"
	Object_StorageClass_Glacier = "GLACIER"
	Object_StorageClass_Standard = "STANDARD"
)
// UnmarshalObject unmarshals an instance of Object from the specified map of raw messages.
// result must be a pointer to a *Object; on success it is set to the populated object.
func UnmarshalObject(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(Object)
	// Decode each known field in turn, stopping at the first decoding error.
	if err = core.UnmarshalPrimitive(m, "Key", &obj.Key); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "LastModified", &obj.LastModified); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "ETag", &obj.ETag); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Size", &obj.Size); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "StorageClass", &obj.StorageClass); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "Owner", &obj.Owner, UnmarshalOwner); err != nil {
		return
	}
	// Store the populated object through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// Owner : Container for the owner's display name and ID.
// Both fields are optional pointers (`omitempty`) and remain nil when the
// corresponding elements are absent from the service response.
type Owner struct {
	// Container for the display name of the owner.
	DisplayName *string `json:"DisplayName,omitempty"`
	// Container for the ID of the owner.
	ID *string `json:"ID,omitempty"`
}
// UnmarshalOwner unmarshals an instance of Owner from the specified map of raw messages.
// result must be a pointer to a *Owner; on success it is set to the populated object.
func UnmarshalOwner(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(Owner)
	// Decode each known field, stopping at the first decoding error.
	if err = core.UnmarshalPrimitive(m, "DisplayName", &obj.DisplayName); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "ID", &obj.ID); err != nil {
		return
	}
	// Store the populated object through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// Part : Container for elements related to a part.
// All fields are optional pointers (`omitempty`) and remain nil when absent
// from the service response.
type Part struct {
	// Part number identifying the part. This is a positive integer between 1 and 10,000.
	PartNumber *int64 `json:"PartNumber,omitempty"`
	// Date and time at which the part was uploaded.
	LastModified *strfmt.DateTime `json:"LastModified,omitempty"`
	// Entity tag returned when the part was uploaded.
	ETag *string `json:"ETag,omitempty"`
	// Size in bytes of the uploaded part data.
	Size *int64 `json:"Size,omitempty"`
}
// UnmarshalPart unmarshals an instance of Part from the specified map of raw messages.
// result must be a pointer to a *Part; on success it is set to the populated object.
func UnmarshalPart(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(Part)
	// Decode each known field, stopping at the first decoding error.
	if err = core.UnmarshalPrimitive(m, "PartNumber", &obj.PartNumber); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "LastModified", &obj.LastModified); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "ETag", &obj.ETag); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Size", &obj.Size); err != nil {
		return
	}
	// Store the populated object through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// PublicAccessBlockConfiguration : The PublicAccessBlock configuration that you want to apply to this IBM COS bucket. You can enable the configuration
// options in any combination. For more information about when IBM COS considers a bucket or object public, see <a
// href="https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status">The
// Meaning of "Public"</a> in the <i>Amazon Simple Storage Service Developer Guide</i>.
// All fields are optional pointers (`omitempty`); a nil field leaves the
// corresponding setting unspecified.
type PublicAccessBlockConfiguration struct {
	// <p>Specifies whether IBM COS should block public access control lists (ACLs) for this bucket and objects in this
	// bucket. Setting this element to `TRUE` causes the following behavior:</p> <ul> <li> <p>PUT Bucket acl and PUT Object
	// acl calls fail if the specified ACL is public.</p> </li> <li> <p>PUT Object calls fail if the request includes a
	// public ACL.</p> </li> <li> <p>PUT Bucket calls fail if the request includes a public ACL.</p> </li> </ul>
	// <p>Enabling this setting doesn't affect existing policies or ACLs.</p>.
	BlockPublicAcls *bool `json:"BlockPublicAcls,omitempty"`
	// <p>Specifies whether IBM COS should ignore public ACLs for this bucket and objects in this bucket. Setting this
	// element to `TRUE` causes IBM COS to ignore all public ACLs on this bucket and objects in this bucket.</p>
	// <p>Enabling this setting doesn't affect the persistence of any existing ACLs and doesn't prevent new public ACLs
	// from being set.</p>.
	IgnorePublicAcls *bool `json:"IgnorePublicAcls,omitempty"`
	// <p>Specifies whether IBM COS should block public bucket policies for this bucket. Setting this element to `TRUE`
	// causes IBM COS to reject calls to PUT Bucket policy if the specified bucket policy allows public access. </p>
	// <p>Enabling this setting doesn't affect existing bucket policies.</p>.
	BlockPublicPolicy *bool `json:"BlockPublicPolicy,omitempty"`
	// <p>Specifies whether IBM COS should restrict public bucket policies for this bucket. Setting this element to `TRUE`
	// restricts access to this bucket to only AWS services and authorized users within this account if the bucket has a
	// public policy.</p> <p>Enabling this setting doesn't affect previously stored bucket policies, except that public and
	// cross-account access within any public bucket policy, including non-public delegation to specific accounts, is
	// blocked.</p>.
	RestrictPublicBuckets *bool `json:"RestrictPublicBuckets,omitempty"`
}
// UnmarshalPublicAccessBlockConfiguration unmarshals an instance of PublicAccessBlockConfiguration from the specified map of raw messages.
// result must be a pointer to a *PublicAccessBlockConfiguration; on success it is set to the populated object.
func UnmarshalPublicAccessBlockConfiguration(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(PublicAccessBlockConfiguration)
	// Decode each known field, stopping at the first decoding error.
	if err = core.UnmarshalPrimitive(m, "BlockPublicAcls", &obj.BlockPublicAcls); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "IgnorePublicAcls", &obj.IgnorePublicAcls); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "BlockPublicPolicy", &obj.BlockPublicPolicy); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "RestrictPublicBuckets", &obj.RestrictPublicBuckets); err != nil {
		return
	}
	// Store the populated object through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// PutBucketAclOptions : The PutBucketAcl options.
// Request parameters for the PutBucketAcl operation; Bucket and Acl are
// required, XAmzAcl and Headers are optional.
type PutBucketAclOptions struct {
	// The bucket to which to apply the ACL.
	Bucket *string `validate:"required,ne="`
	Acl *bool `validate:"required"`
	// The canned ACL to apply to the bucket.
	XAmzAcl *string
	// Allows users to set headers on API requests
	Headers map[string]string
}
// Constants associated with the PutBucketAclOptions.XAmzAcl property.
// The canned ACL to apply to the bucket.
const (
	PutBucketAclOptions_XAmzAcl_Private = "private"
	PutBucketAclOptions_XAmzAcl_PublicRead = "public-read"
)
// NewPutBucketAclOptions : Instantiate PutBucketAclOptions with the two required properties set.
func (*IbmCloudObjectStorageS3ApiV2) NewPutBucketAclOptions(bucket string, acl bool) *PutBucketAclOptions {
	// Build via the fluent setters rather than a struct literal.
	return (&PutBucketAclOptions{}).SetBucket(bucket).SetAcl(acl)
}
// SetBucket : Allow user to set Bucket. Returns the receiver so calls can be chained.
func (options *PutBucketAclOptions) SetBucket(bucket string) *PutBucketAclOptions {
	// Taking the address of the parameter copy is equivalent to core.StringPtr.
	options.Bucket = &bucket
	return options
}
// SetAcl : Allow user to set Acl. Returns the receiver so calls can be chained.
func (options *PutBucketAclOptions) SetAcl(acl bool) *PutBucketAclOptions {
	options.Acl = &acl
	return options
}
// SetXAmzAcl : Allow user to set XAmzAcl. Returns the receiver so calls can be chained.
func (options *PutBucketAclOptions) SetXAmzAcl(xAmzAcl string) *PutBucketAclOptions {
	options.XAmzAcl = &xAmzAcl
	return options
}
// SetHeaders : Allow user to set request Headers. Returns the receiver so calls can be chained.
func (options *PutBucketAclOptions) SetHeaders(headers map[string]string) *PutBucketAclOptions {
	options.Headers = headers
	return options
}
// PutBucketCorsOptions : The PutBucketCors options.
// Request parameters for the PutBucketCors operation; Bucket, Cors, and Body
// are required, ContentMD5 and Headers are optional.
type PutBucketCorsOptions struct {
	// Specifies the bucket impacted by the CORS configuration.
	Bucket *string `validate:"required,ne="`
	Cors *bool `validate:"required"`
	Body *string `validate:"required"`
	// The base64-encoded 128-bit MD5 digest of the payload (just the request body without the headers) according to [RFC
	// 1864](http://www.ietf.org/rfc/rfc1864.txt). This header can be used as a message integrity check to verify that the
	// data is the same data that was originally sent. Although it is optional, it is recommended to use the Content-MD5
	// mechanism as an end-to-end integrity check.
	ContentMD5 *string
	// Allows users to set headers on API requests
	Headers map[string]string
}
// NewPutBucketCorsOptions : Instantiate PutBucketCorsOptions with the three required properties set.
func (*IbmCloudObjectStorageS3ApiV2) NewPutBucketCorsOptions(bucket string, cors bool, body string) *PutBucketCorsOptions {
	// Build via the fluent setters rather than a struct literal.
	return (&PutBucketCorsOptions{}).SetBucket(bucket).SetCors(cors).SetBody(body)
}
// SetBucket : Allow user to set Bucket. Returns the receiver so calls can be chained.
func (options *PutBucketCorsOptions) SetBucket(bucket string) *PutBucketCorsOptions {
	// Taking the address of the parameter copy is equivalent to core.StringPtr.
	options.Bucket = &bucket
	return options
}
// SetCors : Allow user to set Cors. Returns the receiver so calls can be chained.
func (options *PutBucketCorsOptions) SetCors(cors bool) *PutBucketCorsOptions {
	options.Cors = &cors
	return options
}
// SetBody : Allow user to set Body. Returns the receiver so calls can be chained.
func (options *PutBucketCorsOptions) SetBody(body string) *PutBucketCorsOptions {
	options.Body = &body
	return options
}
// SetContentMD5 : Allow user to set ContentMD5. Returns the receiver so calls can be chained.
func (options *PutBucketCorsOptions) SetContentMD5(contentMD5 string) *PutBucketCorsOptions {
	options.ContentMD5 = &contentMD5
	return options
}
// SetHeaders : Allow user to set request Headers. Returns the receiver so calls can be chained.
func (options *PutBucketCorsOptions) SetHeaders(headers map[string]string) *PutBucketCorsOptions {
	options.Headers = headers
	return options
}
// PutBucketLifecycleConfigurationOptions : The PutBucketLifecycleConfiguration options.
// Request parameters for the PutBucketLifecycleConfiguration operation;
// Bucket, Lifecycle, and Body are required.
type PutBucketLifecycleConfigurationOptions struct {
	// The name of the bucket for which to set the configuration.
	Bucket *string `validate:"required,ne="`
	Lifecycle *bool `validate:"required"`
	Body *string `validate:"required"`
	// Allows users to set headers on API requests
	Headers map[string]string
}
// NewPutBucketLifecycleConfigurationOptions : Instantiate PutBucketLifecycleConfigurationOptions
func (*IbmCloudObjectStorageS3ApiV2) NewPutBucketLifecycleConfigurationOptions(bucket string, lifecycle bool, body string) *PutBucketLifecycleConfigurationOptions {
return &PutBucketLifecycleConfigurationOptions{
Bucket: core.StringPtr(bucket),
Lifecycle: core.BoolPtr(lifecycle),
Body: core.StringPtr(body),
}
}
// SetBucket : Allow user to set Bucket
func (options *PutBucketLifecycleConfigurationOptions) SetBucket(bucket string) *PutBucketLifecycleConfigurationOptions {
options.Bucket = core.StringPtr(bucket)
return options
}
// SetLifecycle : Allow user to set Lifecycle
func (options *PutBucketLifecycleConfigurationOptions) SetLifecycle(lifecycle bool) *PutBucketLifecycleConfigurationOptions {
options.Lifecycle = core.BoolPtr(lifecycle)
return options
}
// SetBody : Allow user to set Body
func (options *PutBucketLifecycleConfigurationOptions) SetBody(body string) *PutBucketLifecycleConfigurationOptions {
options.Body = core.StringPtr(body)
return options
}
// SetHeaders : Allow user to set Headers
func (options *PutBucketLifecycleConfigurationOptions) SetHeaders(param map[string]string) *PutBucketLifecycleConfigurationOptions {
options.Headers = param
return options
}
// PutBucketProtectionConfigurationOptions holds the parameters for the PutBucketProtectionConfiguration request.
type PutBucketProtectionConfigurationOptions struct {
	// The bucket whose protection configuration is being set.
	Bucket *string `validate:"required,ne="`

	Protection *bool `validate:"required"`

	Body *string `validate:"required"`

	// Additional headers to attach to the API request.
	Headers map[string]string
}

// NewPutBucketProtectionConfigurationOptions creates a PutBucketProtectionConfigurationOptions with all required properties set.
func (*IbmCloudObjectStorageS3ApiV2) NewPutBucketProtectionConfigurationOptions(bucket string, protection bool, body string) *PutBucketProtectionConfigurationOptions {
	opts := &PutBucketProtectionConfigurationOptions{
		Bucket:     core.StringPtr(bucket),
		Protection: core.BoolPtr(protection),
		Body:       core.StringPtr(body),
	}
	return opts
}

// SetBucket sets the Bucket field and returns the options for chaining.
func (o *PutBucketProtectionConfigurationOptions) SetBucket(bucket string) *PutBucketProtectionConfigurationOptions {
	o.Bucket = core.StringPtr(bucket)
	return o
}

// SetProtection sets the Protection field and returns the options for chaining.
func (o *PutBucketProtectionConfigurationOptions) SetProtection(protection bool) *PutBucketProtectionConfigurationOptions {
	o.Protection = core.BoolPtr(protection)
	return o
}

// SetBody sets the Body field and returns the options for chaining.
func (o *PutBucketProtectionConfigurationOptions) SetBody(body string) *PutBucketProtectionConfigurationOptions {
	o.Body = core.StringPtr(body)
	return o
}

// SetHeaders sets the custom request headers and returns the options for chaining.
func (o *PutBucketProtectionConfigurationOptions) SetHeaders(param map[string]string) *PutBucketProtectionConfigurationOptions {
	o.Headers = param
	return o
}
// PutBucketWebsiteOptions holds the parameters for the PutBucketWebsite request.
type PutBucketWebsiteOptions struct {
	// The bucket to be configured as a static website.
	Bucket *string `validate:"required,ne="`

	Website *bool `validate:"required"`

	Body *string `validate:"required"`

	// Base64-encoded 128-bit MD5 digest of the payload (request body only, excluding headers) per
	// [RFC 1864](http://www.ietf.org/rfc/rfc1864.txt). Optional, but recommended as an end-to-end
	// integrity check that the data received matches the data originally sent.
	ContentMD5 *string

	// Additional headers to attach to the API request.
	Headers map[string]string
}

// NewPutBucketWebsiteOptions creates a PutBucketWebsiteOptions with all required properties set.
func (*IbmCloudObjectStorageS3ApiV2) NewPutBucketWebsiteOptions(bucket string, website bool, body string) *PutBucketWebsiteOptions {
	opts := &PutBucketWebsiteOptions{
		Bucket:  core.StringPtr(bucket),
		Website: core.BoolPtr(website),
		Body:    core.StringPtr(body),
	}
	return opts
}

// SetBucket sets the Bucket field and returns the options for chaining.
func (o *PutBucketWebsiteOptions) SetBucket(bucket string) *PutBucketWebsiteOptions {
	o.Bucket = core.StringPtr(bucket)
	return o
}

// SetWebsite sets the Website field and returns the options for chaining.
func (o *PutBucketWebsiteOptions) SetWebsite(website bool) *PutBucketWebsiteOptions {
	o.Website = core.BoolPtr(website)
	return o
}

// SetBody sets the Body field and returns the options for chaining.
func (o *PutBucketWebsiteOptions) SetBody(body string) *PutBucketWebsiteOptions {
	o.Body = core.StringPtr(body)
	return o
}

// SetContentMD5 sets the ContentMD5 field and returns the options for chaining.
func (o *PutBucketWebsiteOptions) SetContentMD5(contentMD5 string) *PutBucketWebsiteOptions {
	o.ContentMD5 = core.StringPtr(contentMD5)
	return o
}

// SetHeaders sets the custom request headers and returns the options for chaining.
func (o *PutBucketWebsiteOptions) SetHeaders(param map[string]string) *PutBucketWebsiteOptions {
	o.Headers = param
	return o
}
// PutObjectAclOptions holds the parameters for the PutObjectAcl request.
type PutObjectAclOptions struct {
	// The bucket containing the object to which the ACL will be attached.
	Bucket *string `validate:"required,ne="`

	// The key of the object for which the PUT operation was initiated.
	Key *string `validate:"required,ne="`

	Acl *bool `validate:"required"`

	Body *string `validate:"required"`

	// Canned ACL to apply to the object.
	XAmzAcl *string

	// Base64-encoded 128-bit MD5 digest of the payload (request body only, excluding headers) per
	// [RFC 1864](http://www.ietf.org/rfc/rfc1864.txt). Optional, but recommended as an end-to-end
	// integrity check that the data received matches the data originally sent.
	ContentMD5 *string

	// <p>Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.</p> <p>This action is not
	// supported by IBM COS on Outposts.</p>.
	XAmzGrantFullControl *string

	// <p>Allows grantee to list the objects in the bucket.</p> <p>This action is not supported by IBM COS on
	// Outposts.</p>.
	XAmzGrantRead *string

	// <p>Allows grantee to read the bucket ACL.</p> <p>This action is not supported by IBM COS on Outposts.</p>.
	XAmzGrantReadAcp *string

	// Allows grantee to create, overwrite, and delete any object in the bucket.
	XAmzGrantWrite *string

	// <p>Allows grantee to write the ACL for the applicable bucket.</p> <p>This action is not supported by IBM COS on
	// Outposts.</p>.
	XAmzGrantWriteAcp *string

	// Additional headers to attach to the API request.
	Headers map[string]string
}

// Constants associated with the PutObjectAclOptions.XAmzAcl property.
// Canned ACL to apply to the object.
const (
	PutObjectAclOptions_XAmzAcl_Private = "private"
	PutObjectAclOptions_XAmzAcl_PublicRead = "public-read"
)

// NewPutObjectAclOptions creates a PutObjectAclOptions with all required properties set.
func (*IbmCloudObjectStorageS3ApiV2) NewPutObjectAclOptions(bucket string, key string, acl bool, body string) *PutObjectAclOptions {
	opts := &PutObjectAclOptions{
		Bucket: core.StringPtr(bucket),
		Key:    core.StringPtr(key),
		Acl:    core.BoolPtr(acl),
		Body:   core.StringPtr(body),
	}
	return opts
}

// SetBucket sets the Bucket field and returns the options for chaining.
func (o *PutObjectAclOptions) SetBucket(bucket string) *PutObjectAclOptions {
	o.Bucket = core.StringPtr(bucket)
	return o
}

// SetKey sets the Key field and returns the options for chaining.
func (o *PutObjectAclOptions) SetKey(key string) *PutObjectAclOptions {
	o.Key = core.StringPtr(key)
	return o
}

// SetAcl sets the Acl field and returns the options for chaining.
func (o *PutObjectAclOptions) SetAcl(acl bool) *PutObjectAclOptions {
	o.Acl = core.BoolPtr(acl)
	return o
}

// SetBody sets the Body field and returns the options for chaining.
func (o *PutObjectAclOptions) SetBody(body string) *PutObjectAclOptions {
	o.Body = core.StringPtr(body)
	return o
}

// SetXAmzAcl sets the XAmzAcl field and returns the options for chaining.
func (o *PutObjectAclOptions) SetXAmzAcl(xAmzAcl string) *PutObjectAclOptions {
	o.XAmzAcl = core.StringPtr(xAmzAcl)
	return o
}

// SetContentMD5 sets the ContentMD5 field and returns the options for chaining.
func (o *PutObjectAclOptions) SetContentMD5(contentMD5 string) *PutObjectAclOptions {
	o.ContentMD5 = core.StringPtr(contentMD5)
	return o
}

// SetXAmzGrantFullControl sets the XAmzGrantFullControl field and returns the options for chaining.
func (o *PutObjectAclOptions) SetXAmzGrantFullControl(xAmzGrantFullControl string) *PutObjectAclOptions {
	o.XAmzGrantFullControl = core.StringPtr(xAmzGrantFullControl)
	return o
}

// SetXAmzGrantRead sets the XAmzGrantRead field and returns the options for chaining.
func (o *PutObjectAclOptions) SetXAmzGrantRead(xAmzGrantRead string) *PutObjectAclOptions {
	o.XAmzGrantRead = core.StringPtr(xAmzGrantRead)
	return o
}

// SetXAmzGrantReadAcp sets the XAmzGrantReadAcp field and returns the options for chaining.
func (o *PutObjectAclOptions) SetXAmzGrantReadAcp(xAmzGrantReadAcp string) *PutObjectAclOptions {
	o.XAmzGrantReadAcp = core.StringPtr(xAmzGrantReadAcp)
	return o
}

// SetXAmzGrantWrite sets the XAmzGrantWrite field and returns the options for chaining.
func (o *PutObjectAclOptions) SetXAmzGrantWrite(xAmzGrantWrite string) *PutObjectAclOptions {
	o.XAmzGrantWrite = core.StringPtr(xAmzGrantWrite)
	return o
}

// SetXAmzGrantWriteAcp sets the XAmzGrantWriteAcp field and returns the options for chaining.
func (o *PutObjectAclOptions) SetXAmzGrantWriteAcp(xAmzGrantWriteAcp string) *PutObjectAclOptions {
	o.XAmzGrantWriteAcp = core.StringPtr(xAmzGrantWriteAcp)
	return o
}

// SetHeaders sets the custom request headers and returns the options for chaining.
func (o *PutObjectAclOptions) SetHeaders(param map[string]string) *PutObjectAclOptions {
	o.Headers = param
	return o
}
// PutObjectOptions holds the parameters for the PutObject request.
type PutObjectOptions struct {
	// The bucket that will hold the object.
	Bucket *string `validate:"required,ne="`

	// The object key for which the PUT operation was initiated.
	Key *string `validate:"required,ne="`

	Body *string `validate:"required"`

	// Canned ACL to apply to the object.
	XAmzAcl *string

	// Upload the object only if its entity tag (ETag) matches the given value; otherwise a 412
	// (precondition failed) is returned.
	IfMatch *string

	// Upload the object only if its entity tag (ETag) differs from the given value; otherwise a 304
	// (not modified) is returned.
	IfNoneMatch *string

	// Upload the object only if it has not been modified since the given time; otherwise a 412
	// (precondition failed) is returned.
	IfUnmodifiedSince *strfmt.DateTime

	// Caching behavior along the request/reply chain. See
	// [RFC 2616](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9).
	CacheControl *string

	// Presentational information for the object. See
	// [RFC 2616](http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1).
	ContentDisposition *string

	// Content encodings that have been applied to the object, and therefore the decoding mechanisms
	// needed to obtain the media type referenced by the Content-Type header field. See
	// [RFC 2616](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11).
	ContentEncoding *string

	// The language the content is in.
	ContentLanguage *string

	// Size of the body in bytes; useful when the size cannot be determined automatically. See
	// [RFC 2616](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13).
	ContentLength *int64

	// Base64-encoded 128-bit MD5 digest of the payload (request body only, excluding headers) per
	// [RFC 1864](http://www.ietf.org/rfc/rfc1864.txt). Optional, but recommended as an end-to-end
	// integrity check that the data received matches the data originally sent.
	ContentMD5 *string

	// Date and time at which the object is no longer cacheable. See
	// [RFC 2616](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21).
	Expires *strfmt.DateTime

	// Server-side encryption algorithm used when storing this object in IBM COS (`AES256`).
	XAmzServerSideEncryption *string

	// If the bucket is configured as a website, redirects requests for this object to another object
	// in the same bucket or to an external URL.
	XAmzWebsiteRedirectLocation *string

	// Algorithm to use when encrypting the object (for example, `AES256`).
	XAmzServerSideEncryptionCustomerAlgorithm *string

	// Customer-provided encryption key for IBM COS to use when encrypting data. The value is used to
	// store the object and is then discarded; IBM COS does not store the encryption key. The key must
	// be appropriate for the algorithm given in the `x-amz-server-side-encryption-customer-algorithm`
	// header.
	XAmzServerSideEncryptionCustomerKey *string

	// 128-bit MD5 digest of the encryption key according to RFC 1321. IBM COS uses this header for a
	// message integrity check to ensure the encryption key was transmitted without error.
	XAmzServerSideEncryptionCustomerKeyMD5 *string

	// A set of tags for the object, encoded as URL query parameters (for example, `SomeKey=SomeValue`).
	XAmzTagging *string

	// Additional headers to attach to the API request.
	Headers map[string]string
}

// Constants associated with the PutObjectOptions.XAmzAcl property.
// Canned ACL to apply to the object.
const (
	PutObjectOptions_XAmzAcl_Private = "private"
	PutObjectOptions_XAmzAcl_PublicRead = "public-read"
)

// Constants associated with the PutObjectOptions.XAmzServerSideEncryption property.
// Server-side encryption algorithm used when storing this object in IBM COS (`AES256`).
const (
	PutObjectOptions_XAmzServerSideEncryption_Aes256 = "AES256"
)

// NewPutObjectOptions creates a PutObjectOptions with all required properties set.
func (*IbmCloudObjectStorageS3ApiV2) NewPutObjectOptions(bucket string, key string, body string) *PutObjectOptions {
	opts := &PutObjectOptions{
		Bucket: core.StringPtr(bucket),
		Key:    core.StringPtr(key),
		Body:   core.StringPtr(body),
	}
	return opts
}

// SetBucket sets the Bucket field and returns the options for chaining.
func (o *PutObjectOptions) SetBucket(bucket string) *PutObjectOptions {
	o.Bucket = core.StringPtr(bucket)
	return o
}

// SetKey sets the Key field and returns the options for chaining.
func (o *PutObjectOptions) SetKey(key string) *PutObjectOptions {
	o.Key = core.StringPtr(key)
	return o
}

// SetBody sets the Body field and returns the options for chaining.
func (o *PutObjectOptions) SetBody(body string) *PutObjectOptions {
	o.Body = core.StringPtr(body)
	return o
}

// SetXAmzAcl sets the XAmzAcl field and returns the options for chaining.
func (o *PutObjectOptions) SetXAmzAcl(xAmzAcl string) *PutObjectOptions {
	o.XAmzAcl = core.StringPtr(xAmzAcl)
	return o
}

// SetIfMatch sets the IfMatch field and returns the options for chaining.
func (o *PutObjectOptions) SetIfMatch(ifMatch string) *PutObjectOptions {
	o.IfMatch = core.StringPtr(ifMatch)
	return o
}

// SetIfNoneMatch sets the IfNoneMatch field and returns the options for chaining.
func (o *PutObjectOptions) SetIfNoneMatch(ifNoneMatch string) *PutObjectOptions {
	o.IfNoneMatch = core.StringPtr(ifNoneMatch)
	return o
}

// SetIfUnmodifiedSince sets the IfUnmodifiedSince field and returns the options for chaining.
func (o *PutObjectOptions) SetIfUnmodifiedSince(ifUnmodifiedSince *strfmt.DateTime) *PutObjectOptions {
	o.IfUnmodifiedSince = ifUnmodifiedSince
	return o
}

// SetCacheControl sets the CacheControl field and returns the options for chaining.
func (o *PutObjectOptions) SetCacheControl(cacheControl string) *PutObjectOptions {
	o.CacheControl = core.StringPtr(cacheControl)
	return o
}

// SetContentDisposition sets the ContentDisposition field and returns the options for chaining.
func (o *PutObjectOptions) SetContentDisposition(contentDisposition string) *PutObjectOptions {
	o.ContentDisposition = core.StringPtr(contentDisposition)
	return o
}

// SetContentEncoding sets the ContentEncoding field and returns the options for chaining.
func (o *PutObjectOptions) SetContentEncoding(contentEncoding string) *PutObjectOptions {
	o.ContentEncoding = core.StringPtr(contentEncoding)
	return o
}

// SetContentLanguage sets the ContentLanguage field and returns the options for chaining.
func (o *PutObjectOptions) SetContentLanguage(contentLanguage string) *PutObjectOptions {
	o.ContentLanguage = core.StringPtr(contentLanguage)
	return o
}

// SetContentLength sets the ContentLength field and returns the options for chaining.
func (o *PutObjectOptions) SetContentLength(contentLength int64) *PutObjectOptions {
	o.ContentLength = core.Int64Ptr(contentLength)
	return o
}

// SetContentMD5 sets the ContentMD5 field and returns the options for chaining.
func (o *PutObjectOptions) SetContentMD5(contentMD5 string) *PutObjectOptions {
	o.ContentMD5 = core.StringPtr(contentMD5)
	return o
}

// SetExpires sets the Expires field and returns the options for chaining.
func (o *PutObjectOptions) SetExpires(expires *strfmt.DateTime) *PutObjectOptions {
	o.Expires = expires
	return o
}

// SetXAmzServerSideEncryption sets the XAmzServerSideEncryption field and returns the options for chaining.
func (o *PutObjectOptions) SetXAmzServerSideEncryption(xAmzServerSideEncryption string) *PutObjectOptions {
	o.XAmzServerSideEncryption = core.StringPtr(xAmzServerSideEncryption)
	return o
}

// SetXAmzWebsiteRedirectLocation sets the XAmzWebsiteRedirectLocation field and returns the options for chaining.
func (o *PutObjectOptions) SetXAmzWebsiteRedirectLocation(xAmzWebsiteRedirectLocation string) *PutObjectOptions {
	o.XAmzWebsiteRedirectLocation = core.StringPtr(xAmzWebsiteRedirectLocation)
	return o
}

// SetXAmzServerSideEncryptionCustomerAlgorithm sets the XAmzServerSideEncryptionCustomerAlgorithm field and returns the options for chaining.
func (o *PutObjectOptions) SetXAmzServerSideEncryptionCustomerAlgorithm(xAmzServerSideEncryptionCustomerAlgorithm string) *PutObjectOptions {
	o.XAmzServerSideEncryptionCustomerAlgorithm = core.StringPtr(xAmzServerSideEncryptionCustomerAlgorithm)
	return o
}

// SetXAmzServerSideEncryptionCustomerKey sets the XAmzServerSideEncryptionCustomerKey field and returns the options for chaining.
func (o *PutObjectOptions) SetXAmzServerSideEncryptionCustomerKey(xAmzServerSideEncryptionCustomerKey string) *PutObjectOptions {
	o.XAmzServerSideEncryptionCustomerKey = core.StringPtr(xAmzServerSideEncryptionCustomerKey)
	return o
}

// SetXAmzServerSideEncryptionCustomerKeyMD5 sets the XAmzServerSideEncryptionCustomerKeyMD5 field and returns the options for chaining.
func (o *PutObjectOptions) SetXAmzServerSideEncryptionCustomerKeyMD5(xAmzServerSideEncryptionCustomerKeyMD5 string) *PutObjectOptions {
	o.XAmzServerSideEncryptionCustomerKeyMD5 = core.StringPtr(xAmzServerSideEncryptionCustomerKeyMD5)
	return o
}

// SetXAmzTagging sets the XAmzTagging field and returns the options for chaining.
func (o *PutObjectOptions) SetXAmzTagging(xAmzTagging string) *PutObjectOptions {
	o.XAmzTagging = core.StringPtr(xAmzTagging)
	return o
}

// SetHeaders sets the custom request headers and returns the options for chaining.
func (o *PutObjectOptions) SetHeaders(param map[string]string) *PutObjectOptions {
	o.Headers = param
	return o
}
// PutObjectTaggingOptions holds the parameters for the PutObjectTagging request.
type PutObjectTaggingOptions struct {
	// The bucket containing the object.
	Bucket *string `validate:"required,ne="`

	// The object key name.
	Key *string `validate:"required,ne="`

	Tagging *bool `validate:"required"`

	Body *string `validate:"required"`

	// Base64-encoded 128-bit MD5 digest of the payload (request body only, excluding headers) per
	// [RFC 1864](http://www.ietf.org/rfc/rfc1864.txt). Optional, but recommended as an end-to-end
	// integrity check that the data received matches the data originally sent.
	ContentMD5 *string

	// Additional headers to attach to the API request.
	Headers map[string]string
}

// NewPutObjectTaggingOptions creates a PutObjectTaggingOptions with all required properties set.
func (*IbmCloudObjectStorageS3ApiV2) NewPutObjectTaggingOptions(bucket string, key string, tagging bool, body string) *PutObjectTaggingOptions {
	opts := &PutObjectTaggingOptions{
		Bucket:  core.StringPtr(bucket),
		Key:     core.StringPtr(key),
		Tagging: core.BoolPtr(tagging),
		Body:    core.StringPtr(body),
	}
	return opts
}

// SetBucket sets the Bucket field and returns the options for chaining.
func (o *PutObjectTaggingOptions) SetBucket(bucket string) *PutObjectTaggingOptions {
	o.Bucket = core.StringPtr(bucket)
	return o
}

// SetKey sets the Key field and returns the options for chaining.
func (o *PutObjectTaggingOptions) SetKey(key string) *PutObjectTaggingOptions {
	o.Key = core.StringPtr(key)
	return o
}

// SetTagging sets the Tagging field and returns the options for chaining.
func (o *PutObjectTaggingOptions) SetTagging(tagging bool) *PutObjectTaggingOptions {
	o.Tagging = core.BoolPtr(tagging)
	return o
}

// SetBody sets the Body field and returns the options for chaining.
func (o *PutObjectTaggingOptions) SetBody(body string) *PutObjectTaggingOptions {
	o.Body = core.StringPtr(body)
	return o
}

// SetContentMD5 sets the ContentMD5 field and returns the options for chaining.
func (o *PutObjectTaggingOptions) SetContentMD5(contentMD5 string) *PutObjectTaggingOptions {
	o.ContentMD5 = core.StringPtr(contentMD5)
	return o
}

// SetHeaders sets the custom request headers and returns the options for chaining.
func (o *PutObjectTaggingOptions) SetHeaders(param map[string]string) *PutObjectTaggingOptions {
	o.Headers = param
	return o
}
// PutPublicAccessBlockOptions holds the parameters for the PutPublicAccessBlock request.
type PutPublicAccessBlockOptions struct {
	// The IBM COS bucket whose `PublicAccessBlock` configuration is being set.
	Bucket *string `validate:"required,ne="`

	PublicAccessBlock *bool `validate:"required"`

	Body *string `validate:"required"`

	// Base64-encoded 128-bit MD5 digest of the payload (request body only, excluding headers) per
	// [RFC 1864](http://www.ietf.org/rfc/rfc1864.txt). Optional, but recommended as an end-to-end
	// integrity check that the data received matches the data originally sent.
	ContentMD5 *string

	// Additional headers to attach to the API request.
	Headers map[string]string
}

// NewPutPublicAccessBlockOptions creates a PutPublicAccessBlockOptions with all required properties set.
func (*IbmCloudObjectStorageS3ApiV2) NewPutPublicAccessBlockOptions(bucket string, publicAccessBlock bool, body string) *PutPublicAccessBlockOptions {
	opts := &PutPublicAccessBlockOptions{
		Bucket:            core.StringPtr(bucket),
		PublicAccessBlock: core.BoolPtr(publicAccessBlock),
		Body:              core.StringPtr(body),
	}
	return opts
}

// SetBucket sets the Bucket field and returns the options for chaining.
func (o *PutPublicAccessBlockOptions) SetBucket(bucket string) *PutPublicAccessBlockOptions {
	o.Bucket = core.StringPtr(bucket)
	return o
}

// SetPublicAccessBlock sets the PublicAccessBlock field and returns the options for chaining.
func (o *PutPublicAccessBlockOptions) SetPublicAccessBlock(publicAccessBlock bool) *PutPublicAccessBlockOptions {
	o.PublicAccessBlock = core.BoolPtr(publicAccessBlock)
	return o
}

// SetBody sets the Body field and returns the options for chaining.
func (o *PutPublicAccessBlockOptions) SetBody(body string) *PutPublicAccessBlockOptions {
	o.Body = core.StringPtr(body)
	return o
}

// SetContentMD5 sets the ContentMD5 field and returns the options for chaining.
func (o *PutPublicAccessBlockOptions) SetContentMD5(contentMD5 string) *PutPublicAccessBlockOptions {
	o.ContentMD5 = core.StringPtr(contentMD5)
	return o
}

// SetHeaders sets the custom request headers and returns the options for chaining.
func (o *PutPublicAccessBlockOptions) SetHeaders(param map[string]string) *PutPublicAccessBlockOptions {
	o.Headers = param
	return o
}
// Redirect describes how requests are redirected. In the event of an error, a different error
// code can be specified on the response.
type Redirect struct {
	// Host name to use in the redirect request.
	HostName *string `json:"HostName,omitempty"`

	// HTTP redirect code to use on the response. Not required if one of the siblings is present.
	HttpRedirectCode *string `json:"HttpRedirectCode,omitempty"`

	// Protocol to use when redirecting requests; defaults to the protocol of the original request.
	Protocol *string `json:"Protocol,omitempty"`

	// Object key prefix to use in the redirect request. For example, to redirect requests for all
	// pages with prefix `docs/` (objects in the `docs/` folder) to `documents/`, set a condition
	// block with `KeyPrefixEquals` of `docs/` and in the Redirect set `ReplaceKeyPrefixWith` to
	// `/documents`. Not required if one of the siblings is present. May only be present when
	// `ReplaceKeyWith` is not provided.
	ReplaceKeyPrefixWith *string `json:"ReplaceKeyPrefixWith,omitempty"`

	// Specific object key to use in the redirect request, e.g. redirect the request to `error.html`.
	// Not required if one of the siblings is present. May only be present when
	// `ReplaceKeyPrefixWith` is not provided.
	ReplaceKeyWith *string `json:"ReplaceKeyWith,omitempty"`
}

// Constants associated with the Redirect.Protocol property.
// Protocol to use when redirecting requests; defaults to the protocol of the original request.
const (
	Redirect_Protocol_Http = "http"
	Redirect_Protocol_Https = "https"
)

// UnmarshalRedirect unmarshals an instance of Redirect from the specified map of raw messages.
func UnmarshalRedirect(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(Redirect)
	if err = core.UnmarshalPrimitive(m, "HostName", &model.HostName); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "HttpRedirectCode", &model.HttpRedirectCode); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Protocol", &model.Protocol); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "ReplaceKeyPrefixWith", &model.ReplaceKeyPrefixWith); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "ReplaceKeyWith", &model.ReplaceKeyWith); err != nil {
		return
	}
	// Store the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// RedirectAllRequestsTo describes the redirect behavior applied to every request made to the
// website endpoint of an IBM COS bucket.
type RedirectAllRequestsTo struct {
	// Host to which requests are redirected.
	HostName *string `json:"HostName" validate:"required"`

	// Protocol to use when redirecting requests; defaults to the protocol of the original request.
	Protocol *string `json:"Protocol,omitempty"`
}

// Constants associated with the RedirectAllRequestsTo.Protocol property.
// Protocol to use when redirecting requests; defaults to the protocol of the original request.
const (
	RedirectAllRequestsTo_Protocol_Http = "http"
	RedirectAllRequestsTo_Protocol_Https = "https"
)

// UnmarshalRedirectAllRequestsTo unmarshals an instance of RedirectAllRequestsTo from the specified map of raw messages.
func UnmarshalRedirectAllRequestsTo(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(RedirectAllRequestsTo)
	if err = core.UnmarshalPrimitive(m, "HostName", &model.HostName); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Protocol", &model.Protocol); err != nil {
		return
	}
	// Store the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// RestoreObjectOptions holds the parameters for the RestoreObject request.
type RestoreObjectOptions struct {
	// The name of the bucket containing the object to restore.
	Bucket *string `validate:"required,ne="`

	// The object key for which the operation was initiated.
	Key *string `validate:"required,ne="`

	Restore *bool `validate:"required"`

	Body *string `validate:"required"`

	// Additional headers to attach to the API request.
	Headers map[string]string
}

// NewRestoreObjectOptions creates a RestoreObjectOptions with all required properties set.
func (*IbmCloudObjectStorageS3ApiV2) NewRestoreObjectOptions(bucket string, key string, restore bool, body string) *RestoreObjectOptions {
	opts := &RestoreObjectOptions{
		Bucket:  core.StringPtr(bucket),
		Key:     core.StringPtr(key),
		Restore: core.BoolPtr(restore),
		Body:    core.StringPtr(body),
	}
	return opts
}

// SetBucket sets the Bucket field and returns the options for chaining.
func (o *RestoreObjectOptions) SetBucket(bucket string) *RestoreObjectOptions {
	o.Bucket = core.StringPtr(bucket)
	return o
}

// SetKey sets the Key field and returns the options for chaining.
func (o *RestoreObjectOptions) SetKey(key string) *RestoreObjectOptions {
	o.Key = core.StringPtr(key)
	return o
}

// SetRestore sets the Restore field and returns the options for chaining.
func (o *RestoreObjectOptions) SetRestore(restore bool) *RestoreObjectOptions {
	o.Restore = core.BoolPtr(restore)
	return o
}

// SetBody sets the Body field and returns the options for chaining.
func (o *RestoreObjectOptions) SetBody(body string) *RestoreObjectOptions {
	o.Body = core.StringPtr(body)
	return o
}

// SetHeaders sets the custom request headers and returns the options for chaining.
func (o *RestoreObjectOptions) SetHeaders(param map[string]string) *RestoreObjectOptions {
	o.Headers = param
	return o
}
// RoutingRulesItem specifies the redirect behavior and the conditions under which a redirect is
// applied. For more information about routing rules, see <a
// href="https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects">Configuring
// advanced conditional redirects</a> in the <i>Amazon Simple Storage Service Developer Guide</i>.
type RoutingRulesItem struct {
	// Container describing a condition that must be met for the specified redirect to apply. For
	// example: 1. If the request is for pages in the `/docs` folder, redirect to the `/documents`
	// folder. 2. If the request results in an HTTP 4xx error, redirect the request to another host
	// where the error might be processed.
	Condition *Condition `json:"Condition,omitempty"`

	// Container for redirect information. Requests can be redirected to another host, to another
	// page, or with another protocol. In the event of an error, a different error code can be
	// specified on the response.
	Redirect *Redirect `json:"Redirect" validate:"required"`
}

// UnmarshalRoutingRulesItem unmarshals an instance of RoutingRulesItem from the specified map of raw messages.
func UnmarshalRoutingRulesItem(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(RoutingRulesItem)
	if err = core.UnmarshalModel(m, "Condition", &model.Condition, UnmarshalCondition); err != nil {
		return
	}
	if err = core.UnmarshalModel(m, "Redirect", &model.Redirect, UnmarshalRedirect); err != nil {
		return
	}
	// Store the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// Tag is a container for a single key/value name pair.
type Tag struct {
	// Name of the object key.
	Key *string `json:"Key" validate:"required"`

	// Value of the tag.
	Value *string `json:"Value" validate:"required"`
}

// UnmarshalTag unmarshals an instance of Tag from the specified map of raw messages.
func UnmarshalTag(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(Tag)
	if err = core.UnmarshalPrimitive(m, "Key", &model.Key); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Value", &model.Value); err != nil {
		return
	}
	// Store the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// TagSetItem is a container for a single key/value name pair.
type TagSetItem struct {
	// Name of the object key.
	Key *string `json:"Key" validate:"required"`

	// Value of the tag.
	Value *string `json:"Value" validate:"required"`
}

// UnmarshalTagSetItem unmarshals an instance of TagSetItem from the specified map of raw messages.
func UnmarshalTagSetItem(m map[string]json.RawMessage, result interface{}) (err error) {
	model := new(TagSetItem)
	if err = core.UnmarshalPrimitive(m, "Key", &model.Key); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Value", &model.Value); err != nil {
		return
	}
	// Store the populated model through the caller-supplied result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(model))
	return
}
// Transition : Specifies when an object transitions to a specified storage class.
// Either Date or Days identifies the transition time; StorageClass names the target.
type Transition struct {
	// Indicates when objects are transitioned to the specified storage class. The date value must be in ISO 8601 format.
	// The time is always midnight UTC.
	Date *strfmt.DateTime `json:"Date,omitempty"`
	// Indicates the number of days after creation when objects are transitioned to the specified storage class. The value
	// must be a positive integer.
	Days *int64 `json:"Days,omitempty"`
	// The storage class to which you want the object to transition.
	StorageClass *string `json:"StorageClass,omitempty"`
}
// Constants associated with the Transition.StorageClass property.
// The storage class to which you want the object to transition.
const (
	Transition_StorageClass_Accelerated = "ACCELERATED"
	Transition_StorageClass_Glacier = "GLACIER"
)
// UnmarshalTransition unmarshals an instance of Transition from the specified map of raw messages.
func UnmarshalTransition(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(Transition)
	// Decode each primitive field in schema order, stopping at the first error.
	if err = core.UnmarshalPrimitive(m, "Date", &obj.Date); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "Days", &obj.Days); err != nil {
		return
	}
	if err = core.UnmarshalPrimitive(m, "StorageClass", &obj.StorageClass); err != nil {
		return
	}
	// Hand the populated model back via the result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// Generated request-options model: struct, constructor, and fluent setters.
// UploadPartCopyOptions : The UploadPartCopy options.
type UploadPartCopyOptions struct {
	// The bucket name.
	Bucket *string `validate:"required,ne="`
	// Specifies the source object to use as a part in the multipart upload.
	XAmzCopySource *string `validate:"required"`
	// Object key for which the multipart upload was initiated.
	Key *string `validate:"required,ne="`
	// Part number of part being copied. This is a positive integer between 1 and 10,000.
	PartNumber *int64 `validate:"required"`
	// Upload ID identifying the multipart upload whose part is being copied.
	UploadID *string `validate:"required"`
	// Copies the object if its entity tag (ETag) matches the specified tag.
	XAmzCopySourceIfMatch *string
	// Copies the object if it has been modified since the specified time.
	XAmzCopySourceIfModifiedSince *strfmt.DateTime
	// Copies the object if its entity tag (ETag) is different than the specified ETag.
	XAmzCopySourceIfNoneMatch *string
	// Copies the object if it hasn't been modified since the specified time.
	XAmzCopySourceIfUnmodifiedSince *strfmt.DateTime
	// The range of bytes to copy from the source object. The range value must use the form bytes=first-last, where the
	// first and last are the zero-based byte offsets to copy. For example, bytes=0-9 indicates that you want to copy the
	// first 10 bytes of the source. You can copy a range only if the source object is greater than 5 MB.
	XAmzCopySourceRange *string
	// Specifies the algorithm to use to when encrypting the object (`AES256`).
	XAmzServerSideEncryptionCustomerAlgorithm *string
	// Specifies the customer-provided encryption key for IBM COS to use in encrypting data. This value is used to store
	// the object and then it is discarded; IBM COS does not store the encryption key. The key must be appropriate for use
	// with the algorithm specified in the `x-amz-server-side-encryption-customer-algorithm` header. This must be the same
	// encryption key specified in the initiate multipart upload request.
	XAmzServerSideEncryptionCustomerKey *string
	// Specifies the 128-bit MD5 digest of the encryption key according to [RFC 1321](https://tools.ietf.org/html/rfc1321).
	// IBM COS uses this header for a message integrity check to ensure that the encryption key was transmitted without
	// error.
	XAmzServerSideEncryptionCustomerKeyMD5 *string
	// Specifies the algorithm to use when decrypting the source object (`AES256`).
	XAmzCopySourceServerSideEncryptionCustomerAlgorithm *string
	// Specifies the customer-provided encryption key for IBM COS to use to decrypt the source object. The encryption key
	// provided in this header must be one that was used when the source object was created.
	XAmzCopySourceServerSideEncryptionCustomerKey *string
	// Specifies the 128-bit MD5 digest of the encryption key according to [RFC 1321](https://tools.ietf.org/html/rfc1321).
	// IBM COS uses this header for a message integrity check to ensure that the encryption key was transmitted without
	// error.
	XAmzCopySourceServerSideEncryptionCustomerKeyMD5 *string
	// Allows users to set headers on API requests
	Headers map[string]string
}
// NewUploadPartCopyOptions : Instantiate UploadPartCopyOptions
func (*IbmCloudObjectStorageS3ApiV2) NewUploadPartCopyOptions(bucket string, xAmzCopySource string, key string, partNumber int64, uploadID string) *UploadPartCopyOptions {
	return &UploadPartCopyOptions{
		Bucket: core.StringPtr(bucket),
		XAmzCopySource: core.StringPtr(xAmzCopySource),
		Key: core.StringPtr(key),
		PartNumber: core.Int64Ptr(partNumber),
		UploadID: core.StringPtr(uploadID),
	}
}
// SetBucket : Allow user to set Bucket
func (options *UploadPartCopyOptions) SetBucket(bucket string) *UploadPartCopyOptions {
	options.Bucket = core.StringPtr(bucket)
	return options
}
// SetXAmzCopySource : Allow user to set XAmzCopySource
func (options *UploadPartCopyOptions) SetXAmzCopySource(xAmzCopySource string) *UploadPartCopyOptions {
	options.XAmzCopySource = core.StringPtr(xAmzCopySource)
	return options
}
// SetKey : Allow user to set Key
func (options *UploadPartCopyOptions) SetKey(key string) *UploadPartCopyOptions {
	options.Key = core.StringPtr(key)
	return options
}
// SetPartNumber : Allow user to set PartNumber
func (options *UploadPartCopyOptions) SetPartNumber(partNumber int64) *UploadPartCopyOptions {
	options.PartNumber = core.Int64Ptr(partNumber)
	return options
}
// SetUploadID : Allow user to set UploadID
func (options *UploadPartCopyOptions) SetUploadID(uploadID string) *UploadPartCopyOptions {
	options.UploadID = core.StringPtr(uploadID)
	return options
}
// SetXAmzCopySourceIfMatch : Allow user to set XAmzCopySourceIfMatch
func (options *UploadPartCopyOptions) SetXAmzCopySourceIfMatch(xAmzCopySourceIfMatch string) *UploadPartCopyOptions {
	options.XAmzCopySourceIfMatch = core.StringPtr(xAmzCopySourceIfMatch)
	return options
}
// SetXAmzCopySourceIfModifiedSince : Allow user to set XAmzCopySourceIfModifiedSince
func (options *UploadPartCopyOptions) SetXAmzCopySourceIfModifiedSince(xAmzCopySourceIfModifiedSince *strfmt.DateTime) *UploadPartCopyOptions {
	options.XAmzCopySourceIfModifiedSince = xAmzCopySourceIfModifiedSince
	return options
}
// SetXAmzCopySourceIfNoneMatch : Allow user to set XAmzCopySourceIfNoneMatch
func (options *UploadPartCopyOptions) SetXAmzCopySourceIfNoneMatch(xAmzCopySourceIfNoneMatch string) *UploadPartCopyOptions {
	options.XAmzCopySourceIfNoneMatch = core.StringPtr(xAmzCopySourceIfNoneMatch)
	return options
}
// SetXAmzCopySourceIfUnmodifiedSince : Allow user to set XAmzCopySourceIfUnmodifiedSince
func (options *UploadPartCopyOptions) SetXAmzCopySourceIfUnmodifiedSince(xAmzCopySourceIfUnmodifiedSince *strfmt.DateTime) *UploadPartCopyOptions {
	options.XAmzCopySourceIfUnmodifiedSince = xAmzCopySourceIfUnmodifiedSince
	return options
}
// SetXAmzCopySourceRange : Allow user to set XAmzCopySourceRange
func (options *UploadPartCopyOptions) SetXAmzCopySourceRange(xAmzCopySourceRange string) *UploadPartCopyOptions {
	options.XAmzCopySourceRange = core.StringPtr(xAmzCopySourceRange)
	return options
}
// SetXAmzServerSideEncryptionCustomerAlgorithm : Allow user to set XAmzServerSideEncryptionCustomerAlgorithm
func (options *UploadPartCopyOptions) SetXAmzServerSideEncryptionCustomerAlgorithm(xAmzServerSideEncryptionCustomerAlgorithm string) *UploadPartCopyOptions {
	options.XAmzServerSideEncryptionCustomerAlgorithm = core.StringPtr(xAmzServerSideEncryptionCustomerAlgorithm)
	return options
}
// SetXAmzServerSideEncryptionCustomerKey : Allow user to set XAmzServerSideEncryptionCustomerKey
func (options *UploadPartCopyOptions) SetXAmzServerSideEncryptionCustomerKey(xAmzServerSideEncryptionCustomerKey string) *UploadPartCopyOptions {
	options.XAmzServerSideEncryptionCustomerKey = core.StringPtr(xAmzServerSideEncryptionCustomerKey)
	return options
}
// SetXAmzServerSideEncryptionCustomerKeyMD5 : Allow user to set XAmzServerSideEncryptionCustomerKeyMD5
func (options *UploadPartCopyOptions) SetXAmzServerSideEncryptionCustomerKeyMD5(xAmzServerSideEncryptionCustomerKeyMD5 string) *UploadPartCopyOptions {
	options.XAmzServerSideEncryptionCustomerKeyMD5 = core.StringPtr(xAmzServerSideEncryptionCustomerKeyMD5)
	return options
}
// SetXAmzCopySourceServerSideEncryptionCustomerAlgorithm : Allow user to set XAmzCopySourceServerSideEncryptionCustomerAlgorithm
func (options *UploadPartCopyOptions) SetXAmzCopySourceServerSideEncryptionCustomerAlgorithm(xAmzCopySourceServerSideEncryptionCustomerAlgorithm string) *UploadPartCopyOptions {
	options.XAmzCopySourceServerSideEncryptionCustomerAlgorithm = core.StringPtr(xAmzCopySourceServerSideEncryptionCustomerAlgorithm)
	return options
}
// SetXAmzCopySourceServerSideEncryptionCustomerKey : Allow user to set XAmzCopySourceServerSideEncryptionCustomerKey
func (options *UploadPartCopyOptions) SetXAmzCopySourceServerSideEncryptionCustomerKey(xAmzCopySourceServerSideEncryptionCustomerKey string) *UploadPartCopyOptions {
	options.XAmzCopySourceServerSideEncryptionCustomerKey = core.StringPtr(xAmzCopySourceServerSideEncryptionCustomerKey)
	return options
}
// SetXAmzCopySourceServerSideEncryptionCustomerKeyMD5 : Allow user to set XAmzCopySourceServerSideEncryptionCustomerKeyMD5
func (options *UploadPartCopyOptions) SetXAmzCopySourceServerSideEncryptionCustomerKeyMD5(xAmzCopySourceServerSideEncryptionCustomerKeyMD5 string) *UploadPartCopyOptions {
	options.XAmzCopySourceServerSideEncryptionCustomerKeyMD5 = core.StringPtr(xAmzCopySourceServerSideEncryptionCustomerKeyMD5)
	return options
}
// SetHeaders : Allow user to set Headers
func (options *UploadPartCopyOptions) SetHeaders(param map[string]string) *UploadPartCopyOptions {
	options.Headers = param
	return options
}
// UploadPartCopyOutput : UploadPartCopyOutput struct
type UploadPartCopyOutput struct {
	// Container for all response elements.
	CopyPartResult *CopyPartResult `json:"CopyPartResult,omitempty"`
}
// UnmarshalUploadPartCopyOutput unmarshals an instance of UploadPartCopyOutput from the specified map of raw messages.
func UnmarshalUploadPartCopyOutput(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(UploadPartCopyOutput)
	// Decode the single sub-model; propagate any decoding failure.
	if err = core.UnmarshalModel(m, "CopyPartResult", &obj.CopyPartResult, UnmarshalCopyPartResult); err != nil {
		return
	}
	// Hand the populated model back via the result pointer.
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// Generated request-options model: struct, constants, constructor, and fluent setters.
// UploadPartOptions : The UploadPart options.
type UploadPartOptions struct {
	// The name of the bucket to which the multipart upload was initiated.
	Bucket *string `validate:"required,ne="`
	// Object key for which the multipart upload was initiated.
	Key *string `validate:"required,ne="`
	// Part number of part being uploaded. This is a positive integer between 1 and 10,000.
	PartNumber *int64 `validate:"required"`
	// Upload ID identifying the multipart upload whose part is being uploaded.
	UploadID *string `validate:"required"`
	// The part's payload.
	Body *string `validate:"required"`
	// Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically.
	// For more information, see [RFC 2616](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13).
	ContentLength *int64
	// The base64-encoded 128-bit MD5 digest of the payload (just the request body without the headers) according to [RFC
	// 1864](http://www.ietf.org/rfc/rfc1864.txt). This header can be used as a message integrity check to verify that the
	// data is the same data that was originally sent. Although it is optional, it is recommended to use the Content-MD5
	// mechanism as an end-to-end integrity check.
	ContentMD5 *string
	// Specifies the algorithm to use to when encrypting the object (for example, AES256).
	XAmzServerSideEncryptionCustomerAlgorithm *string
	// Specifies the customer-provided encryption key for IBM COS to use in encrypting data. This value is used to store
	// the object and then it is discarded; IBM COS does not store the encryption key. The key must be appropriate for use
	// with the algorithm specified in the `x-amz-server-side-encryption-customer-algorithm header`. This must be the same
	// encryption key specified in the initiate multipart upload request.
	XAmzServerSideEncryptionCustomerKey *string
	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. IBM COS uses this header for a message
	// integrity check to ensure that the encryption key was transmitted without error.
	XAmzServerSideEncryptionCustomerKeyMD5 *string
	// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this
	// parameter in their requests. For information about downloading objects from requester pays buckets, see <a
	// href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html">Downloading Objects in
	// Requestor Pays Buckets</a> in the <i>IBM COS Developer Guide</i>.
	XAmzRequestPayer *string
	// The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail
	// with an HTTP `403 (Access Denied)` error.
	XAmzExpectedBucketOwner *string
	// Allows users to set headers on API requests
	Headers map[string]string
}
// Constants associated with the UploadPartOptions.XAmzRequestPayer property.
// Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this
// parameter in their requests. For information about downloading objects from requester pays buckets, see <a
// href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html">Downloading Objects in
// Requestor Pays Buckets</a> in the <i>IBM COS Developer Guide</i>.
const (
	UploadPartOptions_XAmzRequestPayer_Requester = "requester"
)
// NewUploadPartOptions : Instantiate UploadPartOptions
func (*IbmCloudObjectStorageS3ApiV2) NewUploadPartOptions(bucket string, key string, partNumber int64, uploadID string, body string) *UploadPartOptions {
	return &UploadPartOptions{
		Bucket: core.StringPtr(bucket),
		Key: core.StringPtr(key),
		PartNumber: core.Int64Ptr(partNumber),
		UploadID: core.StringPtr(uploadID),
		Body: core.StringPtr(body),
	}
}
// SetBucket : Allow user to set Bucket
func (options *UploadPartOptions) SetBucket(bucket string) *UploadPartOptions {
	options.Bucket = core.StringPtr(bucket)
	return options
}
// SetKey : Allow user to set Key
func (options *UploadPartOptions) SetKey(key string) *UploadPartOptions {
	options.Key = core.StringPtr(key)
	return options
}
// SetPartNumber : Allow user to set PartNumber
func (options *UploadPartOptions) SetPartNumber(partNumber int64) *UploadPartOptions {
	options.PartNumber = core.Int64Ptr(partNumber)
	return options
}
// SetUploadID : Allow user to set UploadID
func (options *UploadPartOptions) SetUploadID(uploadID string) *UploadPartOptions {
	options.UploadID = core.StringPtr(uploadID)
	return options
}
// SetBody : Allow user to set Body
func (options *UploadPartOptions) SetBody(body string) *UploadPartOptions {
	options.Body = core.StringPtr(body)
	return options
}
// SetContentLength : Allow user to set ContentLength
func (options *UploadPartOptions) SetContentLength(contentLength int64) *UploadPartOptions {
	options.ContentLength = core.Int64Ptr(contentLength)
	return options
}
// SetContentMD5 : Allow user to set ContentMD5
func (options *UploadPartOptions) SetContentMD5(contentMD5 string) *UploadPartOptions {
	options.ContentMD5 = core.StringPtr(contentMD5)
	return options
}
// SetXAmzServerSideEncryptionCustomerAlgorithm : Allow user to set XAmzServerSideEncryptionCustomerAlgorithm
func (options *UploadPartOptions) SetXAmzServerSideEncryptionCustomerAlgorithm(xAmzServerSideEncryptionCustomerAlgorithm string) *UploadPartOptions {
	options.XAmzServerSideEncryptionCustomerAlgorithm = core.StringPtr(xAmzServerSideEncryptionCustomerAlgorithm)
	return options
}
// SetXAmzServerSideEncryptionCustomerKey : Allow user to set XAmzServerSideEncryptionCustomerKey
func (options *UploadPartOptions) SetXAmzServerSideEncryptionCustomerKey(xAmzServerSideEncryptionCustomerKey string) *UploadPartOptions {
	options.XAmzServerSideEncryptionCustomerKey = core.StringPtr(xAmzServerSideEncryptionCustomerKey)
	return options
}
// SetXAmzServerSideEncryptionCustomerKeyMD5 : Allow user to set XAmzServerSideEncryptionCustomerKeyMD5
func (options *UploadPartOptions) SetXAmzServerSideEncryptionCustomerKeyMD5(xAmzServerSideEncryptionCustomerKeyMD5 string) *UploadPartOptions {
	options.XAmzServerSideEncryptionCustomerKeyMD5 = core.StringPtr(xAmzServerSideEncryptionCustomerKeyMD5)
	return options
}
// SetXAmzRequestPayer : Allow user to set XAmzRequestPayer
func (options *UploadPartOptions) SetXAmzRequestPayer(xAmzRequestPayer string) *UploadPartOptions {
	options.XAmzRequestPayer = core.StringPtr(xAmzRequestPayer)
	return options
}
// SetXAmzExpectedBucketOwner : Allow user to set XAmzExpectedBucketOwner
func (options *UploadPartOptions) SetXAmzExpectedBucketOwner(xAmzExpectedBucketOwner string) *UploadPartOptions {
	options.XAmzExpectedBucketOwner = core.StringPtr(xAmzExpectedBucketOwner)
	return options
}
// SetHeaders : Allow user to set Headers
func (options *UploadPartOptions) SetHeaders(param map[string]string) *UploadPartOptions {
	options.Headers = param
	return options
}
// Generated "extends" alias models: each type below is an empty struct that
// represents a schema alias of its base model, with a matching trivial
// unmarshaller that only instantiates the alias.
// CORSRuleAllowedHeaders : Headers that are specified in the `Access-Control-Request-Headers` header. These headers are allowed in a preflight
// OPTIONS request. In response to any preflight OPTIONS request, IBM COS returns any requested headers that are
// allowed.
// This model "extends" String
type CORSRuleAllowedHeaders struct {
}
// UnmarshalCORSRuleAllowedHeaders unmarshals an instance of CORSRuleAllowedHeaders from the specified map of raw messages.
func UnmarshalCORSRuleAllowedHeaders(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(CORSRuleAllowedHeaders)
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// CORSRuleAllowedMethods : An HTTP method that you allow the origin to execute. Valid values are `GET`, `PUT`, `HEAD`, `POST`, and `DELETE`.
// This model "extends" String
type CORSRuleAllowedMethods struct {
}
// UnmarshalCORSRuleAllowedMethods unmarshals an instance of CORSRuleAllowedMethods from the specified map of raw messages.
func UnmarshalCORSRuleAllowedMethods(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(CORSRuleAllowedMethods)
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// CORSRuleAllowedOrigins : One or more origins you want customers to be able to access the bucket from.
// This model "extends" String
type CORSRuleAllowedOrigins struct {
}
// UnmarshalCORSRuleAllowedOrigins unmarshals an instance of CORSRuleAllowedOrigins from the specified map of raw messages.
func UnmarshalCORSRuleAllowedOrigins(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(CORSRuleAllowedOrigins)
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// CORSRuleExposeHeaders : One or more headers in the response that you want customers to be able to access from their applications (for
// example, from a JavaScript `XMLHttpRequest` object).
// This model "extends" String
type CORSRuleExposeHeaders struct {
}
// UnmarshalCORSRuleExposeHeaders unmarshals an instance of CORSRuleExposeHeaders from the specified map of raw messages.
func UnmarshalCORSRuleExposeHeaders(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(CORSRuleExposeHeaders)
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// CommonPrefixList : CommonPrefixList struct
// This model "extends" CommonPrefix
type CommonPrefixList struct {
}
// UnmarshalCommonPrefixList unmarshals an instance of CommonPrefixList from the specified map of raw messages.
func UnmarshalCommonPrefixList(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(CommonPrefixList)
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// DeleteObjectsOutputErrors : Container for a failed delete operation that describes the object that IBM COS attempted to delete and the error it
// encountered.
// This model "extends" Error
type DeleteObjectsOutputErrors struct {
}
// UnmarshalDeleteObjectsOutputErrors unmarshals an instance of DeleteObjectsOutputErrors from the specified map of raw messages.
func UnmarshalDeleteObjectsOutputErrors(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(DeleteObjectsOutputErrors)
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// GetBucketCorsOutputCORSRules : A set of origins and methods (cross-origin access that you want to allow). You can add up to 100 rules to the
// configuration.
// This model "extends" CORSRule
type GetBucketCorsOutputCORSRules struct {
}
// UnmarshalGetBucketCorsOutputCORSRules unmarshals an instance of GetBucketCorsOutputCORSRules from the specified map of raw messages.
func UnmarshalGetBucketCorsOutputCORSRules(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(GetBucketCorsOutputCORSRules)
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// GetBucketLifecycleConfigurationOutputRules : Container for a lifecycle rule.
// This model "extends" LifecycleRule
type GetBucketLifecycleConfigurationOutputRules struct {
}
// UnmarshalGetBucketLifecycleConfigurationOutputRules unmarshals an instance of GetBucketLifecycleConfigurationOutputRules from the specified map of raw messages.
func UnmarshalGetBucketLifecycleConfigurationOutputRules(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(GetBucketLifecycleConfigurationOutputRules)
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// GetObjectAclOutputGrants : A list of grants.
// This model "extends" GrantsItem
type GetObjectAclOutputGrants struct {
}
// UnmarshalGetObjectAclOutputGrants unmarshals an instance of GetObjectAclOutputGrants from the specified map of raw messages.
func UnmarshalGetObjectAclOutputGrants(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(GetObjectAclOutputGrants)
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// LifecycleRuleAndOperatorTags : All of these tags must exist in the object's tag set in order for the rule to apply.
// This model "extends" TagSetItem
type LifecycleRuleAndOperatorTags struct {
}
// UnmarshalLifecycleRuleAndOperatorTags unmarshals an instance of LifecycleRuleAndOperatorTags from the specified map of raw messages.
func UnmarshalLifecycleRuleAndOperatorTags(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(LifecycleRuleAndOperatorTags)
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// LifecycleRuleTransitions : Specifies when an IBM COS object transitions to a specified storage class.
// This model "extends" Transition
type LifecycleRuleTransitions struct {
}
// UnmarshalLifecycleRuleTransitions unmarshals an instance of LifecycleRuleTransitions from the specified map of raw messages.
func UnmarshalLifecycleRuleTransitions(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(LifecycleRuleTransitions)
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// ListMultipartUploadsOutputUploads : Container for elements related to a particular multipart upload. A response can contain zero or more `Upload`
// elements.
// This model "extends" MultipartUpload
type ListMultipartUploadsOutputUploads struct {
}
// UnmarshalListMultipartUploadsOutputUploads unmarshals an instance of ListMultipartUploadsOutputUploads from the specified map of raw messages.
func UnmarshalListMultipartUploadsOutputUploads(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(ListMultipartUploadsOutputUploads)
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// ListPartsOutputParts : Container for elements related to a particular part. A response can contain zero or more `Part` elements.
// This model "extends" Part
type ListPartsOutputParts struct {
}
// UnmarshalListPartsOutputParts unmarshals an instance of ListPartsOutputParts from the specified map of raw messages.
func UnmarshalListPartsOutputParts(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(ListPartsOutputParts)
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// ObjectList : ObjectList struct
// This model "extends" Object
type ObjectList struct {
}
// UnmarshalObjectList unmarshals an instance of ObjectList from the specified map of raw messages.
func UnmarshalObjectList(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(ObjectList)
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// RoutingRules : RoutingRules struct
// This model "extends" RoutingRulesItem
type RoutingRules struct {
}
// UnmarshalRoutingRules unmarshals an instance of RoutingRules from the specified map of raw messages.
func UnmarshalRoutingRules(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(RoutingRules)
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
// TagSet : TagSet struct
// This model "extends" TagSetItem
type TagSet struct {
}
// UnmarshalTagSet unmarshals an instance of TagSet from the specified map of raw messages.
func UnmarshalTagSet(m map[string]json.RawMessage, result interface{}) (err error) {
	obj := new(TagSet)
	reflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))
	return
}
|
package _const
// Default pagination settings.
const (
	// PageNo is the default (first) page number.
	PageNo = 1
	// PerPageLimit is the default number of items returned per page.
	PerPageLimit = 10
)
|
// +build ignore
// Package main demonstrates the use of `xflag` and `xflag/cflag`.
package main
import (
"flag"
"log"
"github.com/goaltools/xflag"
"github.com/goaltools/xflag/cflag"
)
// Command-line flags. Values may also be supplied through the INI files
// passed to xflag.Parse in main.
var (
	name = flag.String("name", "John Doe", "Name of the user.")
	age  = flag.Int("age", 16, "Age of the user.")
	path = flag.String("paths:src", "/home/smbd", "Path to the Go sources.")
	// emails uses "cflag" package that provides support of
	// complex flags: slices of strings, ints, floats, etc.
	emails = cflag.Strings("emails[]", []string{"test@test.xx"}, "A list of e-mails.")
)
// main merges flag values from three INI configuration files with the
// command line, then logs every resulting flag value.
func main() {
	// Parse flags: use 3 different configuration files.
	if err := xflag.Parse("./file1.ini", "./file2.ini", "./file3.ini"); err != nil {
		log.Fatal(err)
	}
	// Print the values of flags.
	log.Printf("Name: `%s`.", *name)
	log.Printf("Age: %d.", *age)
	log.Printf("Path: `%s`.", *path)
	log.Printf("Emails: %v.", *emails)
}
|
package main
import "fmt"
// toCelsius converts a temperature in degrees Fahrenheit to degrees Celsius.
func toCelsius(fah float64) float64 {
	return (fah - 32) * 5 / 9
}

// main reads a Fahrenheit temperature from stdin and prints its Celsius
// equivalent.
func main() {
	fah := 0.0
	fmt.Println("Enter the degree in fahrenheit")
	// Bug fix: the original ignored Scanf's error and silently converted 0
	// when the input was not a number.
	if _, err := fmt.Scanf("%f", &fah); err != nil {
		fmt.Println("invalid input:", err)
		return
	}
	fmt.Printf("The degree in Celsius is %f \n", toCelsius(fah))
}
|
package main
import (
"flag"
"fmt"
"os"
"os/exec"
"os/signal"
"time"
)
const (
	// version is printed when the -v flag is given.
	version string = "1.0"
)
// Command-line flags for the repeat-command tool.
var (
	n = flag.Int("n", 5, "Specify an interval n seconds to run command")
	h = flag.Bool("h", false, "Display Help")
	v = flag.Bool("v", false, "Display version")
)
// main parses the flags, handles the -v/-h short-circuit options, and then
// runs the given command repeatedly at the configured interval.
func main() {
	flag.Parse()
	if *v {
		fmt.Println(version)
		return
	}
	if *h {
		flag.PrintDefaults()
		return
	}
	args := flag.Args()
	if len(args) == 0 {
		// Bug fix: emit a complete line (the original Fprintf had no "\n",
		// gluing the message to the shell prompt).
		fmt.Fprintln(os.Stderr, "command is required")
		os.Exit(1)
	}
	// args[1:] is simply empty when no extra arguments were given, so the
	// original's special-case branch is unnecessary.
	repeatCmd(args[0], args[1:], time.Duration(*n))
}
// repeatCmd runs cmd with args once immediately, then again every n seconds,
// until the process receives an interrupt signal or the command fails.
// n is the raw interval count; it is multiplied by time.Second here.
func repeatCmd(cmd string, args []string, n time.Duration) {
	// Bug fix: signal.Notify does not block sending to this channel, so it
	// must be buffered (capacity 1) or a signal delivered while the command
	// is running can be silently dropped (flagged by `go vet`).
	done := make(chan os.Signal, 1)
	signal.Notify(done, os.Interrupt)
	// First launch before entering the timer loop.
	if err := executeCmd(cmd, args); err != nil {
		fmt.Fprintf(os.Stderr, "There was an error running '%s' command: \n %v\n", cmd, err)
		return
	}
	for {
		select {
		case <-time.After(n * time.Second):
			if err := executeCmd(cmd, args); err != nil {
				fmt.Fprintf(os.Stderr, "There was an error running '%s' command: \n %v\n", cmd, err)
				return
			}
		case <-done:
			return
		}
	}
}
// executeCmd clears the terminal, then runs command with args, wiring the
// child's output directly to this process's stdout and stderr.
func executeCmd(command string, args []string) error {
	err := clearCmd()
	if err != nil {
		return err
	}
	c := exec.Command(command, args...)
	c.Stdout, c.Stderr = os.Stdout, os.Stderr
	return c.Run()
}
// clearCmd shells out to `clear` to wipe the terminal between runs.
func clearCmd() error {
	cmd := exec.Command("clear")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}
|
// Test OneDrive filesystem interface
package onedrive_test
import (
"testing"
"github.com/ncw/rclone/backend/onedrive"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs the standard fstests integration suite against the
// remote named "TestOneDrive:", using a typed-nil *onedrive.Object so the
// suite can discover the backend's Object type.
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestOneDrive:",
		NilObject:  (*onedrive.Object)(nil),
	})
}
|
package db
import (
"errors"
"sync"
"github.com/gin-gonic/gin"
"urlshortener.api/models"
)
// Ensure MemoryDB conforms to DataAccessLayer interface.
var _ DataAccessLayer = &MemoryDB{}

// MemoryDB is a simple in-memory persistence layer for urlshortens.
// All access to the urlshortens map must hold mu.
type MemoryDB struct {
	mu          sync.Mutex
	urlshortens map[string]*models.URLShorten // keyed by URLShorten.ShortURL
}
// NewMemoryDB returns a MemoryDB ready for use, backed by an empty map.
func NewMemoryDB() *MemoryDB {
	db := &MemoryDB{urlshortens: make(map[string]*models.URLShorten)}
	return db
}
// Fetch retrieves URLShorten resource by its shortURL.
// It returns an error when no entry exists for shortURL.
func (db *MemoryDB) Fetch(ctx *gin.Context, shortURL string) (*models.URLShorten, error) {
	db.mu.Lock()
	defer db.mu.Unlock()
	if urlsh, ok := db.urlshortens[shortURL]; ok {
		return urlsh, nil
	}
	return nil, errors.New("[MemDB]: Not Found")
}
// Store saves urlsh under its ShortURL key, overwriting any previous entry,
// and echoes the stored value back to the caller.
func (db *MemoryDB) Store(ctx *gin.Context, urlsh *models.URLShorten) (*models.URLShorten, error) {
	db.mu.Lock()
	defer db.mu.Unlock()
	db.urlshortens[urlsh.ShortURL] = urlsh
	return urlsh, nil
}
// Close empties urlshorten repos.
// NOTE(review): the map is set to nil rather than re-made, so a Store after
// Close panics on the nil map — presumably Close is terminal; confirm.
func (db *MemoryDB) Close() {
	db.mu.Lock()
	defer db.mu.Unlock()
	db.urlshortens = nil
}
// Size returns the number of entries in the urlshorten repos.
// Bug fix: it now takes the mutex — without it, calling Size concurrently
// with Store or Close is a data race on the map.
func (db *MemoryDB) Size() int {
	db.mu.Lock()
	defer db.mu.Unlock()
	return len(db.urlshortens)
}
|
package shellcode
import (
"bytes"
"testing"
)
// TestX8664MemFdExec checks that LinuxMemFdExec produces shellcode of the
// expected size for a fixed payload.
// NOTE(review): the payload's interpreter line reads "#/bin/bash" — a "!"
// appears to be missing for a shebang. The 263-byte expectation below
// depends on the exact payload bytes, so fixing the typo would also require
// updating the expected size; confirm intent before changing either.
func TestX8664MemFdExec(t *testing.T) {
	payload := `
#/bin/bash
echo "Hello from memfd_create exec sploit shellcode" > ./success.txt
`
	x8664 := NewX8664()
	shellcode, err := x8664.LinuxMemFdExec([]byte(payload))
	if err != nil {
		t.Fatal(err)
	}
	// 263 = size of the generated stub plus the embedded payload.
	if len(shellcode) != 263 {
		t.Fatal("Shellcode size != 263")
	}
}
// TestX8664LinuxShell verifies that LinuxShell emits the exact expected
// execve("/bin/sh") shellcode byte sequence for x86-64 Linux.
func TestX8664LinuxShell(t *testing.T) {
	x8664 := NewX8664()
	shellcode, err := x8664.LinuxShell()
	if err != nil {
		t.Fatal(err)
	}
	scBytes := []byte{0x31, 0xc0, 0x48, 0xbb, 0xd1, 0x9d, 0x96, 0x91,
		0xd0, 0x8c, 0x97, 0xff, 0x48, 0xf7, 0xdb, 0x53,
		0x54, 0x5f, 0x99, 0x52, 0x57, 0x54, 0x5e, 0xb0,
		0x3b, 0x0f, 0x05}
	// Idiom fix: staticcheck S1004 — use bytes.Equal instead of
	// bytes.Compare(...) != 0 for a pure equality check.
	if !bytes.Equal(shellcode, scBytes) {
		t.Fatal("Shellcode bytes != expected")
	}
}
|
package authz
import (
"encoding/json"
"io"
"strconv"
"time"
"github.com/AuthzMemory/core"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/pkg/authorization"
// "fmt"
"github.com/docker/engine-api/client"
"github.com/docker/engine-api/types"
"golang.org/x/net/context"
)
const (
	// AuditHookStdout indicates logs are streamed to stdout.
	AuditHookStdout = ""
)

// defaultAuditLogPath is the file test hook log path.
const defaultAuditLogPath = "/var/log/authz-broker.log"
// basicAuthorizer is the core.Authorizer implementation returned by
// NewBasicAuthZAuthorizer; it relies on the package-level state below.
type basicAuthorizer struct {
	settings *BasicAuthorizerSettings
}

// BasicAuthorizerSettings provides settings for the basic authorizer flow.
type BasicAuthorizerSettings struct {
}

// Package-level accounting state, populated by initializeOnFirstCall.
// NOTE(review): these are read and written from several goroutines without
// synchronization — confirm whether a mutex or atomics are needed.
var memoryLimit int64
var currentMemory float64
var cli *client.Client
var memoryPerID map[string]int64
// NewBasicAuthZAuthorizer creates a new basic authorizer configured with the
// given settings.
func NewBasicAuthZAuthorizer(settings *BasicAuthorizerSettings) core.Authorizer {
	authorizer := basicAuthorizer{settings: settings}
	return &authorizer
}
// Init loads the basic authz plugin configuration from disk.
// For this authorizer it only resets the package-level accounting state.
func (f *basicAuthorizer) Init() error {
	currentMemory, memoryLimit = 0.0, 0
	return nil
}
// initializeOnFirstCall connects to the local Docker daemon, records the
// host's total memory, and starts background goroutines that (a) track
// per-container memory reservations from create/destroy events and
// (b) recompute the total reserved memory every 30 seconds.
// NOTE(review): currentMemory and memoryPerID are accessed from several
// goroutines without synchronization — confirm whether that race matters.
func initializeOnFirstCall() error {
	memoryPerID = make(map[string]int64)
	defaultHeaders := map[string]string{"User-Agent": "engine-api-cli-1.0", AuthZTenantIDHeaderName: "infoTenantInternal"}
	var err error
	cli, err = client.NewClient("unix:///var/run/docker.sock", "v1.24", nil, defaultHeaders)
	if err != nil {
		panic(err)
	}
	info, err := cli.Info(context.Background())
	// Bug fix: check the error before using info — the original read
	// info.MemTotal from a zero-value struct before inspecting err.
	if err != nil {
		panic(err)
	}
	memoryLimit = info.MemTotal
	// decodingResult carries one decoded Docker event or the decode error.
	type decodingResult struct {
		msg events.Message
		err error
	}
	stopChan := make(chan struct{})
	responseBody, err := cli.Events(context.Background(), types.EventsOptions{})
	if err != nil {
		panic(err)
	}
	resultChan := make(chan decodingResult)
	// Decoder goroutine: stream daemon events into resultChan until EOF.
	go func() {
		dec := json.NewDecoder(responseBody)
		for {
			var result decodingResult
			result.err = dec.Decode(&result.msg)
			resultChan <- result
			if result.err == io.EOF {
				break
			}
		}
		close(resultChan)
	}()
	// Event-handling goroutine: maintain memoryPerID from container
	// create/destroy events.
	go func() {
		defer responseBody.Close()
		for {
			select {
			case <-stopChan:
				// ec <- nil
				return
			case result := <-resultChan:
				if result.err != nil {
					// ec <- result.err
					return
				}
				logrus.Debug(result.msg)
				if result.msg.Action == "create" && result.msg.Type == "container" {
					memoryPerID[result.msg.ID] = 0
					cJSON, _ := cli.ContainerInspect(context.Background(), result.msg.ID)
					if cJSON.ContainerJSONBase != nil && cJSON.ContainerJSONBase.HostConfig != nil {
						memoryPerID[result.msg.ID] = cJSON.ContainerJSONBase.HostConfig.Memory
					}
				} else if result.msg.Action == "destroy" && result.msg.Type == "container" {
					currentMemory -= float64(memoryPerID[result.msg.ID])
					delete(memoryPerID, result.msg.ID)
				}
			}
		}
	}()
	// Polling goroutine: every 30s, recompute the total reserved memory by
	// summing HostConfig.Memory over all containers.
	go func() {
		for {
			options := types.ContainerListOptions{All: true}
			containers, err := cli.ContainerList(context.Background(), options)
			if err != nil {
				panic(err)
			}
			var tmp int64
			for _, c := range containers {
				cJSON, _ := cli.ContainerInspect(context.Background(), c.ID)
				if cJSON.ContainerJSONBase != nil && cJSON.ContainerJSONBase.HostConfig != nil {
					tmp += cJSON.ContainerJSONBase.HostConfig.Memory
					if cJSON.ContainerJSONBase.HostConfig.Memory == 0 {
						logrus.Infof("Warning no memory accounted for container %s ", cJSON.ID)
					}
				}
			}
			logrus.Info("Current memory used: " + strconv.FormatInt(int64(tmp), 10))
			currentMemory = float64(tmp)
			time.Sleep(30 * time.Second)
		}
	}()
	return nil
}
// AuthZTenantIDHeaderName - TenantId HTTP header name.
var AuthZTenantIDHeaderName = "X-Auth-Tenantid"
// AuthZReq authorizes a Docker API request. Container-create requests
// are allowed only while the sum of requested memory reservations
// stays below the host memory total; all other requests pass through.
func (f *basicAuthorizer) AuthZReq(authZReq *authorization.Request) *authorization.Response {
	if memoryLimit == 0 {
		memoryLimit = 1 // sentinel: prevents re-entering initialization below
		initializeOnFirstCall()
	}
	action, _ := core.ParseRoute(authZReq.RequestMethod, authZReq.RequestURI)
	if action != core.ActionContainerCreate {
		return &authorization.Response{
			Allow: true,
		}
	}
	// BUG FIX: the unchecked type assertions below previously panicked
	// on any malformed or unexpected request body; use comma-ok forms
	// and fall back to allowing (no memory to account for).
	var request interface{}
	if err := json.Unmarshal(authZReq.RequestBody, &request); err != nil {
		logrus.Error(err)
		return &authorization.Response{
			Allow: true,
		}
	}
	m, ok := request.(map[string]interface{})
	if !ok {
		return &authorization.Response{
			Allow: true,
		}
	}
	hostConfig, ok := m["HostConfig"].(map[string]interface{})
	if !ok {
		return &authorization.Response{
			Allow: true,
		}
	}
	// A missing or non-numeric Memory field counts as 0.
	memory, _ := hostConfig["Memory"].(float64)
	if currentMemory+memory < float64(memoryLimit) {
		currentMemory += memory
		return &authorization.Response{
			Allow: true,
		}
	}
	return &authorization.Response{
		Allow: false,
		Msg:   "Not enough Memory",
	}
}
// AuthZRes always allow responses from server
func (f *basicAuthorizer) AuthZRes(authZReq *authorization.Request) *authorization.Response {
	resp := &authorization.Response{Allow: true}
	return resp
}
|
package main
import (
"os"
"path/filepath"
"github.com/therecipe/qt/core"
"github.com/therecipe/qt/quick"
"github.com/therecipe/qt/widgets"
_ "github.com/therecipe/qt/internal/examples/showcases/wallet/assets"
_ "github.com/therecipe/qt/internal/examples/showcases/wallet/dashboard"
_ "github.com/therecipe/qt/internal/examples/showcases/wallet/files"
fcontroller "github.com/therecipe/qt/internal/examples/showcases/wallet/files/controller"
_ "github.com/therecipe/qt/internal/examples/showcases/wallet/hosting"
_ "github.com/therecipe/qt/internal/examples/showcases/wallet/terminal"
tcontroller "github.com/therecipe/qt/internal/examples/showcases/wallet/terminal/controller"
_ "github.com/therecipe/qt/internal/examples/showcases/wallet/theme"
_ "github.com/therecipe/qt/internal/examples/showcases/wallet/view"
_ "github.com/therecipe/qt/internal/examples/showcases/wallet/view/left"
_ "github.com/therecipe/qt/internal/examples/showcases/wallet/view/top"
_ "github.com/therecipe/qt/internal/examples/showcases/wallet/wallet"
wcontroller "github.com/therecipe/qt/internal/examples/showcases/wallet/wallet/controller"
_ "github.com/therecipe/qt/internal/examples/showcases/wallet/wallet/dialog"
"github.com/therecipe/qt/internal/examples/showcases/wallet/controller"
)
var (
pathToProject = filepath.Join(os.Getenv("GOPATH"), "src", "github.com", "therecipe", "qt", "internal", "examples", "showcases", "wallet")
pathToWalletDaemon = filepath.Join(os.Getenv("GOPATH"), "bin", "wallet", "WalletDaemon")
PRODUCTION = true //set to 'true' to use qrc: instead of qml files
DEMO = true //set to 'true' to use demo data for the wallet and files table
)
// init wires the package-level configuration into the controller
// packages before main runs: the wallet daemon path, the QML disk
// cache toggle (disabled in development so QML edits take effect on
// restart), and the DEMO flag for each data controller.
func init() {
	tcontroller.PathToWalletDaemon = pathToWalletDaemon
	if !PRODUCTION {
		os.Setenv("QML_DISABLE_DISK_CACHE", "true")
	}
	controller.DEMO = DEMO
	wcontroller.DEMO = DEMO
	fcontroller.DEMO = DEMO
}
// main boots the wallet showcase: it creates the QApplication, the
// root controller, and a QQuickView whose QML is loaded either from
// compiled qrc resources (PRODUCTION) or straight from disk with
// per-module import paths (development).
func main() {
	path := filepath.Join(pathToProject, "view", "qml", "View.qml")
	core.QCoreApplication_SetAttribute(core.Qt__AA_EnableHighDpiScaling, true)
	widgets.NewQApplication(len(os.Args), os.Args)
	controller.NewController(nil)
	view := quick.NewQQuickView(nil)
	view.SetResizeMode(quick.QQuickView__SizeRootObjectToView)
	view.SetMinimumSize(core.NewQSize2(1024, 415))
	view.Resize(core.NewQSize2(1024, 768))
	if PRODUCTION {
		// Everything was bundled into the qrc resource file.
		view.Engine().AddImportPath("qrc:/qml/")
		view.SetSource(core.NewQUrl3("qrc:/qml/View.qml", 0))
	} else {
		// Development: resolve each module's QML directory under GOPATH.
		view.Engine().AddImportPath(filepath.Join(pathToProject, "theme", "qml"))
		view.Engine().AddImportPath(filepath.Join(pathToProject, "view", "qml"))
		view.Engine().AddImportPath(filepath.Join(pathToProject, "view", "top", "qml"))
		view.Engine().AddImportPath(filepath.Join(pathToProject, "view", "left", "qml"))
		view.Engine().AddImportPath(filepath.Join(pathToProject, "dashboard", "qml"))
		view.Engine().AddImportPath(filepath.Join(pathToProject, "files", "qml"))
		view.Engine().AddImportPath(filepath.Join(pathToProject, "hosting", "qml"))
		view.Engine().AddImportPath(filepath.Join(pathToProject, "wallet", "qml"))
		view.Engine().AddImportPath(filepath.Join(pathToProject, "wallet", "dialog", "qml"))
		view.Engine().AddImportPath(filepath.Join(pathToProject, "terminal", "qml"))
		view.SetSource(core.QUrl_FromLocalFile(path))
	}
	view.Show()
	widgets.QApplication_Exec()
}
|
package server
import (
"log"
"strconv"
"github.com/gin-gonic/gin"
"go.mongodb.org/mongo-driver/mongo"
"github.com/AskJag07/virtuoso-server/config"
"github.com/AskJag07/virtuoso-server/controllers"
"github.com/AskJag07/virtuoso-server/middleware"
)
// NewRouter builds the gin engine for the API.
//
// Registration order is significant: the status and /auth/* routes
// are declared before the Authentication middleware is attached and
// therefore stay public, while every route added afterwards (e.g.
// /students) requires authentication.
func NewRouter(client *mongo.Client) *gin.Engine {
	// PRODUCTION toggles gin's release mode; an unparsable value is fatal.
	Production := config.GetVar("PRODUCTION")
	production, err := strconv.ParseBool(Production)
	if err != nil {
		log.Panic(err)
	}
	if production {
		gin.SetMode(gin.ReleaseMode)
	}
	r := gin.New()
	r.Use(gin.Logger())
	router := r.Group("/api")
	router.GET("/", controllers.Status())
	// Public auth endpoints (no token required).
	router.POST("/auth/register", controllers.Register(client))
	router.POST("/auth/login", controllers.Login(client))
	// Everything registered below this line requires a valid token.
	router.Use(middleware.Authentication(client))
	router.GET("/students", controllers.Students(client))
	return r
}
|
package actuator
import "logging"
// Curve maps an integer input to an integer output (the domain
// semantics — e.g. sensor reading to actuator level — depend on the
// caller).
type Curve interface {
	// Lookup returns the curve's output for the given input.
	Lookup(int) int
	// MapY rescales the output range from [min,max] (args 1-2) to
	// [newmin,newmax] (args 3-4) and returns the remapped curve.
	MapY(int, int, int, int) Curve
}
/*
 * Linear interpolation between a min and a max. Should work well for
 * everything
 */
type ClampedLinearCurve struct {
	// Endpoints of the linear segment: the output is clamped to y1
	// below x1 and to y2 above x2.
	x1, y1, x2, y2 int
}
// ClampedLinear returns a Curve interpolating linearly between
// (x1,y1) and (x2,y2). It panics when x1 >= x2, which would make the
// interpolation degenerate (empty or reversed input interval).
func ClampedLinear(x1, y1, x2, y2 int) Curve {
	if x1 >= x2 {
		panic("Invalid parameters: x1>=x2")
	}
	return &ClampedLinearCurve{x1, y1, x2, y2}
}
// Lookup returns the curve value at x: y1 at or below x1, y2 at or
// above x2, and the (truncating, integer) linear interpolation in
// between.
func (C *ClampedLinearCurve) Lookup(x int) int {
	switch {
	case x <= C.x1:
		return C.y1
	case x >= C.x2:
		return C.y2
	default:
		return C.y1 + (C.y2-C.y1)*(x-C.x1)/(C.x2-C.x1)
	}
}
// Actuators transform the existing curve to directly fit to their min/max
// MapY rescales the curve's output points from [min,max] to
// [newmin,newmax] and returns the remapped curve (x endpoints are
// unchanged).
// NOTE(review): the formula scales y without first subtracting min
// (newmin + y*(newmax-newmin)/(max-min)); for min != 0 this is not
// the standard affine remap newmin + (y-min)*(newmax-newmin)/(max-min)
// — confirm this is intentional.
func (C *ClampedLinearCurve) MapY(min, max, newmin, newmax int) Curve {
	newy1 := newmin + C.y1 * (newmax-newmin) / (max-min)
	newy2 := newmin + C.y2 * (newmax-newmin) / (max-min)
	logging.Info("Remapped y1=%d->%d, y2=%d->%d ([%d-%d]->[%d-%d])\n",
		C.y1, newy1, C.y2, newy2, min, max, newmin, newmax,
	)
	return &ClampedLinearCurve {C.x1, newy1, C.x2, newy2}
}
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/33e8a1c9cad22a5946ac735c4fba31af2da2cec2
package types
import (
"bytes"
"encoding/json"
"errors"
"io"
"strconv"
)
// BoolQuery type.
//
// https://github.com/elastic/elasticsearch-specification/blob/33e8a1c9cad22a5946ac735c4fba31af2da2cec2/specification/_types/query_dsl/compound.ts#L28-L34
type BoolQuery struct {
Boost *float32 `json:"boost,omitempty"`
Filter []Query `json:"filter,omitempty"`
MinimumShouldMatch MinimumShouldMatch `json:"minimum_should_match,omitempty"`
Must []Query `json:"must,omitempty"`
MustNot []Query `json:"must_not,omitempty"`
QueryName_ *string `json:"_name,omitempty"`
Should []Query `json:"should,omitempty"`
}
// UnmarshalJSON decodes a BoolQuery from its JSON object form.
//
// A hand-rolled (generated) decoder is used instead of default struct
// decoding because the clause fields ("filter", "must", "must_not",
// "should") may arrive either as a single query object or as an array
// of query objects, and "boost" may be a JSON number or a numeric
// string.
func (s *BoolQuery) UnmarshalJSON(data []byte) error {
	dec := json.NewDecoder(bytes.NewReader(data))
	for {
		// Walk the top-level tokens; each string token is a field key.
		t, err := dec.Token()
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return err
		}
		switch t {
		case "boost":
			// Accept both a numeric string and a plain number.
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseFloat(v, 32)
				if err != nil {
					return err
				}
				f := float32(value)
				s.Boost = &f
			case float64:
				f := float32(v)
				s.Boost = &f
			}
		case "filter":
			// Single object or array: sniff the leading '[' on the raw
			// message to decide.
			rawMsg := json.RawMessage{}
			dec.Decode(&rawMsg)
			if !bytes.HasPrefix(rawMsg, []byte("[")) {
				o := NewQuery()
				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
					return err
				}
				s.Filter = append(s.Filter, *o)
			} else {
				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Filter); err != nil {
					return err
				}
			}
		case "minimum_should_match":
			if err := dec.Decode(&s.MinimumShouldMatch); err != nil {
				return err
			}
		case "must":
			// Same single-object-or-array handling as "filter".
			rawMsg := json.RawMessage{}
			dec.Decode(&rawMsg)
			if !bytes.HasPrefix(rawMsg, []byte("[")) {
				o := NewQuery()
				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
					return err
				}
				s.Must = append(s.Must, *o)
			} else {
				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Must); err != nil {
					return err
				}
			}
		case "must_not":
			// Same single-object-or-array handling as "filter".
			rawMsg := json.RawMessage{}
			dec.Decode(&rawMsg)
			if !bytes.HasPrefix(rawMsg, []byte("[")) {
				o := NewQuery()
				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
					return err
				}
				s.MustNot = append(s.MustNot, *o)
			} else {
				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.MustNot); err != nil {
					return err
				}
			}
		case "_name":
			// Stored quoted in JSON; fall back to the raw bytes when
			// unquoting fails.
			var tmp json.RawMessage
			if err := dec.Decode(&tmp); err != nil {
				return err
			}
			o := string(tmp[:])
			o, err = strconv.Unquote(o)
			if err != nil {
				o = string(tmp[:])
			}
			s.QueryName_ = &o
		case "should":
			// Same single-object-or-array handling as "filter".
			rawMsg := json.RawMessage{}
			dec.Decode(&rawMsg)
			if !bytes.HasPrefix(rawMsg, []byte("[")) {
				o := NewQuery()
				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil {
					return err
				}
				s.Should = append(s.Should, *o)
			} else {
				if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Should); err != nil {
					return err
				}
			}
		}
	}
	return nil
}
// NewBoolQuery returns a BoolQuery.
func NewBoolQuery() *BoolQuery {
	return &BoolQuery{}
}
|
package resp
// StatusesPublicLineRsp models a public-timeline API response: a page
// of statuses, each carrying its author, plus paging cursors.
// NOTE(review): field semantics are inferred from the JSON tags only —
// confirm against the upstream API documentation.
type StatusesPublicLineRsp struct {
	Statuses []struct {
		CreatedAt string `json:"created_at"`
		ID int64 `json:"id"`
		Text string `json:"text"`
		Favorited bool `json:"favorited"`
		Truncated bool `json:"truncated"`
		InReplyToStatusID string `json:"in_reply_to_status_id"`
		InReplyToUserID string `json:"in_reply_to_user_id"`
		InReplyToScreenName string `json:"in_reply_to_screen_name"`
		Geo interface{} `json:"geo"`
		Mid string `json:"mid"`
		RepostsCount int `json:"reposts_count"`
		CommentsCount int `json:"comments_count"`
		Annotations []interface{} `json:"annotations"`
		// User is the author of the status.
		User struct {
			ID int `json:"id"`
			ScreenName string `json:"screen_name"`
			Name string `json:"name"`
			Province string `json:"province"`
			City string `json:"city"`
			Location string `json:"location"`
			Description string `json:"description"`
			URL string `json:"url"`
			ProfileImageURL string `json:"profile_image_url"`
			Domain string `json:"domain"`
			Gender string `json:"gender"`
			FollowersCount int `json:"followers_count"`
			FriendsCount int `json:"friends_count"`
			StatusesCount int `json:"statuses_count"`
			FavouritesCount int `json:"favourites_count"`
			CreatedAt string `json:"created_at"`
			Following bool `json:"following"`
			AllowAllActMsg bool `json:"allow_all_act_msg"`
			Remark string `json:"remark"`
			GeoEnabled bool `json:"geo_enabled"`
			Verified bool `json:"verified"`
			AllowAllComment bool `json:"allow_all_comment"`
			AvatarLarge string `json:"avatar_large"`
			VerifiedReason string `json:"verified_reason"`
			FollowMe bool `json:"follow_me"`
			OnlineStatus int `json:"online_status"`
			BiFollowersCount int `json:"bi_followers_count"`
		} `json:"user"`
	} `json:"statuses"`
	// Paging cursors for fetching adjacent pages.
	PreviousCursor int `json:"previous_cursor"`
	NextCursor int64 `json:"next_cursor"`
	TotalNumber int `json:"total_number"`
}
|
package lib
import "fmt"
// Log formats a log prefix containing the event type (sign), the
// acting user (addr, omitted when empty) and the operation message.
func Log(sign, addr, msg string) string {
	if addr != "" {
		return fmt.Sprintf("[类型 : %v 用户: %v 操作: %v] ", sign, addr, msg)
	}
	return fmt.Sprintf("[类型 : %v 操作: %v] ", sign, msg)
}
// Loger formats a log prefix with only the event type and the
// operation message (no user field).
func Loger(sign, msg string) string {
	prefix := fmt.Sprintf("[类型 : %v 操作: %v] ", sign, msg)
	return prefix
}
|
package main
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path"
"strings"
"testing"
)
// TestOpenLogFile covers the three OpenLogFile paths: failure on an
// empty filename, creating a fresh file, and appending to an
// existing one.
func TestOpenLogFile(t *testing.T) {
	// test file open failure
	f, lerr := OpenLogFile("")
	if f != nil {
		t.Error("shouldn't open an empty filename")
	}
	if lerr.Tag != "open-log-file" {
		t.Errorf("invalid error tag %q", lerr.Tag)
	}
	if lerr.Err == nil {
		t.Error("should have an error")
	}
	dir, err := ioutil.TempDir("", "log-file-test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir)
	// test creating a file
	n := path.Join(dir, "test.log")
	f, lerr = OpenLogFile(n)
	if lerr != nil {
		t.Fatal(lerr.Err)
	}
	if f == nil {
		t.Fatal("expected a file")
	}
	// BUG FIX: the write errors were previously ignored, which would
	// surface later as a confusing content mismatch.
	if _, err = f.WriteString("foo\n"); err != nil {
		t.Fatal(err)
	}
	f.Close()
	// test writing to an existing file (must append, not truncate)
	f, lerr = OpenLogFile(n)
	if lerr != nil {
		t.Fatal(lerr.Err)
	}
	if f == nil {
		t.Fatal("expected a file")
	}
	if _, err = f.WriteString("bar\n"); err != nil {
		t.Fatal(err)
	}
	f.Close()
	// verify that the file has the right stuff in it
	contents, err := ioutil.ReadFile(n)
	if err != nil {
		t.Fatal(err)
	}
	expected := "foo\nbar\n"
	if string(contents) != expected {
		t.Errorf("expected %q, got %q", expected, contents)
	}
}
// ErrorWriter is an io.Writer test double whose Write always fails.
type ErrorWriter struct{}

// Write discards its input and reports a synthetic error.
func (w ErrorWriter) Write(p []byte) (int, error) {
	return 0, fmt.Errorf("oh no")
}
// TestEncodeJSON checks both the error path (a writer that always
// fails must surface a "json-encoding" tagged error) and the exact
// serialized form of a fully populated LogEntry.
func TestEncodeJSON(t *testing.T) {
	// test JSON encoding errors
	l1 := EmailLogger{
		Writer: ErrorWriter{},
	}
	lerr := l1.EncodeJSON(LogEntry{})
	if lerr == nil {
		t.Fatal("expected an error")
	}
	if lerr.Tag != "json-encoding" {
		t.Fatalf("unexpected tag %q", lerr.Tag)
	}
	// test expected encoding
	e := LogEntry{
		Time: "2009-11-10T23:00:00Z",
		UserID: "123",
		Username: "foo",
		Arguments: []string{"yay", "asdf"},
		Body: "stuff",
	}
	// The encoder output is newline-terminated and field order must
	// match the LogEntry struct tags.
	expected := "{\"time\":\"2009-11-10T23:00:00Z\",\"uid\":\"123\",\"username\":\"foo\",\"arguments\":[\"yay\",\"asdf\"],\"body\":\"stuff\"}\n"
	b := bytes.Buffer{}
	l2 := EmailLogger{
		Writer: &b,
	}
	lerr = l2.EncodeJSON(e)
	if lerr != nil {
		t.Fatal(lerr.Err)
	}
	actual := b.String()
	if expected != actual {
		t.Fatalf("expected %q, got %q", expected, actual)
	}
}
// ErrorReader is an io.Reader test double whose Read always fails.
type ErrorReader struct{}

// Read consumes nothing and reports a synthetic error.
func (r ErrorReader) Read(p []byte) (int, error) {
	return 0, fmt.Errorf("oh no")
}
// ConstUsername is a deterministic stand-in for the user lookup,
// returning a fixed (uid, username) pair.
func ConstUsername() (string, string) {
	const (
		uid  = "123"
		name = "foobar"
	)
	return uid, name
}
// ConstTime is a deterministic stand-in for the clock, returning a
// fixed RFC 3339 timestamp.
func ConstTime() string {
	const stamp = "2009-11-10T23:00:00Z"
	return stamp
}
// TestPopulateEntry covers Populate's failure path (a Body reader
// that errors must yield a "stdin-failed" tag) and its success path
// (time, user, args and body are copied into the entry).
func TestPopulateEntry(t *testing.T) {
	// test stdin read failure
	l1 := EmailLogger{
		Body: ErrorReader{},
		User: ConstUsername,
		Time: ConstTime,
	}
	e1 := LogEntry{}
	lerr := l1.Populate(&e1)
	if lerr == nil {
		t.Fatal("expected an error")
	}
	if lerr.Tag != "stdin-failed" {
		t.Fatalf("unexpected tag %q", lerr.Tag)
	}
	// test entry population with deterministic user/time stubs
	l2 := EmailLogger{
		Body: strings.NewReader("hello"),
		User: ConstUsername,
		Time: ConstTime,
		Args: []string{"yay", "stuff"},
	}
	e2 := LogEntry{}
	lerr = l2.Populate(&e2)
	if lerr != nil {
		t.Fatal(lerr.Err)
	}
	if e2.Time != "2009-11-10T23:00:00Z" {
		t.Errorf("bad time %q", e2.Time)
	}
	if e2.UserID != "123" {
		t.Errorf("bad user ID %q", e2.UserID)
	}
	if e2.Username != "foobar" {
		t.Errorf("bad username %q", e2.Username)
	}
	if e2.Body != "hello" {
		t.Errorf("bad body %q", e2.Body)
	}
	if a := e2.Arguments; len(a) != 2 || a[0] != "yay" || a[1] != "stuff" {
		t.Errorf("bad arguments %v", e2.Arguments)
	}
}
// TestEmit exercises the full Emit pipeline: populate an entry from
// the configured sources and append it as one JSON line to LogPath.
func TestEmit(t *testing.T) {
	dir, err := ioutil.TempDir("", "emit-test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir)
	n := path.Join(dir, "test.log.json")
	l := EmailLogger{
		LogPath: n,
		Args:    []string{"fro", "bozz"},
		Body:    strings.NewReader("hello\nworld\n"),
		User:    ConstUsername,
		Time:    ConstTime,
	}
	lerr := l.Emit()
	if lerr != nil {
		// BUG FIX: this previously reported `err` (nil at this point)
		// instead of the actual failure in lerr.Err.
		t.Fatal(lerr.Err)
	}
	b, err := ioutil.ReadFile(n)
	if err != nil {
		t.Fatal(err)
	}
	expected := "{\"time\":\"2009-11-10T23:00:00Z\",\"uid\":\"123\",\"username\":\"foobar\",\"arguments\":[\"fro\",\"bozz\"],\"body\":\"hello\\nworld\\n\"}\n"
	if string(b) != expected {
		t.Fatalf("got %q, expected %q", b, expected)
	}
}
|
package frame
// newFloatBottomPolicy builds a float-bottom positioning policy bound
// to the given frame.
func newFloatBottomPolicy(frame *Frame) *floatBottomPolicy {
	policy := &floatBottomPolicy{Frame: frame}
	return policy
}
// proactive action!
// note: most frame objects don't exist, make changes based on the frame config
// onInit computes the frame's total height from its config (content
// lines plus optional header and footer rows) and positions topRow so
// the frame sits flush with the bottom of the terminal.
func (policy *floatBottomPolicy) onInit() {
	height := policy.Frame.config.Lines
	if policy.Frame.config.HasHeader {
		height++
	}
	if policy.Frame.config.HasFooter {
		height++
	}
	// the screen index starts at 1 (not 0), hence the +1
	policy.Frame.topRow = (terminalHeight - height) + 1
}
// reactive action!
// onResize keeps the frame glued to the bottom of the screen when it
// grows or shrinks by `adjustment` rows.
func (policy *floatBottomPolicy) onResize(adjustment int) {
	switch {
	case adjustment > 0:
		// Grow: shift the whole frame up one line per added row and
		// claim the extra screen space, so appended rows appear to
		// move upwards.
		policy.Frame.move(-adjustment)
		policy.Frame.rowAdvancements += adjustment
	case adjustment < 0:
		// Shrink: slide the frame down to stay flush with the bottom.
		policy.Frame.move(adjustment)
	}
}
// reactive policy!
// onTrail advances the frame by one row when a line is trailed off
// the top (the trailed line itself is written to the trail log
// elsewhere).
func (policy *floatBottomPolicy) onTrail() {
	policy.Frame.rowAdvancements++
}
// reactive action!
// update any positions based on external data and redraw
// func (policy *floatBottomPolicy) onUpdate() {
// height := policy.Frame.Height()
// targetFrameStartRow := (terminalHeight - height) + 1
// if policy.Frame.topRow != targetFrameStartRow {
// // reset the policy and all activeLines to the correct offset. This must be done with new
// // lines since we should not overwrite the trail rows above the policy.
// policy.Frame.rowAdvancements += policy.Frame.topRow - targetFrameStartRow
// }
// }
// proactive policy!
// func (policy *floatBottomPolicy) onClose() {
// // allow new real estate to be created for the cursor to be placed after the frame at the bottom of the screen
// // policy.frame.rowAdvancements += 1
// // advanceScreen(1)
//
// // no no: it is possible for a bottom frame to exist without the cursor at the bottom of the screen
// // do nothing!
// }
// proactive action!
// allowedMotion reports how many rows the frame may move: a
// bottom-floating frame never moves on its own, so this is always 0
// (the rows argument is ignored).
func (policy *floatBottomPolicy) allowedMotion(rows int) int {
	return 0
}

// allowTrail reports that this policy permits lines to be trailed off
// the top of the frame.
func (policy *floatBottomPolicy) allowTrail() bool {
	return true
}
|
package main
import "fmt"
// main prints a greeting to stdout.
func main() {
	greeting := "hello world"
	fmt.Println(greeting)
}
// for loops
// func printSomething() {
// for i := 0; i < 10; i++ {
// fmt.Printf("this is number %d\n", i)
// }
// }
// go built in data types
// ** numbers like 32, 64, etc. represent bits
// integer types: uint8, uint16, uint32, uint64, int8, int16, int32, int64 (int type mostly used)
// float types: float32 float64
// complex number types: complex128, complex64 (generally float64 most commonly used)
// declaring variables
// go knows basic types so it automatically infers them without manually declaring type
var x string = "hello world"
var y = "hello world"
z := "hello world"
var w string
w = "hi there"
// constant variables unmutable after they have been made
const a string = "hello world"
// shortcut to defin multiple variables at once
var (
c = 5
b = 20
)
const (
d = 30
something = "hi"
)
// for loops in go
i := 1
for i <= 10 {
fmt.Println(i)
i += 1
}
for i := 1; i <= 10; i++ {
fmt.Println(i)
}
// simple if else if else
if i%2 == 0 {
	// even
} else if i%2 != 0 {
	// odd
	// BUG FIX: the original read `else if != 0`, which is missing its
	// left operand and does not compile.
} else {
	// unreachable for integers; kept to mirror the tutorial's shape
}
// arrays in go
// fixed length with only one type elements
var somearray [5]int
somearray[4] = 100
fmt.Println(somearray)
// prints [0 0 0 0 100]
// average returns the mean of a fixed demo array ([20 0 0 32 0]).
// Both the summed elements and the length must be converted to
// float64 before the arithmetic.
func average() float64 {
	var z [5]int
	z[0] = 20
	z[3] = 32
	var total float64 = 0
	for i := 0; i < len(z); i++ {
		// BUG FIX: z[i] is an int and cannot be added to a float64
		// accumulator without an explicit conversion.
		total += float64(z[i])
	}
	return total / float64(len(z))
}
// another way of using for loop, similar to a forEach in Node.js
var z [5]int
z[0] = 20
z[3] = 32
// below will give error warning because we decalare the variable index but we dont use it
// for index, value := range z {
// fmt.Println(value)
// // will print each value
// }
// so....
for _, value := range z {
fmt.Println(value)
}
// THE UNDERSCORE IN THIS CASE WILL
// ALLOW YOU TO HAVE AN UNDECLARED VARIABLE IN PLACES YOU NEED TO PUT IT
// go short syntax for creating arrays
somearray := [5]float64{98, 88, 33, 55, 63}
// you can put it on different lines as such:
secondarray := [3]float64{
33,
21,
15,
}
//////////////!!!!!!!!! last railing comma needed when doing multi line array declaration
// since array is fixed length, removing elements becomes mendou cuz u gotta change size of array too
// SO WE USE SLICES
// which is a type built on an array
var someSplice []float64
otherSlice := make([]float64, 5)
// this is a slice with an associated array of length 5 (it cant be longer but can always be shorter in length)
// the makes function also takes in a 3rd parameter
evenMoreSlice := make([]float64, 5, 10)
// another way to create slices is to use [low : high] expression
arr := [5]float64{1, 2, 3, 4, 5}
aS := arr[0:5]
// aS is a slice created from the arr array
bS := arr[0:]
cS := arr[:5]
dS := arr[:]
lS := arr[2:4]
// 2 built in functions to help with slices
// 1- append
slice1 := []int{1,2,3}
slice2 := append(slice1, 4, 5)
// slice1 == [1, 2, 3]
// slice2 == [1, 2, 3, 4, 5]
// 2- copy
slice3 := []int{1, 2, 3}
slice4 := make([]int, 2)
copy(slice4, slice3)
// slice3 == [1, 2, 3]
// slice4 == [1, 2]
// it copied slice3 elements into slice4 however since slice4 only has length 2
// only first two elements were coppied
// MAPS == JS objects ---> key value pair
// maps have no fixed length
// maps are not sequential meaning they have no order
var map1 map[string]int
// map1 is a map with string keys and int values
// settiing key value pair
map1["first key"] = 22
// above wont work because the map was not INITIALIZED
// all maps have to be initialized
map2 := make(map[string]int)
map2["some key"] = 99
// built in map functions
// delete
delete(map2, "some key")
name, ok := map2["some key"]
// name returns the value of the specified key
// ok returns if there was a value associated to the key (boolean)
// typically used like this:
if name, ok := map2["some key"]; ok {
fmt.Println(name + " was found!")
}
// short way of creating maps
randomMap := map[string]string{
"name": "shinno",
"nickname": "snoop dogg",
}
// AGAIN WITH THE LAST TRAILING COMMA
// map of maps
peopleMap := map[string]map[string]int{
"shinno": map[string]int{
"age": 25,
"height": 17999999,
},
"doug": map[string]int{
"age": 25,
"height": 17999999,
},
}
// functions
// averageSecond returns the arithmetic mean of someSlice (an empty
// slice yields 0/0, i.e. NaN, matching the original behavior).
func averageSecond(someSlice []float64) float64 {
	var total float64
	for _, v := range someSlice {
		total += v
	}
	return total / float64(len(someSlice))
}
// in go we can name return types
// f2 demonstrates a named result parameter with a naked return; it
// always returns 1.
func f2() (r int) {
	r = 1
	return
}
// WE CAN RETURN MULTIPLE VALUES IN GO!!!!
// foo returns the constant pair (5, 6), demonstrating multiple return
// values.
func foo() (int, int) {
	first, second := 5, 6
	return first, second
}
// this way you can get two values when you call the function later in main like this
// func main() {
firsVal, secondVal := foo()
// }
// in go return multiple values is used in return the actual value and error or boolean to indicate success
// variadic functions, having one parameter but passing multiple arguments
// add sums its variadic int arguments; calling add() returns 0.
func add(args ...int) int {
	sum := 0
	for i := 0; i < len(args); i++ {
		sum += args[i]
	}
	return sum
}
// so when you call add() you can do
add(1, 2, 3) // passing in more than one argument
// we can even pass slice so long that we trail it with elipses
randomSl := []int{1, 2, 3}
add(randomSl...)
// functions as variables like arrow functions!!!!!!!!!! (also function within functions)
func main() {
add := func(x, y int) int {
return x + y
}
fmt.Println(add(2, 5))
}
// HIGHER ORDER FUNCTIONNSSSSSS func that returns a func
// makeEvenGenerator returns a closure that yields the even numbers
// 0, 2, 4, ... on successive calls.
func makeEvenGenerator() func() uint {
	i := uint(0)
	return func() uint {
		// BUG FIX: the original `ret = i` assigned to an undeclared
		// variable and did not compile; it needs `:=`.
		ret := i
		i += 2
		return ret
	}
}
// RECURSIONNNNNNNNNNNN FUNCTION CALLING ITSELF
// factorial returns x! with factorial(0) == 1 (computed iteratively;
// behavior is identical to the recursive form).
func factorial(x uint) uint {
	result := uint(1)
	for ; x > 0; x-- {
		result *= x
	}
	return result
}
// special statements in GO for functions
// defer - schedules a function call to be run after the fucntion compiles
func first() {
fmt.Println("first")
}
func second() {
fmt.Println("second")
}
func main() {
defer second()
first()
}
// will print first then second even though second is called first because it is being deferred meaning it is being told to wait until function compiles
// mostly used in situatioins where resources need to be freed in some way
f, _ := os.Open(filename)
defer f.Close()
// PANIC AND RECOVER - recovering from a runtime error
// *** small not ::: u can directly call (only nameless funcs???????) function while defining it by adding () at the end or (with args here)
func() {
// do something
}() // calls it here
// say we have a func that calls the builtin panic func that will throw some runtime error
// if it is at the top of the call stack, once its been compiled it will throw error and so rest of code wont run
// how can we stop this from happening so that even if first on callstack has error, we still recover and run rest of code?
// USING DEFER AND RECOVER
func main() {
defer func() {
str := recover()
fmt.Println(str)
}()
panic("PANIC")
}
// POINTERS
// consider the examples below
// zero receives a copy of x, so the assignment has no effect on the
// caller's variable.
func zero(x int) {
	x = 0
}
func main() {
	x := 5
	zero(x)
	// BUG FIX: the original called fmt.Println() with no argument and
	// printed a blank line; the note below ("will still print 5")
	// only makes sense when x is printed.
	fmt.Println(x)
}
// main function will still print 5 here because it copies the variable argument within the function and so it doesnt mutate the original argument
// we can modify original argument if we want to by using special data type called pointers
func zero(xPtr *int) {
*xPtr = 0
}
func main() {
x := 5
zero(&x)
fmt.Println(x)
}
// * will point to a memory location specified and access or write something in that memory space
// & is to find the memory space of a variable (it returns a pointer to int)
// new way of doing it is using new() built in function
func one(xPtr *int) {
*xPtr = 1
}
func main() {
xPtr := new(int)
one(xPtr)
fmt.Println(*xPtr) // x is 1
}
// new() will take type as arg and allocates enough memory to fit a value of that type, and returns pointers to it
// in GO new() and & is the same unlike other languages
// it is a garbage collecting language so memory is automatically cleaned up when not in use
// ---------------------------------
// STRUCTS and INTERFACES
// structs similar to classes
type Circle struct {
x float64
y float64
r float64
}
// short hand:
type Circle struct {
x, y, r float64
}
// creating instances of new Circle type:
var c Circle
// a more uncommon way below
c := new(Circle)
// how to set initial values and not just empty instances?
c := Circle{x: 0, y: 0, r: 5}
// if we know the order the fields were defined we can do
c := Circle{0, 0, 5}
// if you want a pointer to the struct use &
c := &Circle{0, 0, 5}
// we can access fields of instances using . notation
fmt.Println(c.r)
c.x = 10
// etc.
// if we want to modify fields of instances of Circle, obvie gotta use pointer
func circleArea(c *Circle) float64 {
return math.Pi * c.r*c.r
}
func main() {
c := Circle{0,0,5}
fmt.Println(circleArea(&c))
}
// Methods - special type of functions called methods
// instance methods for the structs you made!!!!!!
// before the name of the function, we specify which struct to attach the function to
func (circleInstance *Circle) area() float64 {
return math.Pi * circleInstance.r*circleInstance.r
}
// now we can call it using . notation on the instance of the struct
fmt.Println(c.area())
// embedded types
type Person struct {
Name string
}
func (p *Person) Talk() {
fmt.Println("Hi my name is ", p.Name)
}
// type Employee struct {
// Person Person
// Position string
// }
// but this says employee HAS a person not IS a person
// so instead we can do
type Employee struct {
Person
Position string
}
// BUG FIX: the original built a Person{Position: ...}, but Person has
// no Position field — the embedded-struct example requires an
// Employee, which both embeds Person and declares Position.
randomEmployee := Employee{Position: "software engineer"}
randomEmployee.Person.Talk()
randomEmployee.Talk()
// Interface
// area() is common to all shapes fot ex. so we make parent method interface to apply to all shapes instead of defining it for each
type Shape interface {
area() float64
}
func totalArea(shapes ...Shape) float64 {
var area float64
for _, s := range shapes {
area += s.area()
}
return area
}
// if we have c Circle and r Radius
fmt.Println(totalArea(&c, &r))
// all that the totalArea knows is that there is an area() method for each Shape
// so it can only access area() of each shape struct such as Circle or Rectangle
// it CANNOT access anything else like the radius for example
// interfaces can also be used as fields
type MultiShape struct {
shapes []Shape
}
multiShapeOne := MultiShape{
shapes: []Shape{
Circle{0,0,5},
Rectangle{0,0,10,10},
},
}
// interfaces become useful as the program develops
// allows us to hide incidental details of implementation
// in our example, as long as the area() method is defined in all shape structs like Circle or Rectangle
// we are free to do whatever with other fields without having to modify the interface method
// ----------------------------
// PACKAGES
// packages include a variety of reusable functions
// reduces change of overlappnig names, in turn keeping function names short
// organizes code so easy to find what u want to use
// speeds up compiler by only requiring recompilation of smaller chunks of a program
// core packages:
import (
"fmt"
"strings"
"io"
"bytes"
"os"
"io/ioutil"
)
// main demonstrates the "strings", "bytes", and "io" standard library packages.
func main() {
	// string manipulation package "strings"
	fmt.Println(strings.Contains("test", "es")) // true
	fmt.Println(strings.Count("test", "t")) // 2
	fmt.Println(strings.HasPrefix("test", "te")) // true
	fmt.Println(strings.HasSuffix("test", "st")) // true
	fmt.Println(strings.Index("test", "e")) // 1
	fmt.Println(strings.Join([]string{"a", "b"}, "-")) // "a-b"
	fmt.Println(strings.Repeat("a", 5)) // "aaaaa"
	fmt.Println(strings.Replace("aaaa", "a", "b", 2)) // "bbaa" (pass -1 to replace every occurrence)
	fmt.Println(strings.Split("a-b-c", "-")) // []string{"a", "b", "c"}
	fmt.Println(strings.ToLower("TEST")) // "test"
	fmt.Println(strings.ToUpper("test")) // "TEST"
	// work with strings as binary data: string to slice of bytes and vice versa
	// NOTE(review): arr and str are never used below — that is a compile error in real Go.
	arr:= []byte("test")
	str := string([]byte{'t', 'e', 's', 't'})
	// input output package "io"
	// consists of few functions but mostly interfaces used in other packages
	// most notably the Reader and Writer interfaces
	// to read or write to []byte or string, use Buffer struct in bytes package
	var buf bytes.Buffer
	buf.Write([]byte("test"))
	// you can convert buf into []byte by calling buf.Bytes()
	// if you only need to read from string use strings.NewReader --> more efficient than Buffer
}
// filesAndFolder opens test.txt, reads its full contents into memory, and
// prints them, demonstrating the "os" package file API.
func filesAndFolder() {
	// files and folders package "os"
	// here is code to open contents of file and print on terminal
	file, err := os.Open("test.txt")
	if err != nil {
		fmt.Println("sorry there was an error")
		return
	}
	defer file.Close() // defer runs when this function returns, guaranteeing the file is closed
	// get the file size
	stat, err := file.Stat()
	if err != nil {
		fmt.Println("error again sorry")
		return
	}
	// read the file into a buffer sized to the whole file
	bs := make([]byte, stat.Size())
	_, err = file.Read(bs)
	if err != nil {
		fmt.Println("lol another error")
		return
	}
	str := string(bs)
	fmt.Println(str)
}
// since reading files is common, theres another shorter way to do the above
// shorterReadFile reads test.txt in a single ioutil.ReadFile call and prints it.
func shorterReadFile() {
	contents, err := ioutil.ReadFile("test.txt")
	if err != nil {
		fmt.Println("error dangerrrr")
		return
	}
	fmt.Println(string(contents))
}
// creating a file
// writeFile creates (or truncates) test.txt and writes the string "test" into it.
func writeFile() {
	file, err := os.Create("test.txt")
	if err != nil {
		fmt.Println("omg error")
		return
	}
	defer file.Close()
	// report write failures instead of silently discarding the error
	if _, err := file.WriteString("test"); err != nil {
		fmt.Println("omg error")
	}
}
// open contents of directory
// openDirectory lists the names of all entries in the current directory.
// Errors are deliberately ignored (tutorial code).
func openDirectory() {
	dir, err := os.Open(".")
	if err != nil {
		return
	}
	defer dir.Close()
	// Readdir takes one argument, an int that limits the number of files returned;
	// passing -1 returns ALL the files
	fileInfos, err := dir.Readdir(-1)
	if err != nil {
		return
	}
	for _, fi := range fileInfos {
		fmt.Println(fi.Name())
	}
}
// sometimes we want to recursively crawl (walk) a folder
// read a folder, its files, subfolders, then subfolders of those folders, etc. etc.
// to do this, there is a path/filepath package
import {
"fmt"
"os"
"path/filepath"
}
// readAllFoldersAndSubfolders walks the tree rooted at the current directory,
// printing every file and folder path it visits.
func readAllFoldersAndSubfolders() {
	filepath.Walk(".", func(path string, info os.FileInfo, err error) error {
		// propagate walk errors instead of silently ignoring the err parameter
		if err != nil {
			return err
		}
		fmt.Println(path)
		return nil
	})
}
// the function passed to filepath.Walk() will be called for every file and folder in the root folder
// filepath.SkipDir ?????
// Error type and creating custom errors
import "errors"
// main shows how to build a custom error value with errors.New.
func main() {
	err := errors.New("custom error message")
	// the original left err unused, which does not compile in Go
	fmt.Println(err)
}
// CONTAINERS AND SORT
// container/list package implements a doubly linked list
// each node of the list contains a value and a pointer to the next node
// since doubly linked list, each node will also have pointers to the previous node
// we can create a doubly linked list as such:
import (
"fmt"
"container/list"
)
// main builds a doubly linked list of three ints and prints each element
// from front to back.
func main() {
	var lst list.List
	for i := 1; i <= 3; i++ {
		lst.PushBack(i)
	}
	for node := lst.Front(); node != nil; node = node.Next() {
		fmt.Println(node.Value.(int))
	}
}
// we print every value of each node by iterating starting from the first node in the linked list
// SORT!!!!!!
// sort package contains functions for sorting arbitrary data
// predefined sorts for slices
import (
"fmt"
"sort"
)
// Person is a sample record used to demonstrate the sort package.
type Person struct {
	Name string
	Age int
}
// ByName implements sort.Interface to order a []Person alphabetically by Name.
type ByName []Person

// Len returns the number of people in the slice.
func (ps ByName) Len() int {
	return len(ps)
}

// Less reports whether the person at i sorts before the person at j by name.
func (ps ByName) Less(i, j int) bool {
	return ps[i].Name < ps[j].Name
}

// Swap exchanges the elements at i and j.
func (ps ByName) Swap(i, j int) {
	ps[i], ps[j] = ps[j], ps[i]
}
// main sorts a slice of Person values by name and prints the result.
func main() {
	kids := []Person{
		{Name: "Jill", Age: 9},
		{Name: "Jack", Age: 10},
	}
	sort.Sort(ByName(kids))
	fmt.Println(kids)
}
// Len, Less, and Swap functions are necessary for sort algorithms
// HASHES AND CRYPTOGRAPHY
// takes set of data and reduces it to a smaller fixed size
// two types: cryptographic and non-cryptographic
// non-cryptographic found under hash package
// adler32, crc32, crc64, and fnv
import (
"fmt"
"hash/crc32"
)
// main computes and prints the CRC-32 (IEEE) checksum of "test".
func main() {
	// create a hasher, feed it our data, then read back the checksum
	hasher := crc32.NewIEEE()
	hasher.Write([]byte("test"))
	fmt.Println(hasher.Sum32())
}
// cryptographic hashing is the same but made almost irreversible
// so that one cannot reproduce the hash
import (
"fmt"
"crypto/sha1"
)
// main computes and prints the SHA-1 digest (20 bytes) of "test".
func main() {
	digest := sha1.New()
	digest.Write([]byte("test"))
	fmt.Println(digest.Sum(nil))
}
// same as non-cryptographic hash except crc32 computes a 32-bit hash, sha1 computes a 160-bit hash
// no native type to represent a 160-bit number, so we use slice of 20bytes instead
// SERVERS
// TCP - primary protocol for communication over the internet
import (
"encoding/gob"
"fmt"
"net"
)
func server() {
// listen on a port
ln, err := net.Listen("tcp", ":9999")
if err != nil {
dmt.Println(err)
return
}
for {
// accept a connection
c, err := ln.Accept()
if err != nil {
fmt.Println(err)
continue
}
// handle the connection (function defined below)
go handleServerConnection(c)
}
}
// handleServerConnection decodes one gob-encoded string from the connection,
// reports the outcome, and closes the connection.
func handleServerConnection(c net.Conn) {
	defer c.Close()
	var msg string
	if err := gob.NewDecoder(c).Decode(&msg); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("Received", msg)
}
// client dials the local server on port 9999 and sends a single
// gob-encoded string message.
func client() {
	conn, err := net.Dial("tcp", "127.0.0.1:9999")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer conn.Close()
	msg := "Hello, World"
	fmt.Println("Sending", msg)
	if err := gob.NewEncoder(conn).Encode(msg); err != nil {
		fmt.Println(err)
	}
}
// main starts the server and client concurrently, then blocks on Scanln so
// the process does not exit before the goroutines have had a chance to run.
func main() {
	go server()
	go client()
	var input string
	fmt.Scanln(&input)
}
// this example uses the encoding/gob package which makes it easy to encode Go values so that other Go programs (or the same program) can read them
// HTTP SERVERS!!!
// http servers are even easier to set up and use:
import (
"net/http"
"io"
)
// hello serves a minimal HTML page for the /hello route.
func hello(res http.ResponseWriter, req *http.Request) {
	res.Header().Set(
		"Content-Type",
		"text/html",
	)
	// the doctype declaration requires the leading "!" — the original
	// emitted the invalid `<DOCTYPE html>`
	io.WriteString(
		res,
		`<!DOCTYPE html>
<html>
<head>
<title>Hello, World</title>
</head>
<body>
Hello world!
</body>
</html>`,
	)
}
// main registers the /hello handler and serves HTTP on port 9000.
func main() {
	http.HandleFunc("/hello", hello)
	http.ListenAndServe(":9000", nil)
}
// HandleFunc handles a URL route (/hello) by calling the given function
// we can also handle static files by using FileServer
http.Handle(
"/assets/",
http.StripPrefix(
"/assets/",
http.FileServer(http.Dir("assets")),
),
)
// net/rpc (remote procedure call) net/rpc/jsonrpc packages
// provide an easy way to expose methods so they can be invoked over a network
// rather than just in the program running them
// ------ im guessing this is similar to meteor methods??? where u can call backend function in the client
// PARSING COMMAND LINE ARGUMENTS
// example program which generates a number between 0 and 6
// we can change max value by sending (-mx=100) flag
import (
"fmt"
"flag"
"math/rand"
)
// main reads an optional -max flag (default 6) and prints a random number
// in the half-open range [0, max).
func main() {
	// define flags
	max := flag.Int("max", 6, "the max value")
	// parse the command line
	flag.Parse()
	// generate a number between 0 and max
	fmt.Println(rand.Intn(*max))
}
// any additional non-flag arguments can be retrieved with flag.Args() which returns a []string
// CREATING OUR OWN PACKAGES!!
// say we want to make a package called math
// inside some-directory create main.go (some-directory/main.go)
package main
import "fmt"
import "some-directory/math"
// main computes the average of a fixed slice using our custom math package
// (some-directory/math, not the standard library's math).
func main() {
	xs := []float64{1, 2, 3, 4}
	avg := math.Average(xs)
	fmt.Println(avg)
}
// to use the package called math (not the built in package but our custom math package)
// we must create a file called math.go inside math directory (some-directory/math/math.go)
// in this file we will name our package and define function we want to export
package math
// Average returns the arithmetic mean of xs.
// Note: an empty slice yields 0/0, i.e. NaN.
func Average(xs []float64) float64 {
	var sum float64
	for _, v := range xs {
		sum += v
	}
	return sum / float64(len(xs))
}
// note that exported functions must be CAPITAL LETTER
// you can also make aliases when importing
import m "some-directory/math" // this way you can use m.Average instead of math.Average
// finally, package names must be the same name as its directory name although there are workarounds to this
// DOCUMENTATION
// go allows us to generate documentation for our packages really easily
// if we typ:
godoc some-directory/math Average
// into our terminal, we should see:
func Average(xs []float64) float64
// displayed on our terminal
// however, by adding a comment above our function giving details about the function
// we can have this displayed instead:
func Average(xs []float64) float64
Finds the average of a series of numbers
// this doc is also available in web form by running this command:
godoc -http=":6060"
// and entering this url into your browser
http://localhost:6060/pkg/
//TESTING!!!
// if we wanted to run tests for our custom math package
// we must include file called math_test.go
// go knows that files which end in _test.go will be test files
// in math_test.go
package math // must name package same as the package we are testing
import "testing"
// must name test func starting with Test (capiitalized always)
// TestAverage checks that Average computes the mean of a two-element slice.
func TestAverage(t *testing.T) {
	got := Average([]float64{1, 2})
	if got != 1.5 {
		t.Error("expected 1.5, got ", got)
	}
}
// to run tests run this command in the same dir
go test
// view p.84 for more!!!
// GO ROUTINES
// running synchronous code concurrently
// f prints its label alongside a counter, three times.
func f(from string) {
	for n := 0; n < 3; n++ {
		fmt.Println(from, ":", n)
	}
}
// main contrasts a direct (synchronous) call with goroutine invocations.
// NOTE(review): main may exit before the goroutines finish, so the sample
// output below is not guaranteed.
func main() {
	f("direct")
	// notice the go keywords below — these run concurrently with main
	go f("goroutine")
	go func(msg string) {
		fmt.Println(msg)
	}("helloooo")
}
// this will print to the console:
// direct:0
// direct:1
// direct:2
// goroutine:0
// heloooo
// goroutine:1
// goroutine:2
// Channels are the pipes that connect concurrent goroutines. You can send values into channels from one goroutine and receive those values into another goroutine.
// main demonstrates sending and receiving one value over an unbuffered channel.
func main() {
	// make a new channel
	messages := make(chan string)
	// a goroutine sends into the channel with the `channel <- value` syntax
	go func() {
		messages <- "ping"
	}()
	// receive from the channel with the `<-channel` syntax
	received := <-messages
	fmt.Println(received)
}
// https://gobyexample.com/channels
|
/*
Example:

	package main

	import (
		"context"
		"fmt"
		"time"

		"github.com/5xxxx/pie"
	)

	func main() {
		t, err := pie.NewClient("demo")
		if err != nil {
			panic(err)
		}
		t.SetURI("mongodb://127.0.0.1:27017")
		err = t.Connect(context.Background())
		if err != nil {
			panic(err)
		}
		var user User
		err = t.Filter("nickName", "淳朴的润土").FindOne(&user)
		if err != nil {
			panic(err)
		}
		fmt.Println(user)
	}
*/
package pie
import (
"github.com/5xxxx/pie/driver"
"github.com/5xxxx/pie/internal"
"go.mongodb.org/mongo-driver/mongo/options"
)
// NewClient creates a pie client bound to database db, forwarding any
// mongo-driver client options to the underlying implementation.
func NewClient(db string, options ...*options.ClientOptions) (driver.Client, error) {
	return internal.NewClient(db, options...)
}
// NewCondition returns the package's default query condition builder.
func NewCondition() driver.Condition {
	return internal.DefaultCondition()
}
|
package main
import "testing"
// TestPermutation exercises permutation with table-driven cases covering
// ASCII, multi-byte runes, and negative examples.
func TestPermutation(t *testing.T) {
	cases := []struct {
		a, b string
		want bool
	}{
		{"the quick brown fox", "het uqkic wborn ofx", true},
		{"ぁあぃぎじ", "じぎぃあぁ", true},
		{"()[]", "][)(!", false},
		{"ぁあぃぎじ", "じぎぃあぁ!", false},
	}
	for _, tc := range cases {
		if got := permutation(tc.a, tc.b); got != tc.want {
			t.Errorf("permutation(%q, %q) == %t, want %t", tc.a, tc.b, got, tc.want)
		}
	}
}
|
// +build unit
package http
import (
"bytes"
"errors"
"github.com/gin-gonic/gin"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"io/ioutil"
"kz.nitec.digidocs.pcr/internal/models"
"kz.nitec.digidocs.pcr/internal/service"
"net/http"
"net/http/httptest"
"testing"
)
// TestPong verifies that GET /ping is routed to Pong and answers 200 "pong".
func TestPong(t *testing.T) {
	w := httptest.NewRecorder()
	r := gin.Default()
	r.GET("/ping", Pong)
	req, _ := http.NewRequest("GET", "/ping", nil)
	r.ServeHTTP(w, req)
	assert := assert.New(t)
	// testify's Equal takes (expected, actual); keeping that order makes
	// failure messages read correctly
	assert.Equal(http.StatusOK, w.Code)
	p, err := ioutil.ReadAll(w.Body)
	if err != nil {
		assert.Fail("Response Body wrong")
	}
	assert.Equal("pong", string(p))
}
// TestHandler_TaskManager posts a task payload with an empty documentType
// code and expects the handler to answer HTTP 400.
func TestHandler_TaskManager(t *testing.T) {
	services := &service.Services{
		&MockPersonPhotoService{},
	}
	h := NewHandler(services)
	w := httptest.NewRecorder()
	c, _ := gin.CreateTestContext(w)
	body := bytes.NewBuffer([]byte("{\"iin\":\"950110350172\",\n\"services\": {\n\"PCR_CERTIFICATE\":\n{\"code\":\"PCR_CERTIFICATE\",\n\"serviceId\":\"CovidResult\",\n\"url\": \"http://localhost:8095/pcr-cert\"}\n},\n\"documentType\": {\n\"code\": \"\",\n\"nameRu\": \"nameRu\",\n\"nameKk\": \"nameKk\"}}\n"))
	c.Request, _ = http.NewRequest("POST", "/", body)
	h.TaskManager(c)
	// the original also called assert.New(t) earlier and discarded the
	// result — removed as dead code
	assert := assert.New(t)
	assert.Equal(400, w.Code) // expected first, then actual
}
// MockPersonPhotoService is a testify mock of the person-photo SOAP service
// used by the handler tests above.
type MockPersonPhotoService struct {
	mock.Mock
}
// GetBySoap replays whatever return values the test registered for this
// call via m.On("GetBySoap", ...).
//
// NOTE(review): the response is read from args.Get(1) — the same index
// used for args.Error(1). The conventional testify pattern is response at
// Get(0) and error at Error(1); as written, when Get(1) holds a
// *SoapResponse, args.Error(1) will not find an error there. Confirm the
// .Return(...) registrations elsewhere before changing.
func (m *MockPersonPhotoService) GetBySoap(request *models.SoapRequest) (*models.SoapResponse, error) {
	args := m.Called(request)
	var response *models.SoapResponse
	switch args.Get(1).(type) {
	case *models.SoapResponse:
		response = args.Get(1).(*models.SoapResponse)
	default:
		return nil, errors.New("invalid type format")
	}
	return response, args.Error(1)
}
// NewSoapRequest replays whatever return values the test registered for
// this call via m.On("NewSoapRequest", ...).
//
// NOTE(review): same index question as GetBySoap — the response is read
// from args.Get(1) rather than the conventional Get(0).
func (m *MockPersonPhotoService) NewSoapRequest(request *models.DocumentRequest) (*models.SoapRequest, error) {
	args := m.Called(request)
	var response *models.SoapRequest
	switch args.Get(1).(type) {
	case *models.SoapRequest:
		response = args.Get(1).(*models.SoapRequest)
	default:
		return nil, errors.New("invalid type format")
	}
	return response, args.Error(1)
}
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"strings"
"text/tabwriter"
)
// main loads a Skype "messages.json" export and, for every conversation
// whose display name contains the package-level `secret` substring, prints
// one tab-aligned line per "Event/Call" message followed by a call count.
func main() {
	raw, errRead := ioutil.ReadFile("messages.json")
	if errRead != nil {
		panic(errRead)
	}
	var skypeExport SkypeExport
	if errUm := json.Unmarshal(raw, &skypeExport); errUm != nil {
		panic(errUm)
	}
	fmt.Printf("Conversations: %d\n\n", len(skypeExport.Conversations))
	// two tab-aligned columns: sender and timestamp
	tw := tabwriter.NewWriter(os.Stdout, 1, 2, 2, ' ', tabwriter.TabIndent)
	fmt.Fprintln(tw, "From\tOriginalArrivalTime")
	callSum := 0
	for _, conversation := range skypeExport.Conversations {
		// `secret` is declared elsewhere in this package — presumably a
		// lower-cased contact-name fragment; confirm against its definition
		if strings.Contains(strings.ToLower(conversation.DisplayName), secret) {
			for _, message := range conversation.MessageList {
				if message.MessageType == "Event/Call" {
					fmt.Fprintf(tw, "%s\t%s\n", message.From, message.OriginalArrivalTime)
					callSum++
				}
			}
		}
	}
	tw.Flush()
	fmt.Printf("\nCall count: %d\n", callSum)
}
|
//go:build release
package main
// init marks the binary as a release build; this file is compiled only
// when the "release" build tag is set (see the constraint above).
func init() {
	globals.ReleaseMode = ReleaseModeRelease
}
|
package glsa
import (
"glsamaker/pkg/app/handler/authentication"
"glsamaker/pkg/app/handler/authentication/utils"
"glsamaker/pkg/database/connection"
"glsamaker/pkg/logger"
"glsamaker/pkg/models"
"glsamaker/pkg/models/bugzilla"
"net/http"
"strconv"
)
// UpdateBugs triggers an asynchronous refresh of all bug data from Bugzilla.
// It requires the Glsa.UpdateBugs permission; the refresh runs in a
// background goroutine while the user is redirected to the landing page.
// (The previous doc comment described an unrelated "Show" handler.)
func UpdateBugs(w http.ResponseWriter, r *http.Request) {
	user := utils.GetAuthenticatedUser(r)
	if !user.Permissions.Glsa.UpdateBugs {
		authentication.AccessDenied(w, r)
		return
	}
	go bugUpdate()
	// Named constant instead of the magic 301.
	// NOTE(review): 301 is cacheable by browsers; a 302/303 is usually
	// preferred after an action endpoint — confirm before changing behavior.
	http.Redirect(w, r, "/", http.StatusMovedPermanently)
}
// bugUpdate refreshes every stored Bugzilla bug: it loads all bug rows from
// the database, queries Bugzilla in batches of at most 100 ids, and writes
// the refreshed records back. Runs in the background (see UpdateBugs).
func bugUpdate() {
	var allBugs []*bugzilla.Bug
	connection.DB.Model(&allBugs).Select()
	// split the ids into sub-lists capped at 100 entries each
	var bugIdsLists [][]string
	bugIdsLists = append(bugIdsLists, []string{})
	for _, bug := range allBugs {
		lastElem := bugIdsLists[len(bugIdsLists)-1]
		if len(lastElem) < 100 {
			bugIdsLists[len(bugIdsLists)-1] = append(lastElem, strconv.FormatInt(bug.Id, 10))
		} else {
			bugIdsLists = append(bugIdsLists, []string{strconv.FormatInt(bug.Id, 10)})
		}
	}
	for _, bugIdsList := range bugIdsLists {
		updatedBugs := bugzilla.GetBugsByIds(bugIdsList)
		for _, updatedBug := range updatedBugs {
			_, err := connection.DB.Model(&updatedBug).WherePK().Update()
			if err != nil {
				logger.Error.Println("Error during bug data update")
				logger.Error.Println(err)
			}
		}
	}
	// Possibly delete deleted bugs
	// Do we even delete bugs?
	// update the time of the last bug update
	// NOTE(review): an empty string is stored here — confirm whether a
	// timestamp was intended.
	models.SetApplicationValue("LastBugUpdate", "")
}
|
package api
import (
"fmt"
"github.com/yaziedda/iser/app/common"
"github.com/yaziedda/iser/app/penawaran"
"log"
"net/http"
)
// GetPenawaranDataList writes the full list of penawaran (offers) to the
// response body.
func GetPenawaranDataList(w http.ResponseWriter, r *http.Request) {
	data := penawaran.GetListPenawaran()
	// %v formats data whatever its concrete type; the old "%s%s" form
	// assumed data was string-formattable
	log.Printf("Get list penawaran : %v", data)
	fmt.Fprintf(w, "%v", data)
}
// InsertPenawaranData validates the posted form fields and inserts a new
// penawaran record, replying 400 when any required field is missing.
func InsertPenawaranData(w http.ResponseWriter, r *http.Request) {
	err := r.ParseForm()
	common.CheckError(err)
	id := r.PostFormValue("id")
	userID := r.PostFormValue("user_id")
	judul := r.PostFormValue("judul")
	deskripsi := r.PostFormValue("deskripsi")
	harga := r.PostFormValue("harga")
	lokasi := r.PostFormValue("lokasi")
	lokasiInitial := r.PostFormValue("lokasi_initial")
	// guard clause: every field is required
	if id == "" || userID == "" || judul == "" || deskripsi == "" || harga == "" || lokasi == "" || lokasiInitial == "" {
		http.Error(w, common.ErrorBadReq(), http.StatusBadRequest)
		return
	}
	result := penawaran.InsertPenawaran(id, userID, judul, deskripsi, harga, lokasi, lokasiInitial)
	fmt.Fprintf(w, "%v", result)
}
// PenawaranAcceptByPenyedia marks a penawaran as accepted by the given
// penyedia (provider), replying 400 when either id is missing.
func PenawaranAcceptByPenyedia(w http.ResponseWriter, r *http.Request) {
	err := r.ParseForm()
	common.CheckError(err)
	id := r.PostFormValue("id")
	penyediaID := r.PostFormValue("penyedia_id")
	if id == "" || penyediaID == "" {
		http.Error(w, common.ErrorBadReq(), http.StatusBadRequest)
		return
	}
	result := penawaran.PenawaranAcceptByPenyedia(id, penyediaID)
	fmt.Fprintf(w, "%v", result)
}
// PenawaranDone marks the penawaran identified by the posted penawaran_id
// as finished, replying 400 when the id is missing.
func PenawaranDone(w http.ResponseWriter, r *http.Request) {
	err := r.ParseForm()
	common.CheckError(err)
	id := r.PostFormValue("penawaran_id")
	if id == "" {
		http.Error(w, common.ErrorBadReq(), http.StatusBadRequest)
		return
	}
	result := penawaran.PenawaranDone(id)
	fmt.Fprintf(w, "%v", result)
}
// PenawaranSelectId looks up a single penawaran by the posted penawaran_id,
// replying 400 when the id is missing.
func PenawaranSelectId(w http.ResponseWriter, r *http.Request) {
	err := r.ParseForm()
	common.CheckError(err)
	id := r.PostFormValue("penawaran_id")
	if id == "" {
		http.Error(w, common.ErrorBadReq(), http.StatusBadRequest)
		return
	}
	data := penawaran.PenawaranSelectId(id)
	// %v formats data whatever its concrete type (the old "%s%s" form
	// assumed data was string-formattable)
	log.Printf("Get list penawaran by id : %v", data)
	fmt.Fprintf(w, "%v", data)
}
|
package router
import (
"net/http"
"os"
"path/filepath"
"github.com/abhinavnair/budget/internal/pkg/handler"
"github.com/go-chi/chi"
)
var (
wd string
)
// init caches the process working directory so Handler can locate the
// static directory. Failing to resolve it is fatal at startup.
func init() {
	var err error
	if wd, err = os.Getwd(); err != nil {
		// panic with the error value itself (not just its text) so the full
		// error reaches any recover handler
		panic(err)
	}
}
// Handler returns the http handler that handles all the requests.
// It mounts a static file server for ./static (relative to the working
// directory captured in init) at the root route.
func Handler() http.Handler {
	r := chi.NewRouter()
	handler.FileServer(r, "/", filepath.Join(wd, "static"))
	return r
}
|
package main
import (
"bufio"
"fmt"
"os"
"regexp"
"strconv"
"strings"
"github.com/dobegor/calc"
)
var terms []*Term
// registerTerm adds t to the package-level registry of known terms.
func registerTerm(t *Term) {
	terms = append(terms, t)
}
// parseTerms parses the formulas of every registered term, resolving the
// term dependencies referenced by each formula.
func parseTerms() {
	for i := range terms {
		terms[i].Parse()
	}
}
// Formula is a textual expression over single-letter term identifiers.
type Formula struct {
	Fml string // formula source text
	Deps []*Term // terms referenced by Fml, filled in by Parse
	f func(map[string]float64) float64 // compiled evaluator, set by Parse
}
// CanEvaluate reports whether every dependency of the formula is either
// already known or itself evaluable.
func (f *Formula) CanEvaluate() bool {
	for _, dep := range f.Deps {
		if !dep.Known && !dep.CanEvaluate() {
			return false
		}
	}
	return true
}
// termIDPattern matches the single upper-case letters used as term
// identifiers inside formula text.
var termIDPattern = regexp.MustCompile("[A-Z]")

// Parse scans the formula text for term identifiers, records the matching
// registered terms as dependencies, and compiles the formula into a
// callable function. The regexp is compiled once at package init instead
// of on every call.
func (f *Formula) Parse() {
	ids := termIDPattern.FindAllString(f.Fml, -1)
	for _, id := range ids {
		for _, term := range terms {
			if term.ID == id {
				f.Deps = append(f.Deps, term)
			}
		}
	}
	f.f = calc.WrapFunction(f.Fml)
}
// Evaluate computes the formula's value by evaluating every dependency and
// feeding the resulting ID->value map to the compiled function. It assumes
// CanEvaluate() returned true.
func (f *Formula) Evaluate() float64 {
	m := make(map[string]float64)
	for _, d := range f.Deps {
		m[d.ID] = d.Evaluate()
	}
	return f.f(m)
}
// Term is a named quantity that is either directly known or derivable from
// one of its formulas.
type Term struct {
	ID string // single upper-case letter identifier
	Known bool // true once Value holds a concrete number
	Desc string // human-readable description
	Value float64
	Formulas []*Formula // alternative ways to derive Value
}
// SetValue records a concrete value for the term and marks it as known.
func (t *Term) SetValue(v float64) {
	t.Known = true
	t.Value = v
}
// Parse parses every formula attached to this term.
func (t *Term) Parse() {
	for i := range t.Formulas {
		t.Formulas[i].Parse()
	}
}
// AddFormula attaches a new formula (given by its source text) to the term.
func (t *Term) AddFormula(s string) {
	t.Formulas = append(t.Formulas, &Formula{Fml: s})
}
// Evaluate returns the term's value, computing and caching it from the
// first evaluable formula when it is not yet known. It panics when no
// formula can be evaluated (check CanEvaluate first).
func (t *Term) Evaluate() float64 {
	if t.Known {
		return t.Value
	}
	for _, f := range t.Formulas {
		if f.CanEvaluate() {
			// evaluate once and cache; the original called f.Evaluate()
			// twice, doing the whole dependency walk a second time
			t.Value = f.Evaluate()
			t.Known = true
			return t.Value
		}
	}
	panic("Can't evaluate this")
}
// CanEvaluate reports whether the term is already known or any of its
// formulas can currently be evaluated.
func (t *Term) CanEvaluate() bool {
	if t.Known {
		return true
	}
	for i := range t.Formulas {
		if t.Formulas[i].CanEvaluate() {
			return true
		}
	}
	return false
}
// main loads term declarations and assignments from input.txt, then
// interactively evaluates terms requested on stdin until "q" is entered.
func main() {
	fmt.Println("Yolo")
	file, err := os.Open("input.txt")
	if err != nil {
		// the original discarded this error and would nil-deref below
		fmt.Println("could not open input.txt:", err)
		return
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	scanner.Scan() // skip the first line (section header, presumably "#1")
	// Section 1: "ID : description" declarations, terminated by "#2".
	for scanner.Scan() {
		if scanner.Text() == "#2" {
			break
		}
		s := strings.Split(scanner.Text(), ":")
		t := Term{
			ID:   strings.Trim(s[0], " "),
			Desc: strings.Trim(s[1], " "),
		}
		registerTerm(&t)
	}
	// Section 2: "ID = value-or-formula" assignments.
	for scanner.Scan() {
		s := strings.Split(scanner.Text(), "=")
		id := strings.Trim(s[0], " ")
		var term *Term
		for _, t := range terms {
			if t.ID == id {
				term = t
			}
		}
		if term == nil {
			// previously a nil dereference when an assignment referenced
			// an undeclared term
			fmt.Println("unknown term:", id)
			continue
		}
		f, err := strconv.ParseFloat(strings.Trim(s[1], " "), 64)
		if err == nil {
			term.SetValue(f)
		} else {
			term.AddFormula(strings.Trim(s[1], " "))
		}
	}
	parseTerms()
	fmt.Println("Terms registered:")
	for _, t := range terms {
		fmt.Println(t.ID, t.Desc)
	}
	fmt.Println("Known:")
	for _, t := range terms {
		if t.Known {
			fmt.Println(t.ID, t.Desc)
		}
	}
	fmt.Println("Can evaluate:")
	for _, t := range terms {
		if t.CanEvaluate() {
			fmt.Println(t.ID, t.Desc)
		}
	}
	fmt.Println("Type the term you wish to evaluate or q to exit the program:")
	var input string
	for {
		fmt.Scanln(&input)
		if input == "q" {
			fmt.Println("Bye!")
			return
		}
		for _, t := range terms {
			if t.ID == input && t.CanEvaluate() {
				fmt.Println(t.Evaluate())
			}
		}
	}
}
|
// Copyright (C) 2019 Cisco Systems Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package utils
import (
"context"
"encoding/json"
"fmt"
"net"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/containernetworking/plugins/pkg/ns"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
"github.com/yookoala/realpath"
"golang.org/x/sys/unix"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"github.com/projectcalico/vpp-dataplane/v3/config"
"github.com/projectcalico/vpp-dataplane/v3/vpplink"
)
var (
	// FakeVppNextHopIP4 is the IPv4 next-hop address presented to Linux for routes towards VPP.
	FakeVppNextHopIP4 = net.ParseIP("169.254.0.1")
	// FakeVppNextHopIP6 is the IPv6 counterpart of FakeVppNextHopIP4.
	FakeVppNextHopIP6 = net.ParseIP("fc00:ffff:ffff:ffff:ca11:c000:fd10:fffe")
	// VppSideMac is the MAC used on the VPP side; the parse error is ignored because the literal is constant.
	VppSideMac, _ = net.ParseMAC("02:ca:11:c0:fd:10")
)
// IsDriverLoaded reports whether the given kernel driver is registered on
// the PCI bus, by probing its sysfs directory.
func IsDriverLoaded(driver string) (bool, error) {
	_, err := os.Stat("/sys/bus/pci/drivers/" + driver)
	switch {
	case err == nil:
		return true, nil
	case os.IsNotExist(err):
		return false, nil
	default:
		return false, err
	}
}
// GetMaxCIDRLen returns the host-route prefix length: 128 for IPv6, 32 for IPv4.
func GetMaxCIDRLen(isv6 bool) int {
	if isv6 {
		return 128
	}
	return 32
}
// GetMaxCIDRMask returns the all-ones netmask (/32 or /128) matching the
// address family of addr.
func GetMaxCIDRMask(addr net.IP) net.IPMask {
	maxCIDRLen := GetMaxCIDRLen(vpplink.IsIP6(addr))
	return net.CIDRMask(maxCIDRLen, maxCIDRLen)
}
// WriteFile writes state (plus a trailing newline) to path, creating the
// file read-only (0400) when it does not already exist. Used for sysfs /
// procfs style single-value files.
func WriteFile(state string, path string) error {
	err := os.WriteFile(path, []byte(state+"\n"), 0400)
	if err != nil {
		// wrap rather than reformat so callers can still unwrap the
		// underlying I/O error (Errorf discarded the cause)
		return errors.Wrapf(err, "Failed to write state to %s", path)
	}
	return nil
}
// WriteInfoFile serialises the current vpp-manager state (config.Info) as
// indented JSON into the well-known info file, world-readable (0644).
func WriteInfoFile() error {
	file, err := json.MarshalIndent(config.Info, "", " ")
	if err != nil {
		return errors.Errorf("Failed to encode json for info file: %s", err)
	}
	return os.WriteFile(config.VppManagerInfoFile, file, 0644)
}
// RouteIsIP6 reports whether the route belongs to the IPv6 family, judging
// by its destination, then gateway, then source address.
func RouteIsIP6(r *netlink.Route) bool {
	switch {
	case r.Dst != nil:
		return vpplink.IsIP6(r.Dst.IP)
	case r.Gw != nil:
		return vpplink.IsIP6(r.Gw)
	case r.Src != nil:
		return vpplink.IsIP6(r.Src)
	default:
		return false
	}
}
// RouteIsLinkLocalUnicast reports whether the route's destination is an
// IPv6 link-local unicast prefix.
func RouteIsLinkLocalUnicast(r *netlink.Route) bool {
	if r.Dst == nil || !vpplink.IsIP6(r.Dst.IP) {
		return false
	}
	return r.Dst.IP.IsLinkLocalUnicast()
}
// SetInterfaceRxQueues sets the number of combined rx/tx queue channels on
// ifname by shelling out to `ethtool -L`.
func SetInterfaceRxQueues(ifname string, queues int) error {
	/* TODO: use go library */
	cmd := exec.Command("ethtool", "-L", ifname, "combined", fmt.Sprintf("%d", queues))
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}
// SwapDriver rebinds PCI device pciDevice to newDriver through sysfs.
// When addId is set, the device's vendor/device id pair is first written to
// the new driver's new_id file so the driver will accept the device.
func SwapDriver(pciDevice, newDriver string, addId bool) error {
	if pciDevice == "" {
		log.Warnf("PCI ID not found, not swapping drivers")
		return nil
	}
	deviceRoot := fmt.Sprintf("/sys/bus/pci/devices/%s", pciDevice)
	driverRoot := fmt.Sprintf("/sys/bus/pci/drivers/%s", newDriver)
	if addId {
		// Grab device vendor and id
		vendor, err := os.ReadFile(deviceRoot + "/vendor")
		if err != nil {
			return errors.Wrapf(err, "Error reading device %s vendor", pciDevice)
		}
		device, err := os.ReadFile(deviceRoot + "/device")
		if err != nil {
			return errors.Wrapf(err, "Error reading device %s id", pciDevice)
		}
		// Add it to driver before unbinding to prevent spontaneous binds.
		// The [2:6] slices strip the leading "0x" of the sysfs values.
		identifier := fmt.Sprintf("%s %s\n", string(vendor[2:6]), string(device[2:6]))
		log.Infof("Adding id '%s' to driver %s", identifier, newDriver)
		err = os.WriteFile(driverRoot+"/new_id", []byte(identifier), 0200)
		if err != nil {
			log.Warnf("Could not add id %s to driver %s: %v", identifier, newDriver, err)
		}
	}
	err := os.WriteFile(deviceRoot+"/driver/unbind", []byte(pciDevice), 0200)
	if err != nil {
		// Error on unbind is not critical, device might bind successfully afterwards if it is not currently bound
		log.Warnf("Error unbinding %s: %v", pciDevice, err)
	}
	err = os.WriteFile(driverRoot+"/bind", []byte(pciDevice), 0200)
	return errors.Wrapf(err, "Error binding %s to %s", pciDevice, newDriver)
}
func FormatIPNetSlice(lst []net.IPNet) string {
strLst := make([]string, 0, len(lst))
for _, e := range lst {
strLst = append(strLst, e.String())
}
return strings.Join(strLst, ", ")
}
// SetCorePattern configures the kernel core-dump pattern; an empty pattern
// is a no-op.
func SetCorePattern(corePattern string) error {
	if corePattern == "" {
		return nil
	}
	if err := WriteFile(corePattern, "/proc/sys/kernel/core_pattern"); err != nil {
		return errors.Wrap(err, "Error writing corePattern")
	}
	return nil
}
// SetRLimitMemLock raises the locked-memory rlimit (RLIMIT_MEMLOCK) to
// unlimited for both the soft and hard limits.
func SetRLimitMemLock() error {
	// RLIMIT_MEMLOCK is not exposed by the stdlib syscall package; 8 is its
	// value on Linux (matches golang.org/x/sys/unix.RLIMIT_MEMLOCK).
	const rlimitMemlock = 8
	noLimit := ^uint64(0)
	return syscall.Setrlimit(rlimitMemlock, &syscall.Rlimit{
		Cur: noLimit,
		Max: noLimit,
	})
}
// CreateVppLink gets a VPP API connection over the configured API socket,
// retrying up to 10 times (2s apart) to accommodate VPP startup time.
func CreateVppLink() (vpp *vpplink.VppLink, err error) {
	// Get an API connection, with a few retries to accommodate VPP startup time
	for i := 0; i < 10; i++ {
		vpp, err = vpplink.NewVppLink(config.VppApiSocket, log.WithFields(log.Fields{"component": "vpp-api"}))
		if err != nil {
			if i < 5 {
				/* do not warn, it is probably fine */
				log.Infof("Waiting for VPP... [%d/10]", i)
			} else {
				log.Warnf("Waiting for VPP... [%d/10] %v", i, err)
			}
			time.Sleep(2 * time.Second)
		} else {
			return vpp, nil
		}
	}
	return nil, errors.Errorf("Cannot connect to VPP after 10 tries")
}
// ClearVppManagerFiles resets the shared vpp-manager state to "starting"
// with empty uplink / physical-network maps, and persists it to the info file.
func ClearVppManagerFiles() error {
	config.Info.Status = config.Starting
	// the explicit 0 size hints on make(map) were redundant
	config.Info.UplinkStatuses = make(map[string]config.UplinkStatus)
	config.Info.PhysicalNets = make(map[string]config.PhysicalNetwork)
	return WriteInfoFile()
}
// SetVfioEnableUnsafeNoIommuMode writes the requested mode string to the
// vfio "enable_unsafe_noiommu_mode" module parameter; DISABLED is a no-op.
func SetVfioEnableUnsafeNoIommuMode(mode config.UnsafeNoIommuMode) (err error) {
	if mode == config.VFIO_UNSAFE_NO_IOMMU_MODE_DISABLED {
		return
	}
	return WriteFile(string(mode), "/sys/module/vfio/parameters/enable_unsafe_noiommu_mode")
}
// GetVfioEnableUnsafeNoIommuMode reads the vfio "enable_unsafe_noiommu_mode"
// module parameter, mapping a missing file to DISABLED.
func GetVfioEnableUnsafeNoIommuMode() (config.UnsafeNoIommuMode, error) {
	raw, err := os.ReadFile("/sys/module/vfio/parameters/enable_unsafe_noiommu_mode")
	if err != nil {
		if errors.Is(err, os.ErrNotExist) {
			return config.VFIO_UNSAFE_NO_IOMMU_MODE_DISABLED, nil
		}
		return config.VFIO_UNSAFE_NO_IOMMU_MODE_DISABLED, errors.Wrapf(err, "Couldnt read /sys/module/vfio/parameters/enable_unsafe_noiommu_mode")
	}
	if strings.TrimSpace(string(raw)) == "Y" {
		return config.VFIO_UNSAFE_NO_IOMMU_MODE_YES, nil
	}
	return config.VFIO_UNSAFE_NO_IOMMU_MODE_NO, nil
}
// DeleteInterfaceVF disables all virtual functions of the given PCI device
// by writing 0 to its sriov_numvfs knob.
func DeleteInterfaceVF(pciId string) (err error) {
	numVfsKnob := fmt.Sprintf("/sys/bus/pci/devices/%s/sriov_numvfs", pciId)
	if err := WriteFile("0", numVfsKnob); err != nil {
		return errors.Wrapf(err, "cannot disable VFs for %s", pciId)
	}
	return nil
}
// GetInterfaceNumVFs returns how many virtual functions are currently
// enabled on PCI device pciId, read from its sriov_numvfs knob.
func GetInterfaceNumVFs(pciId string) (int, error) {
	sriovNumvfsPath := fmt.Sprintf("/sys/bus/pci/devices/%s/sriov_numvfs", pciId)
	numVfsStr, err := os.ReadFile(sriovNumvfsPath)
	if err != nil {
		return 0, errors.Wrapf(err, "/sys/bus/pci/devices/%s/sriov_numvfs", pciId)
	}
	numVfs, err := strconv.ParseInt(strings.TrimSpace(string(numVfsStr)), 10, 32)
	if err != nil {
		// Wrapf already appends err's message; the original embedded the
		// error twice via an extra %v
		return 0, errors.Wrapf(err, "Couldnt parse sriov_numvfs")
	}
	return int(numVfs), nil
}
// SetVFSpoofTrust toggles the spoof-check and trust flags of VF number vf
// on interface ifName via `ip link set`.
func SetVFSpoofTrust(ifName string, vf int, spoof bool, trust bool) error {
	onOff := func(b bool) string {
		if b {
			return "on"
		}
		return "off"
	}
	cmd := exec.Command("ip", "link", "set", "dev", ifName,
		"vf", fmt.Sprintf("%d", vf),
		"spoof", onOff(spoof),
		"trust", onOff(trust))
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}
/**
 * getNsRunDir returns the directory where named network namespaces are
 * bind-mounted. This function was copied from the following repo [0]
 * as we depend on pkg/ns, but it does not support netns creation
 * [0] github.com/containernetworking/plugins.git:pkg/testutils/netns_linux.go
 */
func getNsRunDir() string {
	xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR")
	// If XDG_RUNTIME_DIR is set, check if the current user owns /var/run. If
	// the owner is different, we are most likely running in a user namespace.
	// In that case use $XDG_RUNTIME_DIR/netns as runtime dir.
	if xdgRuntimeDir != "" {
		if s, err := os.Stat("/var/run"); err == nil {
			st, ok := s.Sys().(*syscall.Stat_t)
			if ok && int(st.Uid) != os.Geteuid() {
				return path.Join(xdgRuntimeDir, "netns")
			}
		}
	}
	return "/var/run/netns"
}
// getCurrentThreadNetNSPath returns the /proc path of the current OS
// thread's network namespace. Copied from containernetworking/plugins/pkg/ns.
func getCurrentThreadNetNSPath() string {
	// /proc/self/ns/net returns the namespace of the main thread, not
	// of whatever thread this goroutine is running on. Make sure we
	// use the thread's net namespace since the thread is switching around
	return fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid())
}
/**
 * NewNS creates a new persistent (bind-mounted) network namespace named
 * nsName and returns an object representing it, without switching the
 * calling thread into it.
 *
 * This function was copied from the following repo [0]
 * as we depend on pkg/ns, but it does not support netns creation
 * [0] github.com/containernetworking/plugins.git:pkg/testutils/netns_linux.go
 */
func NewNS(nsName string) (ns.NetNS, error) {
	// Creates a new persistent (bind-mounted) network namespace and returns an object
	// representing that namespace, without switching to it.
	nsRunDir := getNsRunDir()
	// Create the directory for mounting network namespaces
	// This needs to be a shared mountpoint in case it is mounted in to
	// other namespaces (containers)
	err := os.MkdirAll(nsRunDir, 0755)
	if err != nil {
		return nil, err
	}
	// Remount the namespace directory shared. This will fail if it is not
	// already a mountpoint, so bind-mount it on to itself to "upgrade" it
	// to a mountpoint.
	err = unix.Mount("", nsRunDir, "none", unix.MS_SHARED|unix.MS_REC, "")
	if err != nil {
		if err != unix.EINVAL {
			return nil, fmt.Errorf("mount --make-rshared %s failed: %q", nsRunDir, err)
		}
		// Recursively remount /var/run/netns on itself. The recursive flag is
		// so that any existing netns bindmounts are carried over.
		err = unix.Mount(nsRunDir, nsRunDir, "none", unix.MS_BIND|unix.MS_REC, "")
		if err != nil {
			return nil, fmt.Errorf("mount --rbind %s %s failed: %q", nsRunDir, nsRunDir, err)
		}
		// Now we can make it shared
		err = unix.Mount("", nsRunDir, "none", unix.MS_SHARED|unix.MS_REC, "")
		if err != nil {
			return nil, fmt.Errorf("mount --make-rshared %s failed: %q", nsRunDir, err)
		}
	}
	// create an empty file at the mount point
	nsPath := path.Join(nsRunDir, nsName)
	mountPointFd, err := os.Create(nsPath)
	if err != nil {
		return nil, err
	}
	mountPointFd.Close()
	// Ensure the mount point is cleaned up on errors; if the namespace
	// was successfully mounted this will have no effect because the file
	// is in-use
	defer os.RemoveAll(nsPath)
	var wg sync.WaitGroup
	wg.Add(1)
	// do namespace work in a dedicated goroutine, so that we can safely
	// Lock/Unlock OSThread without upsetting the lock/unlock state of
	// the caller of this function
	go (func() {
		defer wg.Done()
		runtime.LockOSThread()
		// Don't unlock. By not unlocking, golang will kill the OS thread when the
		// goroutine is done (for go1.10+)
		var origNS ns.NetNS
		origNS, err = ns.GetNS(getCurrentThreadNetNSPath())
		if err != nil {
			return
		}
		defer origNS.Close()
		// create a new netns on the current thread
		err = unix.Unshare(unix.CLONE_NEWNET)
		if err != nil {
			return
		}
		// Put this thread back to the orig ns, since it might get reused (pre go1.10)
		defer func() {
			err2 := origNS.Set()
			if err2 != nil {
				err = fmt.Errorf("Error setting origNS %s (origin err %s)", err2, err)
			}
		}()
		// bind mount the netns from the current thread (from /proc) onto the
		// mount point. This causes the namespace to persist, even when there
		// are no threads in the ns.
		err = unix.Mount(getCurrentThreadNetNSPath(), nsPath, "none", unix.MS_BIND, "")
		if err != nil {
			err = fmt.Errorf("failed to bind mount ns at %s: %v", nsPath, err)
		}
	})()
	wg.Wait()
	if err != nil {
		return nil, fmt.Errorf("failed to create namespace: %v", err)
	}
	return ns.GetNS(nsPath)
}
// GetnetnsPath returns the full path of the bind-mounted network namespace
// file for nsName inside the netns run directory.
func GetnetnsPath(nsName string) string {
	dir := getNsRunDir()
	return path.Join(dir, nsName)
}
// GetInterfaceVFPciId resolves the PCI address of the first virtual function
// (virtfn0) of the physical function identified by pciId.
func GetInterfaceVFPciId(pciId string) (vfPciId string, err error) {
	link := fmt.Sprintf("/sys/bus/pci/devices/%s/virtfn0", pciId)
	id, err := getPciIdFromLink(link)
	if err != nil {
		return "", errors.Wrapf(err, "Couldn't find VF pciID in %s", link)
	}
	return id, nil
}
// CreateInterfaceVF makes sure at least one VF exists on the device pciId,
// creating a single VF through sriov_numvfs when none is present yet.
func CreateInterfaceVF(pciId string) error {
	numVfs, err := GetInterfaceNumVFs(pciId)
	if err != nil {
		return errors.Wrapf(err, "cannot get num VFs for %s", pciId)
	}
	if numVfs > 0 {
		// a VF is already available, nothing to create
		return nil
	}
	sriovNumvfsPath := fmt.Sprintf("/sys/bus/pci/devices/%s/sriov_numvfs", pciId)
	if err := WriteFile("1", sriovNumvfsPath); err != nil {
		return errors.Wrapf(err, "cannot add VFs for %s", pciId)
	}
	return nil
}
func BindVFtoDriver(pciId string, driver string) error {
unbindPath := fmt.Sprintf("/sys/bus/pci/devices/%s/driver/unbind", pciId)
err := WriteFile(pciId, unbindPath)
if err != nil {
return errors.Wrapf(err, "cannot unbind VF %s", pciId)
}
overridePath := fmt.Sprintf("/sys/bus/pci/devices/%s/driver_override", pciId)
err = WriteFile(driver, overridePath)
if err != nil {
return errors.Wrapf(err, "cannot override VF %s driver to %s", pciId, driver)
}
vfPciBindPath := fmt.Sprintf("/sys/bus/pci/drivers/%s/bind", driver)
err = WriteFile(pciId, vfPciBindPath)
if err != nil {
return errors.Wrapf(err, "cannot bind VF %s to %s", pciId, driver)
}
err = WriteFile("", overridePath)
if err != nil {
return errors.Wrapf(err, "cannot remove VF %s override driver", pciId)
}
return nil
}
// GetInterfaceNameFromPci returns the name of the single network interface
// exposed under /sys/bus/pci/devices/<pciId>/net.
func GetInterfaceNameFromPci(pciId string) (string, error) {
	// list the net devices exposed by this PCI device
	netDirPath := fmt.Sprintf("/sys/bus/pci/devices/%s/net", pciId)
	netDevs, err := os.ReadDir(netDirPath)
	if err != nil {
		return "", errors.Wrapf(err, "cannot list /net for %s", pciId)
	}
	if len(netDevs) != 1 {
		// err is nil at this point: the old errors.Wrapf(err, ...) returned a
		// nil error here, silently hiding the failure. Build a fresh error.
		return "", errors.Errorf("Found %d devices in /net for %s", len(netDevs), pciId)
	}
	return netDevs[0].Name(), nil
}
// GetDriverNameFromPci returns the name of the kernel driver currently bound
// to the PCI device, i.e. the basename of its /sys driver symlink target.
func GetDriverNameFromPci(pciId string) (string, error) {
	link := fmt.Sprintf("/sys/bus/pci/devices/%s/driver", pciId)
	target, err := os.Readlink(link)
	if err != nil {
		return "", errors.Wrapf(err, "cannot find driver for %s", pciId)
	}
	// keep everything after the last path separator
	return target[strings.LastIndex(target, "/")+1:], nil
}
// getPciIdFromLink resolves path (typically a /sys symlink) and returns the
// last PCI address (domain:bus:device.function) found in the resolved path,
// or "" when none is present.
func getPciIdFromLink(path string) (string, error) {
	realPath, err := realpath.Realpath(path)
	if err != nil {
		return "", err
	}
	// The previous pattern used an unescaped '.' which matched ANY character;
	// `\.` restricts it to the literal dot of the PCI function separator.
	pciID := regexp.MustCompile(`[0-9a-f]{4}:[0-9a-f]{2}:[0-9a-f]{2}\.[0-9a-f]`)
	matches := pciID.FindAllString(realPath, -1)
	if len(matches) == 0 {
		return "", nil
	}
	// the last match is the device itself (earlier ones are bridges)
	return matches[len(matches)-1], nil
}
// GetInterfacePciId returns the PCI address of the network interface, taken
// from the last PCI id in the resolved /sys/class/net/<name>/device link.
func GetInterfacePciId(interfaceName string) (string, error) {
	link := fmt.Sprintf("/sys/class/net/%s/device", interfaceName)
	id, err := getPciIdFromLink(link)
	if err != nil {
		return "", errors.Wrapf(err, "cannot resolve pci device path for %s", interfaceName)
	}
	return id, nil
}
// GetNrHugepages returns the system-wide number of huge pages configured in
// /proc/sys/vm/nr_hugepages.
func GetNrHugepages() (int, error) {
	raw, err := os.ReadFile("/proc/sys/vm/nr_hugepages")
	if err != nil {
		return 0, errors.Wrapf(err, "Couldnt read /proc/sys/vm/nr_hugepages")
	}
	nrHugepages, err := strconv.ParseInt(strings.TrimSpace(string(raw)), 10, 32)
	if err != nil {
		// errors.Wrap already appends err's message; the old `"...: %v", err`
		// form printed the underlying error twice.
		return 0, errors.Wrap(err, "Couldnt parse nrHugepages")
	}
	return int(nrHugepages), nil
}
// ParseKernelVersion parses a kernel release string of the form
// "kernel.major.minor-patch" (e.g. "5.15.0-91") into a KernelVersion.
func ParseKernelVersion(versionStr string) (*config.KernelVersion, error) {
	re := regexp.MustCompile(`([0-9]+)\.([0-9]+)\.([0-9]+)\-([0-9]+)`)
	match := re.FindStringSubmatch(versionStr)
	if len(match) != 5 {
		return nil, errors.Errorf("Couldnt parse kernel version %s : %v", versionStr, match)
	}
	/* match[0] is the whole string */
	// errors.Wrap already includes err's message; the previous `"...: %v", err`
	// calls duplicated the underlying error in every branch below.
	kernel, err := strconv.ParseInt(match[1], 10, 32)
	if err != nil {
		return nil, errors.Wrap(err, "Couldnt parse kernel version")
	}
	major, err := strconv.ParseInt(match[2], 10, 32)
	if err != nil {
		return nil, errors.Wrap(err, "Couldnt parse major version")
	}
	minor, err := strconv.ParseInt(match[3], 10, 32)
	if err != nil {
		return nil, errors.Wrap(err, "Couldnt parse minor version")
	}
	patch, err := strconv.ParseInt(match[4], 10, 32)
	if err != nil {
		return nil, errors.Wrap(err, "Couldnt parse patch version")
	}
	return &config.KernelVersion{
		Kernel: int(kernel),
		Major:  int(major),
		Minor:  int(minor),
		Patch:  int(patch),
	}, nil
}
// GetOsKernelVersion reads and parses the running kernel's release string
// from /proc/sys/kernel/osrelease.
func GetOsKernelVersion() (*config.KernelVersion, error) {
	raw, err := os.ReadFile("/proc/sys/kernel/osrelease")
	if err != nil {
		return nil, errors.Wrapf(err, "Couldnt read /proc/sys/kernel/osrelease")
	}
	return ParseKernelVersion(strings.TrimSpace(string(raw)))
}
// SafeGetLink looks up interfaceName, retrying for up to ~10 seconds
// (20 attempts, 500ms apart) to tolerate links that appear asynchronously.
func SafeGetLink(interfaceName string) (netlink.Link, error) {
	var lastErr error
	for try := 1; ; try++ {
		link, err := netlink.LinkByName(interfaceName)
		if err == nil {
			return link, nil
		}
		lastErr = err
		if try >= 20 {
			return nil, errors.Wrapf(lastErr, "Error finding link %s after %d tries", interfaceName, try)
		}
		time.Sleep(500 * time.Millisecond)
	}
}
// SafeSetInterfaceUpByName resolves interfaceName (with retries, see
// SafeGetLink) and brings the link up, returning the resolved link.
func SafeSetInterfaceUpByName(interfaceName string) (netlink.Link, error) {
	link, err := SafeGetLink(interfaceName)
	if err != nil {
		return nil, err
	}
	if err := netlink.LinkSetUp(link); err != nil {
		return nil, errors.Wrapf(err, "Error setting link %s back up", interfaceName)
	}
	return link, nil
}
// CycleHardwareAddr returns a copy of hwAddr whose last byte has its low n
// bits shifted left by one within that group, with bit n-1 preserved if it
// was set. Assumes n <= 8; the input address is never modified.
func CycleHardwareAddr(hwAddr net.HardwareAddr, n uint8) net.HardwareAddr {
	out := make(net.HardwareAddr, len(hwAddr))
	copy(out, hwAddr)
	last := out[len(out)-1]
	low := byte((1 << n) - 1)        // mask for the n low bits
	high := byte(0xff & (0xff << n)) // mask for the remaining high bits
	nth := byte(1 << (n - 1))        // mask for bit n-1 itself
	rotated := ((last & low) << 1) & low
	out[len(out)-1] = (last & high) | rotated | (last & nth)
	return out
}
// RenameInterface renames a link, temporarily bringing it down for the rename
// and restoring the up state afterwards. On failure it tries to bring the
// link back up before returning.
func RenameInterface(name, newName string) error {
	link, err := SafeGetLink(name)
	if err != nil {
		return errors.Wrapf(err, "error finding link %s", name)
	}
	wasUp := link.Attrs().Flags&net.FlagUp != 0
	if wasUp {
		if err := netlink.LinkSetDown(link); err != nil {
			err2 := netlink.LinkSetUp(link)
			return errors.Wrapf(err, "cannot set link %s down (err2 %s)", name, err2)
		}
	}
	if err := netlink.LinkSetName(link, newName); err != nil {
		err2 := netlink.LinkSetUp(link)
		return errors.Wrapf(err, "cannot rename link %s to %s (err2 %s)", name, newName, err2)
	}
	if wasUp {
		// errors.Wrapf returns nil when its err is nil, so this also covers success
		return errors.Wrapf(netlink.LinkSetUp(link), "cannot set link %s up", newName)
	}
	return nil
}
// NormalizeIP returns the 4-byte form of in when it represents an IPv4
// address (including IPv4-mapped IPv6), and the 16-byte form otherwise.
func NormalizeIP(in net.IP) net.IP {
	v4 := in.To4()
	if v4 == nil {
		return in.To16()
	}
	return v4
}
// IncrementIP returns the given IP + 1, carrying across bytes and wrapping
// around when every byte is 0xff. The input is not modified.
func IncrementIP(ip net.IP) net.IP {
	ip = NormalizeIP(ip)
	out := make(net.IP, len(ip))
	copy(out, ip)
	for i := len(out) - 1; i >= 0; i-- {
		out[i]++
		if out[i] != 0 {
			// no wrap-around in this byte: the carry stops here
			break
		}
	}
	return out
}
// DecrementIP returns the given IP - 1, borrowing across bytes and wrapping
// around when every byte is zero. The input is not modified.
// (The previous comment incorrectly said "+ 1".)
func DecrementIP(ip net.IP) net.IP {
	ip = NormalizeIP(ip)
	out := make(net.IP, len(ip))
	copy(out, ip)
	for i := len(out) - 1; i >= 0; i-- {
		out[i]--
		if out[i] != 0xff {
			// no wrap-around in this byte: the borrow stops here
			break
		}
	}
	return out
}
// NetworkAddr returns the first address in the given network, or the network address.
func NetworkAddr(n *net.IPNet) net.IP {
network := make([]byte, len(n.IP))
for i := 0; i < len(n.IP); i++ {
network[i] = n.IP[i] & n.Mask[i]
}
return network
}
// BroadcastAddr returns the last address in the given network, or the broadcast address.
func BroadcastAddr(n *net.IPNet) net.IP {
broadcast := make([]byte, len(n.IP))
for i := 0; i < len(n.IP); i++ {
broadcast[i] = n.IP[i] | ^n.Mask[i]
}
return broadcast
}
// FetchNodeAnnotations returns the annotations of the named Kubernetes node.
// Best-effort: every failure (no in-cluster config, client construction
// error, node fetch error) yields an empty map rather than an error.
func FetchNodeAnnotations(nodeName string) map[string]string {
	clusterConfig, err := rest.InClusterConfig()
	if err != nil {
		return map[string]string{}
	}
	k8sclient, err := kubernetes.NewForConfig(clusterConfig)
	if err != nil {
		return map[string]string{}
	}
	// bound the API call so a wedged apiserver cannot block us forever
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	node, err := k8sclient.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
	if err != nil {
		return map[string]string{}
	}
	return node.Annotations
}
// timeAndPath pairs a core-file path with its last modification time.
type timeAndPath struct {
	path    string
	modTime time.Time
}

// timeAndPathSlice sorts newest-first (see Less).
type timeAndPathSlice []timeAndPath

// Less orders entries by decreasing modification time (newest first); part of sort.Interface.
func (s timeAndPathSlice) Less(i, j int) bool { return s[i].modTime.After(s[j].modTime) }

// Swap exchanges two entries; part of sort.Interface.
func (s timeAndPathSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// Len returns the number of entries; part of sort.Interface.
func (s timeAndPathSlice) Len() int { return len(s) }
// matchesCorePattern reports whether fname starts with the literal prefix of
// corePattern, i.e. everything before the first '%' format specifier. Only
// the prefix is compared to avoid interpreting the specifiers themselves.
func matchesCorePattern(fname, corePattern string) bool {
	prefix, _, _ := strings.Cut(corePattern, "%")
	return strings.HasPrefix(fname, prefix)
}
// CleanupCoreFiles keeps the newest maxCoreFiles coredumps matching
// corePattern (a kernel core_pattern path whose basename may contain
// %-specifiers), deletes a bounded number of older ones, and prints the
// backtrace of the most recent dump. An empty corePattern disables cleanup.
func CleanupCoreFiles(corePattern string, maxCoreFiles int) error {
	if corePattern == "" {
		return nil
	}
	var timeAndPaths timeAndPathSlice = make([]timeAndPath, 0)
	directory, err := os.Open(filepath.Dir(corePattern))
	if err != nil {
		return errors.Wrap(err, "walk errored")
	}
	infos, err := directory.Readdir(-1)
	directory.Close()
	if err != nil {
		return errors.Wrap(err, "directory readdir errored")
	}
	// collect every regular file whose name matches the pattern's literal prefix
	for _, info := range infos {
		if !info.IsDir() && matchesCorePattern(info.Name(), filepath.Base(corePattern)) {
			timeAndPaths = append(timeAndPaths, timeAndPath{
				filepath.Join(filepath.Dir(corePattern), info.Name()),
				info.ModTime(),
			})
		}
	}
	// sort timeAndPaths by decreasing times
	sort.Sort(timeAndPaths)
	// we remove at most (2 * maxCoreFiles + 2) coredumps leaving the first maxCorefiles in place
	for i := maxCoreFiles; i < len(timeAndPaths) && (i-maxCoreFiles < maxCoreFiles+2); i++ {
		os.Remove(timeAndPaths[i].path)
	}
	// index 0 is the newest dump after the sort above
	if len(timeAndPaths) > 0 && maxCoreFiles > 0 {
		PrintLastBackTrace(timeAndPaths[0].path)
	}
	return nil
}
// PrintLastBackTrace prints the backtrace of the given coredump using gdb,
// when gdb is installed, streaming gdb's output to stdout/stderr.
func PrintLastBackTrace(coreFile string) {
	if _, err := os.Stat("/usr/bin/gdb"); os.IsNotExist(err) {
		log.Infof("Found previous coredump %s, missing gdb for stacktrace", coreFile)
		return
	}
	log.Infof("Found previous coredump %s, trying to print stacktrace", coreFile)
	cmd := exec.Command("/usr/bin/gdb", "-ex", "bt", "-ex", "q", "vpp", coreFile)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	// Run (not Start): the old code never called Wait, so gdb ran detached,
	// was never reaped (zombie) and its output could interleave with ours.
	if err := cmd.Run(); err != nil {
		log.Infof("gdb returned %s", err)
	}
}
|
package reda
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document00100102 is the XML document envelope for the ISO 20022
// reda.001.001.02 (PriceReport) message.
type Document00100102 struct {
	XMLName xml.Name        `xml:"urn:iso:std:iso:20022:tech:xsd:reda.001.001.02 Document"`
	Message *PriceReportV02 `xml:"reda.001.001.02"` // the message payload; set via AddMessage
}
// AddMessage allocates the embedded PriceReportV02 payload and returns it so
// the caller can populate the message in place.
func (d *Document00100102) AddMessage() *PriceReportV02 {
	msg := &PriceReportV02{}
	d.Message = msg
	return msg
}
// Scope
// The PriceReport message is sent by a report provider, e.g. a fund accountant, transfer agent, market data provider, or any other interested party, to a report user, e.g. a fund management company, a transfer agent, market data provider, regulator or any other interested party.
// This message is used to provide net asset value and price information for financial instruments on given trade dates and, optionally, to quote price variation information.
// Usage
// The PriceReport message can be used to:
// - report prices for one or several different financial instruments for one or several different trade dates,
// - report statistical information about the valuation of a financial instrument,
// - inform another party that the quotation of a financial instrument is suspended,
// - report prices that are used for other purposes than the execution of investment funds orders.
type PriceReportV02 struct {
	// Collective reference identifying a set of messages.
	PoolReference *iso20022.AdditionalReference3 `xml:"PoolRef,omitempty"`
	// Reference to a linked message that was previously sent.
	PreviousReference []*iso20022.AdditionalReference3 `xml:"PrvsRef,omitempty"`
	// Reference to a linked message that was previously received.
	RelatedReference *iso20022.AdditionalReference3 `xml:"RltdRef,omitempty"`
	// Information related to the price valuation of a financial instrument.
	// (No omitempty tag: this element is always serialized.)
	PriceValuationDetails []*iso20022.PriceValuation2 `xml:"PricValtnDtls"`
	// Additional information that cannot be captured in the structured elements and/or any other specific block.
	Extension []*iso20022.Extension1 `xml:"Xtnsn,omitempty"`
}
// AddPoolReference allocates the PoolReference field and returns it so the
// caller can fill it in.
func (p *PriceReportV02) AddPoolReference() *iso20022.AdditionalReference3 {
	ref := &iso20022.AdditionalReference3{}
	p.PoolReference = ref
	return ref
}
// AddPreviousReference appends a fresh entry to PreviousReference and returns it.
func (p *PriceReportV02) AddPreviousReference() *iso20022.AdditionalReference3 {
	ref := &iso20022.AdditionalReference3{}
	p.PreviousReference = append(p.PreviousReference, ref)
	return ref
}
// AddRelatedReference allocates the RelatedReference field and returns it so
// the caller can fill it in.
func (p *PriceReportV02) AddRelatedReference() *iso20022.AdditionalReference3 {
	ref := &iso20022.AdditionalReference3{}
	p.RelatedReference = ref
	return ref
}
// AddPriceValuationDetails appends a fresh entry to PriceValuationDetails and
// returns it.
func (p *PriceReportV02) AddPriceValuationDetails() *iso20022.PriceValuation2 {
	val := &iso20022.PriceValuation2{}
	p.PriceValuationDetails = append(p.PriceValuationDetails, val)
	return val
}
// AddExtension appends a fresh entry to Extension and returns it.
func (p *PriceReportV02) AddExtension() *iso20022.Extension1 {
	ext := &iso20022.Extension1{}
	p.Extension = append(p.Extension, ext)
	return ext
}
|
package dao
import "github.com/darkarchana/darkarchana-backend/model"
// HeroesDao : Interface for Heroes DAO
type HeroesDao interface {
	// FindOne returns a single hero selected by the given DB operation.
	FindOne(model.DbOperate) (model.Heroes, error)
	// FindAll returns every hero selected by the given DB operation.
	FindAll(model.DbOperate) ([]model.Heroes, error)
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package importintotest
import (
"context"
"fmt"
"strconv"
"sync"
"testing"
"time"
"github.com/fsouza/fake-gcs-server/fakestorage"
"github.com/pingcap/tidb/br/pkg/lightning/config"
"github.com/pingcap/tidb/executor/asyncloaddata"
"github.com/pingcap/tidb/executor/importer"
"github.com/pingcap/tidb/parser/auth"
"github.com/pingcap/tidb/testkit"
"github.com/stretchr/testify/require"
)
// expectedRecord holds the expected values of one SHOW LOAD DATA JOB result
// row; the column indices refer to those asserted in checkIgnoreTimes.
// Columns 1-3 are timestamps and are only checked for presence (see check).
type expectedRecord struct {
	jobID          string // column 0
	dataSource     string // column 4
	targetTable    string // column 5
	importMode     string // column 6
	createdBy      string // column 7
	jobState       string // column 8
	jobStatus      string // column 9
	sourceFileSize string // column 10
	importedRowCnt string // column 11
	resultCode     string // column 12
	resultMessage  string // column 13
}
// checkIgnoreTimes asserts every non-timestamp column of row against the
// expected values (timestamp columns 1-3 are skipped).
func (r *expectedRecord) checkIgnoreTimes(t *testing.T, row []interface{}) {
	expectations := []struct {
		idx  int
		want string
	}{
		{0, r.jobID},
		{4, r.dataSource},
		{5, r.targetTable},
		{6, r.importMode},
		{7, r.createdBy},
		{8, r.jobState},
		{9, r.jobStatus},
		{10, r.sourceFileSize},
		{11, r.importedRowCnt},
		{12, r.resultCode},
		{13, r.resultMessage},
	}
	for _, e := range expectations {
		require.Equal(t, e.want, row[e.idx])
	}
}
// check verifies every asserted column of row and additionally requires the
// three timestamp columns (indices 1-3) to be populated.
func (r *expectedRecord) check(t *testing.T, row []interface{}) {
	r.checkIgnoreTimes(t, row)
	for _, idx := range []int{1, 2, 3} {
		require.NotEmpty(t, row[idx])
	}
}
// simpleShowLoadDataJobs runs one detached LOAD DATA job in the given import
// mode against a fake GCS bucket and verifies the row reported by
// SHOW LOAD DATA JOB once the job finishes.
func (s *mockGCSSuite) simpleShowLoadDataJobs(importMode string) {
	s.tk.MustExec("DROP DATABASE IF EXISTS test_show;")
	s.tk.MustExec("CREATE DATABASE test_show;")
	s.tk.MustExec("CREATE TABLE test_show.t (i INT PRIMARY KEY);")
	// two rows ("1" and "2") served from the fake GCS bucket
	s.server.CreateObject(fakestorage.Object{
		ObjectAttrs: fakestorage.ObjectAttrs{
			BucketName: "test-show",
			Name:       "t.tsv",
		},
		Content: []byte(`1
2`),
	})
	// run the job as a dedicated user so the created_by column can be asserted
	user := &auth.UserIdentity{
		AuthUsername: "test-load-2",
		AuthHostname: "test-host",
	}
	tk2 := testkit.NewTestKit(s.T(), s.store)
	tk2.Session().GetSessionVars().User = user
	// speed up the job heartbeat for the test; restored on cleanup
	backup := asyncloaddata.HeartBeatInSec
	asyncloaddata.HeartBeatInSec = 1
	s.T().Cleanup(func() {
		asyncloaddata.HeartBeatInSec = backup
	})
	resultMessage := "Records: 2 Deleted: 0 Skipped: 0 Warnings: 0"
	withOptions := "WITH thread=1, DETACHED"
	if importMode == "physical" {
		withOptions = "WITH thread=1, DETACHED, import_mode='PHYSICAL'"
	}
	sql := fmt.Sprintf(`LOAD DATA INFILE 'gs://test-show/t.tsv?endpoint=%s'
		INTO TABLE test_show.t %s;`, gcsEndpoint, withOptions)
	rows := tk2.MustQuery(sql).Rows()
	require.Len(s.T(), rows, 1)
	row := rows[0]
	jobID := row[0].(string)
	// poll until the job reports finished (column 9 is the status)
	require.Eventually(s.T(), func() bool {
		rows = tk2.MustQuery("SHOW LOAD DATA JOB " + jobID + ";").Rows()
		require.Len(s.T(), rows, 1)
		row = rows[0]
		return row[9] == "finished"
	}, 5*time.Second, time.Second)
	r := expectedRecord{
		jobID:          jobID,
		dataSource:     "gs://test-show/t.tsv",
		targetTable:    "`test_show`.`t`",
		importMode:     importMode,
		createdBy:      "test-load-2@test-host",
		jobState:       "loading",
		jobStatus:      "finished",
		sourceFileSize: "3B",
		importedRowCnt: "2",
		resultCode:     "0",
		resultMessage:  resultMessage,
	}
	r.check(s.T(), row)
}
// TestSimpleShowLoadDataJobs exercises SHOW LOAD DATA JOB for the plain,
// duplicate-key (failed), IGNORE and REPLACE variants of a detached load.
func (s *mockGCSSuite) TestSimpleShowLoadDataJobs() {
	s.T().Skip("WITH detached is removed in LOAD DATA")
	// s.simpleShowLoadDataJobs("physical")
	s.simpleShowLoadDataJobs("logical")
	user := &auth.UserIdentity{
		AuthUsername: "test-load-2",
		AuthHostname: "test-host",
	}
	backupUser := s.tk.Session().GetSessionVars().User
	s.tk.Session().GetSessionVars().User = user
	s.T().Cleanup(func() {
		s.tk.Session().GetSessionVars().User = backupUser
	})
	// unknown job IDs must be rejected
	err := s.tk.QueryToErr("SHOW LOAD DATA JOB 999999999")
	require.ErrorContains(s.T(), err, "Job ID 999999999 doesn't exist")
	sql := fmt.Sprintf(`LOAD DATA INFILE 'gs://test-show/t.tsv?endpoint=%s'
		INTO TABLE test_show.t WITH thread=1, DETACHED;`, gcsEndpoint)
	// repeat LOAD DATA, will get duplicate entry error
	rows := s.tk.MustQuery(sql).Rows()
	require.Len(s.T(), rows, 1)
	row := rows[0]
	jobID := row[0].(string)
	require.Eventually(s.T(), func() bool {
		rows = s.tk.MustQuery("SHOW LOAD DATA JOB " + jobID + ";").Rows()
		require.Len(s.T(), rows, 1)
		row = rows[0]
		return row[9] == "failed"
	}, 5*time.Second, time.Second)
	r := expectedRecord{
		jobID:          jobID,
		dataSource:     "gs://test-show/t.tsv",
		targetTable:    "`test_show`.`t`",
		importMode:     "logical",
		createdBy:      "test-load-2@test-host",
		jobState:       "loading",
		jobStatus:      "failed",
		sourceFileSize: "<nil>",
		importedRowCnt: "<nil>",
		resultCode:     "1062",
		resultMessage:  "Duplicate entry '1' for key 't.PRIMARY'",
	}
	r.check(s.T(), row)
	// test IGNORE
	sql = fmt.Sprintf(`LOAD DATA INFILE 'gs://test-show/t.tsv?endpoint=%s'
		IGNORE INTO TABLE test_show.t WITH thread=1, DETACHED;`, gcsEndpoint)
	rows = s.tk.MustQuery(sql).Rows()
	require.Len(s.T(), rows, 1)
	row = rows[0]
	jobID = row[0].(string)
	require.Eventually(s.T(), func() bool {
		rows = s.tk.MustQuery("SHOW LOAD DATA JOB " + jobID + ";").Rows()
		require.Len(s.T(), rows, 1)
		row = rows[0]
		return row[9] == "finished"
	}, 10*time.Second, time.Second)
	r = expectedRecord{
		jobID:          jobID,
		dataSource:     "gs://test-show/t.tsv",
		targetTable:    "`test_show`.`t`",
		importMode:     "logical",
		createdBy:      "test-load-2@test-host",
		jobState:       "loading",
		jobStatus:      "finished",
		sourceFileSize: "3B",
		importedRowCnt: "2",
		resultCode:     "0",
		resultMessage:  "Records: 2 Deleted: 0 Skipped: 2 Warnings: 2",
	}
	r.check(s.T(), row)
	// test REPLACE
	sql = fmt.Sprintf(`LOAD DATA INFILE 'gs://test-show/t.tsv?endpoint=%s'
		REPLACE INTO TABLE test_show.t WITH thread=1, DETACHED;`, gcsEndpoint)
	rows = s.tk.MustQuery(sql).Rows()
	require.Len(s.T(), rows, 1)
	row = rows[0]
	jobID = row[0].(string)
	require.Eventually(s.T(), func() bool {
		rows = s.tk.MustQuery("SHOW LOAD DATA JOB " + jobID + ";").Rows()
		require.Len(s.T(), rows, 1)
		row = rows[0]
		return row[9] == "finished"
	}, 10*time.Second, time.Second)
	// REPLACE reuses the IGNORE expectations apart from the id and message
	r.jobID = jobID
	r.resultMessage = "Records: 2 Deleted: 0 Skipped: 0 Warnings: 0"
	r.check(s.T(), row)
}
// TestInternalStatus checks the job bookkeeping (SHOW LOAD DATA JOB plus the
// internal JobInfo) at every stage of a detached LOAD DATA run.
func (s *mockGCSSuite) TestInternalStatus() {
	s.T().Skip("WITH detached is removed in LOAD DATA")
	s.testInternalStatus("logical")
	// s.testInternalStatus("physical")
}
// testInternalStatus drives one detached LOAD DATA job through its lifecycle
// (pending -> running -> per-batch progress -> finished) and asserts both the
// SHOW LOAD DATA JOB output and the internal JobInfo at every step. The
// observer goroutine is synchronized with the executor through the TestSyncCh
// channels enabled by the failpoints installed near the bottom.
func (s *mockGCSSuite) testInternalStatus(importMode string) {
	s.tk.MustExec("DROP DATABASE IF EXISTS load_tsv;")
	s.tk.MustExec("CREATE DATABASE load_tsv;")
	s.tk.MustExec("CREATE TABLE load_tsv.t (i INT);")
	// two single-row files so the job runs in exactly two batches
	s.server.CreateObject(fakestorage.Object{
		ObjectAttrs: fakestorage.ObjectAttrs{
			BucketName: "test-tsv",
			Name:       "t1.tsv",
		},
		Content: []byte(`1`),
	})
	s.server.CreateObject(fakestorage.Object{
		ObjectAttrs: fakestorage.ObjectAttrs{
			BucketName: "test-tsv",
			Name:       "t2.tsv",
		},
		Content: []byte(`2`),
	})
	ctx := context.Background()
	user := &auth.UserIdentity{
		AuthUsername: "test-load",
		AuthHostname: "test-host",
	}
	tk3 := testkit.NewTestKit(s.T(), s.store)
	tk3.Session().GetSessionVars().User = user
	resultMessage := "Records: 2 Deleted: 0 Skipped: 0 Warnings: 0"
	withOptions := "WITH thread=1, DETACHED, batch_size=1"
	progressAfterFirstBatch := `{"SourceFileSize":2,"LoadedFileSize":1,"LoadedRowCnt":1}`
	progressAfterAll := `{"SourceFileSize":2,"LoadedFileSize":2,"LoadedRowCnt":2}`
	// the physical import mode reports a different progress schema
	if importMode == "physical" {
		withOptions = fmt.Sprintf("WITH thread=1, DETACHED, import_mode='%s'", importMode)
		progressAfterFirstBatch = `{"SourceFileSize":2,"ReadRowCnt":1,"EncodeFileSize":1,"LoadedRowCnt":1}`
		progressAfterAll = `{"SourceFileSize":2,"ReadRowCnt":2,"EncodeFileSize":2,"LoadedRowCnt":2}`
	}
	var wg sync.WaitGroup
	wg.Add(1)
	// observer goroutine: checks job state at each sync point while tk3 (at
	// the bottom of this function) runs the actual LOAD DATA statement
	go func() {
		defer wg.Done()
		tk2 := testkit.NewTestKit(s.T(), s.store)
		tk2.Session().GetSessionVars().User = user
		userStr := tk2.Session().GetSessionVars().User.String()
		// wait for the load data job to be created
		<-asyncloaddata.TestSyncCh
		id := asyncloaddata.TestLastLoadDataJobID.Load()
		expected := &asyncloaddata.JobInfo{
			JobID:         id,
			User:          "test-load@test-host",
			DataSource:    "gs://test-tsv/t*.tsv",
			TableSchema:   "load_tsv",
			TableName:     "t",
			ImportMode:    importMode,
			Progress:      "",
			Status:        asyncloaddata.JobPending,
			StatusMessage: "",
		}
		rows := tk2.MustQuery(fmt.Sprintf("SHOW LOAD DATA JOB %d;", id)).Rows()
		require.Len(s.T(), rows, 1)
		row := rows[0]
		r := expectedRecord{
			jobID:          strconv.Itoa(int(id)),
			dataSource:     "gs://test-tsv/t*.tsv",
			targetTable:    "`load_tsv`.`t`",
			importMode:     importMode,
			createdBy:      "test-load@test-host",
			jobState:       "loading",
			jobStatus:      "pending",
			sourceFileSize: "<nil>",
			importedRowCnt: "<nil>",
			resultCode:     "<nil>",
			resultMessage:  "",
		}
		r.checkIgnoreTimes(s.T(), row)
		// resume the load data job
		asyncloaddata.TestSyncCh <- struct{}{}
		// wait for the load data job to be started
		<-asyncloaddata.TestSyncCh
		job := &asyncloaddata.Job{
			ID:   id,
			Conn: tk2.Session(),
			User: userStr,
		}
		info, err := job.GetJobInfo(ctx)
		require.NoError(s.T(), err)
		// timestamps are produced by the server; copy them into the template
		expected.CreateTime = info.CreateTime
		expected.StartTime = info.StartTime
		expected.EndTime = info.EndTime
		expected.Status = asyncloaddata.JobRunning
		require.Equal(s.T(), expected, info)
		rows = tk2.MustQuery(fmt.Sprintf("SHOW LOAD DATA JOB %d;", id)).Rows()
		require.Len(s.T(), rows, 1)
		row = rows[0]
		r.jobStatus = "running"
		r.checkIgnoreTimes(s.T(), row)
		// resume the load data job
		asyncloaddata.TestSyncCh <- struct{}{}
		// wait for the first task to be committed
		<-importer.TestSyncCh
		// wait for UpdateJobProgress
		require.Eventually(s.T(), func() bool {
			info, err = job.GetJobInfo(ctx)
			if err != nil {
				return false
			}
			return info.Progress == progressAfterFirstBatch
		}, 6*time.Second, time.Millisecond*100)
		info, err = job.GetJobInfo(ctx)
		require.NoError(s.T(), err)
		expected.Progress = progressAfterFirstBatch
		require.Equal(s.T(), expected, info)
		rows = tk2.MustQuery(fmt.Sprintf("SHOW LOAD DATA JOB %d;", id)).Rows()
		require.Len(s.T(), rows, 1)
		row = rows[0]
		r.sourceFileSize = "2B"
		r.importedRowCnt = "1"
		r.checkIgnoreTimes(s.T(), row)
		// resume the load data job
		importer.TestSyncCh <- struct{}{}
		// wait for the second task to be committed
		<-importer.TestSyncCh
		// wait for UpdateJobProgress
		require.Eventually(s.T(), func() bool {
			info, err = job.GetJobInfo(ctx)
			if err != nil {
				return false
			}
			return info.Progress == progressAfterAll
		}, 6*time.Second, time.Millisecond*100)
		rows = tk2.MustQuery(fmt.Sprintf("SHOW LOAD DATA JOB %d;", id)).Rows()
		require.Len(s.T(), rows, 1)
		row = rows[0]
		r.importedRowCnt = "2"
		r.checkIgnoreTimes(s.T(), row)
		// resume the load data job
		importer.TestSyncCh <- struct{}{}
		require.Eventually(s.T(), func() bool {
			info, err = job.GetJobInfo(ctx)
			if err != nil {
				return false
			}
			return info.Status == asyncloaddata.JobFinished
		}, 6*time.Second, 100*time.Millisecond)
		info, err = job.GetJobInfo(ctx)
		require.NoError(s.T(), err)
		expected.Status = asyncloaddata.JobFinished
		expected.EndTime = info.EndTime
		expected.StatusMessage = resultMessage
		expected.Progress = progressAfterAll
		require.Equal(s.T(), expected, info)
		rows = tk2.MustQuery(fmt.Sprintf("SHOW LOAD DATA JOB %d;", id)).Rows()
		require.Len(s.T(), rows, 1)
		row = rows[0]
		r.jobStatus = "finished"
		r.resultCode = "0"
		r.resultMessage = resultMessage
		r.checkIgnoreTimes(s.T(), row)
	}()
	// shrink heartbeat, read-block and batch sizes so the two-batch run is
	// quick and its intermediate states are observable; restored on cleanup
	backup := asyncloaddata.HeartBeatInSec
	asyncloaddata.HeartBeatInSec = 1
	s.T().Cleanup(func() {
		asyncloaddata.HeartBeatInSec = backup
	})
	backup2 := importer.LoadDataReadBlockSize
	importer.LoadDataReadBlockSize = 1
	s.T().Cleanup(func() {
		importer.LoadDataReadBlockSize = backup2
	})
	backup3 := config.BufferSizeScale
	config.BufferSizeScale = 1
	s.T().Cleanup(func() {
		config.BufferSizeScale = backup3
	})
	backup4 := config.DefaultBatchSize
	config.DefaultBatchSize = 1
	s.T().Cleanup(func() {
		config.DefaultBatchSize = backup4
	})
	// failpoints exposing the sync channels used by the goroutine above
	s.enableFailpoint("github.com/pingcap/tidb/executor/asyncloaddata/SaveLastLoadDataJobID", `return`)
	s.enableFailpoint("github.com/pingcap/tidb/executor/asyncloaddata/SyncAfterCreateLoadDataJob", `return`)
	s.enableFailpoint("github.com/pingcap/tidb/executor/asyncloaddata/SyncAfterStartJob", `return`)
	if importMode == "logical" {
		s.enableFailpoint("github.com/pingcap/tidb/executor/SyncAfterCommitOneTask", `return`)
	} else {
		s.enableFailpoint("github.com/pingcap/tidb/executor/importer/SyncAfterImportDataEngine", `return`)
	}
	sql := fmt.Sprintf(`LOAD DATA INFILE 'gs://test-tsv/t*.tsv?endpoint=%s'
		INTO TABLE load_tsv.t %s;`, gcsEndpoint, withOptions)
	tk3.MustQuery(sql)
	wg.Wait()
}
|
package shipping_details
import (
shippingDetails "Pinjem/businesses/shipping_details"
"context"
"gorm.io/gorm"
)
// ShippingDetailRepository is the gorm-backed implementation of the shipping
// details domain repository.
type ShippingDetailRepository struct {
	Conn *gorm.DB // shared database handle
}
// NewShippingDetailRepository wraps conn in a repository satisfying
// shippingDetails.DomainRepository.
func NewShippingDetailRepository(conn *gorm.DB) shippingDetails.DomainRepository {
	repo := ShippingDetailRepository{Conn: conn}
	return &repo
}
// GetAll returns every shipping detail record mapped to the domain model.
func (b *ShippingDetailRepository) GetAll(ctx context.Context) ([]shippingDetails.Domain, error) {
	var shippingDetailsModel []ShippingDetails
	if err := b.Conn.Find(&shippingDetailsModel).Error; err != nil {
		return nil, err
	}
	// return directly instead of the old declare-then-assign indirection
	return ToListDomain(shippingDetailsModel), nil
}
// GetByOrderId returns the shipping detail attached to orderId.
// NOTE(review): this uses Find, which unlike First (used by GetById) does not
// report gorm.ErrRecordNotFound — a missing order yields a zero Domain with a
// nil error; confirm callers expect that.
func (b *ShippingDetailRepository) GetByOrderId(ctx context.Context, orderId uint) (shippingDetails.Domain, error) {
	var record ShippingDetails
	err := b.Conn.Where("order_id = ?", orderId).Find(&record).Error
	if err != nil {
		return shippingDetails.Domain{}, err
	}
	return record.ToDomain(), nil
}
// GetById returns the shipping detail with the given primary key; a missing
// row surfaces as an error from First.
func (b *ShippingDetailRepository) GetById(ctx context.Context, id uint) (shippingDetails.Domain, error) {
	var record ShippingDetails
	err := b.Conn.Where("id = ?", id).First(&record).Error
	if err != nil {
		return shippingDetails.Domain{}, err
	}
	return record.ToDomain(), nil
}
// Create persists shippingDetail and returns the stored record as a domain value.
func (b *ShippingDetailRepository) Create(ctx context.Context, shippingDetail shippingDetails.Domain) (shippingDetails.Domain, error) {
	record := FromDomain(shippingDetail)
	// NOTE(review): BeforeCreate is invoked by hand here; if it is also
	// registered as a gorm hook it may run twice — confirm.
	record.BeforeCreate()
	if err := b.Conn.Create(&record).Error; err != nil {
		return shippingDetails.Domain{}, err
	}
	return record.ToDomain(), nil
}
// Delete removes the shipping detail row with the given id.
func (b *ShippingDetailRepository) Delete(ctx context.Context, id uint) error {
	var record ShippingDetails
	return b.Conn.Where("id = ?", id).Delete(&record).Error
}
// DeleteByOrderId removes the shipping detail rows attached to orderId.
func (b *ShippingDetailRepository) DeleteByOrderId(ctx context.Context, orderId uint) error {
	var record ShippingDetails
	return b.Conn.Where("order_id = ?", orderId).Delete(&record).Error
}
// func (b *ShippingDetailRepository) Update(user *User) error {
// return b.Conn.Save(user).Error
// }
|
package db
import (
"fmt"
"github.com/jinzhu/gorm"
"github.com/spf13/viper"
_ "gorm.io/driver/mysql"
)
// Client is the process-wide gorm database handle, assigned by Connect.
var Client *gorm.DB
// config holds database connection settings read from viper (see loadConfig).
type config struct {
	host     string
	port     string
	user     string
	dbname   string
	password string
	sslMode  string // read from config but currently not used in the DSN built by Connect
}
// Connect opens the global gorm MySQL connection using settings from viper.
// NOTE(review): this file blank-imports gorm.io/driver/mysql while using
// jinzhu/gorm, which normally registers its dialect via
// github.com/jinzhu/gorm/dialects/mysql — confirm the driver import matches
// the gorm version in use.
func Connect() error {
	c := loadConfig()
	addr := c.host + ":" + c.port
	dsn := fmt.Sprintf(
		"%s:%s@tcp(%s)/%s?charset=utf8&parseTime=True&loc=Local",
		c.user, c.password, addr, c.dbname)
	var err error
	Client, err = gorm.Open("mysql", dsn)
	return err
}
// loadConfig reads the db.* keys from viper into a config value.
func loadConfig() *config {
	c := config{
		host:     viper.GetString("db.host"),
		port:     viper.GetString("db.port"),
		user:     viper.GetString("db.user"),
		dbname:   viper.GetString("db.dbname"),
		password: viper.GetString("db.password"),
		sslMode:  viper.GetString("db.sslMode"),
	}
	return &c
}
|
package dandler
import (
"fmt"
"log"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
// TestCanonicalHost drives the CanonicalHost middleware through each redirect
// option and verifies the status code plus, for redirects, the scheme, host
// and port of the rewritten Location header.
func TestCanonicalHost(t *testing.T) {
	testdata := []struct {
		options        int
		url            string
		expectedStatus int
		expectedHost   string
		expectedPort   string
		expectedScheme string
	}{
		{ // test 0
			options:        0,
			url:            "",
			expectedStatus: 200,
			expectedHost:   "",
			expectedPort:   "",
			expectedScheme: "",
		}, { // test 1
			options:        ForceHost,
			url:            "desthost.com",
			expectedStatus: http.StatusPermanentRedirect,
			expectedHost:   "desthost.com",
			expectedPort:   "",
			expectedScheme: "",
		}, { // test 2
			options:        ForcePort,
			url:            "127.0.0.1:1234",
			expectedStatus: http.StatusPermanentRedirect,
			expectedHost:   "",
			expectedPort:   "1234",
			expectedScheme: "",
		}, { // test 3
			options:        ForceHTTP,
			url:            "desthost.com",
			expectedStatus: 200,
			expectedHost:   "",
			expectedPort:   "",
			expectedScheme: "",
		}, { // test 4
			options:        ForceHTTPS,
			url:            "desthost.com",
			expectedStatus: http.StatusPermanentRedirect,
			expectedHost:   "",
			expectedPort:   "",
			expectedScheme: "https",
		}, { // test 5
			options:        ForceHost | ForcePort,
			url:            "desthost.com:1234",
			expectedStatus: http.StatusPermanentRedirect,
			expectedHost:   "desthost.com",
			expectedPort:   "1234",
			expectedScheme: "",
		}, { // test 6
			options:        ForceHTTPS | ForceTemporary,
			url:            "",
			expectedStatus: http.StatusTemporaryRedirect,
			expectedHost:   "",
			expectedPort:   "",
			expectedScheme: "https",
		}, { // test 7
			options:        ForceHost,
			url:            "127.0.0.1",
			expectedStatus: 200,
			expectedHost:   "",
			expectedPort:   "",
			expectedScheme: "",
		}, { // test 8
			options:        ForceHTTP,
			url:            "",
			expectedStatus: 200,
			expectedHost:   "",
			expectedPort:   "",
			expectedScheme: "http",
		},
	}
	child := Success("child")
	for id, test := range testdata {
		t.Run(fmt.Sprintf("canonical test %d", id), func(t *testing.T) {
			// t.Parallel()
			// client that surfaces redirects instead of following them
			c := http.Client{
				CheckRedirect: func(req *http.Request, via []*http.Request) error {
					return http.ErrUseLastResponse
				},
			}
			// create the server for the parallel test
			ts := httptest.NewServer(CanonicalHost(test.url, test.options, child))
			resp, err := c.Get(ts.URL)
			if !assert.NoError(t, err) {
				log.Println(err)
				t.FailNow()
			}
			// verify the response code
			assert.Equal(t, test.expectedStatus, resp.StatusCode)
			// if it's a redirect, check the stuff
			var respURL *url.URL
			if resp.StatusCode == 307 || resp.StatusCode == 308 {
				respURL, err = url.Parse(resp.Header.Get("Location"))
				assert.Nil(t, err)
				if test.expectedScheme != "" {
					assert.Equal(t, test.expectedScheme, respURL.Scheme)
				}
				if test.expectedHost != "" || test.expectedPort != "" {
					// split the host up
					var host, port string
					if strings.Contains(respURL.Host, ":") {
						host = strings.Split(respURL.Host, ":")[0]
						port = strings.Split(respURL.Host, ":")[1]
					} else {
						host = respURL.Host
						// no explicit port in the URL: infer the scheme default
						if respURL.Scheme == "http" {
							port = "80"
						} else {
							port = "443"
						}
					}
					if test.expectedHost != "" {
						assert.Equal(t, test.expectedHost, host)
					}
					if test.expectedPort != "" {
						assert.Equal(t, test.expectedPort, port)
					}
				}
			}
		})
	}
}
|
package router
import (
"bankBigData/_public/app"
"gitee.com/johng/gf/g/net/ghttp"
)
// Index handles the root route and reports that the API is up.
//
// NOTE(review): the payload marks Error: true and carries the message
// "URL参数不正确" ("URL parameters incorrect") while Data says the API is
// running — confirm whether this route doubles as the unmatched-URL
// fallback; otherwise Error/Msg look like a copy-paste slip.
func Index(r *ghttp.Request) {
	status := app.Status{
		Code:  0,
		Error: true,
		Msg:   "URL参数不正确",
	}
	resp := app.Response{
		Data:   "API running",
		Status: status,
	}
	r.Response.WriteJson(resp)
}
// Status_500 writes a generic "service error, please retry" JSON response.
//
// NOTE(review): the underscore name breaks Go naming convention
// (Status500 would be idiomatic) but is kept as-is since router
// registration elsewhere references this exact identifier.
func Status_500(r *ghttp.Request) {
	resp := app.Response{
		Data: nil,
		Status: app.Status{
			Code:  0,
			Error: true,
			Msg:   "服务异常,请重试",
		},
	}
	r.Response.WriteJson(resp)
}
|
package helpers
import (
"newproject/env"
"time"
"github.com/dgrijalva/jwt-go"
)
// Claims defines the base JWT claims used by this application:
// the authenticated username plus the standard registered claims
// (expiry, issuer, etc.) from jwt.StandardClaims.
type Claims struct {
	// Username identifies the user the token was issued for.
	Username string `json:"username"`
	jwt.StandardClaims
}
// ObtainToken generates a new HS256-signed JWT for the given username,
// valid for one hour and signed with the APP_KEY environment variable.
func ObtainToken(username string) (string, error) {
	// Shared application secret used for HMAC signing.
	signingKey := []byte(env.GetVariable("APP_KEY"))

	// Token lifetime is fixed at one hour from issuance.
	const tokenTTL = 60 * time.Minute

	claims := &Claims{
		Username: username,
		StandardClaims: jwt.StandardClaims{
			ExpiresAt: time.Now().Add(tokenTTL).Unix(),
		},
	}
	return jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(signingKey)
}
// VerifyToken parses tokenString, returning its claims, whether the
// token is valid, and any parse error. On error the returned claims
// may be partially populated and the bool is false.
func VerifyToken(tokenString string) (Claims, bool, error) {
	var parsed Claims

	// Key lookup: every token is verified against the shared APP_KEY secret.
	secret := []byte(env.GetVariable("APP_KEY"))
	tok, err := jwt.ParseWithClaims(tokenString, &parsed, func(tkn *jwt.Token) (interface{}, error) {
		return secret, nil
	})
	if err != nil {
		return parsed, false, err
	}
	return parsed, tok.Valid, nil
}
|
package midtrans
import (
	"crypto/sha512"
	"crypto/subtle"
	"fmt"
	"io"
	"time"

	"github.com/imrenagi/go-payment"
)
// TransactionStatus is object used to store notification from midtrans.
// Fields mirror the midtrans notification payload; gorm tags map the
// struct onto the midtrans_transaction_status table (see TableName).
type TransactionStatus struct {
	ID        uint64    `json:"id" gorm:"primary_key"`
	CreatedAt time.Time `json:"created_at" gorm:"not null;"`
	UpdatedAt time.Time `json:"updated_at" gorm:"not null;"`

	// StatusCode and StatusMessage echo midtrans' processing result.
	StatusCode    string `json:"status_code" gorm:"not null"`
	StatusMessage string `json:"status_message" gorm:"type:text;not null"`
	// SignKey is the hex-encoded SHA-512 signature sent by midtrans;
	// validated in IsValid against the merchant's server key.
	SignKey     string `json:"signature_key" gorm:"type:text;column:signature_key;not null"`
	Bank        string `json:"bank"`
	FraudStatus string `json:"fraud_status" gorm:"not null"`
	PaymentType string `json:"payment_type" gorm:"not null"`
	// OrderID / TransactionID are each uniquely indexed — one status row
	// per order and per midtrans transaction.
	OrderID           string    `json:"order_id" gorm:"not null;unique_index:order_id_k"`
	TransactionID     string    `json:"transaction_id" gorm:"not null;unique_index:transaction_id_k"`
	TransactionTime   time.Time `json:"-" gorm:"not null"`
	TransactionStatus string    `json:"transaction_status" gorm:"not null"`
	// GrossAmount is kept as the raw string midtrans sends; it also feeds
	// the signature check in IsValid, so it must not be reformatted.
	GrossAmount            string `json:"gross_amount" gorm:"not null"`
	MaskedCard             string `json:"masked_card"`
	Currency               string `json:"currency" gorm:"not null"`
	CardType               string `json:"card_type"`
	ChannelResponseCode    string `json:"channel_response_code" gorm:"not null"`
	ChannelResponseMessage string `json:"channel_response_message"`
	ApprovalCode           string `json:"approval_code"`
}
// TableName returns the gorm table name for TransactionStatus rows.
func (TransactionStatus) TableName() string {
	const table = "midtrans_transaction_status"
	return table
}
// IsValid checks whether the status sent is indeed sent by midtrans by validating the
// data against its authentication key.
// See https://snap-docs.midtrans.com/#handling-notifications
//
// The signature is SHA-512 over the concatenation of order id, status code,
// gross amount and the merchant's server key, hex-encoded lowercase.
// Returns an error wrapping payment.ErrBadRequest on mismatch, nil otherwise.
func (m TransactionStatus) IsValid(authKey string) error {
	key := fmt.Sprintf("%s%s%s%s", m.OrderID, m.StatusCode, m.GrossAmount, authKey)
	h512 := sha512.New()
	io.WriteString(h512, key)
	expected := fmt.Sprintf("%x", h512.Sum(nil))
	// Compare in constant time: a plain != on the secret-derived digest
	// leaks how many leading characters matched (timing side channel),
	// which an attacker forging notifications could exploit.
	if subtle.ConstantTimeCompare([]byte(expected), []byte(m.SignKey)) != 1 {
		return fmt.Errorf("%w: Invalid sign key", payment.ErrBadRequest)
	}
	return nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.