_id stringlengths 2 7 | title stringlengths 1 118 | partition stringclasses 3 values | text stringlengths 52 85.5k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q18200 | WriteTo | train | func (hdr *rpmHeader) WriteTo(outfile io.Writer, regionTag int) error {
	// WriteTo serializes the header as an RPM header section: a fixed intro
	// (magic, entry count, data size), a region entry, the sorted index
	// entries, and finally the data blob. regionTag selects the region to
	// emit and must be 0 or a valid region tag (< RPMTAG_HEADERREGIONS).
	if regionTag != 0 && regionTag >= RPMTAG_HEADERREGIONS {
		return errors.New("invalid region tag")
	}
	// sort tags
	var keys []int
	for k := range hdr.entries {
		if k < RPMTAG_HEADERREGIONS {
			// discard existing regions
			continue
		}
		keys = append(keys, k)
	}
	sort.Ints(keys)
	// entries accumulates the 16-byte index records, blobs the tag data.
	entries := bytes.NewBuffer(make([]byte, 0, 16*len(keys)))
	blobs := bytes.NewBuffer(make([]byte, 0, hdr.origSize))
	for _, k := range keys {
		if k == regionTag {
			continue
		}
		if err := writeTag(entries, blobs, k, hdr.entries[k]); err != nil {
			return err
		}
	}
	intro := headerIntro{
		Magic:    introMagic,
		Reserved: 0,
		// +1 accounts for the region entry emitted by writeRegion below.
		Entries: uint32(len(keys) + 1),
		// +16 presumably accounts for the region trailer that writeRegion
		// appends to the data blob -- TODO confirm.
		Size: uint32(blobs.Len() + 16),
	}
	if err := binary.Write(outfile, binary.BigEndian, &intro); err != nil {
		return err
	}
	if err := writeRegion(outfile, blobs, regionTag, len(keys)); err != nil {
		return err
	}
	// NOTE(review): 96 appears to be the size of the RPM lead that precedes
	// this header in the file, so the padding below aligns the signature
	// header to 8 bytes relative to the start of the file -- TODO confirm.
	totalSize := 96 + blobs.Len() + entries.Len()
	if _, err := io.Copy(outfile, entries); err != nil {
		return err
	}
	if _, err := io.Copy(outfile, blobs); err != nil {
		return err
	}
	if regionTag == RPMTAG_HEADERSIGNATURES {
		// Signature headers are padded to an 8-byte boundary.
		alignment := totalSize % 8
		if alignment != 0 {
			outfile.Write(make([]byte, 8-alignment))
		}
	}
	return nil
} | go | {
"resource": ""
} |
q18201 | DumpSignatureHeader | train | func (hdr *RpmHeader) DumpSignatureHeader(sameSize bool) ([]byte, error) {
	// DumpSignatureHeader re-serializes the RPM lead followed by the
	// signature header. When sameSize is true, unused space is filled with a
	// RESERVEDSPACE tag so the rewritten header occupies exactly the same
	// number of bytes as the original (allowing in-place replacement).
	if len(hdr.lead) != 96 {
		return nil, errors.New("invalid or missing RPM lead")
	}
	sigh := hdr.sigHeader
	regionTag := RPMTAG_HEADERSIGNATURES
	// Drop any existing reserved-space entry; it is recomputed below.
	delete(sigh.entries, SIG_RESERVEDSPACE-_SIGHEADER_TAG_BASE)
	if sameSize {
		needed, err := sigh.size(regionTag)
		if err != nil {
			return nil, err
		}
		available := uint64(sigh.origSize)
		// The 16 bytes presumably account for the RESERVEDSPACE index
		// entry itself -- TODO confirm.
		if needed+16 <= available {
			// Fill unused space with a RESERVEDSPACE tag
			padding := make([]byte, available-needed-16)
			sigh.entries[SIG_RESERVEDSPACE-_SIGHEADER_TAG_BASE] = entry{
				dataType: RPM_BIN_TYPE,
				count:    int32(len(padding)),
				contents: padding,
			}
		}
	}
	buf := new(bytes.Buffer)
	buf.Write(hdr.lead)
	if err := sigh.WriteTo(buf, regionTag); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
} | go | {
"resource": ""
} |
q18202 | Encode | train | func (t Token) Encode() string {
	// Encode returns the base62 textual form of the token. MarshalText
	// never returns a non-nil error, so it is deliberately discarded.
	text, _ := t.MarshalText()
	return string(text)
} | go | {
"resource": ""
} |
q18203 | MarshalText | train | func (t Token) MarshalText() ([]byte, error) {
	// MarshalText renders the token as a base62 string. Zero encodes as the
	// empty string; the error is always nil (kept for interface compliance).
	n := uint64(t)
	if n == 0 {
		return nil, nil
	}
	// Emit base62 digits least-significant first...
	var digits []byte
	for ; n > 0; n /= base62Len {
		digits = append(digits, Base62[n%base62Len])
	}
	// ...then reverse in place into most-significant-first order.
	for lo, hi := 0, len(digits)-1; lo < hi; lo, hi = lo+1, hi-1 {
		digits[lo], digits[hi] = digits[hi], digits[lo]
	}
	return digits, nil
} | go | {
"resource": ""
} |
q18204 | Decode | train | func Decode(token string) (Token, error) {
	// Decode parses a base62 token string into a Token value.
	var parsed Token
	err := parsed.UnmarshalText([]byte(token))
	return parsed, err
} | go | {
"resource": ""
} |
q18205 | maxHashInt | train | func maxHashInt(length int) uint64 {
	// maxHashInt returns the number of distinct base62 strings of the given
	// length, i.e. 62^length, saturating at math.MaxUint64. Negative lengths
	// yield 0, matching the old math.Max(0, ...) guard.
	//
	// Bug fix: the previous float64 implementation lost precision once
	// 62^length exceeded 2^53 (length >= 10) and relied on an
	// implementation-defined float64->uint64 conversion once 62^length
	// exceeded math.MaxUint64; integer arithmetic with an explicit
	// saturation check avoids both problems.
	if length < 0 {
		return 0
	}
	result := uint64(1)
	for i := 0; i < length; i++ {
		if result > math.MaxUint64/base62Len {
			// Would overflow; saturate.
			return math.MaxUint64
		}
		result *= base62Len
	}
	return result
} | go | {
"resource": ""
} |
q18206 | New | train | func New() *Consumer {
	// New returns a Consumer with an empty handler registry and the
	// package-level nsqlog logger settings as defaults.
	return &Consumer{
		Logger:   nsqlog.Logger,
		LogLevel: nsqlog.LogLevel,
		handlers: make(map[topicChan]*queue),
	}
} | go | {
"resource": ""
} |
q18207 | Connect | train | func (c *Consumer) Connect(addrs ...string) error {
	// Connect dials every given nsqd address from every registered handler
	// queue, stopping at the first connection error.
	for _, queue := range c.handlers {
		for _, address := range addrs {
			if err := queue.ConnectToNSQD(address); err != nil {
				return err
			}
		}
	}
	return nil
} | go | {
"resource": ""
} |
q18208 | ConnectLookupd | train | func (c *Consumer) ConnectLookupd(addrs ...string) error {
	// ConnectLookupd connects every registered handler queue to every given
	// nsqlookupd address, stopping at the first error.
	for _, queue := range c.handlers {
		for _, address := range addrs {
			if err := queue.ConnectToNSQLookupd(address); err != nil {
				return err
			}
		}
	}
	return nil
} | go | {
"resource": ""
} |
q18209 | Start | train | func (c *Consumer) Start(debug bool) error {
	// Start optionally logs the registered handlers and then blocks the
	// calling goroutine forever.
	if debug {
		for i := range c.handlers {
			log.Printf("Handler: topic=%s channel=%s\n", i.topic, i.channel)
		}
	}
	// Receiving from a fresh, never-written channel blocks forever; the
	// return below is unreachable and exists only to satisfy the signature.
	<-make(chan bool)
	return nil
} | go | {
"resource": ""
} |
q18210 | WithMessage | train | func WithMessage(ctx context.Context, msg *Message) context.Context {
	// WithMessage returns a child context carrying msg under the private
	// msgkey; retrieve it with MessageFromContext.
	return context.WithValue(ctx, msgkey, msg)
} | go | {
"resource": ""
} |
q18211 | MessageFromContext | train | func MessageFromContext(ctx context.Context) (*Message, bool) {
	// MessageFromContext retrieves the *Message stored by WithMessage;
	// the boolean reports whether one was present.
	msg, ok := ctx.Value(msgkey).(*Message)
	return msg, ok
} | go | {
"resource": ""
} |
q18212 | Finish | train | func (m *Message) Finish(success bool) {
	// Finish acknowledges the underlying NSQ message on success; on failure
	// it requeues with the default (-1) delay instead.
	if !success {
		m.Message.Requeue(-1)
		return
	}
	m.Message.Finish()
} | go | {
q18213 | ReadJSON | train | func (m *Message) ReadJSON(v interface{}) error {
	// ReadJSON decodes the message body as JSON into v.
	return json.Unmarshal(m.Body, v)
} | go | {
"resource": ""
} |
q18214 | Publish | train | func Publish(topic string, body []byte) error {
	// Publish sends body to topic via the package-level DefaultProducer.
	return DefaultProducer.Publish(topic, body)
} | go | {
"resource": ""
} |
q18215 | PublishAsync | train | func PublishAsync(topic string, body []byte, doneChan chan *nsq.ProducerTransaction, args ...interface{}) error {
	// PublishAsync publishes via DefaultProducer without blocking; the
	// transaction result is delivered on doneChan.
	return DefaultProducer.PublishAsync(topic, body, doneChan, args...)
} | go | {
"resource": ""
} |
q18216 | MultiPublish | train | func MultiPublish(topic string, body [][]byte) error {
	// MultiPublish atomically sends multiple message bodies to topic via
	// the package-level DefaultProducer.
	return DefaultProducer.MultiPublish(topic, body)
} | go | {
"resource": ""
} |
q18217 | ConnectConfig | train | func ConnectConfig(addr string, config *nsq.Config) error {
	// ConnectConfig connects the package-level DefaultProducer to addr
	// using the supplied NSQ configuration.
	return DefaultProducer.ConnectConfig(addr, config)
} | go | {
"resource": ""
} |
q18218 | Connect | train | func (p *Producer) Connect(addr string) (err error) {
	// Connect dials addr with a default nsq.Config.
	return p.ConnectConfig(addr, nsq.NewConfig())
} | go | {
"resource": ""
} |
q18219 | ConnectConfig | train | func (p *Producer) ConnectConfig(addr string, config *nsq.Config) (err error) {
	// ConnectConfig creates the underlying nsq.Producer for addr with the
	// supplied config and applies the Producer's logger settings.
	p.Producer, err = nsq.NewProducer(addr, config)
	if err != nil {
		// Bug fix: SetLogger was previously called unconditionally, which
		// panics on a nil Producer when NewProducer fails (e.g. bad addr).
		return err
	}
	p.Producer.SetLogger(p.Logger, p.LogLevel)
	return nil
} | go | {
"resource": ""
} |
q18220 | PublishJSON | train | func (p *Producer) PublishJSON(topic string, v interface{}) error {
	// PublishJSON marshals v to JSON and publishes the bytes to topic.
	payload, err := json.Marshal(v)
	if err != nil {
		return err
	}
	return p.Publish(topic, payload)
} | go | {
"resource": ""
} |
q18221 | Get | train | func (s *Crd) Get(resource schema.GroupKind) (*apiext_v1b1.CustomResourceDefinition, error) {
	// Get looks up the CRD for the given group/kind via the byGroupKind
	// index. Returns (nil, nil) when no CRD matches; the returned CRD is a
	// deep copy with its GVK fields populated.
	objs, err := s.byIndex(byGroupKindIndexName, byGroupKindIndexKey(resource.Group, resource.Kind))
	if err != nil {
		return nil, err
	}
	switch len(objs) {
	case 0:
		return nil, nil
	case 1:
		crd := objs[0].(*apiext_v1b1.CustomResourceDefinition).DeepCopy()
		// Objects from type-specific informers don't have GVK set
		crd.Kind = "CustomResourceDefinition"
		crd.APIVersion = apiext_v1b1.SchemeGroupVersion.String()
		return crd, nil
	default:
		// Must never happen
		panic(errors.Errorf("multiple CRDs by group %q and kind %q: %s", resource.Group, resource.Kind, objs))
	}
} | go | {
"resource": ""
} |
q18222 | AddInformer | train | func (s *Multi) AddInformer(gvk schema.GroupVersionKind, informer cache.SharedIndexInformer) error {
	// AddInformer registers informer for gvk, installing the
	// namespace+controller-UID index on it if not already present.
	// Registering the same GVK twice is an error.
	s.mx.Lock()
	defer s.mx.Unlock()
	if _, ok := s.informers[gvk]; ok {
		return errors.New("informer is already registered")
	}
	f := informer.GetIndexer().GetIndexers()[ByNamespaceAndControllerUIDIndex]
	if f == nil {
		// Informer does not have this index yet i.e. this is the first/sole multistore it is added to.
		err := informer.AddIndexers(cache.Indexers{
			ByNamespaceAndControllerUIDIndex: byNamespaceAndControllerUIDIndex,
		})
		if err != nil {
			return errors.WithStack(err)
		}
	}
	s.informers[gvk] = informer
	return nil
} | go | {
q18223 | HashConfigMap | train | func HashConfigMap(configMap *core_v1.ConfigMap, h hash.Hash, filter sets.String) bool {
	// HashConfigMap folds the ConfigMap's Data and BinaryData entries into
	// h, in sorted key order, as NUL-delimited key/value pairs. An empty
	// filter hashes every key; a non-empty filter hashes only those keys and
	// the function returns false if any filter key is missing from the map.
	keys := make([]string, 0, len(configMap.Data))
	search := sets.NewString(filter.UnsortedList()...)
	for k := range configMap.Data {
		if filter.Len() == 0 || filter.Has(k) {
			keys = append(keys, k)
		}
	}
	for k := range configMap.BinaryData {
		if filter.Len() == 0 || filter.Has(k) {
			keys = append(keys, k)
		}
	}
	search.Delete(keys...)
	if search.Len() != 0 {
		// not all the provided keys in filter were found
		return false
	}
	// Sorting makes the hash deterministic regardless of map iteration order.
	sort.Strings(keys)
	for _, k := range keys {
		io.WriteString(h, k) // nolint: gosec, errcheck
		h.Write([]byte{0})   // nolint: gosec, errcheck
		// The key is either in Data or BinaryData
		data, inData := configMap.Data[k]
		if inData {
			io.WriteString(h, data) // nolint: gosec, errcheck
		} else {
			binaryData := configMap.BinaryData[k]
			h.Write(binaryData) // nolint: gosec, errcheck
		}
		h.Write([]byte{0}) // nolint: gosec, errcheck
	}
	return true
} | go | {
"resource": ""
} |
q18224 | HashSecret | train | func HashSecret(secret *core_v1.Secret, h hash.Hash, filter sets.String) bool {
	// HashSecret folds the Secret's Data entries into h in sorted key order
	// as NUL-delimited key/value pairs. An empty filter hashes everything;
	// with a non-empty filter only the listed keys are hashed and false is
	// returned if any of them is absent. Mirrors HashConfigMap.
	keys := make([]string, 0, len(secret.Data))
	search := sets.NewString(filter.UnsortedList()...)
	for k := range secret.Data {
		if filter.Len() == 0 || filter.Has(k) {
			keys = append(keys, k)
		}
	}
	search.Delete(keys...)
	if search.Len() != 0 {
		// not all the provided keys in filter were found
		return false
	}
	// Deterministic ordering regardless of map iteration order.
	sort.Strings(keys)
	for _, k := range keys {
		io.WriteString(h, k)    // nolint: gosec, errcheck
		h.Write([]byte{0})      // nolint: gosec, errcheck
		h.Write(secret.Data[k]) // nolint: gosec, errcheck
		h.Write([]byte{0})      // nolint: gosec, errcheck
	}
	return true
} | go | {
"resource": ""
} |
q18225 | newBundles | train | func newBundles(c *SmithV1Client, namespace string) *bundles {
	// newBundles returns a namespaced bundles client backed by the parent
	// client's REST client.
	return &bundles{
		client: c.RESTClient(),
		ns:     namespace,
	}
} | go | {
"resource": ""
} |
q18226 | IsCrdConditionTrue | train | func IsCrdConditionTrue(crd *apiext_v1b1.CustomResourceDefinition, conditionType apiext_v1b1.CustomResourceDefinitionConditionType) bool {
	// IsCrdConditionTrue reports whether the given condition is present on
	// the CRD with status ConditionTrue.
	return IsCrdConditionPresentAndEqual(crd, conditionType, apiext_v1b1.ConditionTrue)
} | go | {
"resource": ""
} |
q18227 | AddInformer | train | func (s *MultiBasic) AddInformer(gvk schema.GroupVersionKind, informer cache.SharedIndexInformer) error {
	// AddInformer registers informer under gvk; registering the same GVK a
	// second time is an error.
	s.mx.Lock()
	defer s.mx.Unlock()
	if _, found := s.informers[gvk]; found {
		return errors.New("informer is already registered")
	}
	s.informers[gvk] = informer
	return nil
} | go | {
"resource": ""
} |
q18228 | GetInformers | train | func (s *MultiBasic) GetInformers() map[schema.GroupVersionKind]cache.SharedIndexInformer {
	// GetInformers returns a shallow copy of the registry so callers cannot
	// mutate the internal map.
	s.mx.RLock()
	defer s.mx.RUnlock()
	snapshot := make(map[schema.GroupVersionKind]cache.SharedIndexInformer, len(s.informers))
	for key, value := range s.informers {
		snapshot[key] = value
	}
	return snapshot
} | go | {
"resource": ""
} |
q18229 | Get | train | func (s *MultiBasic) Get(gvk schema.GroupVersionKind, namespace, name string) (obj runtime.Object, exists bool, e error) {
	// Get fetches the object identified by gvk/namespace/name from the
	// matching informer's indexer. Errors if no informer is registered for
	// the GVK.
	var informer cache.SharedIndexInformer
	// The closure confines the read lock to the map lookup only, so the
	// (potentially slower) indexer access below runs unlocked.
	func() {
		s.mx.RLock()
		defer s.mx.RUnlock()
		informer = s.informers[gvk]
	}()
	if informer == nil {
		return nil, false, errors.Errorf("no informer for %s is registered", gvk)
	}
	return s.getFromIndexer(informer.GetIndexer(), gvk, namespace, name)
} | go | {
"resource": ""
} |
q18230 | BeforeCreate | train | func (c *Checker) BeforeCreate(logger *zap.Logger, spec *unstructured.Unstructured) (*unstructured.Unstructured /*updatedSpec*/, error) {
	// BeforeCreate runs the registered pre-processor for the spec's
	// group/kind, if any, and converts the result back to unstructured
	// form. Specs of unknown kinds pass through unchanged.
	processor, ok := c.KnownTypes[spec.GroupVersionKind().GroupKind()]
	if !ok {
		return spec, nil
	}
	ctx := &Context{
		Logger: logger,
		Store:  c.Store,
	}
	updatedSpec, err := processor.BeforeCreate(ctx, spec)
	if err != nil {
		return nil, errors.Wrap(err, "failed to pre-process object specification")
	}
	return util.RuntimeToUnstructured(updatedSpec)
} | go | {
"resource": ""
} |
q18231 | setSecretParametersChecksumAnnotation | train | func (s serviceInstance) setSecretParametersChecksumAnnotation(ctx *specchecker.Context, spec, actual *sc_v1b1.ServiceInstance) error {
	// setSecretParametersChecksumAnnotation stamps spec with a checksum of
	// the secret parameters it references and, when the checksum differs
	// from the actual (running) instance's, bumps Spec.UpdateRequests to
	// force Service Catalog to re-provision. The annotation value Disabled
	// opts the instance out entirely.
	if spec.Annotations[SecretParametersChecksumAnnotation] == Disabled {
		return nil
	}
	var previousEncodedChecksum string
	var updateCount int64
	if actual != nil {
		previousEncodedChecksum = actual.Annotations[SecretParametersChecksumAnnotation]
		updateCount = actual.Spec.UpdateRequests
	}
	checkSum, err := s.calculateNewServiceInstanceCheckSum(ctx, spec)
	if err != nil {
		return errors.Wrap(err, "failed to generate new checksum")
	}
	// Only bump UpdateRequests when there is an existing instance to update.
	if actual != nil && checkSum != previousEncodedChecksum {
		spec.Spec.UpdateRequests = updateCount + 1
	}
	s.setInstanceAnnotation(spec, checkSum)
	return nil
} | go | {
"resource": ""
} |
q18232 | setLastAppliedReplicasAnnotation | train | func (deployment) setLastAppliedReplicasAnnotation(ctx *specchecker.Context, spec, actual *apps_v1.Deployment) {
	// setLastAppliedReplicasAnnotation reconciles the desired replica count
	// with what an external controller (e.g. HPA) may have set on the
	// running Deployment. The annotation records the last replica count
	// this controller applied; while the desired count is unchanged, the
	// actual (possibly autoscaled) count is preserved. The annotation value
	// Disabled opts out.
	// NOTE(review): writes below assume spec.Annotations is non-nil --
	// verify callers guarantee that.
	if spec.Annotations[LastAppliedReplicasAnnotation] == Disabled {
		return
	}
	if spec.Spec.Replicas == nil {
		// Default to 1 replica, matching the Kubernetes default.
		var one int32 = 1
		spec.Spec.Replicas = &one
	}
	specReplicas := *spec.Spec.Replicas
	if actual == nil {
		// add LastAppliedReplicas annotation if it doesn't exist
		spec.Annotations[LastAppliedReplicasAnnotation] = strconv.Itoa(int(specReplicas))
		return
	}
	lastAppliedReplicasConf, ok := actual.Annotations[LastAppliedReplicasAnnotation]
	if !ok {
		// add LastAppliedReplicas annotation if it doesn't exist
		spec.Annotations[LastAppliedReplicasAnnotation] = strconv.Itoa(int(specReplicas))
		return
	}
	// Parse last applied replicas from running config's annotation
	// overrides with current replicas inside spec if parsing failure
	lastAppliedReplicas, err := strconv.Atoi(strings.TrimSpace(lastAppliedReplicasConf))
	if err != nil {
		ctx.Logger.Warn("Overriding last applied replicas annotation due to parsing failure", zap.Error(err))
		spec.Annotations[LastAppliedReplicasAnnotation] = strconv.Itoa(int(specReplicas))
		return
	}
	if specReplicas == int32(lastAppliedReplicas) {
		// spec not changed => use actual running config if it exists
		// since it might be updated by other controller like HPA
		// otherwise use spec replicas config
		if actual.Spec.Replicas != nil {
			*spec.Spec.Replicas = *actual.Spec.Replicas
		}
	} else {
		// spec changed => update annotations and use spec replicas config
		spec.Annotations[LastAppliedReplicasAnnotation] = strconv.Itoa(int(specReplicas))
	}
} | go | {
"resource": ""
} |
q18233 | SmithV1 | train | func (c *Clientset) SmithV1() smithv1.SmithV1Interface {
	// SmithV1 returns a fake SmithV1 client backed by this clientset's
	// action recorder.
	return &fakesmithv1.FakeSmithV1{Fake: &c.Fake}
} | go | {
"resource": ""
} |
q18234 | Smith | train | func (c *Clientset) Smith() smithv1.SmithV1Interface {
	// Smith is a deprecated-style alias for SmithV1, returning the same
	// fake client.
	return &fakesmithv1.FakeSmithV1{Fake: &c.Fake}
} | go | {
"resource": ""
} |
q18235 | setEmptyFieldsFromActual | train | func setEmptyFieldsFromActual(requested, actual interface{}, fields ...string) error {
	// setEmptyFieldsFromActual copies each named field from actual into
	// requested, but only where requested's field still holds its zero
	// value. Both arguments must be pointers to the same struct type;
	// requested is mutated in place.
	requestedValue := reflect.ValueOf(requested).Elem()
	actualValue := reflect.ValueOf(actual).Elem()
	if requestedValue.Type() != actualValue.Type() {
		// NOTE(review): %q formats the reflect.Values (i.e. the data), not
		// the type names -- the message likely intends the types.
		return errors.Errorf("attempted to set fields from different types: %q from %q",
			requestedValue, actualValue)
	}
	for _, field := range fields {
		requestedField := requestedValue.FieldByName(field)
		if !requestedField.IsValid() {
			return errors.Errorf("no such field %q to cleanup", field)
		}
		actualField := actualValue.FieldByName(field)
		if !actualField.IsValid() {
			return errors.Errorf("no such field %q to cleanup", field)
		}
		// DeepEqual against the zero value detects "unset" fields of any
		// kind (nil slices/maps/pointers, zero structs, empty strings...).
		if reflect.DeepEqual(requestedField.Interface(), reflect.Zero(requestedField.Type()).Interface()) {
			requestedField.Set(actualField)
		}
	}
	return nil
} | go | {
"resource": ""
} |
q18236 | Prepare | train | func (c *Controller) Prepare(crdInf cache.SharedIndexInformer, resourceInfs map[schema.GroupVersionKind]cache.SharedIndexInformer) error {
	// Prepare wires all event propagation paths into the controller's work
	// queue: CRD add/remove -> dynamic watches, ConfigMap/Secret updates ->
	// the Deployments / ServiceInstances / ServiceBindings that mount them
	// -> their owning Bundles, plus a generic controlled-resource handler
	// for every informer.
	c.crdContext, c.crdContextCancel = context.WithCancel(context.Background())
	crdInf.AddEventHandler(&crdEventHandler{
		controller: c,
		watchers:   make(map[string]watchState),
	})
	deploymentInf := resourceInfs[apps_v1.SchemeGroupVersion.WithKind("Deployment")]
	// Index Deployments by the ConfigMaps/Secrets they reference so that a
	// change to either can be traced back to affected Deployments.
	err := deploymentInf.AddIndexers(cache.Indexers{
		byConfigMapNamespaceNameIndexName: deploymentByConfigMapNamespaceNameIndex,
		bySecretNamespaceNameIndexName:    deploymentBySecretNamespaceNameIndex,
	})
	if err != nil {
		return errors.WithStack(err)
	}
	deploymentByIndex := deploymentInf.GetIndexer().ByIndex
	// ConfigMap -> Deployment -> Bundle event propagation
	configMapGVK := core_v1.SchemeGroupVersion.WithKind("ConfigMap")
	configMapInf := resourceInfs[configMapGVK]
	configMapInf.AddEventHandler(&handlers.LookupHandler{
		Logger:    c.Logger,
		WorkQueue: c.WorkQueue,
		Gvk:       configMapGVK,
		Lookup:    c.lookupBundleByObjectByIndex(deploymentByIndex, byConfigMapNamespaceNameIndexName, byNamespaceNameIndexKey),
	})
	// Secret -> Deployment -> Bundle event propagation
	secretGVK := core_v1.SchemeGroupVersion.WithKind("Secret")
	secretInf := resourceInfs[secretGVK]
	secretInf.AddEventHandler(&handlers.LookupHandler{
		Logger:    c.Logger,
		WorkQueue: c.WorkQueue,
		Gvk:       secretGVK,
		Lookup:    c.lookupBundleByObjectByIndex(deploymentByIndex, bySecretNamespaceNameIndexName, byNamespaceNameIndexKey),
	})
	serviceInstanceInf, ok := resourceInfs[sc_v1b1.SchemeGroupVersion.WithKind("ServiceInstance")]
	if ok { // Service Catalog support is enabled
		// Secret -> ServiceInstance -> Bundle event propagation
		err := serviceInstanceInf.AddIndexers(cache.Indexers{
			bySecretNamespaceNameIndexName: serviceInstanceBySecretNamespaceNameIndex,
		})
		if err != nil {
			return errors.WithStack(err)
		}
		serviceInstanceByIndex := serviceInstanceInf.GetIndexer().ByIndex
		secretInf.AddEventHandler(&handlers.LookupHandler{
			Logger:    c.Logger,
			WorkQueue: c.WorkQueue,
			Gvk:       secretGVK,
			Lookup:    c.lookupBundleByObjectByIndex(serviceInstanceByIndex, bySecretNamespaceNameIndexName, byNamespaceNameIndexKey),
		})
		// Secret -> ServiceBinding -> Bundle event propagation
		serviceBindingInf := resourceInfs[sc_v1b1.SchemeGroupVersion.WithKind("ServiceBinding")]
		err = serviceBindingInf.AddIndexers(cache.Indexers{
			bySecretNamespaceNameIndexName: serviceBindingBySecretNamespaceNameIndex,
		})
		if err != nil {
			return errors.WithStack(err)
		}
		serviceBindingByIndex := serviceBindingInf.GetIndexer().ByIndex
		secretInf.AddEventHandler(&handlers.LookupHandler{
			Logger:    c.Logger,
			WorkQueue: c.WorkQueue,
			Gvk:       secretGVK,
			Lookup:    c.lookupBundleByObjectByIndex(serviceBindingByIndex, bySecretNamespaceNameIndexName, byNamespaceNameIndexKey),
		})
	}
	// Standard handler
	for gvk, resourceInf := range resourceInfs {
		resourceInf.AddEventHandler(&handlers.ControlledResourceHandler{
			Logger:          c.Logger,
			WorkQueue:       c.WorkQueue,
			ControllerIndex: &controllerIndexAdapter{bundleStore: c.BundleStore},
			ControllerGvk:   smith_v1.BundleGVK,
			Gvk:             gvk,
		})
	}
	return nil
} | go | {
"resource": ""
} |
q18237 | Run | train | func (c *Controller) Run(ctx context.Context) {
	// Run starts event recording, marks the controller ready, and blocks
	// until ctx is cancelled. Shutdown order (LIFO defers): mark stopping,
	// cancel CRD watch goroutines, then wait for them to finish.
	defer c.wg.Wait()
	defer c.crdContextCancel() // should be executed after stopping is set to true
	defer func() {
		c.wgLock.Lock()
		defer c.wgLock.Unlock()
		c.stopping = true
	}()
	c.Logger.Info("Starting Bundle controller")
	defer c.Logger.Info("Shutting down Bundle controller")
	sink := core_v1_client.EventSinkImpl{
		Interface: c.MainClient.CoreV1().Events(meta_v1.NamespaceNone),
	}
	recordingWatch := c.Broadcaster.StartRecordingToSink(&sink)
	defer recordingWatch.Stop()
	c.ReadyForWork()
	<-ctx.Done()
} | go | {
"resource": ""
} |
q18238 | lookupBundleByObjectByIndex | train | func (c *Controller) lookupBundleByObjectByIndex(byIndex byIndexFunc, indexName string, indexKey indexKeyFunc) func(runtime.Object) ([]runtime.Object, error) {
	// lookupBundleByObjectByIndex builds a lookup function that maps an
	// object (e.g. a Secret) to the Bundles that transitively depend on it:
	// first to its referencing objects via the given index, then from those
	// to the Bundles that contain them. Per-object failures are logged and
	// skipped so one bad object doesn't block the rest.
	return func(obj runtime.Object) ([]runtime.Object /*bundles*/, error) {
		// obj is an object that is referred by some other object that might be in a Bundle
		objMeta := obj.(meta_v1.Object)
		// find all object that reference this obj
		objsFromIndex, err := byIndex(indexName, indexKey(objMeta.GetNamespace(), objMeta.GetName()))
		if err != nil {
			return nil, err
		}
		var bundles []runtime.Object
		for _, objFromIndex := range objsFromIndex {
			runtimeObjFromIndex := objFromIndex.(runtime.Object)
			metaObjFromIndex := objFromIndex.(meta_v1.Object)
			gvks, _, err := c.Scheme.ObjectKinds(runtimeObjFromIndex)
			if err != nil {
				// Log and continue to try to process other objects if there are any more in objsFromIndex
				// This shouldn't happen normally
				c.Logger.
					With(zap.Error(err), logz.Namespace(metaObjFromIndex), logz.Object(metaObjFromIndex)).
					Sugar().Errorf("Could not determine GVK of an object")
				continue
			}
			gks := make(map[schema.GroupKind]struct{}, len(gvks)) // not clear if duplicates are allowed, so de-dupe
			for _, gvk := range gvks {
				gks[gvk.GroupKind()] = struct{}{}
			}
			// find all Bundles that contain this object
			for gk := range gks {
				bundlesForObject, err := c.BundleStore.GetBundlesByObject(gk, metaObjFromIndex.GetNamespace(), metaObjFromIndex.GetName())
				if err != nil {
					// Log and continue to try to process other GKs
					c.Logger.
						With(zap.Error(err), logz.Namespace(metaObjFromIndex), logz.Object(metaObjFromIndex)).
						Sugar().Errorf("Failed to get Bundles by object")
					continue
				}
				for _, bundle := range bundlesForObject {
					bundles = append(bundles, bundle)
				}
			}
		}
		return bundles, nil
	}
} | go | {
"resource": ""
} |
q18239 | DeepCopy | train | func (in *Sleeper) DeepCopy() *Sleeper {
	// DeepCopy returns a new Sleeper with the receiver deep-copied into it;
	// nil in yields nil. Autogenerated-style deepcopy boilerplate.
	if in == nil {
		return nil
	}
	out := new(Sleeper)
	in.DeepCopyInto(out)
	return out
} | go | {
"resource": ""
} |
q18240 | DeepCopy | train | func (in *SleeperList) DeepCopy() *SleeperList {
	// DeepCopy returns a new SleeperList deep-copied from the receiver;
	// nil in yields nil.
	if in == nil {
		return nil
	}
	out := new(SleeperList)
	in.DeepCopyInto(out)
	return out
} | go | {
"resource": ""
} |
q18241 | DeepCopy | train | func (in *SleeperSpec) DeepCopy() *SleeperSpec {
	// DeepCopy returns a new SleeperSpec deep-copied from the receiver;
	// nil in yields nil.
	if in == nil {
		return nil
	}
	out := new(SleeperSpec)
	in.DeepCopyInto(out)
	return out
} | go | {
"resource": ""
} |
q18242 | DeepCopy | train | func (in *SleeperStatus) DeepCopy() *SleeperStatus {
	// DeepCopy returns a new SleeperStatus deep-copied from the receiver;
	// nil in yields nil.
	if in == nil {
		return nil
	}
	out := new(SleeperStatus)
	in.DeepCopyInto(out)
	return out
} | go | {
"resource": ""
} |
q18243 | Get | train | func (s *BundleStore) Get(namespace, bundleName string) (*smith_v1.Bundle, error) {
	// Get fetches a Bundle from the store. A missing Bundle is reported as
	// (nil, nil), not as an error.
	bundle, exists, err := s.store.Get(smith_v1.BundleGVK, namespace, bundleName)
	if err != nil || !exists {
		return nil, err
	}
	return bundle.(*smith_v1.Bundle), nil
} | go | {
"resource": ""
} |
q18244 | GetBundlesByCrd | train | func (s *BundleStore) GetBundlesByCrd(crd *apiext_v1b1.CustomResourceDefinition) ([]*smith_v1.Bundle, error) {
	// GetBundlesByCrd returns all Bundles that contain an object of the
	// CRD's group/kind, via the byCrdGroupKind index.
	return s.getBundles(byCrdGroupKindIndexName, byCrdGroupKindIndexKey(crd.Spec.Group, crd.Spec.Names.Kind))
} | go | {
"resource": ""
} |
q18245 | GetBundlesByObject | train | func (s *BundleStore) GetBundlesByObject(gk schema.GroupKind, namespace, name string) ([]*smith_v1.Bundle, error) {
	// GetBundlesByObject returns all Bundles containing the object
	// identified by group/kind, namespace and name, via the byObject index.
	return s.getBundles(byObjectIndexName, byObjectIndexKey(gk, namespace, name))
} | go | {
"resource": ""
} |
q18246 | Get | train | func (c *FakeBundles) Get(name string, options v1.GetOptions) (result *smithv1.Bundle, err error) {
	// Get records a get action against the fake tracker and returns the
	// resulting Bundle. Generated client-go fake boilerplate.
	obj, err := c.Fake.
		Invokes(testing.NewGetAction(bundlesResource, c.ns, name), &smithv1.Bundle{})
	if obj == nil {
		return nil, err
	}
	return obj.(*smithv1.Bundle), err
} | go | {
"resource": ""
} |
q18247 | List | train | func (c *FakeBundles) List(opts v1.ListOptions) (result *smithv1.BundleList, err error) {
	// List records a list action against the fake tracker and applies the
	// label selector from opts client-side. Generated client-go fake
	// boilerplate.
	obj, err := c.Fake.
		Invokes(testing.NewListAction(bundlesResource, bundlesKind, c.ns, opts), &smithv1.BundleList{})
	if obj == nil {
		return nil, err
	}
	label, _, _ := testing.ExtractFromListOptions(opts)
	if label == nil {
		label = labels.Everything()
	}
	list := &smithv1.BundleList{ListMeta: obj.(*smithv1.BundleList).ListMeta}
	for _, item := range obj.(*smithv1.BundleList).Items {
		if label.Matches(labels.Set(item.Labels)) {
			list.Items = append(list.Items, item)
		}
	}
	return list, err
} | go | {
"resource": ""
} |
q18248 | Watch | train | func (c *FakeBundles) Watch(opts v1.ListOptions) (watch.Interface, error) {
	// Watch records a watch action and returns the fake watch interface.
	return c.Fake.
		InvokesWatch(testing.NewWatchAction(bundlesResource, c.ns, opts))
} | go | {
"resource": ""
} |
q18249 | Delete | train | func (c *FakeBundles) Delete(name string, options *v1.DeleteOptions) error {
	// Delete records a delete action against the fake tracker; the returned
	// object is irrelevant, only the error matters.
	_, err := c.Fake.
		Invokes(testing.NewDeleteAction(bundlesResource, c.ns, name), &smithv1.Bundle{})
	return err
} | go | {
"resource": ""
} |
q18250 | Patch | train | func (c *FakeBundles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *smithv1.Bundle, err error) {
	// Patch records a patch action (optionally against subresources) and
	// returns the patched Bundle. Generated client-go fake boilerplate.
	obj, err := c.Fake.
		Invokes(testing.NewPatchSubresourceAction(bundlesResource, c.ns, name, pt, data, subresources...), &smithv1.Bundle{})
	if obj == nil {
		return nil, err
	}
	return obj.(*smithv1.Bundle), err
} | go | {
"resource": ""
} |
q18251 | ensureWatch | train | func (h *crdEventHandler) ensureWatch(logger *zap.Logger, crd *apiext_v1b1.CustomResourceDefinition) bool {
	// ensureWatch sets up (idempotently) a dynamic informer for the custom
	// resources of an established CRD, registers it with the multistore and
	// the work queue, and records a cancel handle so ensureNoWatch can tear
	// it down. Returns true if a watch exists for the CRD on return.
	//
	// Fix: corrected the "multisore" typo in the log message below.
	if crd.Name == smith_v1.BundleResourceName {
		// Bundles are handled by the main controller, not a dynamic watch.
		return false
	}
	if _, ok := h.watchers[crd.Name]; ok {
		return true
	}
	if !resources.IsCrdConditionTrue(crd, apiext_v1b1.Established) {
		logger.Info("Not adding a watch for CRD because it hasn't been established")
		return false
	}
	if !resources.IsCrdConditionTrue(crd, apiext_v1b1.NamesAccepted) {
		logger.Info("Not adding a watch for CRD because its names haven't been accepted")
		return false
	}
	// NOTE(review): assumes Spec.Versions is non-empty and that the first
	// entry is the one to watch -- TODO confirm for multi-version CRDs.
	gvk := schema.GroupVersionKind{
		Group:   crd.Spec.Group,
		Version: crd.Spec.Versions[0].Name,
		Kind:    crd.Spec.Names.Kind,
	}
	logger.Info("Configuring watch for CRD")
	res, err := h.controller.SmartClient.ForGVK(gvk, h.controller.Namespace)
	if err != nil {
		logger.Error("Failed to get client for CRD", zap.Error(err))
		return false
	}
	crdInf := cache.NewSharedIndexInformer(&cache.ListWatch{
		ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) {
			return res.List(options)
		},
		WatchFunc: res.Watch,
	}, &unstructured.Unstructured{}, h.controller.CrdResyncPeriod, cache.Indexers{})
	h.controller.wgLock.Lock()
	defer h.controller.wgLock.Unlock()
	if h.controller.stopping {
		// Controller is shutting down; don't start new watch goroutines.
		return false
	}
	resourceHandler := &handlers.ControlledResourceHandler{
		Logger:          h.controller.Logger,
		WorkQueue:       h.controller.WorkQueue,
		ControllerIndex: &controllerIndexAdapter{bundleStore: h.controller.BundleStore},
		ControllerGvk:   smith_v1.BundleGVK,
		Gvk:             gvk,
	}
	crdInf.AddEventHandler(resourceHandler)
	err = h.controller.Store.AddInformer(gvk, crdInf)
	if err != nil {
		logger.Error("Failed to add informer for CRD to multistore", zap.Error(err))
		return false
	}
	ctx, cancel := context.WithCancel(h.controller.crdContext)
	h.watchers[crd.Name] = watchState{cancel: cancel}
	h.controller.wg.StartWithChannel(ctx.Done(), crdInf.Run)
	return true
} | go | {
"resource": ""
} |
q18252 | ensureNoWatch | train | func (h *crdEventHandler) ensureNoWatch(logger *zap.Logger, crd *apiext_v1b1.CustomResourceDefinition) bool {
	// ensureNoWatch tears down the dynamic watch for a CRD, if one exists:
	// cancels its informer goroutine, drops the bookkeeping entry and
	// removes the informer from the multistore. Returns true if a watch was
	// actually removed.
	crdWatch, ok := h.watchers[crd.Name]
	if !ok {
		// Nothing to do. This can happen if there was an error adding a watch
		return false
	}
	logger.Info("Removing watch for CRD")
	crdWatch.cancel()
	delete(h.watchers, crd.Name)
	// NOTE(review): assumes Spec.Versions is non-empty, mirroring ensureWatch.
	gvk := schema.GroupVersionKind{
		Group:   crd.Spec.Group,
		Version: crd.Spec.Versions[0].Name,
		Kind:    crd.Spec.Names.Kind,
	}
	h.controller.Store.RemoveInformer(gvk)
	return true
} | go | {
"resource": ""
} |
q18253 | DeepCopy | train | func (in *Bundle) DeepCopy() *Bundle {
	// DeepCopy returns a new Bundle deep-copied from the receiver;
	// nil in yields nil. Autogenerated-style deepcopy boilerplate.
	if in == nil {
		return nil
	}
	out := new(Bundle)
	in.DeepCopyInto(out)
	return out
} | go | {
"resource": ""
} |
q18254 | DeepCopy | train | func (in *BundleList) DeepCopy() *BundleList {
	// DeepCopy returns a new BundleList deep-copied from the receiver;
	// nil in yields nil.
	if in == nil {
		return nil
	}
	out := new(BundleList)
	in.DeepCopyInto(out)
	return out
} | go | {
"resource": ""
} |
q18255 | DeepCopy | train | func (in *BundleSpec) DeepCopy() *BundleSpec {
	// DeepCopy returns a new BundleSpec deep-copied from the receiver;
	// nil in yields nil.
	if in == nil {
		return nil
	}
	out := new(BundleSpec)
	in.DeepCopyInto(out)
	return out
} | go | {
"resource": ""
} |
q18256 | DeepCopy | train | func (in *BundleStatus) DeepCopy() *BundleStatus {
	// DeepCopy returns a new BundleStatus deep-copied from the receiver;
	// nil in yields nil.
	if in == nil {
		return nil
	}
	out := new(BundleStatus)
	in.DeepCopyInto(out)
	return out
} | go | {
"resource": ""
} |
q18257 | DeepCopy | train | func (in *PluginSpec) DeepCopy() *PluginSpec {
	// DeepCopy returns a new PluginSpec deep-copied from the receiver;
	// nil in yields nil.
	if in == nil {
		return nil
	}
	out := new(PluginSpec)
	in.DeepCopyInto(out)
	return out
} | go | {
"resource": ""
} |
q18258 | DeepCopy | train | func (in *Reference) DeepCopy() *Reference {
	// DeepCopy returns a new Reference deep-copied from the receiver;
	// nil in yields nil.
	if in == nil {
		return nil
	}
	out := new(Reference)
	in.DeepCopyInto(out)
	return out
} | go | {
"resource": ""
} |
q18259 | DeepCopy | train | func (in *Resource) DeepCopy() *Resource {
	// DeepCopy returns a new Resource deep-copied from the receiver;
	// nil in yields nil.
	if in == nil {
		return nil
	}
	out := new(Resource)
	in.DeepCopyInto(out)
	return out
} | go | {
"resource": ""
} |
q18260 | DeepCopy | train | func (in *ResourceSpec) DeepCopy() *ResourceSpec {
	// DeepCopy returns a new ResourceSpec deep-copied from the receiver;
	// nil in yields nil.
	if in == nil {
		return nil
	}
	out := new(ResourceSpec)
	in.DeepCopyInto(out)
	return out
} | go | {
"resource": ""
} |
q18261 | DeepCopy | train | func (in *ResourceStatusData) DeepCopy() *ResourceStatusData {
	// DeepCopy returns a new ResourceStatusData deep-copied from the
	// receiver; nil in yields nil.
	if in == nil {
		return nil
	}
	out := new(ResourceStatusData)
	in.DeepCopyInto(out)
	return out
} | go | {
"resource": ""
} |
q18262 | ProcessBundle | train | func (c *Controller) ProcessBundle(logger *zap.Logger, bundle *smith_v1.Bundle) (bool /*external*/, bool /*retriable*/, error) {
	// ProcessBundle runs one sync (create/update or delete) of a Bundle via
	// bundleSyncTask, persists the resulting status, and aggregates
	// per-resource failures into the (external, retriable, error) triple
	// consumed by the controller runtime.
	st := bundleSyncTask{
		logger:                          logger,
		bundleClient:                    c.BundleClient,
		smartClient:                     c.SmartClient,
		checker:                         c.Rc,
		store:                           c.Store,
		specChecker:                     c.SpecChecker,
		bundle:                          bundle,
		pluginContainers:                c.PluginContainers,
		scheme:                          c.Scheme,
		catalog:                         c.Catalog,
		bundleTransitionCounter:         c.BundleTransitionCounter,
		bundleResourceTransitionCounter: c.BundleResourceTransitionCounter,
		recorder:                        c.Recorder,
	}
	var external bool
	var retriable bool
	var err error
	// A set DeletionTimestamp means the Bundle is being deleted.
	if st.bundle.DeletionTimestamp != nil {
		external, retriable, err = st.processDeleted()
	} else {
		external, retriable, err = st.processNormal()
	}
	if err != nil {
		cause := errors.Cause(err)
		// short circuit on conflicts
		if api_errors.IsConflict(cause) {
			return external, retriable, err
		}
		// proceed to handleProcessResult() for all other errors
	}
	// Updates bundle status
	handleProcessRetriable, handleProcessErr := st.handleProcessResult(retriable, err)
	// Inspect the resources for failures. They can fail for many different reasons.
	// The priority of errors to bubble up to the ctrl layer are:
	// 1. processDeleted/processNormal errors
	// 2. Internal resource processing errors are raised first
	// 3. External resource processing errors are raised last
	// 4. handleProcessResult errors of any sort.
	// Handle the errors from processDeleted/processNormal, taking precedence
	// over the handleProcessErr if any.
	if err != nil {
		if handleProcessErr != nil {
			st.logger.Error("Error processing Bundle", zap.Error(handleProcessErr))
		}
		return external, retriable || handleProcessRetriable, err
	}
	// Inspect resources, returning an error if necessary
	allExternalErrors := true
	hasRetriableResourceErr := false
	var failedResources []string
	for resName, resInfo := range st.processedResources {
		resErr := resInfo.fetchError()
		if resErr != nil {
			// The aggregate is "external" only if every failure is external.
			allExternalErrors = allExternalErrors && resErr.isExternalError
			hasRetriableResourceErr = hasRetriableResourceErr || resErr.isRetriableError
			failedResources = append(failedResources, string(resName))
		}
	}
	if len(failedResources) > 0 {
		if handleProcessErr != nil {
			st.logger.Error("Error processing Bundle", zap.Error(handleProcessErr))
		}
		// stable output
		sort.Strings(failedResources)
		err := errors.Errorf("error processing resource(s): %q", failedResources)
		return allExternalErrors, hasRetriableResourceErr || handleProcessRetriable, err
	}
	// Otherwise, return the result from handleProcessResult
	return false, handleProcessRetriable, handleProcessErr
} | go | {
"resource": ""
} |
q18263 | findObjectsToDelete | train | func (st *bundleSyncTask) findObjectsToDelete() (bool /*external*/, bool /*retriable*/, error) {
	// findObjectsToDelete populates st.objectsToDelete with every object
	// controlled by the Bundle that is no longer declared in its spec
	// (start with everything the Bundle owns, then subtract what the spec
	// still mentions). Invalid specs abort the cleanup as a safety measure.
	objs, err := st.store.ObjectsControlledBy(st.bundle.Namespace, st.bundle.UID)
	if err != nil {
		return false, false, err
	}
	st.objectsToDelete = make(map[objectRef]runtime.Object, len(objs))
	for _, obj := range objs {
		m := obj.(meta_v1.Object)
		ref := objectRef{
			GroupVersionKind: obj.GetObjectKind().GroupVersionKind(),
			Name:             m.GetName(),
		}
		st.objectsToDelete[ref] = obj
	}
	for _, res := range st.bundle.Spec.Resources {
		var gvk schema.GroupVersionKind
		var name string
		switch {
		case res.Spec.Object != nil:
			gvk = res.Spec.Object.GetObjectKind().GroupVersionKind()
			name = res.Spec.Object.(meta_v1.Object).GetName()
		case res.Spec.Plugin != nil:
			// Any prevalidation during resource processing is applicable here as the cleanup step
			// always happens regardless of if processing failed or not. Thus it makes more sense
			// to abort the cleanup in case of an invalid spec.
			plugin, ok := st.pluginContainers[res.Spec.Plugin.Name]
			if !ok {
				return true, false, errors.Errorf("plugin %q is not a valid plugin", res.Spec.Plugin.Name)
			}
			gvk = plugin.Plugin.Describe().GVK
			name = res.Spec.Plugin.ObjectName
		default:
			// neither "object" nor "plugin" field is specified. This shouldn't really happen (schema), so we
			// should abort the deletion as a defensive mechanism for safety.
			return true, false, errors.New("resource is neither object nor plugin")
		}
		// Still declared in the spec -> must not be deleted.
		delete(st.objectsToDelete, objectRef{
			GroupVersionKind: gvk,
			Name:             name,
		})
	}
	return false, false, nil
} | go | {
"resource": ""
} |
q18264 | pluginStatuses | train | func (st *bundleSyncTask) pluginStatuses() []smith_v1.PluginStatus {
// Builds one status entry per distinct plugin referenced by the Bundle's
// resources, preserving the deterministic spec iteration order.
// Plugin statuses
name2status := make(map[smith_v1.PluginName]struct{})
// most likely will be of the same size as before
pluginStatuses := make([]smith_v1.PluginStatus, 0, len(st.bundle.Status.PluginStatuses))
for _, res := range st.bundle.Spec.Resources { // Deterministic iteration order
if res.Spec.Plugin == nil {
continue // Not a plugin
}
pluginName := res.Spec.Plugin.Name
if _, ok := name2status[pluginName]; ok {
continue // Already reported
}
name2status[pluginName] = struct{}{}
var pluginStatus smith_v1.PluginStatus
pluginContainer, ok := st.pluginContainers[pluginName]
if ok {
// Known plugin: record its GVK alongside an Ok status.
describe := pluginContainer.Plugin.Describe()
pluginStatus = smith_v1.PluginStatus{
Name: pluginName,
Group: describe.GVK.Group,
Version: describe.GVK.Version,
Kind: describe.GVK.Kind,
Status: smith_v1.PluginStatusOk,
}
} else {
// Referenced plugin is not registered with this controller.
pluginStatus = smith_v1.PluginStatus{
Name: pluginName,
Status: smith_v1.PluginStatusNoSuchPlugin,
}
}
pluginStatuses = append(pluginStatuses, pluginStatus)
}
return pluginStatuses
} | go | {
"resource": ""
} |
q18265 | resourceConditions | train | func (st *bundleSyncTask) resourceConditions(res smith_v1.Resource) (
cond_v1.Condition, /* blockedCond */
cond_v1.Condition, /* inProgressCond */
cond_v1.Condition, /* readyCond */
cond_v1.Condition, /* errorCond */
) {
// Computes the four standard conditions for a single resource from the outcome
// recorded in st.processedResources. All conditions start False and are raised
// selectively based on the resource's status type.
blockedCond := cond_v1.Condition{Type: smith_v1.ResourceBlocked, Status: cond_v1.ConditionFalse}
inProgressCond := cond_v1.Condition{Type: smith_v1.ResourceInProgress, Status: cond_v1.ConditionFalse}
readyCond := cond_v1.Condition{Type: smith_v1.ResourceReady, Status: cond_v1.ConditionFalse}
errorCond := cond_v1.Condition{Type: smith_v1.ResourceError, Status: cond_v1.ConditionFalse}
if resInfo, ok := st.processedResources[res.Name]; ok {
// Resource was processed
switch resStatus := resInfo.status.(type) {
case resourceStatusDependenciesNotReady:
blockedCond.Status = cond_v1.ConditionTrue
blockedCond.Reason = smith_v1.ResourceReasonDependenciesNotReady
blockedCond.Message = fmt.Sprintf("Not ready: %q", resStatus.dependencies)
case resourceStatusInProgress:
inProgressCond.Status = cond_v1.ConditionTrue
inProgressCond.Message = resStatus.message
case resourceStatusReady:
readyCond.Status = cond_v1.ConditionTrue
readyCond.Message = resStatus.message
case resourceStatusError:
errorCond.Status = cond_v1.ConditionTrue
errorCond.Message = resStatus.err.Error()
if resStatus.isRetriableError {
// A retriable error also keeps the resource "in progress".
errorCond.Reason = smith_v1.ResourceReasonRetriableError
inProgressCond.Status = cond_v1.ConditionTrue
} else {
errorCond.Reason = smith_v1.ResourceReasonTerminalError
}
default:
// Unknown status type is an internal bug - surface it as a terminal error.
blockedCond.Status = cond_v1.ConditionUnknown
inProgressCond.Status = cond_v1.ConditionUnknown
readyCond.Status = cond_v1.ConditionUnknown
errorCond.Status = cond_v1.ConditionTrue
errorCond.Reason = smith_v1.ResourceReasonTerminalError
errorCond.Message = fmt.Sprintf("internal error - unknown resource status type %T", resInfo.status)
}
} else {
// Resource was not processed
// so nothing can be asserted about it - everything is Unknown.
blockedCond.Status = cond_v1.ConditionUnknown
inProgressCond.Status = cond_v1.ConditionUnknown
readyCond.Status = cond_v1.ConditionUnknown
errorCond.Status = cond_v1.ConditionUnknown
}
return blockedCond, inProgressCond, readyCond, errorCond
} | go | {
"resource": ""
} |
q18266 | checkBundleConditionNeedsUpdate | train | func (st *bundleSyncTask) checkBundleConditionNeedsUpdate(condition *cond_v1.Condition) bool {
// Prepares the given Bundle condition (stamping its transition time) and
// reports whether the stored conditions differ. When a condition newly flips
// to True it also bumps the transition metric and emits a Kubernetes event.
now := meta_v1.Now()
condition.LastTransitionTime = now
needsUpdate := cond_v1.PrepareCondition(st.bundle.Status.Conditions, condition)
if needsUpdate && condition.Status == cond_v1.ConditionTrue {
st.bundleTransitionCounter.
WithLabelValues(st.bundle.GetNamespace(), st.bundle.GetName(), string(condition.Type), condition.Reason).
Inc()
eventAnnotations := map[string]string{
smith.EventAnnotationReason: condition.Reason,
}
var eventType string
var reason string
// Map the condition type to a Kubernetes event type/reason pair.
switch condition.Type {
case smith_v1.BundleError:
eventType = core_v1.EventTypeWarning
reason = smith.EventReasonBundleError
case smith_v1.BundleInProgress:
eventType = core_v1.EventTypeNormal
reason = smith.EventReasonBundleInProgress
case smith_v1.BundleReady:
eventType = core_v1.EventTypeNormal
reason = smith.EventReasonBundleReady
default:
st.logger.Sugar().Errorf("Unexpected bundle condition type %q", condition.Type)
eventType = core_v1.EventTypeWarning
reason = smith.EventReasonUnknown
}
st.recorder.AnnotatedEventf(st.bundle, eventAnnotations, eventType, reason, condition.Message)
}
// Return true if one of the fields have changed.
return needsUpdate
} | go | {
"resource": ""
} |
q18267 | checkResourceConditionNeedsUpdate | train | func (st *bundleSyncTask) checkResourceConditionNeedsUpdate(resName smith_v1.ResourceName, condition *cond_v1.Condition) bool {
// Prepares a single resource's condition (stamping its transition time) and
// reports whether it differs from what is stored. Transitions to True bump the
// per-resource metric and, except for Blocked, emit a Kubernetes event.
now := meta_v1.Now()
condition.LastTransitionTime = now
needsUpdate := true
// Try to find this resource status
_, status := st.bundle.Status.GetResourceStatus(resName)
if status != nil {
needsUpdate = cond_v1.PrepareCondition(status.Conditions, condition)
}
// Otherwise, no status for this resource, hence it's a new resource condition
if needsUpdate && condition.Status == cond_v1.ConditionTrue {
st.bundleResourceTransitionCounter.
WithLabelValues(st.bundle.GetNamespace(), st.bundle.GetName(), string(resName), string(condition.Type), condition.Reason).
Inc()
// blocked events are ignored because it's too spammy
if condition.Type != smith_v1.ResourceBlocked {
eventAnnotations := map[string]string{
smith.EventAnnotationResourceName: string(resName),
smith.EventAnnotationReason: condition.Reason,
}
var reason string
var eventType string
// Map the condition type to a Kubernetes event type/reason pair.
switch condition.Type {
case smith_v1.ResourceError:
eventType = core_v1.EventTypeWarning
reason = smith.EventReasonResourceError
case smith_v1.ResourceInProgress:
eventType = core_v1.EventTypeNormal
reason = smith.EventReasonResourceInProgress
case smith_v1.ResourceReady:
eventType = core_v1.EventTypeNormal
reason = smith.EventReasonResourceReady
default:
st.logger.Sugar().Errorf("Unexpected resource condition type %q", condition.Type)
eventType = core_v1.EventTypeWarning
reason = smith.EventReasonUnknown
}
st.recorder.AnnotatedEventf(st.bundle, eventAnnotations, eventType, reason, condition.Message)
}
}
// Return true if one of the fields have changed.
return needsUpdate
} | go | {
"resource": ""
} |
q18268 | ConvertType | train | func ConvertType(scheme *runtime.Scheme, in, out runtime.Object) error {
	// ConvertType converts in into out via the scheme, restoring the GVK on out
	// when the conversion machinery dropped it.
	// Work on a deep copy so the caller's object is never mutated.
	copied := in.DeepCopyObject()
	if err := scheme.Convert(copied, out, nil); err != nil {
		return err
	}
	kind := out.GetObjectKind()
	gvk := kind.GroupVersionKind()
	// Group may legitimately be empty (core group); Kind and Version must be set.
	if gvk.Kind != "" && gvk.Version != "" {
		return nil
	}
	// API machinery discards TypeMeta for typed objects. This is annoying.
	gvks, _, err := scheme.ObjectKinds(copied)
	if err != nil {
		return err
	}
	kind.SetGroupVersionKind(gvks[0])
	return nil
} | go | {
"resource": ""
} |
q18269 | RuntimeToUnstructured | train | func RuntimeToUnstructured(obj runtime.Object) (*unstructured.Unstructured, error) {
	// RuntimeToUnstructured converts a typed runtime.Object into an Unstructured.
	// Unstructured objects must carry type information, so inputs without a
	// populated Kind or Version are rejected (Group alone may be empty).
	if gvk := obj.GetObjectKind().GroupVersionKind(); gvk.Kind == "" || gvk.Version == "" {
		return nil, errors.Errorf("cannot convert %T to Unstructured: object Kind and/or object Version is empty", obj)
	}
	// Convert a deep copy so the caller's object is left untouched.
	content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj.DeepCopyObject())
	if err != nil {
		return nil, errors.WithStack(err)
	}
	return &unstructured.Unstructured{Object: content}, nil
} | go | {
"resource": ""
} |
q18270 | evalPluginSpec | train | func (st *resourceSyncTask) evalPluginSpec(res *smith_v1.Resource, actual runtime.Object) (*unstructured.Unstructured, resourceStatus) {
// Evaluates a plugin-based resource: validates the plugin spec against its
// schema, resolves dependencies, invokes the plugin and sanity-checks the
// object it produced. Returns the rendered object, or a failure status.
pluginContainer, ok := st.pluginContainers[res.Spec.Plugin.Name]
if !ok {
return nil, resourceStatusError{
err: errors.Errorf("no such plugin %q", res.Spec.Plugin.Name),
isExternalError: true,
}
}
validationResult, err := pluginContainer.ValidateSpec(res.Spec.Plugin.Spec)
if err != nil {
return nil, resourceStatusError{err: err}
}
if len(validationResult.Errors) > 0 {
// Schema violations originate from user input, hence marked external.
return nil, resourceStatusError{
err: errors.Wrap(k8s_errors.NewAggregate(validationResult.Errors), "spec failed validation against schema"),
isExternalError: true,
}
}
// validate above should guarantee that our plugin is there
dependencies, err := st.prepareDependencies(res.References)
if err != nil {
// there should be no error in processing dependencies. If there is, this
// is an internal issue.
return nil, resourceStatusError{
err: err,
}
}
result := pluginContainer.Plugin.Process(res.Spec.Plugin.Spec, &plugin.Context{
Namespace: st.bundle.Namespace,
Actual: actual,
Dependencies: dependencies,
})
var pluginObj runtime.Object
// NOTE: this switch shadows the parameter "res"; the outer res is used again below.
switch res := result.(type) {
case *plugin.ProcessResultSuccess:
pluginObj = res.Object
case *plugin.ProcessResultFailure:
// Propagate the plugin's own classification of the failure.
return nil, resourceStatusError{
err: res.Error,
isRetriableError: res.IsRetriableError,
isExternalError: res.IsExternalError,
}
default:
return nil, resourceStatusError{
err: errors.Errorf("unexpected plugin result type %q", res.StatusType()),
}
}
// Make sure plugin is returning us something that obeys the PluginSpec.
object, err := util.RuntimeToUnstructured(pluginObj)
if err != nil {
return nil, resourceStatusError{
err: errors.Wrap(err, "plugin output cannot be converted from runtime.Object"),
}
}
expectedGVK := pluginContainer.Plugin.Describe().GVK
if object.GroupVersionKind() != expectedGVK {
return nil, resourceStatusError{
err: errors.Errorf("unexpected GVK from plugin (wanted %s, got %s)", expectedGVK, object.GroupVersionKind()),
}
}
// We are in charge of naming.
object.SetName(res.Spec.Plugin.ObjectName)
return object, nil
} | go | {
"resource": ""
} |
q18271 | createOrUpdate | train | func (st *resourceSyncTask) createOrUpdate(spec *unstructured.Unstructured, actual runtime.Object) (actualRet *unstructured.Unstructured, retriableRet bool, e error) {
	// createOrUpdate materializes the desired spec: create when no live object
	// exists, otherwise reconcile the existing one.
	// Prepare client
	gvk := spec.GroupVersionKind()
	resClient, err := st.smartClient.ForGVK(gvk, st.bundle.Namespace)
	if err != nil {
		return nil, false, errors.Wrapf(err, "failed to get the client for %s", gvk)
	}
	if actual == nil {
		return st.createResource(resClient, spec)
	}
	return st.updateResource(resClient, spec, actual)
} | go | {
"resource": ""
} |
q18272 | updateResource | train | func (st *resourceSyncTask) updateResource(resClient dynamic.ResourceInterface, spec *unstructured.Unstructured, actual runtime.Object) (actualRet *unstructured.Unstructured, retriableError bool, e error) {
// Reconciles an existing object with the desired spec: when the specs already
// match nothing is written; otherwise an Update call is issued. The bool
// result reports whether a returned error is worth retrying.
st.logger.Debug("Object found, checking spec", ctrlLogz.ObjectGk(spec.GroupVersionKind().GroupKind()), ctrlLogz.Object(spec))
// Compare spec and existing resource
updated, match, difference, err := st.specChecker.CompareActualVsSpec(st.logger, spec, actual)
if err != nil {
return nil, false, errors.Wrap(err, "specification check failed")
}
// Delete the DeletionTimestamp annotation if it is present
// (forcing match=false so the removal is persisted by an Update).
annotations := updated.GetAnnotations()
if _, ok := annotations[smith.DeletionTimestampAnnotation]; ok {
delete(annotations, smith.DeletionTimestampAnnotation)
updated.SetAnnotations(annotations)
match = false
}
if match {
st.logger.Debug("Object has correct spec", ctrlLogz.Object(spec))
return updated, false, nil
}
st.logger.Sugar().Infof("Objects are different (`a` is specification and `b` is the actual object): %s", difference)
// Update if different
updated, err = resClient.Update(updated, meta_v1.UpdateOptions{})
if err != nil {
if api_errors.IsConflict(err) {
// We let the next processKey() iteration, triggered by someone else updating the resource, finish the work.
return nil, false, errors.Wrap(err, "object update resulted in conflict (will re-process)")
}
// Unexpected error, will retry
apiStatusErr, ok := err.(api_errors.APIStatus)
if ok {
apiStatus := apiStatusErr.Status()
return nil, true, errors.Wrapf(err, "unexpected APIStatus (code %v, reason %q) while creating resource", apiStatus.Code, apiStatus.Reason)
}
return nil, true, errors.WithStack(err)
}
st.logger.Info("Object updated", ctrlLogz.Object(spec))
return updated, false, nil
} | go | {
"resource": ""
} |
q18273 | GetJSONPathString | train | func GetJSONPathString(obj interface{}, path string) (string, error) {
	// GetJSONPathString evaluates a JSONPath expression against obj and renders
	// the result as a string. Missing keys are not an error: the result is "".
	jp := jsonpath.New("GetJSONPathString")
	jp.AllowMissingKeys(true)
	if err := jp.Parse(path); err != nil {
		return "", errors.Wrapf(err, "JsonPath parse %s error", path)
	}
	var out bytes.Buffer
	if err := jp.Execute(&out, obj); err != nil {
		return "", errors.Wrap(err, "JsonPath execute error")
	}
	return out.String(), nil
} | go | {
"resource": ""
} |
q18274 | GetJSONPathValue | train | func GetJSONPathValue(obj interface{}, path string, allowMissingKeys bool) (interface{}, error) {
	// GetJSONPathValue evaluates a JSONPath expression against obj and returns
	// the single matched value, or nil when nothing matched (with
	// allowMissingKeys enabled). Exactly one match is required.
	j := jsonpath.New("GetJSONPathValue")
	// If the key is missing, return an empty result without errors
	j.AllowMissingKeys(allowMissingKeys)
	err := j.Parse(path)
	if err != nil {
		// Fixed: return nil (not "") on error - the value is meaningless when err != nil,
		// and "" is a surprising non-nil interface value for callers to receive.
		return nil, errors.Wrapf(err, "JsonPath parse %s error", path)
	}
	values, err := j.FindResults(obj)
	if err != nil {
		return nil, errors.Wrap(err, "JsonPath execute error")
	}
	if len(values) == 0 {
		return nil, nil
	}
	if len(values) > 1 {
		return nil, errors.Errorf("single result expected, got %d", len(values))
	}
	// Normalize absent/nil results to nil.
	// NOTE(review): reflect.Value.IsNil panics for non-nilable kinds (e.g. string,
	// int); presumably inputs here are maps/interfaces so values are nilable -
	// TODO confirm against callers.
	if values[0] == nil || len(values[0]) == 0 || values[0][0].IsNil() {
		return nil, nil
	}
	return values[0][0].Interface(), nil
} | go | {
"resource": ""
} |
q18275 | New | train | func New() *GoReq {
// New returns a GoReq with all collections initialized, retry disabled and
// logging directed to stderr with a "[goreq]" prefix.
gr := &GoReq{
Data: make(map[string]interface{}),
Header: make(map[string]string),
FormData: url.Values{},
QueryData: url.Values{},
Client: nil,
Transport: &http.Transport{},
Cookies: make([]*http.Cookie, 0),
Errors: nil,
BasicAuth: struct{ Username, Password string }{},
Debug: false,
CurlCommand: false,
logger: log.New(os.Stderr, "[goreq]", log.LstdFlags),
retry: &RetryConfig{RetryCount: 0, RetryTimeout: 0, RetryOnHTTPStatus: nil},
bindResponseBody: nil,
}
return gr
} | go | {
"resource": ""
} |
q18276 | SetCurlCommand | train | func (gr *GoReq) SetCurlCommand(enable bool) *GoReq {
// SetCurlCommand toggles the CurlCommand flag. Returns the receiver for chaining.
gr.CurlCommand = enable
return gr
} | go | {
"resource": ""
} |
q18277 | SetLogger | train | func (gr *GoReq) SetLogger(logger *log.Logger) *GoReq {
// SetLogger replaces the instance's logger. Returns the receiver for chaining.
gr.logger = logger
return gr
} | go | {
"resource": ""
} |
q18278 | SetClient | train | func (gr *GoReq) SetClient(client *http.Client) *GoReq {
// SetClient installs a caller-provided *http.Client. Returns the receiver for chaining.
gr.Client = client
return gr
} | go | {
"resource": ""
} |
q18279 | Reset | train | func (gr *GoReq) Reset() *GoReq {
// Reset clears all per-request state (target, method, headers, body data,
// file fields, cookies, errors and retry/binding settings) so the instance
// can be reused. Note: Client, Transport and logger are not cleared here.
gr.URL = ""
gr.Method = ""
gr.Header = make(map[string]string)
gr.Data = make(map[string]interface{})
gr.FormData = url.Values{}
gr.QueryData = url.Values{}
gr.RawStringData = ""
gr.RawBytesData = make([]byte, 0)
gr.FilePath = ""
gr.FileParam = ""
gr.Cookies = make([]*http.Cookie, 0)
gr.Errors = nil
gr.retry = &RetryConfig{RetryCount: 0, RetryTimeout: 0, RetryOnHTTPStatus: nil}
gr.bindResponseBody = nil
return gr
} | go | {
"resource": ""
} |
q18280 | Get | train | func (gr *GoReq) Get(targetURL string) *GoReq {
	// Configure this request as an HTTP GET to targetURL and clear prior errors.
	gr.Method, gr.URL, gr.Errors = GET, targetURL, nil
	return gr
} | go | {
"resource": ""
} |
q18281 | Post | train | func (gr *GoReq) Post(targetURL string) *GoReq {
	// Configure this request as an HTTP POST to targetURL and clear prior errors.
	gr.Method, gr.URL, gr.Errors = POST, targetURL, nil
	return gr
} | go | {
"resource": ""
} |
q18282 | Head | train | func (gr *GoReq) Head(targetURL string) *GoReq {
	// Configure this request as an HTTP HEAD to targetURL and clear prior errors.
	gr.Method, gr.URL, gr.Errors = HEAD, targetURL, nil
	return gr
} | go | {
"resource": ""
} |
q18283 | Put | train | func (gr *GoReq) Put(targetURL string) *GoReq {
	// Configure this request as an HTTP PUT to targetURL and clear prior errors.
	gr.Method, gr.URL, gr.Errors = PUT, targetURL, nil
	return gr
} | go | {
"resource": ""
} |
q18284 | Delete | train | func (gr *GoReq) Delete(targetURL string) *GoReq {
	// Configure this request as an HTTP DELETE to targetURL and clear prior errors.
	gr.Method, gr.URL, gr.Errors = DELETE, targetURL, nil
	return gr
} | go | {
"resource": ""
} |
q18285 | Patch | train | func (gr *GoReq) Patch(targetURL string) *GoReq {
	// Configure this request as an HTTP PATCH to targetURL and clear prior errors.
	gr.Method, gr.URL, gr.Errors = PATCH, targetURL, nil
	return gr
} | go | {
"resource": ""
} |
q18286 | Options | train | func (gr *GoReq) Options(targetURL string) *GoReq {
	// Configure this request as an HTTP OPTIONS to targetURL and clear prior errors.
	gr.Method, gr.URL, gr.Errors = OPTIONS, targetURL, nil
	return gr
} | go | {
"resource": ""
} |
q18287 | queryStruct | train | func (gr *GoReq) queryStruct(content interface{}) *GoReq {
	// queryStruct marshals content to JSON and copies its top-level string
	// fields into QueryData. Errors are accumulated on gr.Errors, not returned.
	if marshalContent, err := json.Marshal(content); err != nil {
		gr.Errors = append(gr.Errors, err)
	} else {
		var val map[string]interface{}
		if err := json.Unmarshal(marshalContent, &val); err != nil {
			gr.Errors = append(gr.Errors, err)
		} else {
			for k, v := range val {
				// Fixed: guard the assertion. JSON numbers/bools/objects unmarshal
				// as float64/bool/map, and the previous unchecked v.(string) would
				// panic on any non-string field. Non-string values are skipped.
				if s, ok := v.(string); ok {
					gr.QueryData.Add(k, s)
				}
			}
		}
	}
	return gr
} | go | {
"resource": ""
} |
q18288 | Timeout | train | func (gr *GoReq) Timeout(timeout time.Duration) *GoReq {
	// Timeout installs a Dial function that bounds connection establishment and
	// sets a single absolute deadline at dial time (not a per-read/write idle
	// timeout). Dial failures are also recorded on gr.Errors.
	gr.Transport.Dial = func(network, addr string) (net.Conn, error) {
		conn, err := net.DialTimeout(network, addr, timeout)
		if err != nil {
			gr.Errors = append(gr.Errors, err)
			return nil, err
		}
		// Fixed: the SetDeadline error was silently ignored; propagate it and
		// close the connection instead of handing back an unbounded conn.
		if err := conn.SetDeadline(time.Now().Add(timeout)); err != nil {
			gr.Errors = append(gr.Errors, err)
			conn.Close()
			return nil, err
		}
		return conn, nil
	}
	return gr
} | go | {
"resource": ""
} |
q18289 | RedirectPolicy | train | func (gr *GoReq) RedirectPolicy(policy func(req Request, via []Request) error) *GoReq {
// RedirectPolicy installs a redirect callback, adapting *http.Request values
// to the package's Request type before invoking the user's policy.
gr.CheckRedirect = func(r *http.Request, v []*http.Request) error {
vv := make([]Request, len(v))
for i, r := range v {
vv[i] = Request(r)
}
return policy(Request(r), vv)
}
// Keep an already-created client in sync with the new policy.
if gr.Client != nil {
gr.Client.CheckRedirect = gr.CheckRedirect
}
return gr
} | go | {
"resource": ""
} |
q18290 | SendFile | train | func (gr *GoReq) SendFile(paramName, filePath string) *GoReq {
// SendFile records a file to upload: paramName is the form field name and
// filePath the local path. No I/O happens here - only the fields are stored.
gr.FileParam = paramName
gr.FilePath = filePath
return gr
} | go | {
"resource": ""
} |
q18291 | checkCrumb | train | func (jenkins *Jenkins) checkCrumb(req *http.Request) (*http.Request, error) {
// checkCrumb adds the CSRF crumb header to req when the Jenkins server has
// crumb-based CSRF protection enabled; otherwise req is returned unchanged.
// api - store jenkins api useCrumbs response
api := struct {
UseCrumbs bool `json:"useCrumbs"`
}{}
err := jenkins.get("/api/json", url.Values{"tree": []string{"useCrumbs"}}, &api)
if err != nil {
return req, err
}
if !api.UseCrumbs {
// CSRF Protection is not enabled
return req, nil
}
// get crumb field and value
crumb := struct {
Crumb string `json:"crumb"`
CrumbRequestField string `json:"crumbRequestField"`
}{}
err = jenkins.get("/crumbIssuer", nil, &crumb)
if err != nil {
return req, err
}
// update header
req.Header.Set(crumb.CrumbRequestField, crumb.Crumb)
return req, nil
} | go | {
"resource": ""
} |
q18292 | GetJobs | train | func (jenkins *Jenkins) GetJobs() ([]Job, error) {
	// GetJobs lists all jobs exposed by the Jenkins root API.
	var response struct {
		Jobs []Job `json:"jobs"`
	}
	err := jenkins.get("", nil, &response)
	return response.Jobs, err
} | go | {
"resource": ""
} |
q18293 | GetJob | train | func (jenkins *Jenkins) GetJob(name string) (job Job, err error) {
// GetJob fetches the job with the given name from the Jenkins API.
err = jenkins.get(fmt.Sprintf("/job/%s", name), nil, &job)
return
} | go | {
"resource": ""
} |
q18294 | GetJobConfig | train | func (jenkins *Jenkins) GetJobConfig(name string) (job MavenJobItem, err error) {
// GetJobConfig fetches and decodes the named job's XML configuration (config.xml).
err = jenkins.getXml(fmt.Sprintf("/job/%s/config.xml", name), nil, &job)
return
} | go | {
"resource": ""
} |
q18295 | GetBuild | train | func (jenkins *Jenkins) GetBuild(job Job, number int) (build Build, err error) {
// GetBuild fetches the build with the given number for the given job.
err = jenkins.get(fmt.Sprintf("/job/%s/%d", job.Name, number), nil, &build)
return
} | go | {
"resource": ""
} |
q18296 | GetLastBuild | train | func (jenkins *Jenkins) GetLastBuild(job Job) (build Build, err error) {
// GetLastBuild fetches the most recent build of the given job.
err = jenkins.get(fmt.Sprintf("/job/%s/lastBuild", job.Name), nil, &build)
return
} | go | {
"resource": ""
} |
q18297 | CreateJob | train | func (jenkins *Jenkins) CreateJob(mavenJobItem MavenJobItem, jobName string) error {
	// CreateJob serializes the job definition to XML and posts it to Jenkins'
	// createItem endpoint under the given job name.
	// Fixed: the xml.Marshal error was previously discarded with "_", which
	// could silently post an empty/invalid body.
	mavenJobItemXml, err := xml.Marshal(mavenJobItem)
	if err != nil {
		return err
	}
	reader := bytes.NewReader(mavenJobItemXml)
	params := url.Values{"name": []string{jobName}}
	return jenkins.postXml("/createItem", params, reader, nil)
} | go | {
"resource": ""
} |
q18298 | DeleteJob | train | func (jenkins *Jenkins) DeleteJob(job Job) error {
	// DeleteJob removes the given job via its doDelete endpoint.
	endpoint := fmt.Sprintf("/job/%s/doDelete", job.Name)
	return jenkins.post(endpoint, nil, nil)
} | go | {
"resource": ""
} |
q18299 | AddJobToView | train | func (jenkins *Jenkins) AddJobToView(viewName string, job Job) error {
	// AddJobToView attaches an existing job to the named view.
	return jenkins.post(
		fmt.Sprintf("/view/%s/addJobToView", viewName),
		url.Values{"name": []string{job.Name}},
		nil,
	)
} | go | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.