text
stringlengths
11
4.05M
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package models

import (
	"context"
	"database/sql"
	"net/http"
	"net/url"
	"time"

	"golang.org/x/net/http/httpproxy"

	"yunion.io/x/jsonutils"
	"yunion.io/x/log"
	"yunion.io/x/pkg/errors"
	"yunion.io/x/pkg/tristate"
	"yunion.io/x/pkg/util/compare"
	"yunion.io/x/pkg/utils"

	proxyapi "yunion.io/x/onecloud/pkg/apis/cloudcommon/proxy"
	api "yunion.io/x/onecloud/pkg/apis/cloudid"
	"yunion.io/x/onecloud/pkg/cloudcommon/db"
	"yunion.io/x/onecloud/pkg/cloudcommon/db/lockman"
	"yunion.io/x/onecloud/pkg/cloudcommon/db/taskman"
	"yunion.io/x/onecloud/pkg/cloudid/options"
	"yunion.io/x/onecloud/pkg/cloudprovider"
	"yunion.io/x/onecloud/pkg/mcclient"
	"yunion.io/x/onecloud/pkg/mcclient/auth"
	"yunion.io/x/onecloud/pkg/mcclient/modules"
	"yunion.io/x/onecloud/pkg/util/httputils"
)

// SCloudaccountManager manages the local mirror of cloud accounts kept by
// the cloudid service. The authoritative records live in the region service
// and are pulled in via the mcclient API.
// +onecloud:swagger-gen-ignore
type SCloudaccountManager struct {
	db.SDomainLevelResourceBaseManager
}

// CloudaccountManager is the singleton manager instance, set up in init().
var CloudaccountManager *SCloudaccountManager

// isCloudacountSynced reports whether the first full account sync has
// completed; waitForSync spins on it before dependent sync tasks start.
// NOTE(review): plain bool read/written from multiple goroutines without
// synchronization — presumably benign as a one-shot latch, but technically a
// data race; consider sync/atomic. (Name also has a typo: "acount".)
var isCloudacountSynced bool

func init() {
	CloudaccountManager = &SCloudaccountManager{
		SDomainLevelResourceBaseManager: db.NewDomainLevelResourceBaseManager(
			SCloudaccount{},
			"cloudaccounts_tbl",
			"cloudaccount",
			"cloudaccounts",
		),
	}
	CloudaccountManager.SetVirtualObject(CloudaccountManager)
	isCloudacountSynced = false
}

// SCloudaccount is the local copy of a cloud account record synchronized
// from the region service.
type SCloudaccount struct {
	db.SStandaloneResourceBase
	db.SDomainizedResourceBase

	Provider         string            `width:"64" charset:"ascii" list:"domain"`
	Brand            string            `width:"64" charset:"utf8" nullable:"true" list:"domain"`
	IamLoginUrl      string            `width:"512" charset:"ascii"`
	IsSupportCloudId tristate.TriState `nullable:"false" get:"domain" list:"domain" default:"false"`
}

// GetICloudaccounts fetches all cloud accounts from the region service,
// paging through the list API 1024 records at a time.
func (manager *SCloudaccountManager) GetICloudaccounts() ([]SCloudaccount, error) {
	s := auth.GetAdminSession(context.Background(), options.Options.Region, "")
	data := []jsonutils.JSONObject{}
	offset := int64(0)
	params := jsonutils.NewDict()
	params.Set("scope", jsonutils.NewString("system"))
	params.Set("limit", jsonutils.NewInt(1024))
	for {
		params.Set("offset", jsonutils.NewInt(offset))
		result, err := modules.Cloudaccounts.List(s, params)
		if err != nil {
			return nil, errors.Wrap(err, "modules.Cloudaccounts.List")
		}
		data = append(data, result.Data...)
		if len(data) >= result.Total {
			break
		}
		offset += 1024
	}
	accounts := []SCloudaccount{}
	err := jsonutils.Update(&accounts, data)
	if err != nil {
		return nil, errors.Wrap(err, "jsonutils.Update")
	}
	return accounts, nil
}

// GetCloudaccounts returns all cloud accounts stored in the local database.
func (manager *SCloudaccountManager) GetCloudaccounts() ([]SCloudaccount, error) {
	accounts := []SCloudaccount{}
	q := manager.Query()
	err := db.FetchModelObjects(manager, q, &accounts)
	if err != nil {
		return nil, err
	}
	return accounts, nil
}

// syncCloudaccounts reconciles the local cloud account table against the
// region service: removed accounts are deleted (with their users/providers/
// group caches), common ones are updated in place, and new ones inserted.
// Returns the surviving local accounts plus a sync result summary.
func (manager *SCloudaccountManager) syncCloudaccounts(ctx context.Context, userCred mcclient.TokenCredential) (localAccounts []SCloudaccount, result compare.SyncResult) {
	lockman.LockClass(ctx, manager, db.GetLockClassKey(manager, userCred))
	defer lockman.ReleaseClass(ctx, manager, db.GetLockClassKey(manager, userCred))
	accounts, err := manager.GetICloudaccounts()
	if err != nil {
		result.Error(errors.Wrap(err, "GetRegionCloudaccounts"))
		return
	}
	dbAccounts, err := manager.GetCloudaccounts()
	if err != nil {
		result.Error(errors.Wrap(err, "GetLocalCloudaccounts"))
		return
	}
	removed := make([]SCloudaccount, 0)
	commondb := make([]SCloudaccount, 0)
	commonext := make([]SCloudaccount, 0)
	added := make([]SCloudaccount, 0)
	err = compare.CompareSets(dbAccounts, accounts, &removed, &commondb, &commonext, &added)
	if err != nil {
		result.Error(errors.Wrap(err, "compare.CompareSets"))
		return
	}
	for i := 0; i < len(removed); i++ {
		err = removed[i].syncRemoveCloudaccount(ctx, userCred)
		if err != nil {
			result.AddError(err)
			continue
		}
		result.Delete()
	}
	for i := 0; i < len(commondb); i++ {
		err = commondb[i].syncWithICloudaccount(ctx, userCred, commonext[i])
		if err != nil {
			result.UpdateError(err)
			continue
		}
		localAccounts = append(localAccounts, commondb[i])
		result.Update()
	}
	for i := 0; i < len(added); i++ {
		account, err := manager.newFromICloudaccount(ctx, userCred, &added[i])
		if err != nil {
			result.AddError(err)
			continue
		}
		localAccounts = append(localAccounts, *account)
		result.Add()
	}
	return
}

// removeCloudproviders deletes all local cloudprovider records belonging to
// this account.
func (self *SCloudaccount) removeCloudproviders(ctx context.Context, userCred mcclient.TokenCredential) error {
	providers, err := self.GetCloudproviders()
	if err != nil {
		return errors.Wrap(err, "GetCloudproviders")
	}
	for i := range providers {
		err = providers[i].Delete(ctx, userCred)
		if err != nil {
			return errors.Wrap(err, "provider.Delete")
		}
	}
	return nil
}

// GetCloudproviderId returns the empty string: an account as a whole is not
// tied to a single cloudprovider.
func (self *SCloudaccount) GetCloudproviderId() string {
	return ""
}

// removeCloudgroupcaches deletes all local cloudgroup cache records
// belonging to this account.
func (self *SCloudaccount) removeCloudgroupcaches(ctx context.Context, userCred mcclient.TokenCredential) error {
	caches, err := self.GetCloudgroupcaches()
	if err != nil {
		return errors.Wrap(err, "GetCloudgroupcaches")
	}
	for i := range caches {
		err = caches[i].Delete(ctx, userCred)
		if err != nil {
			return errors.Wrap(err, "caches[i].Delete")
		}
	}
	return nil
}

// syncRemoveCloudaccount removes the account together with its dependents:
// cloudusers (which also deletes the account row itself), cloudproviders and
// cloudgroup caches.
func (self *SCloudaccount) syncRemoveCloudaccount(ctx context.Context, userCred mcclient.TokenCredential) error {
	err := self.syncRemoveClouduser(ctx, userCred)
	if err != nil {
		return errors.Wrap(err, "syncRemoveClouduser")
	}
	err = self.removeCloudproviders(ctx, userCred)
	if err != nil {
		return errors.Wrap(err, "removeCloudproviders")
	}
	err = self.removeCloudgroupcaches(ctx, userCred)
	if err != nil {
		return errors.Wrap(err, "removeCloudgroupcaches")
	}
	return nil
}

// syncRemoveClouduser hard-deletes every clouduser of this account and then
// deletes the account record itself.
func (self *SCloudaccount) syncRemoveClouduser(ctx context.Context, userCred mcclient.TokenCredential) error {
	users, err := self.getCloudusers()
	if err != nil {
		return errors.Wrap(err, "getCloudusers")
	}
	for i := range users {
		err = users[i].RealDelete(ctx, userCred)
		if err != nil {
			return errors.Wrapf(err, "RealDelete user %s(%s)", users[i].Name, users[i].Id)
		}
	}
	return self.Delete(ctx, userCred)
}

// newFromICloudaccount inserts a region-service account record into the
// local table under the class lock.
func (manager *SCloudaccountManager) newFromICloudaccount(ctx context.Context, userCred mcclient.TokenCredential, account *SCloudaccount) (*SCloudaccount, error) {
	lockman.LockClass(ctx, manager, db.GetLockClassKey(manager, userCred))
	defer lockman.ReleaseClass(ctx, manager, db.GetLockClassKey(manager, userCred))
	account.SetModelManager(manager, account)
	err := manager.TableSpec().Insert(ctx, account)
	if err != nil {
		return nil, errors.Wrap(err, "Insert")
	}
	return account, nil
}

// syncWithICloudaccount copies the mutable fields of the region-side record
// into the local row under an update lock.
func (self *SCloudaccount) syncWithICloudaccount(ctx context.Context, userCred mcclient.TokenCredential, account SCloudaccount) error {
	_, err := db.UpdateWithLock(ctx, self, func() error {
		self.Name = account.Name
		self.DomainId = account.DomainId
		self.Brand = account.Brand
		self.IamLoginUrl = account.IamLoginUrl
		self.IsSupportCloudId = account.IsSupportCloudId
		return nil
	})
	if err != nil {
		return errors.Wrap(err, "db.UpdateWithLock")
	}
	return nil
}

// SyncCloudaccounts performs a full account sync and then, for providers
// where cloudusers belong to a cloudprovider, syncs each account's
// cloudproviders. Marks the global synced flag when done.
func (manager *SCloudaccountManager) SyncCloudaccounts(ctx context.Context, userCred mcclient.TokenCredential, isStart bool) {
	localAccounts, result := manager.syncCloudaccounts(ctx, userCred)
	log.Infof("SyncCloudaccounts: %s", result.Result())
	for i, account := range localAccounts {
		lockman.LockObject(ctx, &localAccounts[i])
		// NOTE(review): defer inside a loop — every object lock is held until
		// the function returns, not until the iteration ends.
		defer lockman.ReleaseObject(ctx, &localAccounts[i])
		factory, err := account.GetProviderFactory()
		if err != nil {
			log.Errorf("GetProviderFactory: %v", err)
			continue
		}
		if !factory.IsClouduserBelongCloudprovider() {
			continue
		}
		result = account.syncCloudprovider(ctx, userCred)
		log.Infof("sync cloudprovider for cloudaccount %s(%s) result: %s", account.Name, account.Id, result.Result())
	}
	_ = i // i is unused beyond range bookkeeping via localAccounts[i] above
	isCloudacountSynced = true
}

// waitForSync blocks until the first cloud account sync has completed.
// Avoids the situation where, on first start, the cloud account list is
// empty and subaccounts and other resources would have to wait a full sync
// cycle before being synchronized.
func waitForSync(task string) {
	for isCloudacountSynced == false {
		log.Debugf("cloudaccount not sync try later do task %s", task)
		time.Sleep(time.Second * 30)
	}
}

// GetGlobalId returns the local Id, used as the compare key against region
// records in CompareSets.
func (self SCloudaccount) GetGlobalId() string {
	return self.Id
}

// GetExternalId returns the local Id (same as GetGlobalId).
func (self SCloudaccount) GetExternalId() string {
	return self.Id
}

// FetchAccount fetches an account by id from the local table, falling back
// to the region service (and caching the result locally) when the row does
// not exist yet.
func (manager *SCloudaccountManager) FetchAccount(ctx context.Context, id string) (*SCloudaccount, error) {
	account, err := manager.FetchById(id)
	if err != nil {
		if err == sql.ErrNoRows {
			session := auth.GetAdminSession(context.Background(), options.Options.Region, "")
			result, err := modules.Cloudaccounts.Get(session, id, nil)
			if err != nil {
				return nil, errors.Wrap(err, "Cloudaccounts.Get")
			}
			_account := &SCloudaccount{}
			_account.SetModelManager(manager, _account)
			err = result.Unmarshal(_account)
			if err != nil {
				return nil, errors.Wrap(err, "result.Unmarshal")
			}
			lockman.LockRawObject(ctx, manager.KeywordPlural(), id)
			defer lockman.ReleaseRawObject(ctx, manager.KeywordPlural(), id)
			return _account, manager.TableSpec().InsertOrUpdate(ctx, _account)
		}
		return nil, errors.Wrap(err, "manager.FetchById")
	}
	return account.(*SCloudaccount), nil
}

// SCloudDelegate is the view of a cloud account (or cloudprovider) fetched
// from the region service, carrying the credentials needed to instantiate a
// cloudprovider.ICloudProvider.
type SCloudDelegate struct {
	Id           string
	Name         string
	Enabled      bool
	Status       string
	SyncStatus   string
	AccessUrl    string
	Account      string
	Secret       string
	Provider     string
	Brand        string
	ProxySetting proxyapi.SProxySetting
}

// getCloudDelegate fetches this account's delegate record from the region
// service.
func (self *SCloudaccount) getCloudDelegate(ctx context.Context) (*SCloudDelegate, error) {
	s := auth.GetAdminSession(ctx, options.Options.Region, "")
	result, err := modules.Cloudaccounts.Get(s, self.Id, nil)
	if err != nil {
		return nil, errors.Wrap(err, "Cloudaccounts.Get")
	}
	account := &SCloudDelegate{}
	err = result.Unmarshal(account)
	if err != nil {
		return nil, errors.Wrap(err, "result.Unmarshal")
	}
	return account, nil
}

// GetProvider builds an ICloudProvider for this account via its region-side
// delegate record.
func (self *SCloudaccount) GetProvider() (cloudprovider.ICloudProvider, error) {
	delegate, err := self.getCloudDelegate(context.Background())
	if err != nil {
		return nil, errors.Wrap(err, "getCloudDelegate")
	}
	return delegate.GetProvider()
}

// GetCloudDelegaes lists this account's cloudprovider delegate records from
// the region service.
// NOTE(review): name is a typo for GetCloudDelegates; kept for caller
// compatibility.
func (self *SCloudaccount) GetCloudDelegaes(ctx context.Context) ([]SCloudDelegate, error) {
	s := auth.GetAdminSession(ctx, options.Options.Region, "")
	params := map[string]string{"cloudaccount": self.Id}
	result, err := modules.Cloudproviders.List(s, jsonutils.Marshal(params))
	if err != nil {
		return nil, errors.Wrap(err, "Cloudproviders.List")
	}
	providers := []SCloudDelegate{}
	err = jsonutils.Update(&providers, result.Data)
	if err != nil {
		return nil, errors.Wrap(err, "jsonutils.Update")
	}
	return providers, nil
}

// getPassword decrypts the delegate's secret; the account Id serves as the
// AES key.
func (account *SCloudDelegate) getPassword() (string, error) {
	return utils.DescryptAESBase64(account.Id, account.Secret)
}

// getAccessUrl returns the delegate's access URL.
func (account *SCloudDelegate) getAccessUrl() string {
	return account.AccessUrl
}

// GetProviderFactory returns the provider factory registered for this
// account's Provider string.
func (self *SCloudaccount) GetProviderFactory() (cloudprovider.ICloudProviderFactory, error) {
	return cloudprovider.GetProviderFactory(self.Provider)
}

// GetProvider instantiates an ICloudProvider from the delegate's credentials
// and per-account proxy settings. Fails if the account is disabled.
func (account *SCloudDelegate) GetProvider() (cloudprovider.ICloudProvider, error) {
	if !account.Enabled {
		return nil, errors.Errorf("Cloud account %s is not enabled", account.Name)
	}
	accessUrl := account.getAccessUrl()
	passwd, err := account.getPassword()
	if err != nil {
		return nil, err
	}
	var proxyFunc httputils.TransportProxyFunc
	{
		// Translate the per-account proxy setting into an http.Request-based
		// proxy function for the provider's transport.
		cfg := &httpproxy.Config{
			HTTPProxy:  account.ProxySetting.HTTPProxy,
			HTTPSProxy: account.ProxySetting.HTTPSProxy,
			NoProxy:    account.ProxySetting.NoProxy,
		}
		cfgProxyFunc := cfg.ProxyFunc()
		proxyFunc = func(req *http.Request) (*url.URL, error) {
			return cfgProxyFunc(req.URL)
		}
	}
	return cloudprovider.GetProvider(cloudprovider.ProviderConfig{
		Id:        account.Id,
		Name:      account.Name,
		Vendor:    account.Provider,
		URL:       accessUrl,
		Account:   account.Account,
		Secret:    passwd,
		ProxyFunc: proxyFunc,
	})
}

// SyncCloudusers kicks off a SyncCloudusersTask for every account whose
// provider supports cloudusers. Blocks until the initial account sync has
// finished.
func (manager *SCloudaccountManager) SyncCloudusers(ctx context.Context, userCred mcclient.TokenCredential, isStart bool) {
	waitForSync("SyncCloudusersTask")
	accounts, err := manager.GetCloudaccounts()
	if err != nil {
		log.Errorf("GetLocalCloudaccounts: %v", err)
		return
	}
	for i := range accounts {
		factory, err := accounts[i].GetProviderFactory()
		if err != nil {
			// provider not registered locally; skip silently
			continue
		}
		if factory.IsSupportClouduser() {
			err = accounts[i].StartSyncCloudusersTask(ctx, userCred, "")
			if err != nil {
				log.Errorf("StartSyncCloudusersTask for account %s(%s) error: %v", accounts[i].Name, accounts[i].Provider, err)
			}
		}
	}
}

// StartSyncCloudusersTask schedules a SyncCloudusersTask for this account.
func (self *SCloudaccount) StartSyncCloudusersTask(ctx context.Context, userCred mcclient.TokenCredential, parentTaskId string) error {
	params := jsonutils.NewDict()
	task, err := taskman.TaskManager.NewTask(ctx, "SyncCloudusersTask", self, userCred, params, parentTaskId, "", nil)
	if err != nil {
		return errors.Wrap(err, "NewTask")
	}
	task.ScheduleRun(nil)
	return nil
}

// StartSyncCloudgroupsTask schedules a SyncCloudgroupsTask for this account.
func (self *SCloudaccount) StartSyncCloudgroupsTask(ctx context.Context, userCred mcclient.TokenCredential, parentTaskId string) error {
	params := jsonutils.NewDict()
	task, err := taskman.TaskManager.NewTask(ctx, "SyncCloudgroupsTask", self, userCred, params, parentTaskId, "", nil)
	if err != nil {
		return errors.Wrap(err, "NewTask")
	}
	task.ScheduleRun(nil)
	return nil
}

// StartSyncCloudpoliciesTask schedules a SyncCloudpoliciesTask for this
// account.
func (self *SCloudaccount) StartSyncCloudpoliciesTask(ctx context.Context, userCred mcclient.TokenCredential, parentTaskId string) error {
	params := jsonutils.NewDict()
	task, err := taskman.TaskManager.NewTask(ctx, "SyncCloudpoliciesTask", self, userCred, params, parentTaskId, "", nil)
	if err != nil {
		return errors.Wrap(err, "NewTask")
	}
	task.ScheduleRun(nil)
	return nil
}

// getCloudusers returns all cloudusers belonging to this account.
func (self *SCloudaccount) getCloudusers() ([]SClouduser, error) {
	users := []SClouduser{}
	q := ClouduserManager.Query().Equals("cloudaccount_id", self.Id)
	err := db.FetchModelObjects(ClouduserManager, q, &users)
	if err != nil {
		return nil, errors.Wrap(err, "db.FetchModelObjects")
	}
	return users, nil
}

// GetCloudusersByProviderId returns the account's available cloudusers,
// optionally restricted to one cloudprovider.
func (self *SCloudaccount) GetCloudusersByProviderId(cloudproviderId string) ([]SClouduser, error) {
	users := []SClouduser{}
	q := ClouduserManager.Query().Equals("status", api.CLOUD_USER_STATUS_AVAILABLE).Equals("cloudaccount_id", self.Id)
	if len(cloudproviderId) > 0 {
		q = q.Equals("cloudprovider_id", cloudproviderId)
	}
	err := db.FetchModelObjects(ClouduserManager, q, &users)
	if err != nil {
		return nil, errors.Wrap(err, "db.FetchModelObjects")
	}
	return users, nil
}

// SyncCloudusers reconciles the local clouduser table of one cloudprovider
// against the remote user list. Removed users are only hard-deleted when
// they have an external id; the reserved test user name is never imported.
// Returns the matched local/remote pairs for follow-up syncing.
func (self *SCloudaccount) SyncCloudusers(ctx context.Context, userCred mcclient.TokenCredential, cloudproviderId string, iUsers []cloudprovider.IClouduser) ([]SClouduser, []cloudprovider.IClouduser, compare.SyncResult) {
	result := compare.SyncResult{}
	dbUsers, err := self.GetCloudusersByProviderId(cloudproviderId)
	if err != nil {
		result.Error(errors.Wrap(err, "GetCloudusersByProviderId"))
		return nil, nil, result
	}
	localUsers := []SClouduser{}
	remoteUsers := []cloudprovider.IClouduser{}
	removed := make([]SClouduser, 0)
	commondb := make([]SClouduser, 0)
	commonext := make([]cloudprovider.IClouduser, 0)
	added := make([]cloudprovider.IClouduser, 0)
	err = compare.CompareSets(dbUsers, iUsers, &removed, &commondb, &commonext, &added)
	if err != nil {
		result.Error(errors.Wrap(err, "compare.CompareSets"))
		return nil, nil, result
	}
	for i := 0; i < len(removed); i++ {
		if len(removed[i].ExternalId) > 0 {
			err = removed[i].RealDelete(ctx, userCred)
			if err != nil {
				result.AddError(err)
				continue
			}
			result.Delete()
		}
	}
	for i := 0; i < len(commondb); i++ {
		err = commondb[i].SyncWithClouduser(ctx, userCred, commonext[i], cloudproviderId)
		if err != nil {
			result.UpdateError(err)
			continue
		}
		localUsers = append(localUsers, commondb[i])
		remoteUsers = append(remoteUsers, commonext[i])
		result.Update()
	}
	for i := 0; i < len(added); i++ {
		if added[i].GetName() != cloudprovider.TEST_CLOUDID_USER_NAME {
			user, err := ClouduserManager.newFromClouduser(ctx, userCred, added[i], self.Id, cloudproviderId)
			if err != nil {
				result.AddError(err)
				continue
			}
			localUsers = append(localUsers, *user)
			remoteUsers = append(remoteUsers, added[i])
			result.Add()
		}
	}
	return localUsers, remoteUsers, result
}

// GetCloudpolicies returns all local cloudpolicies of this account's
// provider type.
func (self *SCloudaccount) GetCloudpolicies() ([]SCloudpolicy, error) {
	q := CloudpolicyManager.Query().Equals("provider", self.Provider)
	policies := []SCloudpolicy{}
	err := db.FetchModelObjects(CloudpolicyManager, q, &policies)
	if err != nil {
		return nil, errors.Wrap(err, "db.FetchModelObjects")
	}
	return policies, nil
}

// SyncCloudpolicies reconciles the local cloudpolicy table for this
// account's provider against the given remote policy list.
func (self *SCloudaccount) SyncCloudpolicies(ctx context.Context, userCred mcclient.TokenCredential, iPolicies []cloudprovider.ICloudpolicy) compare.SyncResult {
	result := compare.SyncResult{}
	dbPolicies, err := self.GetCloudpolicies()
	if err != nil {
		result.Error(errors.Wrap(err, "GetCloudpolicies"))
		return result
	}
	removed := make([]SCloudpolicy, 0)
	commondb := make([]SCloudpolicy, 0)
	commonext := make([]cloudprovider.ICloudpolicy, 0)
	added := make([]cloudprovider.ICloudpolicy, 0)
	err = compare.CompareSets(dbPolicies, iPolicies, &removed, &commondb, &commonext, &added)
	if err != nil {
		result.Error(errors.Wrap(err, "compare.CompareSets"))
		return result
	}
	for i := 0; i < len(removed); i++ {
		err = removed[i].Delete(ctx, userCred)
		if err != nil {
			result.AddError(err)
			continue
		}
		result.Delete()
	}
	for i := 0; i < len(commondb); i++ {
		err = commondb[i].SyncWithCloudpolicy(ctx, userCred, commonext[i])
		if err != nil {
			result.UpdateError(err)
			continue
		}
		result.Update()
	}
	for i := 0; i < len(added); i++ {
		_, err := CloudpolicyManager.newFromCloudpolicy(ctx, userCred, added[i], self.Provider)
		if err != nil {
			result.AddError(err)
			continue
		}
		result.Add()
	}
	return result
}

// GetCloudproviders returns all local cloudprovider records belonging to
// this account.
func (self *SCloudaccount) GetCloudproviders() ([]SCloudprovider, error) {
	q := CloudproviderManager.Query().Equals("cloudaccount_id", self.Id)
	providers := []SCloudprovider{}
	err := db.FetchModelObjects(CloudproviderManager, q, &providers)
	if err != nil {
		return nil, errors.Wrap(err, "db.FetchModelObjects")
	}
	return providers, nil
}

// GetICloudprovider lists this account's cloudproviders from the region
// service.
func (self *SCloudaccount) GetICloudprovider() ([]SCloudprovider, error) {
	s := auth.GetAdminSession(context.Background(), options.Options.Region, "")
	params := map[string]string{"cloudaccount": self.Id}
	result, err := modules.Cloudproviders.List(s, jsonutils.Marshal(params))
	if err != nil {
		return nil, errors.Wrap(err, "Cloudproviders.List")
	}
	providers := []SCloudprovider{}
	err = jsonutils.Update(&providers, result.Data)
	if err != nil {
		return nil, errors.Wrap(err, "jsonutils.Update")
	}
	return providers, nil
}

// syncCloudprovider reconciles the local cloudprovider table of this
// account against the region service's records.
func (self *SCloudaccount) syncCloudprovider(ctx context.Context, userCred mcclient.TokenCredential) compare.SyncResult {
	result := compare.SyncResult{}
	providers, err := self.GetICloudprovider()
	if err != nil {
		result.Error(errors.Wrap(err, "GetRegionCloudprovider"))
		return result
	}
	dbProviders, err := self.GetCloudproviders()
	if err != nil {
		result.Error(errors.Wrap(err, "GetCloudproviders"))
		return result
	}
	removed := make([]SCloudprovider, 0)
	commondb := make([]SCloudprovider, 0)
	commonext := make([]SCloudprovider, 0)
	added := make([]SCloudprovider, 0)
	err = compare.CompareSets(dbProviders, providers, &removed, &commondb, &commonext, &added)
	if err != nil {
		result.Error(errors.Wrap(err, "compare.CompareSets"))
		return result
	}
	for i := 0; i < len(removed); i++ {
		err = removed[i].syncRemoveClouduser(ctx, userCred)
		if err != nil {
			result.AddError(err)
			continue
		}
		result.Delete()
	}
	for i := 0; i < len(commondb); i++ {
		err = commondb[i].syncWithRegionProvider(ctx, userCred, commonext[i])
		if err != nil {
			result.UpdateError(err)
			continue
		}
		result.Update()
	}
	for i := 0; i < len(added); i++ {
		err = CloudproviderManager.newFromRegionProvider(ctx, userCred, added[i])
		if err != nil {
			result.AddError(err)
			continue
		}
		result.Add()
	}
	return result
}

// GetCloudgroups returns the local cloudgroups matching this account's
// provider and domain.
func (self *SCloudaccount) GetCloudgroups() ([]SCloudgroup, error) {
	groups := []SCloudgroup{}
	q := CloudgroupManager.Query().Equals("provider", self.Provider).Equals("domain_id", self.DomainId)
	err := db.FetchModelObjects(CloudgroupManager, q, &groups)
	if err != nil {
		return nil, errors.Wrap(err, "db.FetchModelObjects")
	}
	return groups, nil
}
// GetCloudgroupcaches returns all local cloudgroup cache records belonging
// to this account.
func (self *SCloudaccount) GetCloudgroupcaches() ([]SCloudgroupcache, error) {
	caches := []SCloudgroupcache{}
	q := CloudgroupcacheManager.Query().Equals("cloudaccount_id", self.Id)
	err := db.FetchModelObjects(CloudgroupcacheManager, q, &caches)
	if err != nil {
		return nil, errors.Wrap(err, "db.FetchModelObjects")
	}
	return caches, nil
}

// GetSupportCreateCloudgroupAccounts returns the accounts whose provider
// supports creating cloudgroups.
func (manager *SCloudaccountManager) GetSupportCreateCloudgroupAccounts() ([]SCloudaccount, error) {
	accounts := []SCloudaccount{}
	q := manager.Query().In("provider", cloudprovider.GetSupportCloudgroupProviders())
	err := db.FetchModelObjects(manager, q, &accounts)
	if err != nil {
		return nil, errors.Wrap(err, "db.FetchModelObjects")
	}
	return accounts, nil
}

// SyncCloudpolicies kicks off a policy sync task for every account. Blocks
// until the initial account sync has finished.
func (manager *SCloudaccountManager) SyncCloudpolicies(ctx context.Context, userCred mcclient.TokenCredential, isStart bool) {
	waitForSync("SyncCloudpoliciesTask")
	accounts, err := manager.GetCloudaccounts()
	if err != nil {
		log.Errorf("GetCloudaccounts error: %v", err)
		return
	}
	for i := range accounts {
		err = accounts[i].StartSyncCloudpolicyTask(ctx, userCred, "")
		if err != nil {
			log.Errorf("StartSyncCloudpolicyTask for account %s(%s) error: %v", accounts[i].Name, accounts[i].Provider, err)
		}
	}
}

// StartSyncCloudpolicyTask schedules a SyncCloudpoliciesTask for this
// account. (Same task name as StartSyncCloudpoliciesTask above.)
func (self *SCloudaccount) StartSyncCloudpolicyTask(ctx context.Context, userCred mcclient.TokenCredential, parentTaskId string) error {
	params := jsonutils.NewDict()
	task, err := taskman.TaskManager.NewTask(ctx, "SyncCloudpoliciesTask", self, userCred, params, parentTaskId, "", nil)
	if err != nil {
		return errors.Wrap(err, "NewTask")
	}
	task.ScheduleRun(nil)
	return nil
}

// SyncCloudgroups kicks off a group-cache sync task for every account whose
// provider supports cloudgroups. Blocks until the initial account sync has
// finished.
func (manager *SCloudaccountManager) SyncCloudgroups(ctx context.Context, userCred mcclient.TokenCredential, isStart bool) {
	waitForSync("SyncCloudgroupsTask")
	accounts, err := manager.GetSupportCreateCloudgroupAccounts()
	if err != nil {
		log.Errorf("GetSupportCreateCloudgroupAccounts error: %v", err)
		return
	}
	for i := range accounts {
		err = accounts[i].StartSyncCloudgroupcacheTask(ctx, userCred, "")
		if err != nil {
			log.Errorf("StartSyncCloudgroupcacheTask for account %s(%s) error: %v", accounts[i].Name, accounts[i].Provider, err)
		}
	}
}

// StartSyncCloudgroupcacheTask schedules a SyncCloudgroupcachesTask for this
// account.
func (self *SCloudaccount) StartSyncCloudgroupcacheTask(ctx context.Context, userCred mcclient.TokenCredential, parentTaskId string) error {
	params := jsonutils.NewDict()
	task, err := taskman.TaskManager.NewTask(ctx, "SyncCloudgroupcachesTask", self, userCred, params, parentTaskId, "", nil)
	if err != nil {
		return errors.Wrap(err, "NewTask")
	}
	task.ScheduleRun(nil)
	return nil
}

// SyncCloudgroupcaches reconciles the local cloudgroup caches of this
// account against the remote group list.
func (self *SCloudaccount) SyncCloudgroupcaches(ctx context.Context, userCred mcclient.TokenCredential, iGroups []cloudprovider.ICloudgroup) compare.SyncResult {
	result := compare.SyncResult{}
	dbCaches, err := self.GetCloudgroupcaches()
	if err != nil {
		result.Error(errors.Wrap(err, "GetCloudgroupcaches"))
		return result
	}
	removed := make([]SCloudgroupcache, 0)
	commondb := make([]SCloudgroupcache, 0)
	commonext := make([]cloudprovider.ICloudgroup, 0)
	added := make([]cloudprovider.ICloudgroup, 0)
	err = compare.CompareSets(dbCaches, iGroups, &removed, &commondb, &commonext, &added)
	if err != nil {
		result.Error(errors.Wrap(err, "compare.CompareSets"))
		return result
	}
	for i := 0; i < len(removed); i++ {
		if len(removed[i].ExternalId) > 0 {
			// Only delete groups that have already been removed on the cloud
			// side (i.e. caches that were actually created remotely).
			err = removed[i].RealDelete(ctx, userCred)
			if err != nil {
				result.DeleteError(err)
				continue
			}
			result.Delete()
		}
	}
	for i := 0; i < len(commondb); i++ {
		err = commondb[i].syncWithCloudgrup(ctx, userCred, commonext[i])
		if err != nil {
			result.UpdateError(err)
			continue
		}
		result.Update()
	}
	for i := 0; i < len(added); i++ {
		_, err := self.newCloudgroup(ctx, userCred, added[i])
		if err != nil {
			result.AddError(err)
			continue
		}
		result.Add()
	}
	return result
}

// newCloudgroup finds or creates a local cloudgroup matching the remote
// group's policies, then caches the remote group against it.
func (self *SCloudaccount) newCloudgroup(ctx context.Context, userCred mcclient.TokenCredential, iGroup cloudprovider.ICloudgroup) (*SCloudgroupcache, error) {
	group, err := self.GetOrCreateCloudgroup(ctx, userCred, iGroup)
	if err != nil {
		return nil, errors.Wrap(err, "GetOrCreateCloudgroup")
	}
	cache, err := CloudgroupcacheManager.newFromCloudgroup(ctx, userCred, iGroup, group, self.Id)
	if err != nil {
		return nil, errors.Wrap(err, "newFromCloudgroup")
	}
	return cache, nil
}

// GetOrCreateCloudgroup returns an existing local cloudgroup whose policy
// set equals the remote group's system policies, creating the missing
// policies and, if no group matches, a new group with those policies
// attached.
func (self *SCloudaccount) GetOrCreateCloudgroup(ctx context.Context, userCred mcclient.TokenCredential, iGroup cloudprovider.ICloudgroup) (*SCloudgroup, error) {
	groups, err := self.GetCloudgroups()
	if err != nil {
		return nil, errors.Wrap(err, "GetCloudgroups")
	}
	iPolicies, err := iGroup.GetISystemCloudpolicies()
	if err != nil {
		return nil, errors.Wrap(err, "GetICloudpolicies")
	}
	// Ensure every remote policy has a local record before comparing groups.
	for i := range iPolicies {
		_, err := db.FetchByExternalId(CloudpolicyManager, iPolicies[i].GetGlobalId())
		if err == nil {
			continue
		}
		if errors.Cause(err) != sql.ErrNoRows {
			return nil, errors.Wrapf(err, "db.FetchByExternalId(%s)", iPolicies[i].GetGlobalId())
		}
		_, err = CloudpolicyManager.newFromCloudpolicy(ctx, userCred, iPolicies[i], self.Provider)
		if err != nil {
			return nil, errors.Wrap(err, "newFromCloudpolicy")
		}
	}
	for i := range groups {
		isEqual, err := groups[i].IsEqual(iPolicies)
		if err != nil {
			return nil, errors.Wrap(err, "IsEqual")
		}
		if isEqual {
			return &groups[i], nil
		}
	}
	group, err := CloudgroupManager.newCloudgroup(ctx, userCred, iGroup, self.Provider, self.DomainId)
	if err != nil {
		return nil, errors.Wrap(err, "newCloudgroup")
	}
	for i := range iPolicies {
		err = group.attachPolicyFromCloudpolicy(ctx, userCred, iPolicies[i])
		if err != nil {
			return nil, errors.Wrap(err, "attachPolicyFromCloudpolicy")
		}
	}
	return group, nil
}
package main

import (
	"bytes"
	"encoding/json"
	"errors"
	"io/ioutil"
	"log"
	"net/http"
	"net/http/cookiejar"
	"net/url"
	"strings"
	"time"

	"gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
)

// // key -> task
// type Tasks map[string]*Task
// func (vis Tasks) Get(key string) *Task {
// 	return vis[key]
// }
// func (vis Tasks) Set(key string, vi *Task) {
// 	vis[key] = vi
// }
// func (vis Tasks) Del(key string) {
// 	delete(vis, key)
// }

// Task is a vote-boosting job persisted in the "weipiao.task" MongoDB
// collection.
type Task struct {
	Id          bson.ObjectId `bson:"_id"`
	Key         string        `bson:"key"` // uniquely identifies one vote TODO
	Status      string        `bson:"status"` // task status: prepare, doing, finished
	Url         string        `bson:"url"` // short URL
	Supervoteid string        `bson:"supervoteid"`
	Info        map[string]interface{} `bson:"info"` // vote info: activity title, expiry time, candidates, etc.
	// Info string `bson:"info"`
	Item string `bson:"item"` // the object being voted for
	// Item map[string]interface{} `bson:"item"`
	User         string    `bson:"user"`         // username that submitted the task
	Votes        uint64    `bson:"votes"`        // target vote count
	Price        float64   `bson:"price"`        // unit price, in yuan per vote
	Speed        uint64    `bson:"speed"`        // TODO not used yet; votes per minute
	CurVotes     uint64    `bson:"curvotes"`     // votes succeeded so far
	AlreadyVotes uint64    `bson:"alreadyvotes"` // number of accounts already consumed
	RunnerCount  int       `bson:"runnercount"`  // number of runners currently working
	CreateTime   time.Time `bson:"createtime"`
	// NOTE(review): tag below is misspelled "bsoin" — mgo will ignore it and
	// store the field under the default lowercased name, not "finishtime".
	FinishTime time.Time `bsoin:"finishtime"`
}

// GetKeyFromUrl extracts the task key (__biz/mid/idx/sn query params) from a
// long vote URL; returns "" when parsing fails or __biz is missing.
func GetKeyFromUrl(voteUrl string) string {
	// parse the parameters out of the long URL
	u, err := url.Parse(voteUrl)
	if err != nil {
		log.Printf("getKeyFromUrl: parse url error: %v", err)
		return ""
	}
	values := u.Query()
	if values.Get("__biz") == "" {
		log.Printf("getKeyFromUrl: __biz is empty")
		return ""
	}
	key := "__biz=" + values.Get("__biz") + "&mid=" + values.Get("mid") + "&idx=" + values.Get("idx") + "&sn=" + values.Get("sn") return key
}

// NewTask fetches a (possibly shortened) vote URL, resolves the long URL and
// supervoteid, then queries the vote-show endpoint to populate Task.Info.
// The returned Task is in status "prepare" and not yet persisted.
func NewTask(shortOrLongUrl string) (*Task, error) {
	log.Printf("NewTask inputUrl: %v", shortOrLongUrl)
	// set up a cookie jar so follow-up requests share session cookies
	jar, err := cookiejar.New(nil)
	if err != nil {
		log.Printf("cookiejar.New() error: %v", err)
		return nil, err
	}
	client := &http.Client{
		Jar: jar,
	}
	vi := &Task{
		Url:    strings.Replace(shortOrLongUrl, "https:", "http:", 1),
		Status: "prepare",
	}
	resp, err := client.Get(vi.Url)
	if err != nil {
		log.Printf("get shorturl error: %v", err)
		return nil, err
	}
	defer resp.Body.Close()
	resBody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Printf("read body error: %v", err)
		return nil, err
	}
	// May be a short link; in that case extract the long link from the page
	// (no further request needed — the params can be parsed directly).
	if strings.Contains(shortOrLongUrl, "/s/") {
		vi.Url = string(getByBound(resBody, []byte(`var msg_link = "`), []byte(`";`)))
		vi.Url = strings.Replace(vi.Url, "https:", "http:", 1)
		vi.Url = strings.Replace(vi.Url, `\x26amp;`, `&`, -1)
		log.Printf("longurl: %v", vi.Url)
		if vi.Url == "" {
			log.Printf("get longurl error")
			return nil, errors.New("get longurl error")
		}
	}
	vi.Supervoteid = string(getByBound(resBody, []byte(`supervoteid=`), []byte(`&`)))
	// log.Printf("supervoteid: %v", vi.Supervoteid)
	if vi.Supervoteid == "" {
		log.Printf("supervoteid is empty. maybe url is invalid")
		return nil, errors.New("supervoteid is empty. maybe url is invalid")
	}
	// parse the parameters out of the long URL
	u, err := url.Parse(vi.Url)
	if err != nil {
		log.Printf("parse url error: %v", err)
		return nil, err
	}
	values := u.Query()
	vi.Key = "__biz=" + values.Get("__biz") + "&mid=" + values.Get("mid") + "&idx=" + values.Get("idx") + "&sn=" + values.Get("sn")
	// fetch the vote info
	values.Set("supervoteid", vi.Supervoteid)
	values.Set("action", "show")
	showUrl := getNewappmsgvoteShowUrl(values)
	// log.Printf("showUrl: %v", showUrl)
	resp, err = client.Get(showUrl)
	if err != nil {
		log.Printf("getNewappmsgvoteShowUrl error: %v", err)
		return nil, err
	}
	defer resp.Body.Close()
	resBody, err = ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Printf("read body 2 error: %v", err)
		return nil, err
	}
	// voteInfoStr := string(getByBound(resBody, []byte(`var voteInfo=`), []byte(`;`)))
	voteInfoBytes := getByBound(resBody, []byte(`var voteInfo=`), []byte(`;`))
	// log.Printf("voteInfoStr: %v ...", string(voteInfoBytes[:60]))
	vi.Info = make(map[string]interface{})
	// err = json.Unmarshal([]byte(voteInfoStr), &vi.Info)
	err = jsonUnmarshal(voteInfoBytes, &vi.Info)
	// NOTE(review): this log reads vi.Info before the error check below — on
	// unmarshal failure it logs a missing title first.
	log.Printf("task title: %v", vi.Info["title"])
	if err != nil {
		log.Printf("json.Unmarshal voteInfo error: %v", err)
		return nil, err
	}
	// TODO save the key and pass it to the frontend; it is sent back later
	// when the task is dispatched
	// vi.Info["key"] = vi.Key
	return vi, nil
}

// NewVoter builds a Voter bound to this task, with its own cookie jar and
// the query parameters parsed from voteUrl.
func (vi *Task) NewVoter(voteUrl string) (*Voter, error) {
	// log.Printf("NewVoter():")
	voteUrl = strings.Replace(voteUrl, "https:", "http:", 1)
	// set up a cookie jar for this voter's HTTP session
	jar, err := cookiejar.New(nil)
	if err != nil {
		log.Printf("cookiejar.New() error: %v", err)
		return nil, err
	}
	// parse the remaining parameters
	u, err := url.Parse(voteUrl)
	if err != nil {
		log.Printf("parse url error: %v", err)
		return nil, err
	}
	return &Voter{
		url: voteUrl,
		client: &http.Client{
			Jar: jar,
		},
		values: u.Query(),
		Info:   vi,
	}, nil
}

// Insert persists the task into weipiao.task.
func (vi *Task) Insert() error {
	return MgoInsert("weipiao", "task", vi)
}

// tasksToArray converts tasks into the map form rendered for the frontend.
func tasksToArray(voteInfos []*Task) []map[string]interface{} {
	tasks := []map[string]interface{}{}
	for _, info := range voteInfos {
		task := map[string]interface{}{}
		task["title"] = info.Info["title"]
		task["votes"] = info.Votes
		task["curvotes"] = info.CurVotes
		task["alreadyvotes"] = info.AlreadyVotes
		task["status"] = info.Status
		task["createtime"] = info.CreateTime.Format("2006-01-02 15:04:05")
		task["finishtime"] = info.FinishTime.Format("2006-01-02 15:04:05")
		tasks = append(tasks, task)
	}
	return tasks
}

// // submit the task
// func (vi *Task) Submit() error {
// 	// update votes/item etc. TODO
// 	vi.CreateTime = time.Now()
// 	return MgoInsert("weipiao", "task", vi)
// }

// QueryTasksByUser returns the user's tasks, newest first.
func QueryTasksByUser(username string) ([]*Task, error) {
	var task []*Task
	// err := MgoFind("weipiao", "task", bson.M{"user": username}, &task)
	session := mongoSession.Clone()
	defer session.Close()
	c := session.DB("weipiao").C("task")
	// reverse-sorted by createtime
	err := c.Find(bson.M{"user": username}).Sort("-createtime").All(&task)
	if err != nil {
		log.Printf("MgoFind(task) error: %v", err)
		return nil, err
	}
	return task, nil
}

// QueryTaskById looks a task up by its hex ObjectId. NOTE(review):
// bson.ObjectIdHex panics on an invalid hex string — input is assumed
// pre-validated.
func QueryTaskById(taskId string) (*Task, error) {
	// log.Printf("QueryTaskById(): %v", taskId)
	var tasks []*Task
	err := MgoFind("weipiao", "task", bson.M{"_id": bson.ObjectIdHex(taskId)}, &tasks)
	if err != nil {
		log.Printf("MgoFind(task) error: %v", err)
		return nil, err
	}
	if len(tasks) == 0 {
		return nil, errors.New("task not found by key")
	}
	return tasks[0], nil
}

// QueryTaskByKey returns a task in status "doing" for the given key and
// claims one vote on it as a side effect.
func QueryTaskByKey(key string) (*Task, error) {
	var task []*Task
	err := MgoFind("weipiao", "task", bson.M{"key": key, "status": "doing"}, &task)
	if err != nil {
		log.Printf("MgoFind(task) error: %v", err)
		return nil, err
	}
	if len(task) == 0 {
		return nil, errors.New("task not found by key")
	}
	vi := task[0]
	// +1 vote when the task is claimed; decremented later if the vote fails
	vi.IncrVotes()
	return vi, nil
}

// QueryTaskBySuperVoteId looks a task up by its supervoteid.
func QueryTaskBySuperVoteId(supervoteid string) (*Task, error) {
	var task []*Task
	err := MgoFind("weipiao", "task", bson.M{"supervoteid": supervoteid}, &task)
	if err != nil {
		log.Printf("MgoFind(task) error: %v", err)
		return nil, err
	}
	if len(task) == 0 {
		return nil, errors.New("task not found: supervoteid: " + supervoteid)
	}
	return task[0], nil
}

// IncrVotes increments curvotes and alreadyvotes in Mongo (two separate
// $inc updates) and mirrors them on the in-memory struct; when the target is
// reached the task is marked "success".
// NOTE(review): the in-memory mirror is not read back from the DB, so
// concurrent claimers can disagree with the stored counters.
func (vi *Task) IncrVotes() error {
	// log.Printf("vi.IncrVotes")
	err := MgoUpdate("weipiao", "task", bson.M{"_id": vi.Id}, bson.M{"$inc": bson.M{"curvotes": 1}})
	if err != nil {
		log.Printf("mgoupdate incr curvotes error: %v", err)
		return err
	}
	err = MgoUpdate("weipiao", "task", bson.M{"_id": vi.Id}, bson.M{"$inc": bson.M{"alreadyvotes": 1}})
	if err != nil {
		log.Printf("mgoupdate incr alreadyvotes error: %v", err)
		return err
	}
	vi.AlreadyVotes += 1
	vi.CurVotes += 1
	if vi.CurVotes < vi.Votes {
		return nil
	}
	vi.Status = "success"
	// log.Printf("task status: success, %v", vi.Id)
	return MgoUpdate("weipiao", "task", bson.M{"_id": vi.Id}, bson.M{"$set": bson.M{"status": vi.Status}})
	// return MgoUpdate("weipiao", "task", bson.M{"key": vi.Key}, bson.M{"$set": bson.M{"curvotes": vi.CurVotes, "status": vi.Status}})
}

// DecrVotes undoes a failed vote: decrements curvotes (alreadyvotes stays)
// and flips the task back to "doing" if it drops below the target.
func (vi *Task) DecrVotes() error {
	// log.Printf("vi.DecrVotes")
	err := MgoUpdate("weipiao", "task", bson.M{"_id": vi.Id}, bson.M{"$inc": bson.M{"curvotes": -1}})
	if err != nil {
		log.Printf("mgoupdate decr curvotes error: %v", err)
		return err
	}
	// no need to decrement alreadyvotes
	vi.CurVotes -= 1
	if vi.CurVotes >= vi.Votes {
		return nil
	}
	vi.Status = "doing"
	// log.Printf("task status: doing, %v", vi.Id)
	return MgoUpdate("weipiao", "task", bson.M{"_id": vi.Id}, bson.M{"$set": bson.M{"status": vi.Status}})
	// return MgoUpdate("weipiao", "task", bson.M{"key": vi.Key}, bson.M{"$set": bson.M{"curvotes": vi.CurVotes, "status": vi.Status}})
}

// SetFinishTime stores the task's finish time.
func (vi *Task) SetFinishTime(finishtime time.Time) error {
	vi.FinishTime = finishtime
	return MgoUpdate("weipiao", "task", bson.M{"_id": vi.Id}, bson.M{"$set": bson.M{"finishtime": vi.FinishTime}})
}

// SetStatus stores the task's status.
func (vi *Task) SetStatus(status string) error {
	vi.Status = status
	// return MgoUpdate("weipiao", "task", bson.M{"key": vi.Key}, bson.M{"$set": bson.M{"status": vi.Status}})
	return MgoUpdate("weipiao", "task", bson.M{"_id": vi.Id}, bson.M{"$set": bson.M{"status": vi.Status}})
}

// DecrRunnerCount atomically decrements runnercount and reloads the
// document into the receiver.
func (task *Task) DecrRunnerCount() error {
	// atomically -1 and return the updated document
	session := mongoSession.Clone()
	defer session.Close()
	c := session.DB("weipiao").C("task")
	change := mgo.Change{
		Update:    bson.M{"$inc": bson.M{"runnercount": -1}},
		ReturnNew: true,
	}
	// var task2 Task
	// NOTE(review): &task is a **Task here (task is already a pointer);
	// presumably mgo tolerates it, but `task` alone looks intended — verify.
	_, err := c.Find(bson.M{"_id": bson.ObjectId(task.Id)}).Apply(change, &task)
	if err != nil {
		log.Printf("DecrRunnerCount change error: %v", err)
		return err
	}
	// log.Printf("still runnercount: %v", task.RunnerCount)
	// err := MgoUpdate("weipiao", "task", bson.M{"_id": bson.ObjectId(task.Id)}, bson.M{"$inc": bson.M{"runnercount": -1}})
	// if err != nil {
	// 	log.Printf("DecrRunnerCount error: %v", err)
	// 	return -1
	// }
	// task2, err := QueryTaskById(task.Id.Hex())
	// if err != nil {
	// 	log.Printf("QueryTaskById error: %v", err)
	// 	return -1
	// }
	return nil
}

// SetRunnerCount stores the task's runner count.
func (task *Task) SetRunnerCount(count int) error {
	return MgoUpdate("weipiao", "task", bson.M{"_id": task.Id}, bson.M{"$set": bson.M{"runnercount": count}})
}

// jsonUnmarshal decodes JSON with UseNumber so numeric values keep full
// precision instead of becoming float64.
func jsonUnmarshal(data []byte, v interface{}) error {
	d := json.NewDecoder(bytes.NewReader(data))
	d.UseNumber()
	return d.Decode(v)
}

// // after one PC finishes, re-dispatch based on the URL
// func TaskDispatch(voteUrl string) error {
// 	log.Printf("TaskDispatch: url: %v", voteUrl)
// 	var tasks []*Task
// 	err := MgoFind("weipiao", "task", bson.M{"url": voteUrl, "status": "doing"}, &tasks)
// 	if err != nil {
// 		log.Printf("MgoFind(task) error: %v", err)
// 		return err
// 	}
// 	if len(tasks) == 0 {
// 		return errors.New("task not found: url: " + voteUrl)
// 	}
// 	task := tasks[0]
// 	if task.Status != "doing" {
// 		return nil
// 	}
// 	if task.Status == "doing" {
// 		r := GetFreeRunner(task.Key) // TODO still need to ensure records in db.task carry a key
// 		if r == nil {
// 			log.Printf("TaskDispatch: no free runner: %v", task.Key)
// 			return errors.New("TaskDispatch: no free runner")
// 		}
// 		r.DispatchTask(task)
// 	}
// 	return nil
// }
package filemeta import ( "easyfiler/pkg/db" ) type FileMeta struct { FileSha1 string FileMD5 string FileName string FileSize int64 Location string UploadAt string } func UpdateFileMeta(f FileMeta) bool { return db.UploadAndFinished(f.FileSha1, f.FileName, f.Location, f.FileSize) } func GetFileMeta(filesha1 string) (filemeta FileMeta, err error) { tablefile, err := db.GetFileMeta(filesha1) if err != nil { return FileMeta{}, err } filemeta = FileMeta{ FileSha1: tablefile.FileHash, FileName: tablefile.FileName.String, FileSize: tablefile.FileSize.Int64, Location: tablefile.FileAddr.String, } return } func DelteFileMeta(fileSha1 string) { db.DeleteFileMeta(fileSha1) }
// package k8s contains check implementations that rely on interacting with
// Kubernetes API server.
package k8s

import (
	"github.com/redhat-openshift-ecosystem/openshift-preflight/certification/internal/shell"
	"github.com/redhat-openshift-ecosystem/openshift-preflight/cli"
)

// Create a package-level openshiftEngine variable, that can be overridden
// at the test level.
var (
	openshiftEngine cli.OpenshiftEngine
	podmanEngine    cli.PodmanEngine
)

// init installs the default engine implementations; tests are expected to
// swap the package-level variables above for fakes.
func init() {
	openshiftEngine = OpenshiftEngine{}
	podmanEngine = shell.PodmanCLIEngine{}
}
package openrtb_ext

// ImpExtPangle holds the Pangle-specific impression extension parameters
// carried in an OpenRTB bid request.
type ImpExtPangle struct {
	// Token authenticates the request with Pangle; always serialized.
	Token string `json:"token"`
	// AppID identifies the app; omitted from JSON when empty.
	AppID string `json:"appid,omitempty"`
	// PlacementID identifies the placement; omitted from JSON when empty.
	PlacementID string `json:"placementid,omitempty"`
}
package menu import "github.com/google/uuid" type Manager struct { menuItems map[string]*Item } func (m *Manager) AddItem(Title string, ParentMenuID string) *Item { if m.menuItems == nil { m.menuItems = make(map[string]*Item) } menuItem := Item{ ID: uuid.New(), Title: Title, Children: make(map[string]*Item), } if ParentMenuID != "" { m.addItemParent(&menuItem, ParentMenuID) } m.menuItems[menuItem.ID.String()] = &menuItem return &menuItem } func (m *Manager) GetItems() map[string]*Item { return m.menuItems } func (m *Manager) GetItem(MenuID string) *Item { return m.menuItems[MenuID] } func (m *Manager) addItemParent(MenuItem *Item, ParentMenuID string) { if ParentMenuItem, ok := m.menuItems[ParentMenuID]; ok { MenuItem.Parent = ParentMenuItem ParentMenuItem.Children[MenuItem.ID.String()] = MenuItem } } func (m Manager) Format(formatter Formatter) { formatter.Format(m.menuItems) }
package directmessages

import (
	"encoding/json"
	"io/ioutil"
	"testing"
)

// TestItUnmarchalsMessageCreateJson verifies that testdata/messagecreate.json
// decodes into MessageCreate with the expected sender, recipient and text.
func TestItUnmarchalsMessageCreateJson(t *testing.T) {
	data, err := ioutil.ReadFile("testdata/messagecreate.json")
	if err != nil {
		// Fatal rather than Error: without the fixture the assertions below
		// are meaningless.
		t.Fatal("Failed to read messagecreate.json", err)
	}
	var messageCreate MessageCreate
	// The unmarshal error was previously discarded; a malformed fixture now
	// fails loudly instead of silently asserting against a zero value.
	if err := json.Unmarshal(data, &messageCreate); err != nil {
		t.Fatal("Failed to parse messagecreate.json", err)
	}
	if messageCreate.SenderID != "SenderId" {
		t.Error("SenderID expected", "SenderId", "got", messageCreate.SenderID)
	}
	if messageCreate.Target.RecipientID != "RecipientId" {
		t.Error("Target RecipientID expected", "RecipientId", "got", messageCreate.Target.RecipientID)
	}
	if messageCreate.MessageData.Text != "Some Text" {
		t.Error("MessageData Text expected", "Some Text", "got", messageCreate.MessageData.Text)
	}
}
//if the number is divisible by 3 say "fizz", if divisible by 5, say "buzz", if by both say "fizz buzz" package main import ( "fmt" ) func main() { fmt.Println("===========Start============") i := 0 for { i++ switch { case (i%3 == 0 && i%5 == 0): fmt.Println("fizz buzz") case i%5 == 0: fmt.Println("buzz") case i%3 == 0: fmt.Println("fizz") default: fmt.Println("--------------------") } if i == 20 { break } } }
// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package selinux

import (
	"context"

	"chromiumos/tast/testing"
)

// FileTestCase specifies a single test case for files to test for SELinux labels
// Files should have been labeled by platform2/sepolicy/file_contexts/ or
// platform2/sepolicy/policy/*/genfs_contexts with a few exceptions.
// Exceptions include:
//   - type_transition rule to default assign a label for files created
//     under some condition.
//   - mv/cp files without preserving original labels but inheriting
//     labels from new parent directory (e.g. /var/log/mount-encrypted.log)
type FileTestCase struct {
	Path         string               // absolute file path
	Context      string               // expected SELinux file context
	Recursive    bool                 // also check files under Path recursively
	Filter       FileLabelCheckFilter // optional; nil means check every file
	IgnoreErrors bool                 // tolerate per-file errors for this case
	Log          bool                 // enable verbose logging for this case
}

// FilesTestInternal runs the test suite for SELinuxFilesSystem(Informational)?
func FilesTestInternal(ctx context.Context, s *testing.State, testCases []FileTestCase) {
	for _, testCase := range testCases {
		// Default to checking all files when no filter is supplied.
		filter := testCase.Filter
		if filter == nil {
			filter = CheckAll
		}
		expected, err := FileContextRegexp(testCase.Context)
		if err != nil {
			// A bad expected-context pattern fails this case but lets the
			// remaining cases run.
			s.Errorf("Failed to compile expected context %q: %v", testCase.Context, err)
			continue
		}
		CheckContext(ctx, s, &CheckContextReq{
			Path:         testCase.Path,
			Expected:     expected,
			Recursive:    testCase.Recursive,
			Filter:       filter,
			IgnoreErrors: testCase.IgnoreErrors,
			Log:          testCase.Log,
		})
	}
}
package util

import (
	"errors"
	"io/ioutil"
	"strings"
	"time"
)

// TimeOut general timeout
var TimeOut = time.Duration(5 * time.Second)

// fileToStringList reads a dictionary file and returns its non-empty,
// whitespace-trimmed lines.
func fileToStringList(path string) (strList []string, err error) {
	byteContent, err := ioutil.ReadFile(path)
	if err != nil {
		return
	}
	strContent := strings.TrimSpace(string(byteContent))
	for _, line := range strings.Split(strContent, "\n") {
		lineStrip := strings.TrimSpace(line)
		if lineStrip == "" {
			continue
		}
		strList = append(strList, lineStrip)
	}
	return
}

// GetConfigStrList expands a config value into a string list.
//
// Two forms are supported:
//   - "[file]:<path>" loads one entry per non-empty line of <path>;
//   - otherwise the value is a comma-separated list whose elements are
//     whitespace-trimmed.
func GetConfigStrList(value string) (strList []string, err error) {
	if strings.HasPrefix(value, "[file]") {
		// Guard against a missing ":<path>" part; the previous code indexed
		// SplitN(...)[1] unconditionally and panicked on e.g. "[file]".
		parts := strings.SplitN(value, ":", 2)
		if len(parts) < 2 {
			err = errors.New("invalid [file] config value: expected \"[file]:<path>\"")
			return
		}
		strList, err = fileToStringList(parts[1])
		return
	}
	for _, v := range strings.Split(value, ",") {
		item := strings.TrimSpace(v)
		strList = append(strList, item)
	}
	return
}
package gcb

// oauthScope is the OAuth2 scope requested for Google Cloud API access.
const oauthScope = "https://www.googleapis.com/auth/cloud-platform"

// triggerURL is the Cloud Build "run trigger" endpoint template; the two %s
// verbs are the project id and the trigger id.
const triggerURL = "https://cloudbuild.googleapis.com/v1/projects/%s/triggers/%s:run"
package main import ( log "github.com/sirupsen/logrus" "lhc.go.game.user/libs/mysql" "lhc.go.game.user/conf" "lhc.go.game.user/router" "lhc.go.game.user/libs/redis" "lhc.go.game.user/utitls/logs" ) func main() { if err := conf.InitConfige();err!=nil{ log.Fatal("加载配置文件失败:%s.....",err) } if err := redis.InitRedis();err!=nil{ log.Fatal("redis启动失败:%s.....",err) } defer redis.Redis.Close() if err := logs.InitLogger();err!=nil{ log.Fatal("logs..初始化失败:%s.....",err) } if err:=mysql.InitMysql();err!=nil{ log.Fatal("mysql启动失败:%s.....",err) } defer mysql.MysqlConnet.Close() r := router.InitRouter() if err := r.Run(":8070");err!=nil{ log.Fatal("服务器启动失败失败:%s.....",err) } }
package odoo

import (
	"fmt"
)

// IrMailServer represents ir.mail_server model.
//
// NOTE(review): every xmlrpc tag below carries the option "omptempty", which
// looks like a typo for "omitempty". Verify against the xmlrpc marshaler
// before changing it — as written the option is likely ignored and fields are
// always serialized.
type IrMailServer struct {
	LastUpdate     *Time      `xmlrpc:"__last_update,omptempty"`
	Active         *Bool      `xmlrpc:"active,omptempty"`
	CreateDate     *Time      `xmlrpc:"create_date,omptempty"`
	CreateUid      *Many2One  `xmlrpc:"create_uid,omptempty"`
	DisplayName    *String    `xmlrpc:"display_name,omptempty"`
	Id             *Int       `xmlrpc:"id,omptempty"`
	Name           *String    `xmlrpc:"name,omptempty"`
	Sequence       *Int       `xmlrpc:"sequence,omptempty"`
	SmtpDebug      *Bool      `xmlrpc:"smtp_debug,omptempty"`
	SmtpEncryption *Selection `xmlrpc:"smtp_encryption,omptempty"`
	SmtpHost       *String    `xmlrpc:"smtp_host,omptempty"`
	SmtpPass       *String    `xmlrpc:"smtp_pass,omptempty"`
	SmtpPort       *Int       `xmlrpc:"smtp_port,omptempty"`
	SmtpUser       *String    `xmlrpc:"smtp_user,omptempty"`
	WriteDate      *Time      `xmlrpc:"write_date,omptempty"`
	WriteUid       *Many2One  `xmlrpc:"write_uid,omptempty"`
}

// IrMailServers represents array of ir.mail_server model.
type IrMailServers []IrMailServer

// IrMailServerModel is the odoo model name.
const IrMailServerModel = "ir.mail_server"

// Many2One convert IrMailServer to *Many2One.
func (im *IrMailServer) Many2One() *Many2One {
	return NewMany2One(im.Id.Get(), "")
}

// CreateIrMailServer creates a new ir.mail_server model and returns its id.
func (c *Client) CreateIrMailServer(im *IrMailServer) (int64, error) {
	ids, err := c.CreateIrMailServers([]*IrMailServer{im})
	if err != nil {
		return -1, err
	}
	if len(ids) == 0 {
		return -1, nil
	}
	return ids[0], nil
}

// CreateIrMailServers creates new ir.mail_server models and returns their ids.
func (c *Client) CreateIrMailServers(ims []*IrMailServer) ([]int64, error) {
	var vv []interface{}
	for _, v := range ims {
		vv = append(vv, v)
	}
	return c.Create(IrMailServerModel, vv)
}

// UpdateIrMailServer updates an existing ir.mail_server record.
func (c *Client) UpdateIrMailServer(im *IrMailServer) error {
	return c.UpdateIrMailServers([]int64{im.Id.Get()}, im)
}

// UpdateIrMailServers updates existing ir.mail_server records.
// All records (represented by ids) will be updated by im values.
func (c *Client) UpdateIrMailServers(ids []int64, im *IrMailServer) error {
	return c.Update(IrMailServerModel, ids, im)
}

// DeleteIrMailServer deletes an existing ir.mail_server record.
func (c *Client) DeleteIrMailServer(id int64) error {
	return c.DeleteIrMailServers([]int64{id})
}

// DeleteIrMailServers deletes existing ir.mail_server records.
func (c *Client) DeleteIrMailServers(ids []int64) error {
	return c.Delete(IrMailServerModel, ids)
}

// GetIrMailServer gets ir.mail_server existing record.
func (c *Client) GetIrMailServer(id int64) (*IrMailServer, error) {
	ims, err := c.GetIrMailServers([]int64{id})
	if err != nil {
		return nil, err
	}
	if ims != nil && len(*ims) > 0 {
		return &((*ims)[0]), nil
	}
	return nil, fmt.Errorf("id %v of ir.mail_server not found", id)
}

// GetIrMailServers gets ir.mail_server existing records.
func (c *Client) GetIrMailServers(ids []int64) (*IrMailServers, error) {
	ims := &IrMailServers{}
	if err := c.Read(IrMailServerModel, ids, nil, ims); err != nil {
		return nil, err
	}
	return ims, nil
}

// FindIrMailServer finds ir.mail_server record by querying it with criteria.
func (c *Client) FindIrMailServer(criteria *Criteria) (*IrMailServer, error) {
	ims := &IrMailServers{}
	if err := c.SearchRead(IrMailServerModel, criteria, NewOptions().Limit(1), ims); err != nil {
		return nil, err
	}
	if ims != nil && len(*ims) > 0 {
		return &((*ims)[0]), nil
	}
	return nil, fmt.Errorf("ir.mail_server was not found with criteria %v", criteria)
}

// FindIrMailServers finds ir.mail_server records by querying it
// and filtering it with criteria and options.
func (c *Client) FindIrMailServers(criteria *Criteria, options *Options) (*IrMailServers, error) {
	ims := &IrMailServers{}
	if err := c.SearchRead(IrMailServerModel, criteria, options, ims); err != nil {
		return nil, err
	}
	return ims, nil
}

// FindIrMailServerIds finds records ids by querying it
// and filtering it with criteria and options.
func (c *Client) FindIrMailServerIds(criteria *Criteria, options *Options) ([]int64, error) { ids, err := c.Search(IrMailServerModel, criteria, options) if err != nil { return []int64{}, err } return ids, nil } // FindIrMailServerId finds record id by querying it with criteria. func (c *Client) FindIrMailServerId(criteria *Criteria, options *Options) (int64, error) { ids, err := c.Search(IrMailServerModel, criteria, options) if err != nil { return -1, err } if len(ids) > 0 { return ids[0], nil } return -1, fmt.Errorf("ir.mail_server was not found with criteria %v and options %v", criteria, options) }
package optgen

import (
	"bytes"
	"fmt"
	"strings"
)

// AcceptFunc is invoked by Visit for every node in an expression tree;
// returning a different Expr replaces that node in a copied tree.
type AcceptFunc func(expr Expr) Expr

// Expr is the interface implemented by every node of the optgen AST.
type Expr interface {
	Op() Operator
	Children() []Expr
	ChildName(pos int) string
	Value() interface{}
	Visit(accept AcceptFunc) Expr
	String() string
	Format(buf *bytes.Buffer, level int)
}

// expr is the embedded base implementation shared by all concrete node types.
type expr struct {
	op       Operator
	children []Expr
	value    interface{}    // leaf payload (e.g. a string); nil for interior nodes
	names    map[int]string // optional display names for child positions
}

func (e *expr) Op() Operator { return e.op }

func (e *expr) Children() []Expr { return e.children }

func (e *expr) ChildName(pos int) string { return e.names[pos] }

func (e *expr) Value() interface{} { return e.value }

// visitChildren applies accept to each child. The child slice is copied only
// when at least one child was replaced (copy-on-write); replaced reports
// whether a copy was made.
func (e *expr) visitChildren(accept AcceptFunc) (children []Expr, replaced bool) {
	for i, child := range e.children {
		newChild := child.Visit(accept)
		if child != newChild {
			if children == nil {
				children = make([]Expr, len(e.children))
				copy(children, e.children)
			}
			children[i] = newChild
		}
	}
	if children == nil {
		children = e.children
	} else {
		replaced = true
	}
	return
}

func (e *expr) String() string {
	var buf bytes.Buffer
	e.Format(&buf, 0)
	return buf.String()
}

// Format pretty-prints the node: leaves print their value, flat nodes print
// on one line, and nodes with nested children print one child per indented
// line.
func (e *expr) Format(buf *bytes.Buffer, level int) {
	if e.value != nil {
		if e.op == StringOp {
			buf.WriteByte('"')
			buf.WriteString(e.value.(string))
			buf.WriteByte('"')
		} else if e.op == OpNameOp {
			buf.WriteString(e.value.(string))
			buf.WriteString("Op")
		} else {
			buf.WriteString(fmt.Sprintf("%v", e.value))
		}
		return
	}

	// Derive the display name from the operator name, dropping the trailing
	// "Op" suffix.
	opName := strings.Title(e.op.String())
	opName = opName[:len(opName)-2]

	if len(e.children) == 0 {
		buf.WriteByte('(')
		buf.WriteString(opName)
		buf.WriteByte(')')
		return
	}

	// Use the multi-line form when any child is itself an interior node.
	nested := false
	for _, child := range e.children {
		if child.Value() == nil && len(child.Children()) != 0 {
			nested = true
			break
		}
	}

	if !nested {
		buf.WriteByte('(')
		buf.WriteString(opName)
		for i, child := range e.children {
			buf.WriteByte(' ')
			if i < len(e.names) {
				buf.WriteString(e.names[i])
				buf.WriteByte('=')
			}
			child.Format(buf, level)
		}
		buf.WriteByte(')')
	} else {
		buf.WriteByte('(')
		buf.WriteString(opName)
		buf.WriteByte('\n')
		level++
		for i, child := range e.children {
			writeIndent(buf, level)
			if i < len(e.names) {
				buf.WriteString(e.names[i])
				buf.WriteByte('=')
			}
			child.Format(buf, level)
			buf.WriteByte('\n')
		}
		level--
		writeIndent(buf, level)
		buf.WriteByte(')')
	}
}

// RootExpr is the top of the AST: (Root Defines Rules).
type RootExpr struct{ expr }

func NewRootExpr() *RootExpr {
	children := []Expr{
		NewDefineSetExpr(),
		NewRuleSetExpr(),
	}
	names := map[int]string{0: "Defines", 1: "Rules"}
	return &RootExpr{expr{op: RootOp, children: children, names: names}}
}

func (e *RootExpr) Defines() *DefineSetExpr {
	return e.children[0].(*DefineSetExpr)
}

func (e *RootExpr) Rules() *RuleSetExpr {
	return e.children[1].(*RuleSetExpr)
}

func (e *RootExpr) Visit(accept AcceptFunc) Expr {
	if children, replaced := e.visitChildren(accept); replaced {
		return accept(&RootExpr{expr{op: RootOp, children: children, names: e.names}})
	}
	return accept(e)
}

// DefineSetExpr is the collection of all (Define ...) expressions.
type DefineSetExpr struct{ expr }

func NewDefineSetExpr() *DefineSetExpr {
	return &DefineSetExpr{expr{op: DefineSetOp}}
}

func (e *DefineSetExpr) All() []Expr {
	return e.children
}

func (e *DefineSetExpr) Add(define *DefineExpr) {
	e.children = append(e.children, define)
}

func (e *DefineSetExpr) Visit(accept AcceptFunc) Expr {
	if children, replaced := e.visitChildren(accept); replaced {
		return accept(&DefineSetExpr{expr{op: DefineSetOp, children: children}})
	}
	return accept(e)
}

// DefineExpr is one operator definition: (Define Name Tags Field*).
type DefineExpr struct{ expr }

func NewDefineExpr(name string, tags []string) *DefineExpr {
	children := []Expr{
		NewStringExpr(name),
		NewTagsExpr(tags),
	}
	names := map[int]string{0: "Name", 1: "Tags"}
	return &DefineExpr{expr{op: DefineOp, children: children, names: names}}
}

func (e *DefineExpr) Name() string {
	return e.children[0].(*StringExpr).ValueAsString()
}

func (e *DefineExpr) Tags() *TagsExpr {
	return e.children[1].(*TagsExpr)
}

func (e *DefineExpr) ListField() *DefineFieldExpr {
	// If list-typed field is present, it will be the last field, or the second
	// to last field if a private field is present.
	index := len(e.children) - 1
	if e.PrivateField() != nil {
		index--
	}
	// Positions 0 and 1 hold Name and Tags, so fields start at index 2.
	if index < 2 {
		return nil
	}
	defineField := e.children[index].(*DefineFieldExpr)
	if defineField.IsListType() {
		return defineField
	}
	return nil
}

func (e *DefineExpr) PrivateField() *DefineFieldExpr {
	// If private is present, it will be the last field.
	index := len(e.children) - 1
	if index < 2 {
		return nil
	}
	defineField := e.children[index].(*DefineFieldExpr)
	if defineField.IsPrivateType() {
		return defineField
	}
	return nil
}

func (e *DefineExpr) Fields() []Expr {
	return e.children[2:]
}

func (e *DefineExpr) Add(field *DefineFieldExpr) {
	e.children = append(e.children, field)
}

func (e *DefineExpr) HasTag(tag string) bool {
	for _, elem := range e.Tags().All() {
		s := elem.(*StringExpr)
		if s.ValueAsString() == tag {
			return true
		}
	}
	return false
}

func (e *DefineExpr) Visit(accept AcceptFunc) Expr {
	if children, replaced := e.visitChildren(accept); replaced {
		return accept(&DefineExpr{expr{op: DefineOp, children: children, names: e.names}})
	}
	return accept(e)
}

// DefineFieldExpr is one field of a define: (DefineField Name Type).
type DefineFieldExpr struct{ expr }

func NewDefineFieldExpr(name, typ string) *DefineFieldExpr {
	children := []Expr{
		NewStringExpr(name),
		NewStringExpr(typ),
	}
	names := map[int]string{0: "Name", 1: "Type"}
	return &DefineFieldExpr{expr{op: DefineFieldOp, children: children, names: names}}
}

func (e *DefineFieldExpr) Name() string {
	return e.children[0].(*StringExpr).ValueAsString()
}

func (e *DefineFieldExpr) Type() string {
	return e.children[1].(*StringExpr).ValueAsString()
}

func (e *DefineFieldExpr) IsExprType() bool {
	return e.Type() == "Expr"
}

func (e *DefineFieldExpr) IsListType() bool {
	return e.Type() == "ExprList"
}

// IsPrivateType reports whether the field holds a non-expression payload.
func (e *DefineFieldExpr) IsPrivateType() bool {
	typ := e.Type()
	return typ != "Expr" && typ != "ExprList"
}

func (e *DefineFieldExpr) Visit(accept AcceptFunc) Expr {
	if children, replaced := e.visitChildren(accept); replaced {
		return accept(&DefineFieldExpr{expr{op: DefineFieldOp, children: children, names: e.names}})
	}
	return accept(e)
}

// RuleSetExpr is the collection of all (Rule ...) expressions.
type RuleSetExpr struct{ expr }

func NewRuleSetExpr() *RuleSetExpr {
	return &RuleSetExpr{expr{op: RuleSetOp}}
}

func (e *RuleSetExpr) All() []Expr {
	return e.children
}

func (e *RuleSetExpr) Add(rule *RuleExpr) {
	e.children = append(e.children, rule)
}

func (e *RuleSetExpr) Visit(accept AcceptFunc) Expr {
	if children, replaced := e.visitChildren(accept); replaced {
		return accept(&RuleSetExpr{expr{op: RuleSetOp, children: children}})
	}
	return accept(e)
}

// RuleExpr is one rewrite rule: (Rule Header Match Replace).
type RuleExpr struct{ expr }

func NewRuleExpr(header *RuleHeaderExpr, match Expr, replace Expr) *RuleExpr {
	children := []Expr{
		header,
		match,
		replace,
	}
	names := map[int]string{0: "Header", 1: "Match", 2: "Replace"}
	return &RuleExpr{expr{op: RuleOp, children: children, names: names}}
}

func (e *RuleExpr) Header() *RuleHeaderExpr {
	return e.children[0].(*RuleHeaderExpr)
}

func (e *RuleExpr) Match() Expr {
	return e.children[1]
}

func (e *RuleExpr) Replace() Expr {
	return e.children[2]
}

func (e *RuleExpr) Visit(accept AcceptFunc) Expr {
	if children, replaced := e.visitChildren(accept); replaced {
		return accept(&RuleExpr{expr{op: RuleOp, children: children, names: e.names}})
	}
	return accept(e)
}

// RuleHeaderExpr is a rule's name plus its tag list.
type RuleHeaderExpr struct{ expr }

func NewRuleHeaderExpr(name string, tags []string) *RuleHeaderExpr {
	children := []Expr{
		NewStringExpr(name),
		NewTagsExpr(tags),
	}
	names := map[int]string{0: "Name", 1: "Tags"}
	return &RuleHeaderExpr{expr{op: RuleHeaderOp, children: children, names: names}}
}

func (e *RuleHeaderExpr) Name() string {
	return e.children[0].(*StringExpr).ValueAsString()
}

func (e *RuleHeaderExpr) Tags() *TagsExpr {
	return e.children[1].(*TagsExpr)
}

func (e *RuleHeaderExpr) Visit(accept AcceptFunc) Expr {
	if children, replaced := e.visitChildren(accept); replaced {
		return accept(&RuleHeaderExpr{expr{op: RuleHeaderOp, children: children, names: e.names}})
	}
	return accept(e)
}

// BindExpr binds a label to a matched sub-expression: (Bind Label Target).
type BindExpr struct{ expr }

func NewBindExpr(label string, target Expr) *BindExpr {
	children := []Expr{
		NewStringExpr(label),
		target,
	}
	names := map[int]string{0: "Label", 1: "Target"}
	return &BindExpr{expr{op: BindOp, children: children, names: names}}
}

func (e *BindExpr) Label() string {
	return e.children[0].(*StringExpr).ValueAsString()
}

func (e *BindExpr) Target() Expr {
	return e.children[1]
}

func (e *BindExpr) Visit(accept AcceptFunc) Expr {
	if children, replaced := e.visitChildren(accept); replaced {
		return accept(&BindExpr{expr{op: BindOp, children: children, names: e.names}})
	}
	return accept(e)
}

// RefExpr references a previously bound label: (Ref Label).
type RefExpr struct{ expr }

func NewRefExpr(label string) *RefExpr {
	children := []Expr{
		NewStringExpr(label),
	}
	names := map[int]string{0: "Label"}
	return &RefExpr{expr{op: RefOp, children: children, names: names}}
}

func (e *RefExpr) Label() string {
	return e.children[0].(*StringExpr).ValueAsString()
}

func (e *RefExpr) Visit(accept AcceptFunc) Expr {
	if children, replaced := e.visitChildren(accept); replaced {
		return accept(&RefExpr{expr{op: RefOp, children: children, names: e.names}})
	}
	return accept(e)
}

// MatchNamesExpr is the list of op names a match pattern accepts.
type MatchNamesExpr struct{ expr }

func NewMatchNamesExpr() *MatchNamesExpr {
	return &MatchNamesExpr{expr{op: MatchNamesOp}}
}

func (e *MatchNamesExpr) All() []Expr {
	return e.children
}

func (e *MatchNamesExpr) Name(index int) string {
	return e.children[index].(*StringExpr).ValueAsString()
}

func (e *MatchNamesExpr) Add(name *StringExpr) {
	e.children = append(e.children, name)
}

func (e *MatchNamesExpr) Visit(accept AcceptFunc) Expr {
	if children, replaced := e.visitChildren(accept); replaced {
		return accept(&MatchNamesExpr{expr{op: MatchNamesOp, children: children, names: e.names}})
	}
	return accept(e)
}

// MatchFieldsExpr matches an op (child 0 holds the names) and its fields
// (children 1+).
type MatchFieldsExpr struct{ expr }

func NewMatchFieldsExpr(names Expr) *MatchFieldsExpr {
	children := []Expr{
		names,
	}
	namesMap := map[int]string{0: "Names"}
	return &MatchFieldsExpr{expr{op: MatchFieldsOp, children: children, names: namesMap}}
}

func (e *MatchFieldsExpr) Names() Expr {
	return e.children[0]
}

func (e *MatchFieldsExpr) Fields() []Expr {
	return e.children[1:]
}

func (e *MatchFieldsExpr) Add(match Expr) {
	e.children = append(e.children, match)
}

func (e *MatchFieldsExpr) Visit(accept AcceptFunc) Expr {
	if children, replaced := e.visitChildren(accept); replaced {
		return accept(&MatchFieldsExpr{expr{op: MatchFieldsOp, children: children, names: e.names}})
	}
	return accept(e)
}

// MatchAndExpr requires both sub-matches to succeed.
type MatchAndExpr struct{ expr }

func NewMatchAndExpr(left, right Expr) *MatchAndExpr {
	return &MatchAndExpr{expr{op: MatchAndOp, children: []Expr{left, right}}}
}

func (e *MatchAndExpr) Left() Expr {
	return e.children[0]
}

func (e *MatchAndExpr) Right() Expr {
	return e.children[1]
}

func (e *MatchAndExpr) Visit(accept AcceptFunc) Expr {
	if children, replaced := e.visitChildren(accept); replaced {
		return accept(&MatchAndExpr{expr{op: MatchAndOp, children: children}})
	}
	return accept(e)
}

// MatchInvokeExpr invokes a custom match function (child 0) with args (1+).
type MatchInvokeExpr struct{ expr }

func NewMatchInvokeExpr(funcName string) *MatchInvokeExpr {
	children := []Expr{
		NewStringExpr(funcName),
	}
	names := map[int]string{0: "FuncName"}
	return &MatchInvokeExpr{expr{op: MatchInvokeOp, children: children, names: names}}
}

func (e *MatchInvokeExpr) FuncName() string {
	return e.children[0].(*StringExpr).ValueAsString()
}

func (e *MatchInvokeExpr) Args() []Expr {
	return e.children[1:]
}

func (e *MatchInvokeExpr) Add(match Expr) {
	e.children = append(e.children, match)
}

func (e *MatchInvokeExpr) Visit(accept AcceptFunc) Expr {
	if children, replaced := e.visitChildren(accept); replaced {
		return accept(&MatchInvokeExpr{expr{op: MatchInvokeOp, children: children, names: e.names}})
	}
	return accept(e)
}

// MatchNotExpr negates its input match.
type MatchNotExpr struct{ expr }

func NewMatchNotExpr(input Expr) *MatchNotExpr {
	return &MatchNotExpr{expr{op: MatchNotOp, children: []Expr{input}}}
}

func (e *MatchNotExpr) Input() Expr {
	return e.children[0]
}

func (e *MatchNotExpr) Visit(accept AcceptFunc) Expr {
	if children, replaced := e.visitChildren(accept); replaced {
		return accept(&MatchNotExpr{expr{op: MatchNotOp, children: children}})
	}
	return accept(e)
}

// MatchAnyExpr matches anything; it is stateless, so one shared instance is
// reused.
type MatchAnyExpr struct{ expr }

var matchAnySingleton = &MatchAnyExpr{expr{op: MatchAnyOp}}

func NewMatchAnyExpr() *MatchAnyExpr {
	return matchAnySingleton
}

func (e *MatchAnyExpr) Visit(accept AcceptFunc) Expr {
	return accept(e)
}

// MatchListExpr matches a list whose item matches the child pattern.
type MatchListExpr struct{ expr }

func NewMatchListExpr(matchItem Expr) *MatchListExpr {
	return &MatchListExpr{expr{op: MatchListOp, children: []Expr{matchItem}}}
}

func (e *MatchListExpr) MatchItem() Expr {
	return e.children[0]
}

func (e *MatchListExpr) Visit(accept AcceptFunc) Expr {
	if children, replaced := e.visitChildren(accept); replaced {
		return accept(&MatchListExpr{expr{op: MatchListOp, children: children, names: e.names}})
	}
	return accept(e)
}

// ReplaceRootExpr is the root of a rule's replace pattern.
type ReplaceRootExpr struct{ expr }

func NewReplaceRootExpr() *ReplaceRootExpr {
	return &ReplaceRootExpr{expr{op: ReplaceRootOp}}
}

func (e *ReplaceRootExpr) All() []Expr {
	return e.children
}

func (e *ReplaceRootExpr) Add(replace Expr) {
	e.children = append(e.children, replace)
}

func (e *ReplaceRootExpr) Visit(accept AcceptFunc) Expr {
	if children, replaced := e.visitChildren(accept); replaced {
		return accept(&ReplaceRootExpr{expr{op: ReplaceRootOp, children: children, names: e.names}})
	}
	return accept(e)
}

// ConstructExpr constructs a new op (child 0 = op name, children 1+ = args).
type ConstructExpr struct{ expr }

func NewConstructExpr(opName Expr) *ConstructExpr {
	children := []Expr{
		opName,
	}
	names := map[int]string{0: "OpName"}
	return &ConstructExpr{expr{op: ConstructOp, children: children, names: names}}
}

func (e *ConstructExpr) OpName() Expr {
	return e.children[0]
}

func (e *ConstructExpr) Args() []Expr {
	return e.children[1:]
}

func (e *ConstructExpr) Add(arg Expr) {
	e.children = append(e.children, arg)
}

func (e *ConstructExpr) Visit(accept AcceptFunc) Expr {
	if children, replaced := e.visitChildren(accept); replaced {
		return accept(&ConstructExpr{expr{op: ConstructOp, children: children, names: e.names}})
	}
	return accept(e)
}

// ConstructListExpr constructs a list in a replace pattern.
type ConstructListExpr struct{ expr }

func NewConstructListExpr() *ConstructListExpr {
	return &ConstructListExpr{expr{op: ConstructListOp}}
}

func (e *ConstructListExpr) Add(item Expr) {
	e.children = append(e.children, item)
}

func (e *ConstructListExpr) Visit(accept AcceptFunc) Expr {
	if children, replaced := e.visitChildren(accept); replaced {
		return accept(&ConstructListExpr{expr{op: ConstructListOp, children: children, names: e.names}})
	}
	return accept(e)
}

// TagsExpr is a list of string tags.
type TagsExpr struct{ expr }

func NewTagsExpr(tags []string) *TagsExpr {
	e := &TagsExpr{expr{op: TagsOp}}
	for _, tag := range tags {
		e.children = append(e.children, NewStringExpr(tag))
	}
	return e
}

func (e *TagsExpr) All() []Expr {
	return e.children
}

func (e *TagsExpr) Contains(tag string) bool {
	for _, elem := range e.children {
		value := elem.(*StringExpr).Value()
		if value == tag {
			return true
		}
	}
	return false
}

func (e *TagsExpr) Visit(accept AcceptFunc) Expr {
	if children, replaced := e.visitChildren(accept); replaced {
		return accept(&TagsExpr{expr{op: TagsOp, children: children}})
	}
	return accept(e)
}

// StringExpr is a string-valued leaf.
type StringExpr struct{ expr }

func NewStringExpr(s string) *StringExpr {
	return &StringExpr{expr{op: StringOp, value: s}}
}

func (e *StringExpr) ValueAsString() string {
	return e.value.(string)
}

func (e *StringExpr) Visit(accept AcceptFunc) Expr {
	if children, replaced := e.visitChildren(accept); replaced {
		return accept(&StringExpr{expr{op: StringOp, children: children}})
	}
	return accept(e)
}

// OpNameExpr is an operator-name leaf; Format appends an "Op" suffix.
type OpNameExpr struct{ expr }

func NewOpNameExpr(name string) *OpNameExpr {
	return &OpNameExpr{expr{op: OpNameOp, value: name}}
}

func (e *OpNameExpr) ValueAsName() string {
	return e.value.(string)
}

func (e *OpNameExpr) Visit(accept AcceptFunc) Expr {
	if children, replaced := e.visitChildren(accept); replaced {
		return accept(&OpNameExpr{expr{op: OpNameOp, children: children}})
	}
	return accept(e)
}

// writeIndent emits one tab per indentation level.
func writeIndent(buf *bytes.Buffer, level int) {
	buf.WriteString(strings.Repeat("\t", level))
}
// Package spider: simple file downloader.
package spider

import (
	"io"
	"net/http"
	"os"
	"path/filepath"
)

// Download fetches url and writes the response body to lujing/filename.
// It keeps the original string-based contract: "path error" when the target
// directory or file is unusable, "download ok" on success, and now
// "download error" (instead of panicking) when the HTTP transfer fails.
func Download(url, lujing, filename string) string {
	if _, err := os.Stat(lujing); err != nil {
		return "path error"
	}
	// Join with the directory so the file lands inside lujing; the previous
	// code created the file under the bare filename in the working directory.
	target := filepath.Join(lujing, filename)
	f, err := os.Create(target)
	if err != nil {
		// The os.Create error was previously discarded, which led to writing
		// through a nil file handle further down.
		return "path error"
	}
	defer f.Close()

	res, err := http.Get(url)
	if err != nil {
		return "download error"
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return "download error"
	}
	if _, err := io.Copy(f, res.Body); err != nil {
		return "download error"
	}
	return "download ok"
}
// Package testsupport provides helpers for creating and destroying throwaway
// Postgres databases in Ginkgo test suites.
package testsupport

import (
	"fmt"
	"lib/db"
	"os"
	"os/exec"
	"strconv"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"github.com/onsi/gomega/gexec"
)

// DBConnectionInfo identifies a Postgres server reachable via the psql CLI.
type DBConnectionInfo struct {
	Hostname string
	Port     string
	Username string
	Password string
}

// TestDatabase is one database created for a test run.
type TestDatabase struct {
	Name     string
	ConnInfo *DBConnectionInfo
}

// URL renders the database as a postgres:// connection string with
// sslmode=disable.
func (d *TestDatabase) URL() string {
	return fmt.Sprintf("postgres://%s:%s@%s:%s/%s?sslmode=%s", d.ConnInfo.Username, d.ConnInfo.Password, d.ConnInfo.Hostname, d.ConnInfo.Port, d.Name, "disable")
}

// DBConfig converts the connection info into a db.Config; it fails the
// current spec when the port is not numeric.
func (d *TestDatabase) DBConfig() db.Config {
	port, err := strconv.Atoi(d.ConnInfo.Port)
	Expect(err).NotTo(HaveOccurred())
	return db.Config{
		Host:     d.ConnInfo.Hostname,
		Port:     port,
		Username: d.ConnInfo.Username,
		Password: d.ConnInfo.Password,
		Name:     d.Name,
		SSLMode:  "disable",
	}
}

// Destroy drops the test database on its server.
func (d *TestDatabase) Destroy() {
	d.ConnInfo.RemoveDatabase(d)
}

// CreateDatabase creates dbName on the server and returns a handle to it.
func (c *DBConnectionInfo) CreateDatabase(dbName string) *TestDatabase {
	testDB := &TestDatabase{Name: dbName, ConnInfo: c}
	_, err := c.execSQL(fmt.Sprintf("CREATE DATABASE %s", dbName))
	Expect(err).NotTo(HaveOccurred())
	return testDB
}

// RemoveDatabase drops db on the server.
func (c *DBConnectionInfo) RemoveDatabase(db *TestDatabase) {
	_, err := c.execSQL(fmt.Sprintf("DROP DATABASE %s", db.Name))
	Expect(err).NotTo(HaveOccurred())
}

// execSQL shells out to psql (password passed via PGPASSWORD) and returns
// stdout; a non-zero exit code is reported as an error.
func (c *DBConnectionInfo) execSQL(sqlCommand string) (string, error) {
	cmd := exec.Command("psql", "-h", c.Hostname, "-p", c.Port, "-U", c.Username, "-c", sqlCommand)
	cmd.Env = append(os.Environ(), "PGPASSWORD="+c.Password)
	session, err := gexec.Start(cmd, GinkgoWriter, GinkgoWriter)
	Expect(err).NotTo(HaveOccurred())
	// Allow slow CI machines up to 9 seconds to run the statement.
	Eventually(session, "9s").Should(gexec.Exit())
	if session.ExitCode() != 0 {
		return "", fmt.Errorf("unexpected exit code: %d", session.ExitCode())
	}
	return string(session.Out.Contents()), nil
}

// GetDBConnectionInfo returns the default local Postgres superuser connection.
func GetDBConnectionInfo() *DBConnectionInfo {
	return &DBConnectionInfo{
		Hostname: "localhost",
		Port:     "5432",
		Username: "postgres",
		Password: "",
	}
}
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package exprgen

import (
	"fmt"
	"strings"
	"unicode"

	"github.com/cockroachdb/cockroach/pkg/sql/opt"
	"github.com/cockroachdb/cockroach/pkg/sql/opt/cat"
	"github.com/cockroachdb/cockroach/pkg/sql/opt/memo"
	"github.com/cockroachdb/cockroach/pkg/sql/opt/norm"
	"github.com/cockroachdb/cockroach/pkg/sql/opt/props/physical"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
)

// customFuncs holds the state shared by the custom functions used when
// building expressions: a factory for constructing operators, the memo
// being populated, and a catalog for object resolution.
type customFuncs struct {
	f   *norm.Factory
	mem *memo.Memo
	cat cat.Catalog
}

// NewColumn creates a new column in the metadata.
// Panics (wrapped as exprGenErr) if typeStr cannot be parsed.
func (c *customFuncs) NewColumn(name, typeStr string) opt.ColumnID {
	typ, err := ParseType(typeStr)
	if err != nil {
		panic(exprGenErr{err})
	}
	return c.f.Metadata().AddColumn(name, typ)
}

// LookupColumn looks up a column that was already specified in the expression
// so far (either via NewColumn or by using a table).
// Panics if the name is ambiguous or unknown. Linear scan over all metadata
// columns — fine for test-sized expressions.
func (c *customFuncs) LookupColumn(name string) opt.ColumnID {
	md := c.f.Metadata()
	var res opt.ColumnID
	// Column IDs are 1-based; 0 doubles as "not found yet".
	for colID := opt.ColumnID(1); int(colID) <= md.NumColumns(); colID++ {
		if md.ColumnMeta(colID).Alias == name {
			if res != 0 {
				panic(errorf("ambiguous column %s", name))
			}
			res = colID
		}
	}
	if res == 0 {
		panic(errorf("unknown column %s", name))
	}
	return res
}

// ColList creates a ColList from a comma-separated list of column names,
// looking up each column.
func (c *customFuncs) ColList(cols string) opt.ColList {
	if cols == "" {
		return opt.ColList{}
	}
	strs := strings.Split(cols, ",")
	res := make(opt.ColList, len(strs))
	for i, col := range strs {
		res[i] = c.LookupColumn(col)
	}
	return res
}

// ColSet creates a ColSet from a comma-separated list of column names, looking
// up each column.
// ColSet creates a ColSet from a comma-separated list of column names,
// resolving every name via LookupColumn.
func (c *customFuncs) ColSet(cols string) opt.ColSet {
	return c.ColList(cols).ToSet()
}

// MinPhysProps returns the singleton minimum set of physical properties.
func (c *customFuncs) MinPhysProps() *physical.Required {
	return physical.MinRequired
}

// MakePhysProps returns a set of physical properties corresponding to the
// input presentation and OrderingChoice.
func (c *customFuncs) MakePhysProps(
	p physical.Presentation, o physical.OrderingChoice,
) *physical.Required {
	required := physical.Required{Presentation: p, Ordering: o}
	return c.mem.InternPhysicalProps(&required)
}

// ExplainOptions creates a tree.ExplainOptions from a comma-separated list of
// options.
func (c *customFuncs) ExplainOptions(opts string) tree.ExplainOptions {
	stmt, err := tree.MakeExplain(strings.Split(opts, ","), &tree.Select{})
	if err != nil {
		panic(exprGenErr{err})
	}
	return stmt.(*tree.Explain).ExplainOptions
}

// Var creates a VariableOp for the given column. It allows (Var "name") as a
// shorthand for (Variable (LookupColumn "name")).
func (c *customFuncs) Var(colName string) opt.ScalarExpr {
	return c.f.ConstructVariable(c.LookupColumn(colName))
}

// FindTable looks up a table in the metadata without creating it.
// This is required to construct operators like IndexJoin which must
// reference the same table multiple times. Panics if the name is
// ambiguous or not present.
func (c *customFuncs) FindTable(name string) opt.TableID {
	var found opt.TableID
	for _, tab := range c.mem.Metadata().AllTables() {
		if string(tab.Table.Name()) != name {
			continue
		}
		if found != 0 {
			panic(errorf("ambiguous table %q", name))
		}
		found = tab.MetaID
	}
	if found == 0 {
		panic(errorf("couldn't find table with name %q", name))
	}
	return found
}

// Ordering parses a string like "+a,-b" into an Ordering.
func (c *customFuncs) Ordering(str string) opt.Ordering {
	// ParseOrdering panics on malformed input; convert that into a
	// friendlier error that includes the original string.
	defer func() {
		if r := recover(); r != nil {
			panic(errorf("could not parse Ordering \"%s\"", str))
		}
	}()
	return physical.ParseOrdering(c.substituteCols(str))
}

// OrderingChoice parses a string like "+a,-(b|c)" into an OrderingChoice.
func (c *customFuncs) OrderingChoice(str string) physical.OrderingChoice {
	// Same recover-and-rewrap pattern as Ordering above.
	defer func() {
		if r := recover(); r != nil {
			panic(errorf("could not parse OrderingChoice \"%s\"", str))
		}
	}()
	return physical.ParseOrderingChoice(c.substituteCols(str))
}

// substituteCols extracts every word (sequence of letters, numbers, and
// underscores) from the string, looks up the column with that name, and
// replaces the string with the column ID. E.g.: "+a,+b" -> "+1,+2".
func (c *customFuncs) substituteCols(str string) string {
	var b strings.Builder
	// lastPos is the byte index where the current word started, or -1
	// when we are not inside a word.
	lastPos := -1
	// maybeEmit flushes the word started at lastPos (if any) to the
	// builder as its resolved column ID, then resets the word state.
	maybeEmit := func(curPos int) {
		if lastPos != -1 {
			col := str[lastPos:curPos]
			fmt.Fprintf(&b, "%d", c.LookupColumn(col))
		}
		lastPos = -1
	}
	for i, r := range str {
		if unicode.IsLetter(r) || r == '_' || unicode.IsNumber(r) {
			if lastPos == -1 {
				lastPos = i
			}
			continue
		}
		// Non-word rune: emit any pending word, then copy the rune as-is.
		maybeEmit(i)
		b.WriteRune(r)
	}
	// Flush a word that runs to the end of the string.
	maybeEmit(len(str))
	return b.String()
}

// MakeLookupJoin is a wrapper around ConstructLookupJoin that swaps the order
// of the private and the filters. This is useful because the expressions are
// evaluated in order, and we want to be able to refer to the lookup columns in
// the ON expression. For example:
//
//   (MakeLookupJoin
//     (Scan [ (Table "def") (Cols "d,e") ])
//     [ (JoinType "left-join") (Table "abc") (Index "abc@ab") (KeyCols "a") (Cols "a,b") ]
//     [ (Gt (Var "a") (Var "e")) ]
//   )
//
// If the order of the last two was swapped, we wouldn't be able to look up
// column a.
func (c *customFuncs) MakeLookupJoin( input memo.RelExpr, lookupJoinPrivate *memo.LookupJoinPrivate, on memo.FiltersExpr, ) memo.RelExpr { return c.f.ConstructLookupJoin(input, on, lookupJoinPrivate) } // Sort adds a sort enforcer which sorts according to the ordering that will be // required by its parent. func (c *customFuncs) Sort(input memo.RelExpr) memo.RelExpr { return &memo.SortExpr{Input: input} } // rootSentinel is used as the root value when Root is used. type rootSentinel struct { expr memo.RelExpr required *physical.Required } // Presentation converts a ColList to a Presentation. func (c *customFuncs) Presentation(cols opt.ColList) physical.Presentation { res := make(physical.Presentation, len(cols)) for i := range cols { res[i].ID = cols[i] res[i].Alias = c.mem.Metadata().ColumnMeta(cols[i]).Alias } return res } // NoOrdering returns the empty OrderingChoice. func (c *customFuncs) NoOrdering() physical.OrderingChoice { return physical.OrderingChoice{} } // Root can be used only at the top level on an expression, to annotate the // root with a presentation and/or required ordering. The operator must be able // to provide the ordering. For example: // (Root // ( ... ) // (Presentation "a,b") // (OrderingChoice "+a") // ) func (c *customFuncs) Root( root memo.RelExpr, presentation physical.Presentation, ordering physical.OrderingChoice, ) *rootSentinel { props := &physical.Required{ Presentation: presentation, Ordering: ordering, } return &rootSentinel{expr: root, required: props} }
package main

import "strings"

// Leetcode 139. (medium)
// wordBreak reports whether s can be segmented into a space-separated
// sequence of one or more dictionary words. Classic O(n^2) DP over
// prefix lengths.
func wordBreak(s string, wordDict []string) bool {
	words := make(map[string]bool, len(wordDict))
	for _, w := range wordDict {
		words[w] = true
	}
	// reachable[i] is true iff s[:i] can be segmented.
	reachable := make([]bool, len(s)+1)
	reachable[0] = true
	for end := 1; end <= len(s); end++ {
		for start := 0; start < end && !reachable[end]; start++ {
			if reachable[start] && words[s[start:end]] {
				reachable[end] = true
			}
		}
	}
	return reachable[len(s)]
}

// Leetcode 140. (hard)
// wordBreak2 returns every segmentation of s into dictionary words,
// each rendered as a space-joined sentence. It first runs the same DP
// as wordBreak to bail out early (returning nil) when no segmentation
// exists, then enumerates sentences with a DFS.
func wordBreak2(s string, wordDict []string) []string {
	words := make(map[string]bool, len(wordDict))
	for _, w := range wordDict {
		words[w] = true
	}
	feasible := make([]bool, len(s)+1)
	feasible[0] = true
	for end := 1; end <= len(s); end++ {
		for start := 1; start <= end && !feasible[end]; start++ {
			if feasible[start-1] && words[s[start-1:end]] {
				feasible[end] = true
			}
		}
	}
	if !feasible[len(s)] {
		return nil
	}
	return dfs(s, words, 0, []string{}, []string{})
}

// dfs enumerates segmentations of s[begin:], extending the partial
// sentence cur and accumulating finished sentences into res, which it
// returns. Words are tried in order of increasing end index, so the
// output order is deterministic.
func dfs(s string, m map[string]bool, begin int, cur, res []string) []string {
	if begin == len(s) {
		return append(res, strings.Join(cur, " "))
	}
	for end := begin + 1; end <= len(s); end++ {
		word := s[begin:end]
		if !m[word] {
			continue
		}
		res = dfs(s, m, end, append(cur, word), res)
	}
	return res
}
/* * FED API * * FED API is designed to create FEDACH and FEDWIRE dictionaries. The FEDACH dictionary contains receiving depository financial institutions (RDFI’s) which are qualified to receive ACH entries. The FEDWIRE dictionary contains receiving depository financial institutions (RDFI’s) which are qualified to receive WIRE entries. This project implements a modern REST HTTP API for FEDACH Dictionary and FEDWIRE Dictionary. * * API version: v1 * Generated by: OpenAPI Generator (https://openapi-generator.tech) */ package client // WireLocation WIRELocation is the FEDWIRE delivery address type WireLocation struct { // City City string `json:"city,omitempty"` // State State string `json:"state,omitempty"` }
package ipin

import (
	"flag"
	"fmt"

	// "github.com/adonese/crypto"
	"github.com/google/uuid"
)

// uid is a freshly generated uuid used when the caller does not supply one.
var uid = uuid.New().String()

// main reads -key, -ipin and -uuid flags and prints the resulting
// encrypted pin block. When -uuid is not provided, a generated uuid
// is used instead.
func main() {
	key := flag.String("key", "", "public key from ebs")
	ipin := flag.String("ipin", "0000", "ipin you want to create its pin block")
	uid1 := flag.String("uuid", "", "uuid for transaction")
	flag.Parse()

	// BUG FIX: flag.String always returns a non-nil pointer, so the
	// original `uid1 != nil` check was always true and the generated
	// uuid fallback was dead code. Test the flag's value instead.
	if *uid1 != "" {
		fmt.Print(Encrypt(*key, *ipin, *uid1))
	} else {
		fmt.Print(Encrypt(*key, *ipin, uid))
	}
}
// Copyright 2013 Travis Keep. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or
// at http://opensource.org/licenses/BSD-3-Clause.

// Package functional provides functional programming constructs.
package functional

import (
	"bufio"
	"errors"
	"io"
	"reflect"
)

var (
	// Done indicates that the end of a Stream has been reached.
	Done = errors.New("functional: End of Stream reached.")
	// Skipped indicates that a value was skipped by a Filterer or Mapper.
	Skipped = errors.New("functional: Value skipped.")
	// nilM is the shared no-op Mapper (always returns Skipped).
	nilM = nilMapper{}
	// nilPieceL is the piece list backing the zero-value CompositeMapper.
	nilPieceL = []compositeMapperPiece{{mapper: nilM}}
	// nilS is the shared empty Stream.
	nilS = nilStream{}
	// trueF / falseF are the shared accept-all / reject-all Filterers.
	trueF  = trueFilterer{}
	falseF = falseFilterer{}
)

// Stream is a sequence of emitted values.
// Each call to Next() emits the next value in the stream.
// A Stream that emits values of type T is a Stream of T.
type Stream interface {
	// Next emits the next value in this Stream of T.
	// If Next returns nil, the next value is stored at ptr.
	// If Next returns Done, then the end of the Stream has been reached,
	// and the value ptr points to is unspecified.
	// If Next returns some other error, then the caller should close the
	// Stream with Close. ptr must be a *T.
	// Once Next returns Done, it should continue to return Done, and
	// Close should return nil.
	Next(ptr interface{}) error

	// Close indicates that the caller is finished with this Stream. If Caller
	// consumes all the values in this Stream, then it need not call Close. But
	// if Caller chooses not to consume the Stream entirely, it should call
	// Close. Caller should also call Close if Next returns an error other
	// than Done. Once Close returns nil, it should continue to return nil.
	// The result of calling Next after Close is undefined.
	io.Closer
}

// Tuple represents a tuple of values that ReadRows emits
type Tuple interface {
	// Ptrs returns a pointer to each field in the tuple.
	Ptrs() []interface{}
}

// Filterer of T filters values in a Stream of T.
type Filterer interface {
	// Filter returns nil if value ptr points to should be included or Skipped
	// if value should be skipped. Filter may return other errors. ptr must be
	// a *T.
	Filter(ptr interface{}) error
}

// Mapper maps a type T value to a type U value in a Stream.
type Mapper interface {
	// Map does the mapping storing the mapped value at destPtr.
	// If Mapper returns Skipped, then no mapped value is stored at destPtr.
	// Map may return other errors. srcPtr is a *T; destPtr is a *U.
	Map(srcPtr interface{}, destPtr interface{}) error
}

// CompositeMapper represents Mappers composed together e.g f(g(x)).
// Programs using CompositeMapper should typically store and pass them as
// values, not pointers. A CompositeMapper can be used by multiple goroutines
// simultaneously if its underlying Mappers can be used by multiple goroutines
// simultaneously. The zero value for CompositeMapper is a Mapper that maps
// nothing (the Map method always returns Skipped).
type CompositeMapper struct {
	_pieces []compositeMapperPiece
}

// Map implements Mapper by delegating to a freshly built fast version,
// which keeps CompositeMapper itself goroutine-safe.
func (c CompositeMapper) Map(srcPtr interface{}, destPtr interface{}) error {
	return c.Fast().Map(srcPtr, destPtr)
}

// Fast returns a quicker version of this CompositeMapper that cannot be
// used by multiple goroutines simultaneously as if FastCompose were used.
func (c CompositeMapper) Fast() Mapper {
	pieces := c.pieces()
	fastPieces := make([]fastMapperPiece, len(pieces))
	for i := range fastPieces {
		fastPieces[i].setFromCompositePiece(&pieces[i])
	}
	return fastCompositeMapper{fastPieces}
}

// pieces returns the underlying piece list, substituting the shared
// map-nothing piece for the zero value.
func (c CompositeMapper) pieces() []compositeMapperPiece {
	if len(c._pieces) == 0 {
		return nilPieceL
	}
	return c._pieces
}

// Creater of T creates a new, pre-initialized, T and returns a pointer to it.
type Creater func() interface{}

// Copier of T copies the value at src to the value at dest. This type is
// often needed when values of type T need to be pre-initialized. src and
// dest are of type *T and both point to pre-initialized T.
type Copier func(src, dest interface{})

// Rows represents rows in a database table. Most database API already have
// a type that implements this interface
type Rows interface {
	// Next advances to the next row. Next returns false if there is no next row.
	// Every call to Scan, even the first one, must be preceded by a call to Next.
	Next() bool
	// Reads the values out of the current row. args are pointer types.
	Scan(args ...interface{}) error
}

// NilStream returns a Stream that emits no values.
func NilStream() Stream {
	return nilS
}

// Map applies f, which maps a type T value to a type U value, to a Stream
// of T producing a new Stream of U. If s is
// (x1, x2, x3, ...), Map returns the Stream (f(x1), f(x2), f(x3), ...).
// If f returns false for a T value, then the corresponding U value is left
// out of the returned stream. ptr is a *T providing storage for emitted values
// from s. Calling Close on returned Stream closes s. If f is a
// CompositeMapper, Fast() is called on it automatically.
func Map(f Mapper, s Stream, ptr interface{}) Stream {
	// Mapping an already-mapped stream: fuse the two mappers into one
	// fast composite rather than stacking a second mapStream.
	ms, ok := s.(*mapStream)
	if ok {
		return &mapStream{FastCompose(f, ms.mapper, ptr), ms.Stream, ms.ptr}
	}
	cm, ok := f.(CompositeMapper)
	if ok {
		return &mapStream{cm.Fast(), s, ptr}
	}
	return &mapStream{f, s, ptr}
}

// Filter filters values from s, returning a new Stream of T. The returned
// Stream's Next method reports any errors besides Skipped that the Filter
// method of f returns. Calling Close on returned Stream closes s.
// f is a Filterer of T; s is a Stream of T.
func Filter(f Filterer, s Stream) Stream {
	// Filtering an already-filtered stream: conjoin the filters instead
	// of stacking a second filterStream.
	fs, ok := s.(*filterStream)
	if ok {
		return &filterStream{All(fs.filterer, f), fs.Stream}
	}
	return &filterStream{f, s}
}

// Count returns an infinite Stream of int which emits all values beginning
// at 0.
func Count() Stream {
	return &count{0, 1}
}

// CountFrom returns an infinite Stream of int emitting values beginning at
// start and increasing by step.
func CountFrom(start, step int) Stream {
	return &count{start, step}
}

// Slice returns a Stream that will emit elements in s starting at index start
// and continuing to but not including index end. Indexes are 0 based. If end
// is negative, it means go to the end of s. Calling Close on returned Stream
// closes s. When end of returned Stream is reached, it closes s if it has not
// consumed s returning any Close error through Next.
func Slice(s Stream, start int, end int) Stream {
	return &sliceStream{Stream: s, start: start, end: end}
}

// ReadRows returns the rows in a database table as a Stream of Tuple. When
// end of returned Stream is reached, it closes r if r implements io.Closer
// propagating any Close error through Next. Calling Close on returned
// stream closes r if r implements io.Closer.
func ReadRows(r Rows) Stream {
	// The type assertion yields nil when r is not a Closer; maybeCloser
	// treats a nil Closer as "nothing to close".
	c, _ := r.(io.Closer)
	return &rowStream{rows: r, maybeCloser: maybeCloser{c: c}}
}

// ReadLines returns the lines of text in r separated by either "\n" or "\r\n"
// as a Stream of string. The emitted string types do not contain the
// end of line characters. When end of returned Stream is reached, it closes
// r if r implements io.Closer propagating any Close error through Next.
// Calling Close on returned Stream closes r if r implements io.Closer.
func ReadLines(r io.Reader) Stream {
	c, _ := r.(io.Closer)
	return &lineStream{bufio: bufio.NewReader(r), maybeCloser: maybeCloser{c: c}}
}

// Deferred returns a Stream that emits the values from the Stream f returns.
// f is not called until the first time Next is called on the returned stream.
// Calling Close on returned Stream closes the Stream f creates or does nothing
// if f not called.
func Deferred(f func() Stream) Stream {
	return &deferredStream{f: f}
}

// Cycle returns a Stream that repeatedly calls f and emits the resulting
// values. Note that if f repeatedly returns the NilStream, calling Next() on
// returned Stream will create an infinite loop.
// Calling Close on returned
// Stream closes the last Stream f created or does nothing if f not called.
// If f returns a Stream of T then Cycle also returns a Stream of T.
func Cycle(f func() Stream) Stream {
	return &cycleStream{Stream: nilS, f: f}
}

// Concat concatenates multiple Streams into one.
// If x = (x1, x2, ...) and y = (y1, y2, ...) then
// Concat(x, y) = (x1, x2, ..., y1, y2, ...).
// Calling Close on returned Stream closes all underlying streams.
// If caller passes a slice to Concat, no copy is made of it.
func Concat(s ...Stream) Stream {
	if len(s) == 0 {
		return nilS
	}
	if len(s) == 1 {
		return s[0]
	}
	return &concatStream{s: s}
}

// NewStreamFromValues converts a []T into a Stream of T. aSlice is a []T.
// c is a Copier of T. If c is nil, regular assignment is used.
// Calling Close on returned Stream does nothing.
func NewStreamFromValues(aSlice interface{}, c Copier) Stream {
	sliceValue := getSliceValue(aSlice)
	if sliceValue.Len() == 0 {
		return nilS
	}
	return &plainStream{sliceValue: sliceValue, copyFunc: toSliceValueCopier(c)}
}

// NewStreamFromPtrs converts a []*T into a Stream of T. aSlice is a []*T.
// c is a Copier of T. If c is nil, regular assignment is used.
// Calling Close on returned Stream does nothing.
func NewStreamFromPtrs(aSlice interface{}, c Copier) Stream {
	sliceValue := getSliceValue(aSlice)
	if sliceValue.Len() == 0 {
		return nilS
	}
	valueCopierFunc := toSliceValueCopier(c)
	// Dereference each *T element before handing it to the value copier.
	copyFunc := func(src reflect.Value, dest interface{}) {
		valueCopierFunc(reflect.Indirect(src), dest)
	}
	return &plainStream{sliceValue: sliceValue, copyFunc: copyFunc}
}

// Flatten converts a Stream of Stream of T into a Stream of T.
// Calling Close on returned Stream closes s and the last emitted Stream
// from s.
func Flatten(s Stream) Stream {
	return &flattenStream{stream: s, current: nilS}
}

// TakeWhile returns a Stream that emits the values in s until the Filter
// method of f returns Skipped.
// The returned Stream's Next method reports
// any errors besides Skipped that the Filter method of f returns. When
// end of returned Stream is reached, it automatically closes s if s is
// not exhausted. Calling Close on returned Stream closes s.
// f is a Filterer of T; s is a Stream of T.
func TakeWhile(f Filterer, s Stream) Stream {
	return &takeStream{Stream: s, f: f}
}

// DropWhile returns a Stream that emits the values in s starting at the
// first value where the Filter method of f returns Skipped. The returned
// Stream's Next method reports any errors that the Filter method of f
// returns until it returns Skipped. Calling Close on returned Stream
// closes s. f is a Filterer of T; s is a Stream of T.
func DropWhile(f Filterer, s Stream) Stream {
	return &dropStream{Stream: s, f: f}
}

// Any returns a Filterer that returns Skipped if all of the fs return
// Skipped. Otherwise it returns nil or the first error not equal to Skipped.
func Any(fs ...Filterer) Filterer {
	if len(fs) == 0 {
		return falseF
	}
	if len(fs) == 1 {
		return fs[0]
	}
	// Flatten nested orFilterers so the result is a single flat OR list.
	ors := make([][]Filterer, len(fs))
	for i := range fs {
		ors[i] = orList(fs[i])
	}
	return orFilterer(filterFlatten(ors))
}

// All returns a Filterer that returns nil if all of the
// fs return nil. Otherwise it returns the first error encountered.
func All(fs ...Filterer) Filterer {
	if len(fs) == 0 {
		return trueF
	}
	if len(fs) == 1 {
		return fs[0]
	}
	// Flatten nested andFilterers so the result is a single flat AND list.
	ands := make([][]Filterer, len(fs))
	for i := range fs {
		ands[i] = andList(fs[i])
	}
	return andFilterer(filterFlatten(ands))
}

// Compose composes two Mappers together into one e.g f(g(x)). If g maps
// type T values to type U values, and f maps type U values to type V
// values, then Compose returns a CompositeMapper mapping T values to V values.
// c is a Creater of U. Each time Map is called on returned CompositeMapper,
// it invokes c to create a U value to receive the intermediate result from g.
func Compose(f Mapper, g Mapper, c Creater) CompositeMapper {
	// Flatten both mappers into one piece list: g's pieces first, then
	// f's, with c attached to g's last piece to produce the U between them.
	l := mapperLen(f) + mapperLen(g)
	pieces := make([]compositeMapperPiece, l)
	n := appendMapper(pieces, g)
	pieces[n-1].creater = c
	appendMapper(pieces[n:], f)
	return CompositeMapper{pieces}
}

// FastCompose works like Compose except that it uses a *U value instead of
// a Creater of U to link f and g. ptr is the *U value. Intermediate results
// from g are stored at ptr. Unlike Compose, the Mapper that FastCompose
// returns cannot be used by multiple goroutines simultaneously since what
// ptr points to changes with each call to Map.
func FastCompose(f Mapper, g Mapper, ptr interface{}) Mapper {
	l := mapperLen(f) + mapperLen(g)
	pieces := make([]fastMapperPiece, l)
	n := appendFastMapper(pieces, g)
	pieces[n-1].ptr = ptr
	appendFastMapper(pieces[n:], f)
	return fastCompositeMapper{pieces}
}

// NoCloseStream returns a Stream just like s but with a Close method that does
// nothing. The returned Stream will still automatically close itself when the
// end of stream is reached. This function is useful for preventing a stream from
// automatically closing its underlying stream.
func NoCloseStream(s Stream) Stream {
	return noCloseStream{s}
}

// NoCloseRows returns a Rows just like r that does not implement io.Closer.
func NoCloseRows(r Rows) Rows {
	_, ok := r.(io.Closer)
	if ok {
		return rowsWrapper{r}
	}
	return r
}

// NoCloseReader returns an io.Reader just like r that does not implement
// io.Closer.
func NoCloseReader(r io.Reader) io.Reader {
	_, ok := r.(io.Closer)
	if ok {
		return readerWrapper{r}
	}
	return r
}

// NewFilterer returns a new Filterer of T. f takes a *T returning nil
// if T value pointed to it should be included or Skipped if it should not
// be included. f can return other errors too.
func NewFilterer(f func(ptr interface{}) error) Filterer {
	return funcFilterer(f)
}

// NewMapper returns a new Mapper mapping T values to U Values.
// In f,
// srcPtr is a *T and destPtr is a *U pointing to pre-allocated T and U
// values respectively. f returns Skipped if mapped value should be
// skipped. f can also return other errors.
func NewMapper(m func(srcPtr interface{}, destPtr interface{}) error) Mapper {
	return funcMapper(m)
}

// count is the Stream behind Count/CountFrom: an infinite arithmetic
// progression of ints.
type count struct {
	start int
	step  int
}

func (c *count) Next(ptr interface{}) error {
	p := ptr.(*int)
	*p = c.start
	c.start += c.step
	return nil
}

func (c *count) Close() error {
	return nil
}

// mapStream applies mapper to each value of the embedded Stream; ptr is
// the caller-supplied scratch *T for values read from the source.
type mapStream struct {
	mapper Mapper
	Stream
	ptr interface{}
}

func (s *mapStream) Next(ptr interface{}) error {
	// Pull source values until one maps without Skipped (or an error /
	// Done ends the loop).
	err := s.Stream.Next(s.ptr)
	for ; err == nil; err = s.Stream.Next(s.ptr) {
		if err = s.mapper.Map(s.ptr, ptr); err != Skipped {
			return err
		}
	}
	return err
}

// trueFilterer accepts every value.
type trueFilterer struct {
}

func (t trueFilterer) Filter(ptr interface{}) error {
	return nil
}

// falseFilterer rejects every value.
type falseFilterer struct {
}

func (f falseFilterer) Filter(ptr interface{}) error {
	return Skipped
}

// nilStream is the empty Stream.
type nilStream struct {
}

func (s nilStream) Next(ptr interface{}) error {
	return Done
}

func (s nilStream) Close() error {
	return nil
}

// nilMapper maps nothing; backs the zero-value CompositeMapper.
type nilMapper struct {
}

func (m nilMapper) Map(srcPtr, destPtr interface{}) error {
	return Skipped
}

// filterStream drops values of the embedded Stream that filterer skips.
type filterStream struct {
	filterer Filterer
	Stream
}

func (s *filterStream) Next(ptr interface{}) error {
	err := s.Stream.Next(ptr)
	for ; err == nil; err = s.Stream.Next(ptr) {
		if err = s.filterer.Filter(ptr); err != Skipped {
			return err
		}
	}
	return err
}

// sliceStream emits elements [start, end) of the embedded Stream;
// end < 0 means "to the end". See Slice for the close semantics.
type sliceStream struct {
	Stream
	start int
	end   int
	index int
	done  bool
}

func (s *sliceStream) Next(ptr interface{}) error {
	if s.done {
		return Done
	}
	for s.end < 0 || s.index < s.end {
		err := s.Stream.Next(ptr)
		if err == Done {
			s.done = true
			return Done
		}
		if err != nil {
			return err
		}
		s.index++
		// Values before index start are consumed but not emitted.
		if s.index > s.start {
			return nil
		}
	}
	// Reached end without exhausting the source: auto-close it and
	// surface any close error (or Done) through Next.
	s.done = true
	return finish(s.Close())
}

// rowStream adapts a Rows to a Stream of Tuple.
type rowStream struct {
	rows Rows
	maybeCloser
	done bool
}

func (s *rowStream) Next(ptr interface{}) error {
	if s.done {
		return Done
	}
	if !s.rows.Next() {
		s.done = true
		return finish(s.Close())
	}
	ptrs := ptr.(Tuple).Ptrs()
	return s.rows.Scan(ptrs...)
}

// lineStream adapts a buffered reader to a Stream of string lines.
type lineStream struct {
	bufio *bufio.Reader
	maybeCloser
	done bool
}

func (s *lineStream) Next(ptr interface{}) error {
	if s.done {
		return Done
	}
	p := ptr.(*string)
	line, isPrefix, err := s.bufio.ReadLine()
	if err == io.EOF {
		s.done = true
		return finish(s.Close())
	}
	if err != nil {
		return err
	}
	if !isPrefix {
		*p = string(line)
		return nil
	}
	// Line longer than the buffer: stitch the remaining fragments.
	*p, err = s.readRestOfLine(line)
	return err
}

// readRestOfLine collects the remaining fragments of an over-long line
// (ReadLine returned isPrefix) and joins them into one string. Each
// fragment is copied because ReadLine's slice is only valid until the
// next read.
func (s *lineStream) readRestOfLine(line []byte) (string, error) {
	lines := [][]byte{copyBytes(line)}
	for {
		l, isPrefix, err := s.bufio.ReadLine()
		if err == io.EOF {
			break
		}
		if err != nil {
			return "", err
		}
		lines = append(lines, copyBytes(l))
		if !isPrefix {
			break
		}
	}
	return string(byteFlatten(lines)), nil
}

// deferredStream delays calling f until the first Next.
type deferredStream struct {
	f    func() Stream
	s    Stream
	done bool
}

func (d *deferredStream) Next(ptr interface{}) error {
	if d.done {
		return Done
	}
	if d.s == nil {
		d.s = d.f()
	}
	err := d.s.Next(ptr)
	if err == Done {
		// Drop the exhausted stream so Close becomes a no-op, per the
		// Stream contract.
		d.done = true
		d.s = nil
	}
	return err
}

func (d *deferredStream) Close() error {
	if d.s != nil {
		return d.s.Close()
	}
	return nil
}

// cycleStream re-invokes f whenever the current stream is exhausted.
type cycleStream struct {
	Stream
	f func() Stream
}

func (c *cycleStream) Next(ptr interface{}) error {
	err := c.Stream.Next(ptr)
	for ; err == Done; err = c.Stream.Next(ptr) {
		c.Stream = c.f()
	}
	return err
}

// concatStream chains the streams in s end to end.
type concatStream struct {
	s   []Stream
	idx int
}

func (c *concatStream) Next(ptr interface{}) error {
	for ; c.idx < len(c.s); c.idx++ {
		err := c.s[c.idx].Next(ptr)
		if err == Done {
			continue
		}
		return err
	}
	return Done
}

func (c *concatStream) Close() error {
	// Close every underlying stream; report the first error seen.
	var result error
	for i := range c.s {
		err := c.s[i].Close()
		if result == nil {
			result = err
		}
	}
	return result
}

// plainStream emits the elements of a reflected slice via copyFunc.
type plainStream struct {
	sliceValue reflect.Value
	copyFunc   func(src reflect.Value, dest interface{})
	index      int
}

func (s *plainStream) Next(ptr interface{}) error {
	if s.index == s.sliceValue.Len() {
		return Done
	}
	s.copyFunc(s.sliceValue.Index(s.index), ptr)
	s.index++
	return nil
}

func (s *plainStream) Close() error {
	return nil
}

// flattenStream emits values from each stream emitted by stream in turn.
type flattenStream struct {
	stream  Stream
	current Stream
}

func (s *flattenStream) Next(ptr interface{}) error {
	err := s.current.Next(ptr)
	for ; err == Done; err = s.current.Next(ptr) {
		// Current sub-stream exhausted: advance to the next one.
		var temp Stream
		serr := s.stream.Next(&temp)
		if serr != nil {
			return serr
		}
		s.current = temp
	}
	return err
}

func (s *flattenStream) Close() error {
	result := s.current.Close()
	err := s.stream.Close()
	if result == nil {
		result = err
	}
	return result
}

// takeStream emits values until f skips one; f == nil marks exhaustion.
type takeStream struct {
	Stream
	f Filterer
}

func (s *takeStream) Next(ptr interface{}) error {
	if s.f == nil {
		return Done
	}
	err := s.Stream.Next(ptr)
	if err == Done {
		s.f = nil
		return Done
	}
	if err != nil {
		return err
	}
	if ferr := s.f.Filter(ptr); ferr != Skipped {
		return ferr
	}
	// f skipped a value: the take is over; auto-close the source.
	s.f = nil
	return finish(s.Close())
}

// dropStream discards values until f skips one; f == nil means the
// dropping phase is over and values pass through unchanged.
type dropStream struct {
	Stream
	f Filterer
}

func (s *dropStream) Next(ptr interface{}) error {
	err := s.Stream.Next(ptr)
	if s.f == nil {
		return err
	}
	for ; err == nil; err = s.Stream.Next(ptr) {
		ferr := s.f.Filter(ptr)
		if ferr == Skipped {
			s.f = nil
			return nil
		}
		if ferr != nil {
			return ferr
		}
	}
	return err
}

// funcFilterer adapts a plain function to the Filterer interface.
type funcFilterer func(ptr interface{}) error

func (f funcFilterer) Filter(ptr interface{}) error {
	return f(ptr)
}

// andFilterer accepts a value only if every member accepts it.
type andFilterer []Filterer

func (f andFilterer) Filter(ptr interface{}) error {
	for i := range f {
		if err := f[i].Filter(ptr); err != nil {
			return err
		}
	}
	return nil
}

// orFilterer accepts a value if any member does not skip it.
type orFilterer []Filterer

func (f orFilterer) Filter(ptr interface{}) error {
	for i := range f {
		if err := f[i].Filter(ptr); err != Skipped {
			return err
		}
	}
	return Skipped
}

// funcMapper adapts a plain function to the Mapper interface.
type funcMapper func(srcPtr interface{}, destPtr interface{}) error

func (m funcMapper) Map(srcPtr interface{}, destPtr interface{}) error {
	return m(srcPtr, destPtr)
}

// fastCompositeMapper runs its pieces in order, threading each piece's
// scratch ptr from one Map to the next; not goroutine-safe.
type fastCompositeMapper struct {
	pieces []fastMapperPiece
}

func (m fastCompositeMapper) Map(srcPtr interface{}, destPtr interface{}) error {
	sPtr := srcPtr
	var dPtr interface{}
	length := len(m.pieces)
	for i := range m.pieces {
		piece := &m.pieces[i]
		// The last piece writes to the caller's destination; earlier
		// pieces write to their own scratch pointer.
		if (i == length - 1) {
			dPtr = destPtr
		} else {
			dPtr = piece.ptr
		}
		if err := piece.mapper.Map(sPtr, dPtr); err != nil {
			return err
		}
		sPtr = dPtr
	}
	return nil
}

// compositeMapperPiece is one stage of a CompositeMapper: a mapper plus
// the Creater that allocates its intermediate output (nil for the last).
type compositeMapperPiece struct {
	mapper  Mapper
	creater Creater
}

func (cmp *compositeMapperPiece) setFromFastPiece(fmp *fastMapperPiece) {
	cmp.mapper = fmp.mapper
	if fmp.ptr == nil {
		cmp.creater = nil
	} else {
		cmp.creater = newCreater(fmp.ptr)
	}
}

// fastMapperPiece is one stage of a fastCompositeMapper: a mapper plus
// a pre-allocated scratch pointer for its intermediate output.
type fastMapperPiece struct {
	mapper Mapper
	ptr    interface{}
}

func (fmp *fastMapperPiece) setFromCompositePiece(cmp *compositeMapperPiece) {
	fmp.mapper = cmp.mapper
	if cmp.creater == nil {
		fmp.ptr = nil
	} else {
		fmp.ptr = cmp.creater()
	}
}

// readerWrapper hides io.Closer from a reader (see NoCloseReader).
type readerWrapper struct {
	io.Reader
}

// rowsWrapper hides io.Closer from a Rows (see NoCloseRows).
type rowsWrapper struct {
	Rows
}

// noCloseStream makes Close a no-op (see NoCloseStream).
type noCloseStream struct {
	Stream
}

func (s noCloseStream) Close() error {
	return nil
}

// maybeCloser closes an optional io.Closer exactly once, remembering
// (and repeating) the result of that first Close.
type maybeCloser struct {
	c io.Closer
	e error
}

func (mc *maybeCloser) Close() error {
	if mc.c != nil {
		mc.e = mc.c.Close()
		mc.c = nil
	}
	return mc.e
}

// orList flattens f into its OR operands (nil for reject-all).
func orList(f Filterer) []Filterer {
	switch i := f.(type) {
	case orFilterer:
		return i
	case falseFilterer:
		return nil
	}
	return []Filterer{f}
}

// andList flattens f into its AND operands (nil for accept-all).
func andList(f Filterer) []Filterer {
	switch i := f.(type) {
	case andFilterer:
		return i
	case trueFilterer:
		return nil
	}
	return []Filterer{f}
}

// filterFlatten concatenates the filterer lists into one slice.
func filterFlatten(fs [][]Filterer) []Filterer {
	var l int
	for i := range fs {
		l += len(fs[i])
	}
	result := make([]Filterer, l)
	n := 0
	for i := range fs {
		n += copy(result[n:], fs[i])
	}
	return result
}

// mapperLen reports how many pieces m flattens into when composed.
func mapperLen(m Mapper) int {
	switch am := m.(type) {
	case CompositeMapper:
		return len(am.pieces())
	case fastCompositeMapper:
		return len(am.pieces)
	}
	return 1
}

// appendMapper writes m's pieces (flattening composites) into pieces
// and returns how many were written.
func appendMapper(pieces []compositeMapperPiece, m Mapper) int {
	switch am := m.(type) {
	case CompositeMapper:
		return copy(pieces, am.pieces())
	case fastCompositeMapper:
		for i := range am.pieces {
			pieces[i].setFromFastPiece(&am.pieces[i])
		}
		return len(am.pieces)
	default:
		pieces[0] = compositeMapperPiece{mapper: m}
	}
	return 1
}

// appendFastMapper is the fastMapperPiece counterpart of appendMapper.
func appendFastMapper(pieces []fastMapperPiece, m Mapper) int {
	switch am := m.(type) {
	case CompositeMapper:
		ampieces := am.pieces()
		for i := range ampieces {
			pieces[i].setFromCompositePiece(&ampieces[i])
		}
		return len(ampieces)
	case fastCompositeMapper:
		return copy(pieces, am.pieces)
	default:
		pieces[0] = fastMapperPiece{mapper: m}
	}
	return 1
}

// newCreater wraps an existing pointer as a Creater that always returns it.
func newCreater(ptr interface{}) Creater {
	return func() interface{} {
		return ptr
	}
}

// finish converts a nil close error into Done so auto-closing streams can
// report "exhausted" while still surfacing real close errors.
func finish(e error) error {
	if e == nil {
		return Done
	}
	return e
}

// copyBytes returns a copy of b detached from its backing array.
func copyBytes(b []byte) []byte {
	result := make([]byte, len(b))
	copy(result, b)
	return result
}

// byteFlatten concatenates the byte slices into one slice.
func byteFlatten(b [][]byte) []byte {
	var l int
	for i := range b {
		l += len(b[i])
	}
	result := make([]byte, l)
	n := 0
	for i := range b {
		n += copy(result[n:], b[i])
	}
	return result
}

// toSliceValueCopier adapts an optional Copier to the reflect-based copy
// signature used by plainStream; nil means plain assignment.
func toSliceValueCopier(c Copier) func(src reflect.Value, dest interface{}) {
	if c == nil {
		return assignFromValue
	}
	return func(src reflect.Value, dest interface{}) {
		c(src.Addr().Interface(), dest)
	}
}

// assignCopier is a Copier that performs regular assignment *dest = *src.
func assignCopier(src, dest interface{}) {
	srcP := reflect.ValueOf(src)
	assignFromValue(reflect.Indirect(srcP), dest)
}

// assignFromValue stores src into the value dest points at.
func assignFromValue(src reflect.Value, dest interface{}) {
	destP := reflect.ValueOf(dest)
	reflect.Indirect(destP).Set(src)
}

// getSliceValue reflects aSlice, panicking if it is not a slice.
func getSliceValue(aSlice interface{}) reflect.Value {
	sliceValue := reflect.ValueOf(aSlice)
	if sliceValue.Kind() != reflect.Slice {
		panic("Slice argument expected")
	}
	return sliceValue
}
package datastore import ( "appengine" "appengine/datastore" M "github.com/ionous/sashimi/compiler/model" "github.com/ionous/sashimi/meta" "github.com/ionous/sashimi/metal" ) func NewModelStore(ctx appengine.Context, m *M.Model, parent *datastore.Key) *ModelStore { // yuck! mdl uses kvs ( for value lookup ), kvs uses mdl (for keycreation and the load saver objects); if we shadowed the meta, we could avoid this. kvs := &KeyValues{} mdl := metal.NewMetal(m, kvs) kvs.mdl = mdl kvs.KeyGen = NewKeyGen(ctx, parent) kvs.ctx = ctx kvs.Reset() return &ModelStore{kvs, mdl} } func (ds *ModelStore) Model() meta.Model { return ds.mdl } // Flush writes any pending changes to the datastore. func (ds *ModelStore) Flush() error { return ds.kvs.Save() } // Drop clears the local cache of fetched data. func (ds *ModelStore) Drop() { ds.kvs.Reset() } type ModelStore struct { kvs *KeyValues mdl meta.Model }
package tx import ( ethtypes "github.com/ethereum/go-ethereum/core/types" "github.com/transmutate-io/cryptocore/types" ) var ( _ Tx = (*TxETH)(nil) _ TxStateBased = (*TxETH)(nil) ) type TxETH struct{ Tx *ethtypes.Transaction } func (tx *TxETH) Hash() types.Bytes { return types.Bytes(tx.Tx.Hash().Bytes()) } func (tx *TxETH) ID() types.Bytes { return tx.Hash() } func (tx *TxETH) To() string { return tx.Tx.To().Hex() } func (tx *TxETH) Value() types.Amount { return types.NewAmountBig(tx.Tx.Value(), 18) }
package randstr import ( "strings" "testing" ) func TestGenerate(t *testing.T) { for round := 100; round > 0; round-- { out := Generate(5) if len(out) != 5 { t.Logf("Result length error") t.Fail() } for _, r := range out { ch := string(r) if !strings.Contains(seeds, ch) { t.Logf("Result contains wrong character: %s", ch) t.Fail() } } } }
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package props import ( "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/constraint" ) // AvailableRuleProps is a bit set that indicates when lazily-populated Rule // properties are initialized and ready for use. type AvailableRuleProps int8 const ( // PruneCols is set when the Relational.Rule.PruneCols field is populated. PruneCols AvailableRuleProps = 1 << iota // RejectNullCols is set when the Relational.Rule.RejectNullCols field is // populated. RejectNullCols // InterestingOrderings is set when the Relational.Rule.InterestingOrderings // field is populated. InterestingOrderings // HasHoistableSubquery is set when the Scalar.Rule.HasHoistableSubquery // is populated. HasHoistableSubquery // UnfilteredCols is set when the Relational.Rule.UnfilteredCols field is // populated. UnfilteredCols // WithUses is set when the Shared.Rule.WithUses field is populated. WithUses ) // Shared are properties that are shared by both relational and scalar // expressions. type Shared struct { // Populated is set to true once the properties have been built for the // operator. Populated bool // HasSubquery is true if the subtree rooted at this node contains a subquery. // The subquery can be a Subquery, Exists, Any, or ArrayFlatten expression. // Subqueries are the only place where a relational node can be nested within a // scalar expression. HasSubquery bool // HasCorrelatedSubquery is true if the scalar expression tree contains a // subquery having one or more outer columns. The subquery can be a Subquery, // Exists, or Any operator. 
These operators usually need to be hoisted out of // scalar expression trees and turned into top-level apply joins. This // property makes detection fast and easy so that the hoister doesn't waste // time searching subtrees that don't contain subqueries. HasCorrelatedSubquery bool // VolatilitySet contains the set of volatilities contained in the expression. VolatilitySet VolatilitySet // CanMutate is true if the subtree rooted at this expression contains at // least one operator that modifies schema (like CreateTable) or writes or // deletes rows (like Insert). CanMutate bool // HasPlaceholder is true if the subtree rooted at this expression contains // at least one Placeholder operator. HasPlaceholder bool // OuterCols is the set of columns that are referenced by variables within // this sub-expression, but are not bound within the scope of the expression. // For example: // // SELECT * // FROM a // WHERE EXISTS(SELECT * FROM b WHERE b.x = a.x AND b.y = 5) // // For the EXISTS expression, a.x is an outer column, meaning that it is // defined "outside" the EXISTS expression (hence the name "outer"). The // SELECT expression binds the b.x and b.y references, so they are not part // of the outer column set. The outer SELECT binds the a.x column, and so // its outer column set is empty. // // Note that what constitutes an "outer column" is dependent on an // expression's location in the query. For example, while the b.x and b.y // columns are not outer columns on the EXISTS expression, they *are* outer // columns on the inner WHERE condition. OuterCols opt.ColSet // Rule props are lazily calculated and typically only apply to a single // rule. See the comment above Relational.Rule for more details. Rule struct { // WithUses tracks information about the WithScans inside the given // expression which reference WithIDs outside of that expression. 
WithUses WithUsesMap } } // WithUsesMap stores information about each WithScan referencing an outside // WithID, grouped by each WithID. type WithUsesMap map[opt.WithID]WithUseInfo // WithUseInfo contains information about the usage of a specific WithID. type WithUseInfo struct { // Count is the number of WithScan operators which reference this WithID. Count int // UsedCols is the union of columns used by all WithScan operators which // reference this WithID. UsedCols opt.ColSet } // Relational properties describe the content and characteristics of relational // data returned by all expression variants within a memo group. While each // expression in the group may return rows or columns in a different order, or // compute the result using different algorithms, the same set of data is // returned and can then be transformed into whatever layout or presentation // format that is desired, according to the required physical properties. type Relational struct { Shared // OutputCols is the set of columns that can be projected by the expression. // Ordering, naming, and duplication of columns is not representable by this // property; those are physical properties. OutputCols opt.ColSet // NotNullCols is the subset of output columns which cannot be NULL. The // nullability of columns flows from the inputs and can also be derived from // filters that reject nulls. NotNullCols opt.ColSet // Cardinality is the number of rows that can be returned from this relational // expression. The number of rows will always be between the inclusive Min and // Max bounds. If Max=math.MaxUint32, then there is no limit to the number of // rows returned by the expression. Cardinality Cardinality // FuncDepSet is a set of functional dependencies (FDs) that encode useful // relationships between columns in a base or derived relation. Given two sets // of columns A and B, a functional dependency A-->B holds if A uniquely // determines B. 
In other words, if two different rows have equal values for // columns in A, then those two rows will also have equal values for columns // in B. For example: // // a1 a2 b1 // -------- // 1 2 5 // 1 2 5 // // FDs assist the optimizer in proving useful properties about query results. // This information powers many optimizations, including eliminating // unnecessary DISTINCT operators, simplifying ORDER BY columns, removing // Max1Row operators, and mapping semi-joins to inner-joins. // // The methods that are most useful for optimizations are: // Key: extract a candidate key for the relation // ColsAreStrictKey: determine if a set of columns uniquely identify rows // ReduceCols: discard redundant columns to create a candidate key // // For more details, see the header comment for FuncDepSet. FuncDeps FuncDepSet // Stats is the set of statistics that apply to this relational expression. // See statistics.go and memo/statistics_builder.go for more details. Stats Statistics // Rule encapsulates the set of properties that are maintained to assist // with specific sets of transformation rules. They are not intended to be // general purpose in nature. Typically, they're used by rules which need to // decide whether to push operators down into the tree. These properties // "bubble up" information about the subtree which can aid in that decision. // // Whereas the other logical relational properties are filled in by the memo // package upon creation of a new memo group, the rules properties are filled // in by one of the transformation packages, since deriving rule properties // is so closely tied with maintenance of the rules that depend upon them. // For example, the PruneCols set is connected to the PruneCols normalization // rules. The decision about which columns to add to PruneCols depends upon // what works best for those rules. Neither the rules nor their properties // can be considered in isolation, without considering the other. 
Rule struct { // Available contains bits that indicate whether lazily-populated Rule // properties have been initialized. For example, if the UnfilteredCols // bit is set, then the Rule.UnfilteredCols field has been initialized // and is ready for use. Available AvailableRuleProps // PruneCols is the subset of output columns that can potentially be // eliminated by one of the PruneCols normalization rules. Those rules // operate by pushing a Project operator down the tree that discards // unused columns. For example: // // SELECT y FROM xyz WHERE x=1 ORDER BY y LIMIT 1 // // The z column is never referenced, either by the filter or by the // limit, and would be part of the PruneCols set for the Limit operator. // The final Project operator could then push down a pruning Project // operator that eliminated the z column from its subtree. // // PruneCols is built bottom-up. It typically starts out containing the // complete set of output columns in a leaf expression, but quickly // empties out at higher levels of the expression tree as the columns // are referenced. Drawing from the example above: // // Limit PruneCols : [z] // Select PruneCols: [y, z] // Scan PruneCols : [x, y, z] // // Only a small number of relational operators are capable of pruning // columns (e.g. Scan, Project). A pruning Project operator pushed down // the tree must journey downwards until it finds a pruning-capable // operator. If a column is part of PruneCols, then it is guaranteed that // such an operator exists at the end of the journey. Operators that are // not capable of filtering columns (like Explain) will not add any of // their columns to this set. // // PruneCols is lazily populated by rules in prune_cols.opt. It is // only valid once the Rule.Available.PruneCols bit has been set. PruneCols opt.ColSet // RejectNullCols is the subset of nullable output columns that can // potentially be made not-null by one of the RejectNull normalization // rules. 
Those rules work in concert with the predicate pushdown rules // to synthesize a "col IS NOT NULL" filter and push it down the tree. // See the header comments for the reject_nulls.opt file for more // information and an example. // // RejectNullCols is built bottom-up by rulePropsBuilder, and only contains // nullable outer join columns that can be simplified. The columns can be // propagated up through multiple operators, giving higher levels of the // tree a window into the structure of the tree several layers down. In // particular, the null rejection rules use this property to determine when // it's advantageous to synthesize a new "IS NOT NULL" filter. Without this // information, the rules can clutter the tree with extraneous and // marginally useful null filters. // // RejectNullCols is lazily populated by rules in reject_nulls.opt. It is // only valid once the Rule.Available.RejectNullCols bit has been set. RejectNullCols opt.ColSet // InterestingOrderings is a list of orderings that potentially could be // provided by the operator without sorting. Interesting orderings normally // come from scans (index orders) and are bubbled up through some operators. // // Note that all prefixes of an interesting order are "interesting"; the // list doesn't need to contain orderings that are prefixes of some other // ordering in the list. // // InterestingOrderings is lazily populated by interesting_orderings.go. // It is only valid once the Rule.Available.InterestingOrderings bit has // been set. InterestingOrderings opt.OrderingSet // UnfilteredCols is the set of all columns for which rows from their base // table are guaranteed not to have been filtered. Rows may be duplicated, // but no rows can be missing. Even columns which are not output columns are // included as long as table rows are guaranteed not filtered. 
For example, // an unconstrained, unlimited Scan operator can add all columns from its // table to this property, but a Select operator cannot add any columns, as // it may have filtered rows. // // UnfilteredCols is lazily populated by GetJoinMultiplicityFromInputs. It // is only valid once the Rule.Available.UnfilteredCols bit has been set. UnfilteredCols opt.ColSet } } // Scalar properties are logical properties that are computed for scalar // expressions that return primitive-valued types. Scalar properties are // lazily populated on request. type Scalar struct { Shared // Constraints is the set of constraints deduced from a boolean expression. // For the expression to be true, all constraints in the set must be // satisfied. The constraints are not guaranteed to be exactly equivalent to // the expression, see TightConstraints. Constraints *constraint.Set // FuncDeps is a set of functional dependencies (FDs) inferred from a // boolean expression. This field is only populated for Filters expressions. // // - Constant column FDs such as ()-->(1,2) from conjuncts such as // x = 5 AND y = 10. // - Equivalent column FDs such as (1)==(2), (2)==(1) from conjuncts such // as x = y. // // It is useful to calculate FDs on Filters expressions, because it allows // additional filters to be inferred for push-down. For example, consider // the query: // // SELECT * FROM a, b WHERE a.x = b.x AND a.x > 5; // // By adding the equivalency FD for a.x = b.x, we can infer an additional // filter, b.x > 5. This allows us to rewrite the query as: // // SELECT * FROM (SELECT * FROM a WHERE a.x > 5) AS a, // (SELECT * FROM b WHERE b.x > 5) AS b WHERE a.x = b.x; // // For more details, see the header comment for FuncDepSet. FuncDeps FuncDepSet // TightConstraints is true if the expression is exactly equivalent to the // constraints. If it is false, the constraints are weaker than the // expression. 
TightConstraints bool // Rule encapsulates the set of properties that are maintained to assist // with specific sets of transformation rules. See the Relational.Rule // comment for more details. Rule struct { // Available contains bits that indicate whether lazily-populated Rule // properties have been initialized. For example, if the // HasHoistableSubquery bit is set, then the Rule.HasHoistableSubquery // field has been initialized and is ready for use. Available AvailableRuleProps // HasHoistableSubquery is true if the scalar expression tree contains a // subquery having one or more outer columns, and if the subquery needs // to be hoisted up into its parent query as part of query decorrelation. // The subquery can be a Subquery, Exists, or Any operator. These operators // need to be hoisted out of scalar expression trees and turned into top- // level apply joins. This property makes detection fast and easy so that // the hoister doesn't waste time searching subtrees that don't contain // subqueries. // // HasHoistableSubquery is lazily populated by rules in decorrelate.opt. // It is only valid once the Rule.Available.HasHoistableSubquery bit has // been set. HasHoistableSubquery bool } } // IsAvailable returns true if the specified rule property has been populated // on this relational properties instance. func (r *Relational) IsAvailable(p AvailableRuleProps) bool { return (r.Rule.Available & p) != 0 } // SetAvailable sets the available bits for the given properties, in order to // mark them as populated on this relational properties instance. func (r *Relational) SetAvailable(p AvailableRuleProps) { r.Rule.Available |= p } // IsAvailable returns true if the specified rule property has been populated // on this scalar properties instance. 
func (s *Scalar) IsAvailable(p AvailableRuleProps) bool {
	// True when any of the requested bits is set.
	return s.Rule.Available&p != 0
}

// SetAvailable sets the available bits for the given properties, in order to
// mark them as populated on this scalar properties instance.
func (s *Scalar) SetAvailable(p AvailableRuleProps) {
	s.Rule.Available = s.Rule.Available | p
}
// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You // may not use this file except in compliance with the License. A copy of // the License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is // distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF // ANY KIND, either express or implied. See the License for the specific // language governing permissions and limitations under the License. package secretcache_test import ( "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/service/secretsmanager" "github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface" ) // A struct to be used in unit tests as a mock Client type mockSecretsManagerClient struct { secretsmanageriface.SecretsManagerAPI MockedGetResult *secretsmanager.GetSecretValueOutput MockedDescribeResult *secretsmanager.DescribeSecretOutput GetSecretValueErr error DescribeSecretErr error GetSecretValueCallCount int DescribeSecretCallCount int } // Initialises a mock Client with dummy outputs for GetSecretValue and DescribeSecret APIs func newMockedClientWithDummyResults() (mockSecretsManagerClient, string, string) { createDate := time.Now().Add(-time.Hour * 12) // 12 hours ago versionId := getStrPtr("very-random-uuid") otherVersionId := getStrPtr("other-random-uuid") versionStages := []*string{getStrPtr("hello"), getStrPtr("versionStage-42"), getStrPtr("AWSCURRENT")} otherVersionStages := []*string{getStrPtr("AWSPREVIOUS")} versionIdsToStages := make(map[string][]*string) versionIdsToStages[*versionId] = versionStages versionIdsToStages[*otherVersionId] = otherVersionStages secretId := getStrPtr("dummy-secret-name") secretString := getStrPtr("my secret string") mockedGetResult := secretsmanager.GetSecretValueOutput{ ARN: getStrPtr("dummy-arn"), CreatedDate: &createDate, 
Name: secretId, SecretString: secretString, VersionId: versionId, VersionStages: versionStages, } mockedDescribeResult := secretsmanager.DescribeSecretOutput{ ARN: getStrPtr("dummy-arn"), Name: secretId, Description: getStrPtr("my dummy description"), VersionIdsToStages: versionIdsToStages, } return mockSecretsManagerClient{ MockedDescribeResult: &mockedDescribeResult, MockedGetResult: &mockedGetResult, }, *secretId, *secretString } // Overrides the interface method to return dummy result. func (m *mockSecretsManagerClient) GetSecretValueWithContext(context aws.Context, input *secretsmanager.GetSecretValueInput, opts ...request.Option) (*secretsmanager.GetSecretValueOutput, error) { m.GetSecretValueCallCount++ if m.GetSecretValueErr != nil { return nil, m.GetSecretValueErr } return m.MockedGetResult, nil } // Overrides the interface method to return dummy result. func (m *mockSecretsManagerClient) DescribeSecretWithContext(context aws.Context, input *secretsmanager.DescribeSecretInput, opts ...request.Option) (*secretsmanager.DescribeSecretOutput, error) { m.DescribeSecretCallCount++ if m.DescribeSecretErr != nil { return nil, m.DescribeSecretErr } return m.MockedDescribeResult, nil } // Helper function to get a string pointer for input string. func getStrPtr(str string) *string { return &str }
package scaling import ( "context" "fmt" "github.com/giantswarm/microerror" "github.com/giantswarm/micrologger" "github.com/giantswarm/e2etests/v2/scaling/provider" ) type Config struct { Logger micrologger.Logger Provider provider.Interface } type Scaling struct { logger micrologger.Logger provider provider.Interface } func New(config Config) (*Scaling, error) { if config.Logger == nil { return nil, microerror.Maskf(invalidConfigError, "%T.Logger must not be empty", config) } if config.Provider == nil { return nil, microerror.Maskf(invalidConfigError, "%T.Provider must not be empty", config) } s := &Scaling{ logger: config.Logger, provider: config.Provider, } return s, nil } func (s *Scaling) Test(ctx context.Context) error { var err error var numMasters int { s.logger.LogCtx(ctx, "level", "debug", "message", "looking for the number of masters") numMasters, err = s.provider.NumMasters(ctx) if err != nil { return microerror.Mask(err) } s.logger.LogCtx(ctx, "level", "debug", "message", fmt.Sprintf("found %d masters", numMasters)) } var numWorkers int { s.logger.LogCtx(ctx, "level", "debug", "message", "looking for the number of workers") numWorkers, err = s.provider.NumWorkers(ctx) if err != nil { return microerror.Mask(err) } s.logger.LogCtx(ctx, "level", "debug", "message", fmt.Sprintf("found %d workers", numWorkers)) } { s.logger.LogCtx(ctx, "level", "debug", "message", "scaling up one worker") err = s.provider.AddWorker(ctx) if err != nil { return microerror.Mask(err) } s.logger.LogCtx(ctx, "level", "debug", "message", "scaled up one worker") } { s.logger.LogCtx(ctx, "level", "debug", "message", "waiting for scaling up to be complete") err = s.provider.WaitForNodes(ctx, numMasters+numWorkers+1) if err != nil { return microerror.Mask(err) } s.logger.LogCtx(ctx, "level", "debug", "message", "scaling up complete") } { s.logger.LogCtx(ctx, "level", "debug", "message", "scaling down one worker") err = s.provider.RemoveWorker(ctx) if err != nil { return 
microerror.Mask(err) } s.logger.LogCtx(ctx, "level", "debug", "message", "scaled down one worker") } { s.logger.LogCtx(ctx, "level", "debug", "message", "waiting for scaling down to be complete") err = s.provider.WaitForNodes(ctx, numMasters+numWorkers) if err != nil { return microerror.Mask(err) } s.logger.LogCtx(ctx, "level", "debug", "message", "scaling down complete") } return nil }
package models

import "time"

// Post is a simple data record describing a post.
type Post struct {
	Username  string    // author's user name
	Name      string    // name of the post
	Path      string    // path to the post (presumably a file or URL path -- confirm with callers)
	CreatedOn time.Time // creation timestamp
}
package main import ( "context" "fmt" "github.com/gorilla/websocket" "io" "os" "os/signal" "syscall" "time" ) // Orca handles the communication between the // local port and the proxy port type Orca struct { tunnelConn *TunnelConnection localConn *LocalConn queuedDataToSend []byte queuedDataToReceive []byte } func NewOrca(ctx context.Context) (*Orca, error) { project := os.Getenv("PROJECT_ID") zone := os.Getenv("ZONE") instance := os.Getenv("INSTANCE") port := os.Getenv("PORT") localPort := os.Getenv("LOCAL_PORT") tcPipeReader, tcPipeWriter := io.Pipe() tc, err := NewTunnelConnection(ctx, WithProject(project), WithZone(zone), WithPort(port), WithInstanceName(instance), WithTunnelReader(tcPipeReader), WithTunnelWriter(tcPipeWriter)) if err != nil { return nil, err } lcPipeReader, lcPipeWriter := io.Pipe() lc, err := NewLocalConn(ctx, WithLocalConnPort(localPort), WithLocalConnReader(lcPipeReader), WithLocalConnWriter(lcPipeWriter)) if err != nil { return nil, err } orca := &Orca{tunnelConn: tc, localConn: lc} return orca, nil } func (orca *Orca) Run() error { ctx := context.Background() c := make(chan os.Signal) signal.Notify(c, os.Interrupt, syscall.SIGTERM) // going off of max size from the python library localbuf := make([]byte, 16384) socketbuf := make([]byte, 16384) fmt.Printf("Listening on port %s", orca.localConn.port) err := orca.localConn.Accept() if err != nil { return err } fmt.Println("Client connected") go func() { for !orca.tunnelConn.connected { time.Sleep(1 * time.Second) } for { n, err := orca.localConn.Read(localbuf) if err != nil { panic(err) } msg := localbuf[:n] newMsg := NewIAPDataMessage(msg) err = newMsg.CreateDataFrame() if err != nil { panic(err) } _, err = orca.tunnelConn.Write(newMsg.data) if err != nil { panic(err) } } }() go func() { err := orca.tunnelConn.Connect(ctx) if err != nil { panic(err) } for { n, err := orca.tunnelConn.Read(socketbuf) if err != nil { panic(err) } d := socketbuf[:n] msg := NewIAPMessage(d) tag := 
msg.PeekMessageTag() switch tag { case MessageAck: fmt.Println("Got Message Ack From IAP") continue case MessageConnectSuccessSid: fmt.Println("Got Success SID") newMsg := msg.AsConnectSIDMessage() orca.tunnelConn.SetSid(newMsg.GetSID()) continue case MessageData: fmt.Println("Got Data Message") newMsg := msg.AsDataMessage() orca.tunnelConn.bytesReceived += newMsg.GetDataLength() ackBytes := make([]byte, 10) ackMsg := NewIAPAckMessage(ackBytes) ackMsg.SetTag(MessageAck) ackMsg.SetAck(uint64(orca.tunnelConn.bytesReceived)) err = orca.tunnelConn.websocketConn.WriteMessage(websocket.BinaryMessage, ackMsg.data) if err != nil { panic(err) } orca.tunnelConn.bytesAcked += orca.tunnelConn.bytesReceived _, err = orca.localConn.Write(newMsg.data) if err != nil { panic(err) } continue default: fmt.Printf("Unknown tag: %d", tag) panic("unkown tag") break } } }() <-c return nil }
package handler

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

// HealthHandler - handler for health checks.
type HealthHandler interface {
	GetHealth(c *gin.Context)
}

// healthHandler is the stateless default implementation of HealthHandler.
type healthHandler struct {
}

// NewHealthHandler - creates a healthHandler.
func NewHealthHandler() HealthHandler {
	return &healthHandler{}
}

// GetHealth responds 200 OK with the JSON body {"status": "UP"}.
func (h *healthHandler) GetHealth(c *gin.Context) {
	c.JSON(http.StatusOK, gin.H{"status": "UP"})
}
package main

import "fmt"

// main demonstrates the XOR-swap trick: exchanging two integer variables
// without a temporary. Both a and b are zero here, so the output is "0 0".
func main() {
	var a, b int
	// XOR swap: after these three steps a and b hold each other's values.
	a = a ^ b
	b = a ^ b
	a = a ^ b
	fmt.Print(a, b)
}
package gophorem import ( "bytes" "context" "encoding/json" "errors" "fmt" "io/ioutil" "net/http" "net/url" ) // Client makes all the API calls to dev.to. type Client struct { apiKey string baseURL string http *http.Client } // Arguments are used for passing query parameters to the dev.to api. type Arguments map[string]string // Defaults returns an empty map of arguments. func Defaults() Arguments { return make(map[string]string) } func (a Arguments) toQueryParams() url.Values { res := make(url.Values) for k, v := range a { res.Add(k, v) } return res } // Option allows the client to be configured with different options. type Option func(*Client) func withBaseURL(url string) Option { return func(c *Client) { c.baseURL = url } } // WithAPIKey sets the dev.to api key to use for this client. // see https://docs.dev.to/api/#section/Authentication for how to set one up. func WithAPIKey(apiKey string) Option { return func(c *Client) { c.apiKey = apiKey } } // NewClient creates a dev.to client with the provided options. func NewClient(foremURL string, opts ...Option) *Client { res := &Client{ baseURL: foremURL, http: &http.Client{}, } for _, o := range opts { o(res) } return res } // NewDevtoClient creates a dev.to client with the provided options. func NewDevtoClient(opts ...Option) *Client { res := &Client{ baseURL: "https://dev.to/api", http: &http.Client{}, } for _, o := range opts { o(res) } return res } func (c *Client) getRequest(ctx context.Context, method, url string, payload interface{}) (*http.Request, error) { b := bytes.NewBuffer(nil) if method == http.MethodPost || method == http.MethodPut { j, err := json.Marshal(payload) if err != nil { return nil, err } b = bytes.NewBuffer(j) } req, err := http.NewRequestWithContext(ctx, method, url, b) if err != nil { return nil, err } if c.apiKey != "" { req.Header.Add("api-key", c.apiKey) } return req, err } // get returns an error if the http client cannot perform a HTTP GET for the provided URL. 
func (c *Client) get(ctx context.Context, url string, target interface{}) error { req, err := c.getRequest(ctx, http.MethodGet, url, nil) if err != nil { return err } resp, err := c.http.Do(req) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { return errors.New("error from forem api") } b, err := ioutil.ReadAll(resp.Body) if err != nil { return err } return json.Unmarshal(b, &target) } // save returns an error if the http client cannot save the request to dev.to.. func (c *Client) save(ctx context.Context, httpMethod string, url string, payload interface{}, target interface{}) error { req, err := c.getRequest(ctx, httpMethod, url, payload) if err != nil { return err } req.Header.Add("Content-Type", "application/json") resp, err := c.http.Do(req) if err != nil { return err } defer resp.Body.Close() b, err := ioutil.ReadAll(resp.Body) if err != nil { return err } if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated { return fmt.Errorf("error from forem api. httpCode: %d, response: %s", resp.StatusCode, b) } return json.Unmarshal(b, &target) } // put returns an error if the http client cannot perform a HTTP PUT for the provided URL. func (c *Client) put(ctx context.Context, url string, payload interface{}, target interface{}) error { return c.save(ctx, http.MethodPut, url, payload, target) } // post returns an error if the http client cannot perform a HTTP POST for the provided URL. func (c *Client) post(ctx context.Context, url string, payload interface{}, target interface{}) error { return c.save(ctx, http.MethodPost, url, payload, target) } // delete returns an error if the http client cannot perform a HTTP DELETE for the provided URL. 
func (c *Client) delete(ctx context.Context, url string, payload interface{}) error { req, err := c.getRequest(ctx, http.MethodDelete, url, nil) if err != nil { return err } resp, err := c.http.Do(req) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { return errors.New("error from forem api") } return nil }
package kubernetes

import (
	"testing"

	"github.com/stretchr/testify/assert"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Test_createPlatformInfoFromAPIObjects verifies that PlatformInfo derives
// the preferred/other API versions for PodDisruptionBudget and
// PodSecurityPolicy from the discovery API groups and resource lists.
// (Fixed: removed the unused `tag` table field and test-name typos.)
func Test_createPlatformInfoFromAPIObjects(t *testing.T) {
	tests := []struct {
		name                string
		apiGroups           []*v1.APIGroup
		apiResourceList     []*v1.APIResourceList
		useV1Beta1PDB       bool
		pdbPreferredVersion string
		pspPreferredVersion string
		pdbOtherVersion     string
		pspOtherVersion     string
	}{
		{
			name: "v1 preferred, PDB v1 preferred, PSP, PDB v1beta1 not preferred",
			apiGroups: []*v1.APIGroup{
				newApiGroupPointer(
					v1.APIGroup{
						Name: "policy",
						Versions: []v1.GroupVersionForDiscovery{
							{
								GroupVersion: "policy/v1",
							},
							{
								GroupVersion: "policy/v1beta1",
							},
						},
						PreferredVersion: v1.GroupVersionForDiscovery{
							GroupVersion: "policy/v1",
						},
					},
				),
			},
			apiResourceList:     createDefaultApiResourceList(),
			useV1Beta1PDB:       false,
			pdbPreferredVersion: "policy/v1",
			pspPreferredVersion: "",
			pdbOtherVersion:     "policy/v1beta1",
			pspOtherVersion:     "policy/v1beta1",
		},
		{
			name: "v1beta1 preferred, PDB, PSP v1beta1 preferred, PDB v1 not preferred",
			apiGroups: []*v1.APIGroup{
				newApiGroupPointer(
					v1.APIGroup{
						Name: "policy",
						Versions: []v1.GroupVersionForDiscovery{
							{
								GroupVersion: "policy/v1",
							},
							{
								GroupVersion: "policy/v1beta1",
							},
						},
						PreferredVersion: v1.GroupVersionForDiscovery{
							GroupVersion: "policy/v1beta1",
						},
					},
				),
			},
			apiResourceList:     createDefaultApiResourceList(),
			useV1Beta1PDB:       true,
			pdbPreferredVersion: "policy/v1beta1",
			pspPreferredVersion: "policy/v1beta1",
			pdbOtherVersion:     "policy/v1",
			pspOtherVersion:     "",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			platformInfo := NewPlatformInfo(nil, tt.apiGroups, tt.apiResourceList)
			assert.Equal(t, tt.useV1Beta1PDB, platformInfo.UseV1Beta1PDB())
			assert.Equal(t, tt.pdbPreferredVersion, platformInfo.apiPreferredVersions["PodDisruptionBudget"])
			assert.Equal(t, tt.pspPreferredVersion, platformInfo.apiPreferredVersions["PodSecurityPolicy"])
			assert.Equal(t, tt.pdbOtherVersion, platformInfo.apiOtherVersions["PodDisruptionBudget"])
			assert.Equal(t, tt.pspOtherVersion, platformInfo.apiOtherVersions["PodSecurityPolicy"])
		})
	}
}

// Test_getPDBFlag verifies UseV1Beta1PDB/supportsPSP when PlatformInfo is
// built directly from preferred/other version maps.
func Test_getPDBFlag(t *testing.T) {
	tests := []struct {
		name          string
		preferred     map[string]string
		other         map[string]string
		useV1Beta1PDB bool
		supportsPSP   bool
	}{
		{
			name: "Chooses preferred version of PodDisruptionBudget",
			preferred: map[string]string{
				"PodDisruptionBudget": "policy/v1",
				"PodSecurityPolicy":   "anything",
			},
			other: map[string]string{
				"PodDisruptionBudget": "policy/v1beta1",
			},
			useV1Beta1PDB: false,
			supportsPSP:   true,
		},
		{
			name: "Chooses preferred version of PodDisruptionBudget",
			preferred: map[string]string{
				"PodDisruptionBudget": "policy/v1beta1",
			},
			other: map[string]string{
				"PodDisruptionBudget": "policy/v1",
				"PodSecurityPolicy":   "anything",
			},
			useV1Beta1PDB: true,
			supportsPSP:   true,
		},
		{
			name: "Unrecognized preferred version, defaults to v1",
			preferred: map[string]string{
				"PodDisruptionBudget": "xyz",
			},
			other:         map[string]string{},
			useV1Beta1PDB: false,
			supportsPSP:   false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			platformInfo := NewPlatformInfoFromVersionMaps(nil, tt.preferred, tt.other)
			assert.Equal(t, tt.useV1Beta1PDB, platformInfo.UseV1Beta1PDB())
			assert.Equal(t, tt.supportsPSP, platformInfo.supportsPSP())
			assert.Equal(t, tt.supportsPSP, containsObjectKind(platformInfo.GetAgentResourcesKind(false), PodSecurityPoliciesKind))
		})
	}
}

// Test_getDatadogAgentVersions verifies GetApiVersions for the DatadogAgent
// kind across preferred/other discovery combinations.
func Test_getDatadogAgentVersions(t *testing.T) {
	tests := []struct {
		name            string
		apiGroups       []*v1.APIGroup
		apiResourceList []*v1.APIResourceList
		preferred       string
		other           string
	}{
		{
			name: "v2 preferred, v1 other",
			apiGroups: []*v1.APIGroup{
				newApiGroupPointer(
					v1.APIGroup{
						Name: "datadoghq",
						Versions: []v1.GroupVersionForDiscovery{
							{
								GroupVersion: "datadoghq/v1alpha1",
							},
							{
								GroupVersion: "datadoghq/v2alpha1",
							},
						},
						PreferredVersion: v1.GroupVersionForDiscovery{
							GroupVersion: "datadoghq/v2alpha1",
						},
					},
				),
			},
			apiResourceList: createDatadogAgentResourceList(),
			preferred:       "datadoghq/v2alpha1",
			other:           "datadoghq/v1alpha1",
		},
		{
			name: "v2 only, v2 preferred, other empty",
			apiGroups: []*v1.APIGroup{
				newApiGroupPointer(
					v1.APIGroup{
						Name: "datadoghq",
						Versions: []v1.GroupVersionForDiscovery{
							{
								GroupVersion: "datadoghq/v2alpha1",
							},
						},
						PreferredVersion: v1.GroupVersionForDiscovery{
							GroupVersion: "datadoghq/v2alpha1",
						},
					},
				),
			},
			apiResourceList: []*v1.APIResourceList{
				newApiResourceListPointer(
					v1.APIResourceList{
						GroupVersion: "datadoghq/v2alpha1",
						APIResources: []v1.APIResource{
							{
								Kind: "DatadogAgent",
							},
						},
					},
				)},
			preferred: "datadoghq/v2alpha1",
			other:     "",
		},
		{
			name: "No API groups and resources, versions empty",
			apiGroups: []*v1.APIGroup{
				newApiGroupPointer(
					v1.APIGroup{},
				),
			},
			apiResourceList: []*v1.APIResourceList{},
			preferred:       "",
			other:           "",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			platformInfo := NewPlatformInfo(nil, tt.apiGroups, tt.apiResourceList)
			// Fixed typo: "preffered" -> "preferred".
			preferred, other := platformInfo.GetApiVersions("DatadogAgent")
			assert.Equal(t, tt.preferred, preferred)
			assert.Equal(t, tt.other, other)
		})
	}
}

// createDefaultApiResourceList builds the discovery resource lists shared by
// the policy-group test cases (PDB in v1 and v1beta1, PSP in v1beta1 only).
func createDefaultApiResourceList() []*v1.APIResourceList {
	return []*v1.APIResourceList{
		newApiResourceListPointer(
			v1.APIResourceList{
				GroupVersion: "policy/v1",
				APIResources: []v1.APIResource{
					{
						Kind: "PodDisruptionBudget",
					},
				},
			},
		),
		newApiResourceListPointer(
			v1.APIResourceList{
				GroupVersion: "policy/v1beta1",
				APIResources: []v1.APIResource{
					{
						Kind: "PodDisruptionBudget",
					},
					{
						Kind: "PodSecurityPolicy",
					},
				},
			},
		),
		newApiResourceListPointer(
			v1.APIResourceList{
				GroupVersion: "datadoghq/v1alpha1",
				APIResources: []v1.APIResource{
					{
						Kind: "DatadogAgent",
					},
				},
			},
		),
	}
}

// createDatadogAgentResourceList builds resource lists exposing DatadogAgent
// in both v1alpha1 and v2alpha1.
func createDatadogAgentResourceList() []*v1.APIResourceList {
	return []*v1.APIResourceList{
		newApiResourceListPointer(
			v1.APIResourceList{
				GroupVersion: "datadoghq/v1alpha1",
				APIResources: []v1.APIResource{
					{
						Kind: "DatadogAgent",
					},
				},
			},
		),
		newApiResourceListPointer(
			v1.APIResourceList{
				GroupVersion: "datadoghq/v2alpha1",
				APIResources: []v1.APIResource{
					{
						Kind: "DatadogAgent",
					},
				},
			},
		),
	}
}

// newApiGroupPointer returns a pointer to a copy of the given APIGroup,
// allowing pointer slices to be built from literals.
func newApiGroupPointer(apiGroup v1.APIGroup) *v1.APIGroup {
	return &apiGroup
}

// newApiResourceListPointer returns a pointer to a copy of the given
// APIResourceList, allowing pointer slices to be built from literals.
func newApiResourceListPointer(apiResourceList v1.APIResourceList) *v1.APIResourceList {
	return &apiResourceList
}

// containsObjectKind reports whether s occurs in list.
func containsObjectKind(list []ObjectKind, s ObjectKind) bool {
	for _, v := range list {
		if v == s {
			return true
		}
	}
	return false
}
/* Copyright 2021-2023 ICS-FORTH. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package service import ( "fmt" "github.com/carv-ics-forth/frisbee/api/v1alpha1" "github.com/carv-ics-forth/frisbee/pkg/lifecycle" corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" ) // updateLifecycle returns the update lifecycle of the cluster. func (r *Controller) updateLifecycle(service *v1alpha1.Service) bool { // Skip any CR which are already completed, or uninitialized. if service.Status.Phase.Is(v1alpha1.PhaseUninitialized, v1alpha1.PhaseSuccess, v1alpha1.PhaseFailed) { return false } return lifecycle.SingleJob(r.view, &service.Status.Lifecycle) } // convertPodLifecycle translates the Pod's Lifecycle to Frisbee Lifecycle. func convertPodLifecycle(obj client.Object) v1alpha1.Lifecycle { pod := obj.(*corev1.Pod) /*---------------------------------------------------* * Corner Cases *---------------------------------------------------*/ if pod.CreationTimestamp.IsZero() { return v1alpha1.Lifecycle{ Phase: v1alpha1.PhaseFailed, Reason: "EmptyCreationTime", Message: fmt.Sprintf("Something is wrong with Pod '%s'.", pod.GetLabels()), } } // If the Pod is marked for deletion, but is not completed, then is probably deleted. 
if !pod.GetDeletionTimestamp().IsZero() && !(pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed) { return v1alpha1.Lifecycle{ Phase: v1alpha1.PhaseFailed, Reason: "PodDeletion", Message: fmt.Sprintf("Pod '%s' is probably being deleted", pod.GetLabels()), } } /*---------------------------------------------------* * Normal Execution Fow *---------------------------------------------------*/ switch pod.Status.Phase { case corev1.PodPending: return v1alpha1.Lifecycle{ Phase: v1alpha1.PhasePending, Reason: pod.Status.Reason, Message: pod.Status.Message, } case corev1.PodRunning: // Termination rules. Note the evaluation of "Main" and "Sidecars" containers do not follow any ordering. // It is equally possible for a "Sidecar" to be evaluated before and after the "Main" container. // // -- "Main" container is in terminal state -- // In this case, the entire job is complete, regardless of the state of sidecar containers. // The job's completion status (Success or Failed) depends on the exit code of the main container. // // -- "Sidecar" container is in terminal state. -- // This captures the condition in which a sidecar container is complete before the main container. // In this case, the result depends on the status of the main container. // 1) If the main container is in terminal state, the result follows the conditions of "Main in terminal state". // 2) Otherwise, if the sidecar has failed, the result is failure. // 3) if the sidecar is successful, the status remains running. 
var failedSidecar *v1alpha1.Lifecycle for _, container := range pod.Status.ContainerStatuses { // the container is still running if container.State.Terminated == nil { continue } if container.Name == v1alpha1.MainContainerName { // main has failed if container.State.Terminated.ExitCode != 0 { return v1alpha1.Lifecycle{ Phase: v1alpha1.PhaseFailed, Reason: container.State.Terminated.Reason, Message: container.State.Terminated.Message, } } // main is successful return v1alpha1.Lifecycle{ Phase: v1alpha1.PhaseSuccess, Reason: container.State.Terminated.Reason, Message: container.State.Terminated.Message, } } // sidecar has failed. cache the result. if main is complete, it has precedence. // if main is still running, the error will be returned at the of the loop. if container.State.Terminated.ExitCode != 0 { failedSidecar = &v1alpha1.Lifecycle{ Phase: v1alpha1.PhaseFailed, Reason: container.State.Terminated.Reason, Message: container.State.Terminated.Message, } } } // lazy failure, in order to give precedence to "main" rules. if failedSidecar != nil { return *failedSidecar } // All containers are still running return v1alpha1.Lifecycle{ Phase: v1alpha1.PhaseRunning, Reason: pod.Status.Reason, Message: pod.Status.Message, } case corev1.PodSucceeded: return v1alpha1.Lifecycle{ Phase: v1alpha1.PhaseSuccess, Reason: pod.Status.Reason, Message: pod.Status.Message, } case corev1.PodFailed: // A usual source for empty reason is invalid container parameters reason := pod.Status.Reason if reason == "" { reason = "ContainerError" } message := pod.Status.Message if message == "" { message = "Check the container logs" } return v1alpha1.Lifecycle{ Phase: v1alpha1.PhaseFailed, Reason: reason, Message: message, } default: panic("unhandled lifecycle condition") } }
//Package gologging provides a logger implementation based on the github.com/op/go-logging pkg package gologging import ( "fmt" "io" "log/syslog" "os" "github.com/devopsfaith/krakend/config" "github.com/devopsfaith/krakend/logging" gologging "github.com/op/go-logging" ) // Namespace is the key to look for extra configuration details const Namespace = "github_com/devopsfaith/krakend-gologging" var ( // ErrEmptyValue is the error returned when there is no config under the namespace ErrWrongConfig = fmt.Errorf("getting the extra config for the krakend-gologging module") // DefaultPattern is the pattern to use for rendering the logs LogstashPattern = `{"@timestamp":"%{time:200-01-02T15:04:05.000+00:00}", "@version": 1, "level": "%{level}", "message": "%{message}", "module": "%{module}"}` DefaultPattern = ` %{time:2006/01/02 - 15:04:05.000} %{color}▶ %{level:.6s}%{color:reset} %{message}` ActivePattern = DefaultPattern defaultFormatterSelector = func(io.Writer) string { return ActivePattern } ) // SetFormatterSelector sets the ddefaultFormatterSelector function func SetFormatterSelector(f func(io.Writer) string) { defaultFormatterSelector = f } // NewLogger returns a krakend logger wrapping a gologging logger func NewLogger(cfg config.ExtraConfig, ws ...io.Writer) (logging.Logger, error) { logConfig, ok := ConfigGetter(cfg).(Config) if !ok { return nil, ErrWrongConfig } module := "KRAKEND" loggr := gologging.MustGetLogger(module) if logConfig.StdOut { ws = append(ws, os.Stdout) } if logConfig.Syslog { var err error var w *syslog.Writer w, err = syslog.New(syslog.LOG_CRIT, logConfig.Prefix) if err != nil { return nil, err } ws = append(ws, w) } if logConfig.Format == "logstash" { ActivePattern = LogstashPattern logConfig.Prefix = "" } if logConfig.Format == "custom" { ActivePattern = logConfig.CustomFormat logConfig.Prefix = "" } backends := []gologging.Backend{} for _, w := range ws { backend := gologging.NewLogBackend(w, logConfig.Prefix, 0) pattern := 
defaultFormatterSelector(w) format := gologging.MustStringFormatter(pattern) backendLeveled := gologging.AddModuleLevel(gologging.NewBackendFormatter(backend, format)) logLevel, err := gologging.LogLevel(logConfig.Level) if err != nil { return nil, err } backendLeveled.SetLevel(logLevel, module) backends = append(backends, backendLeveled) } gologging.SetBackend(backends...) return Logger{loggr}, nil } // ConfigGetter implements the config.ConfigGetter interface func ConfigGetter(e config.ExtraConfig) interface{} { v, ok := e[Namespace] if !ok { return nil } tmp, ok := v.(map[string]interface{}) if !ok { return nil } cfg := Config{} if v, ok := tmp["stdout"]; ok { cfg.StdOut = v.(bool) } if v, ok := tmp["syslog"]; ok { cfg.Syslog = v.(bool) } if v, ok := tmp["level"]; ok { cfg.Level = v.(string) } if v, ok := tmp["prefix"]; ok { cfg.Prefix = v.(string) } if v, ok := tmp["format"]; ok { cfg.Format = v.(string) } if v, ok := tmp["custom_format"]; ok { cfg.CustomFormat = v.(string) } return cfg } // Config is the custom config struct containing the params for the logger type Config struct { Level string StdOut bool Syslog bool Prefix string Format string CustomFormat string } // Logger is a wrapper over a github.com/op/go-logging logger type Logger struct { logger *gologging.Logger } // Debug implements the logger interface func (l Logger) Debug(v ...interface{}) { l.logger.Debug(v...) } // Info implements the logger interface func (l Logger) Info(v ...interface{}) { l.logger.Info(v...) } // Warning implements the logger interface func (l Logger) Warning(v ...interface{}) { l.logger.Warning(v...) } // Error implements the logger interface func (l Logger) Error(v ...interface{}) { l.logger.Error(v...) } // Critical implements the logger interface func (l Logger) Critical(v ...interface{}) { l.logger.Critical(v...) } // Fatal implements the logger interface func (l Logger) Fatal(v ...interface{}) { l.logger.Fatal(v...) }
package scrapers

import (
	//"github.com/berryhill/web-scrapper/models"

	"github.com/PuerkitoBio/goquery"
)

// simulating a db collection
// NOTE(review): products_store uses snake_case, which is non-idiomatic Go;
// renaming is deferred since other files in the package may reference it.
var products_store = map[int]string{
	1: "flyrods",
}

// Scraper is the contract a site-specific scraper must satisfy: per-item
// field extractors operating on a goquery selection, plus a Scrape entry
// point that runs the whole crawl.
type Scraper interface {
	// Per-item extractors: each pulls one field out of a selected DOM node.
	getBrand(item *goquery.Selection) (string, error)
	getName(item *goquery.Selection) (string, error)
	getTitle(item *goquery.Selection) (string, error)
	getPrice(item *goquery.Selection) (string, error)
	getUrl(item *goquery.Selection) (string, error)
	getImg(item *goquery.Selection) (string, error)
	getDetails(item *goquery.Selection) ([]string, error)
	//Scrape() (products []*models.Product, errs []error)
	// Scrape runs the crawl and returns a summary map plus any errors
	// encountered along the way.
	Scrape() (response map[string]int, errs []error)
}
package test

import (
	"github.com/sirupsen/logrus"
	"github.com/tietang/props/ini"
	"github.com/tietang/props/kvs"
	"go-resk/src/infra"
	"go-resk/src/infra/base"
)

// init bootstraps the application for tests: it registers the infrastructure
// starters (registration order matters — props/config first, then DB, logging
// and validation), loads config.ini relative to this file, and starts the app.
func init() {
	logrus.Info("测试Starter初始化")
	infra.Register(&base.PropsStarter{})
	infra.Register(&base.DbxStarter{})
	infra.Register(&base.LogStarter{})
	infra.Register(&base.ValidatorStarter{})
	// Resolve config.ini next to this source file (1 stack frame up).
	path := kvs.GetCurrentFilePath("config.ini", 1)
	conf := ini.NewIniFileCompositeConfigSource(path)
	app := infra.NewBootApplication(conf)
	app.Start()
}
package cmd

import (
	"fmt"
	"os"
	"strings"
	"time"

	"github.com/fugue/fugue-client/client/environments"
	"github.com/fugue/fugue-client/client/scans"
	"github.com/fugue/fugue-client/format"
	"github.com/fugue/fugue-client/models"
	"github.com/go-openapi/runtime"
	"github.com/spf13/cobra"
)

// NewTriggerScanCommand returns a command that scans a specified environment.
// It validates the environment, kicks off a scan, optionally polls until the
// scan leaves IN_PROGRESS (--wait), and prints a summary table.
func NewTriggerScanCommand() *cobra.Command {

	var wait bool
	var scanFailureExitCode int

	cmd := &cobra.Command{
		Use:   "scan [environment_id]",
		Short: "Trigger a scan",
		Args:  cobra.ExactArgs(1),
		Run: func(cmd *cobra.Command, args []string) {

			client, auth := getClient()

			environmentID := args[0]

			// Look the environment up first so we can give a targeted error
			// for unknown IDs and reject repository environments.
			getEnvironmentParams := environments.NewGetEnvironmentParams()
			getEnvironmentParams.EnvironmentID = environmentID

			getEnvironmentResp, err := client.Environments.GetEnvironment(getEnvironmentParams, auth)
			if err != nil {
				switch respError := err.(type) {
				case *runtime.APIError:
					if respError.Code == 404 {
						Fatal("Environment not found", DefaultErrorExitCode)
					}
					CheckErr(err)
				default:
					CheckErr(err)
				}
			}

			if strings.ToLower(getEnvironmentResp.Payload.Provider) == "repository" {
				Fatal("Repository environment scans should be started through regula", DefaultErrorExitCode)
			}

			createParams := scans.NewCreateScanParams()
			createParams.EnvironmentID = environmentID

			createResp, err := client.Scans.CreateScan(createParams, auth)
			if err != nil {
				switch respError := err.(type) {
				case *scans.CreateScanBadRequest:
					Fatal(respError.Payload.Message, DefaultErrorExitCode)
				case *runtime.APIError:
					if respError.Code == 404 {
						Fatal("Environment not found", DefaultErrorExitCode)
					}
					CheckErr(err)
				default:
					CheckErr(err)
				}
			}

			scanID := createResp.Payload.ID

			params := scans.NewGetScanParams()
			params.ScanID = scanID

			var scan *models.ScanWithSummary
			var summary models.ResourceSummary

			// Poll every 30s until the scan finishes; without --wait we read
			// the status once and fall through immediately.
			for {
				resp, err := client.Scans.GetScan(params, auth)
				CheckErr(err)
				scan = resp.Payload
				if resp.Payload.ResourceSummary != nil {
					summary = *resp.Payload.ResourceSummary
				}
				if scan.Status != "IN_PROGRESS" || !wait {
					break
				}
				time.Sleep(time.Second * 30)
			}

			var items []interface{}

			if !wait {
				items = []interface{}{
					Item{"SCAN_ID", scan.ID},
					Item{"CREATED_AT", format.Unix(scan.CreatedAt)},
					Item{"STATUS", scan.Status},
				}
			} else {
				message := "-"
				if scan.Message != "" {
					message = scan.Message
				}
				items = []interface{}{
					Item{"SCAN_ID", scan.ID},
					Item{"CREATED_AT", format.Unix(scan.CreatedAt)},
					Item{"FINISHED_AT", format.Unix(scan.FinishedAt)},
					Item{"STATUS", scan.Status},
					Item{"MESSAGE", message},
					Item{"RESOURCE_COUNT", summary.Total},
					Item{"RESOURCE_TYPES", summary.ResourceTypes},
					Item{"COMPLIANT", summary.Compliant},
					Item{"NONCOMPLIANT", summary.Noncompliant},
					Item{"RULES_PASSED", summary.RulesPassed},
					Item{"RULES_FAILED", summary.RulesFailed},
				}
			}

			table, err := format.Table(format.TableOpts{
				Rows:       items,
				Columns:    []string{"Attribute", "Value"},
				ShowHeader: true,
			})
			CheckErr(err)

			for _, tableRow := range table {
				fmt.Println(tableRow)
			}

			// Fix: dropped the redundant int() conversion — the flag value is
			// already an int.
			if wait && scan.Status == "ERROR" {
				os.Exit(scanFailureExitCode)
			}
		},
	}

	cmd.Flags().BoolVar(&wait, "wait", false, "Wait for scan to complete")
	cmd.Flags().IntVar(&scanFailureExitCode, "scan-failure-exit-code", 0, "Sets the exit code to raise when a scan fails. Default is 0. Used with the wait flag")
	return cmd
}

func init() {
	rootCmd.AddCommand(NewTriggerScanCommand())
}
package transshipment

import (
	"testing"
)

// TestOverwrite is an empty placeholder.
// TODO(review): this test exercises nothing — implement the overwrite
// scenario or remove the stub so coverage reports are not misleading.
func TestOverwrite(t *testing.T) {

}
/* * Copyright 2018, Oath Inc. * Licensed under the terms of the MIT license. See LICENSE file in the project root for terms. */ package main import ( "github.com/serhii-samoilenko/pod-startup-lock/lock/config" "github.com/serhii-samoilenko/pod-startup-lock/lock/service" "github.com/serhii-samoilenko/pod-startup-lock/lock/state" ) func main() { conf := config.Parse() endpointChecker := service.NewEndpointChecker( conf.HealthPassTimeout, conf.HealthFailTimeout, conf.HealthEndpoints, ) healthFunc := endpointChecker.HealthFunction() lock := state.NewLock(conf.ParallelLocks) handler := service.NewLockHandler(lock, conf.LockTimeout, healthFunc) go service.Run(conf.Host, conf.Port, handler) go endpointChecker.Run() select {} // Wait forever and let child goroutines run }
// Package triangle : all about triangle operation. package triangle import ( "math" ) // Kind is to determine what kind triangle it is. type Kind int const ( // NaT means not a triangle NaT = iota // Equ means equilateral triangle Equ // Iso means isosceles triangle Iso // Sca means scalene triangle Sca ) // KindFromSides to determine triangle kind by three side. func KindFromSides(a, b, c float64) Kind { if a+b+c == 0 || math.IsInf(a+b+c, 0) || math.IsNaN(a+b+c) || a+b < c || a+c < b || b+c < a { return NaT } if a == b && a == c { return Equ } if a == b || a == c || b == c { return Iso } return Sca }
package models

import (
	"errors"
	"time"

	"github.com/KEXPCapstone/shelves-server/library/models/releases"
	"github.com/globalsign/mgo/bson"
)

// Shelf is a user-owned, named collection of releases as stored in Mongo.
type Shelf struct {
	ID           bson.ObjectId       `json:"id" bson:"_id"`
	OwnerID      bson.ObjectId       `json:"ownerId"`
	OwnerName    string              `json:"ownerName"`
	Name         string              `json:"name"`
	Releases     []*releases.Release `json:"releases"`
	Description  string              `json:"description"` // Maybe
	DateCreated  time.Time           `json:"dateCreated"`
	DateLastEdit time.Time           `json:"dateLastEdit"`
	Featured     bool                `json:"featured"`
}

// NewShelf carries the client-supplied fields for creating a shelf.
type NewShelf struct {
	Name        string `json:"name"`
	Description string `json:"description"`
	Featured    bool   `json:"featured"`
}

// Validate returns an error if the new shelf is invalid
// (currently: an empty name).
func (ns *NewShelf) Validate() error {
	if len(ns.Name) == 0 {
		return errors.New(ErrEmptyShelfName)
	}
	return nil
}

// ToShelf validates ns and converts it into a fully-populated Shelf owned by
// the given user, with a fresh ObjectId, empty release list and timestamps
// set to the current time.
func (ns *NewShelf) ToShelf(userID bson.ObjectId, ownerName string) (*Shelf, error) {
	if err := ns.Validate(); err != nil {
		return nil, err
	}
	// Fix: use a single timestamp so DateCreated and DateLastEdit are exactly
	// equal at creation (two time.Now() calls could differ by nanoseconds).
	now := time.Now()
	shelf := Shelf{
		ID:           bson.NewObjectId(),
		OwnerID:      userID,
		OwnerName:    ownerName,
		Name:         ns.Name,
		Releases:     []*releases.Release{},
		Description:  ns.Description,
		DateCreated:  now,
		DateLastEdit: now,
		Featured:     ns.Featured,
	}
	return &shelf, nil
}
package request

import (
	"bytes"
	"errors"
	"io/ioutil"
	"net/http"
	"net/url"
	"path"
	"testing"
)

// clientFunc adapts a plain function to the Do-style client interface that
// NewRequest expects, letting tests stub HTTP responses without a server.
type clientFunc func(req *http.Request) (*http.Response, error)

// Do satisfies the client interface by invoking the function itself.
func (f clientFunc) Do(req *http.Request) (*http.Response, error) {
	return f(req)
}

// TestRequestSetHeaders verifies that the request sent by Do carries the
// "Content-Type: application/json" header. The stubbed client performs the
// assertion and returns a canned 403 response.
func TestRequestSetHeaders(t *testing.T) {
	server := clientFunc(func(req *http.Request) (*http.Response, error) {
		if req.Header.Get("Content-Type") != "application/json" {
			t.Errorf("unexpected headers: %#v", req.Header)
		}
		return &http.Response{
			StatusCode: http.StatusForbidden,
			Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
		}, nil
	})
	NewRequest(server, &url.URL{}).Do()
}

// TestRequestURI verifies that Resource and Name are appended to the base
// URL path in order (base + "/" + resource + "/" + name).
func TestRequestURI(t *testing.T) {
	var (
		requestURL, _ = url.Parse("https://auth0.example.io/v5")
		resource      = "users"
		resourceName  = "johndoe"
		expectedURL   = requestURL.Path + "/" + path.Join(resource, resourceName)
	)

	r := NewRequest(nil, requestURL).
		Resource(resource).
		Name(resourceName)

	if r.URL().Path != expectedURL {
		t.Errorf("expected: %s, got: %s", expectedURL, r.URL().Path)
	}
}

// TestRequestDo covers the error paths of Do().Raw(): a pre-existing request
// error and a transport error from the client. In both cases an error must be
// returned and the body must be nil.
func TestRequestDo(t *testing.T) {
	testCases := []struct {
		Request *Request
		Err     bool
		ErrFn   func(error) bool
	}{
		{
			// A request that already failed before being sent.
			Request: &Request{err: errors.New("an request error")},
			Err:     true,
		},
		{
			// The underlying client returns a transport-level error.
			Request: &Request{
				Client: clientFunc(func(req *http.Request) (*http.Response, error) {
					return nil, errors.New("error from server")
				}),
			},
			Err: true,
		},
	}

	for i, testCase := range testCases {
		body, err := testCase.Request.Do().Raw()
		hasErr := err != nil
		if hasErr != testCase.Err {
			t.Errorf("%d: expected: %t, got: %t: %v", i, testCase.Err, hasErr, err)
		}
		if hasErr && body != nil {
			t.Errorf("%d: body should be nil when error is returned", i)
		}
	}
}
package server

import (
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
)

// assertHealthOK performs one request against the health-check handler and
// asserts the healthy response: HTTP 200 with a body starting with "ok-".
// Extracted from TestSimpleHealthCheck, which repeated this stanza verbatim.
func assertHealthOK(t *testing.T, h http.Handler, req *http.Request) {
	t.Helper()
	wr := httptest.NewRecorder()
	h.ServeHTTP(wr, req)
	if wr.Code != http.StatusOK {
		t.Errorf("SimpleHealthCheck expected 200 response code, got %d", wr.Code)
	}
	if gotBody := wr.Body.String(); !strings.HasPrefix(gotBody, "ok-") {
		t.Errorf("SimpleHealthCheck expected response body to start with 'ok-', got %s", gotBody)
	}
}

// TestSimpleHealthCheck checks that the health endpoint answers "ok-..." both
// while started and after Stop (the original test asserts the same healthy
// response in both states).
func TestSimpleHealthCheck(t *testing.T) {
	path := "/status.txt"
	s := NewSimpleHealthCheck(path)
	a := NewActivityMonitor()
	s.Start(a)

	req, _ := http.NewRequest("GET", path, nil)

	// Healthy while running.
	assertHealthOK(t, s, req)

	// Still reports ok after Stop.
	s.Stop()
	assertHealthOK(t, s, req)
}
package main

import (
	"github.com/fedesog/webdriver"
	"log"
	"time"
)

// main drives a Chrome session through a slide-to-unlock demo page.
//
// Fixes over the original: errors were only logged and execution continued,
// so a failed NewSession or FindElement led to a nil-pointer dereference on
// the next call; fatal/guarded handling is used instead, and the large block
// of dead commented-out code was removed.
func main() {
	chromeDriver := webdriver.NewChromeDriver("./chromedriver83.0.4103.39")
	if err := chromeDriver.Start(); err != nil {
		log.Fatalf("could not start chromedriver: %v", err)
	}

	desired := webdriver.Capabilities{"Platform": "Linux"}
	required := webdriver.Capabilities{}
	session, err := chromeDriver.NewSession(desired, required)
	if err != nil {
		chromeDriver.Stop()
		log.Fatalf("could not create session: %v", err)
	}

	if err := session.Url("https://www.helloweba.net/demo/2017/unlock/"); err != nil {
		log.Printf("could not open url: %v", err)
	}
	// Give the page time to load before locating the widget.
	time.Sleep(5 * time.Second)

	el, err := session.FindElement(webdriver.ClassName, "slide-to-unlock-handle")
	if err != nil {
		log.Printf("could not find unlock handle: %v", err)
	} else {
		el.Click()
	}
	time.Sleep(1 * time.Second)

	// Keep the browser open for manual inspection, as in the original.
	time.Sleep(60 * time.Second)

	session.Delete()
	chromeDriver.Stop()
}
package camo

import (
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"net"
)

const (
	// IPv4HeaderLen is IPv4 header length without extension headers
	IPv4HeaderLen = 20
	// IPv6HeaderLen is IPv6 header length without extension headers
	IPv6HeaderLen = 40
)

// GetIPPacketVersion gets ip protocol version from ip packet.
// NOTE: b must be non-empty; the first byte's high nibble is the version.
func GetIPPacketVersion(b []byte) int {
	return int(b[0] >> 4)
}

// IPv4Header represents an IPv4 header.
// The accessors index directly into the byte slice and assume
// len(b) >= IPv4HeaderLen; they perform no bounds checking themselves.
type IPv4Header []byte

// Version is protocol version
func (b IPv4Header) Version() int {
	return int(b[0] >> 4)
}

// Len is header length
// (the IHL field counts 32-bit words, hence the shift by 2).
func (b IPv4Header) Len() int {
	return int(b[0]&0x0f) << 2
}

// TOS is type-of-service
func (b IPv4Header) TOS() int {
	return int(b[1])
}

// TotalLen is packet total length
func (b IPv4Header) TotalLen() int {
	return int(binary.BigEndian.Uint16(b[2:4]))
}

// ID is identification
func (b IPv4Header) ID() int {
	return int(binary.BigEndian.Uint16(b[4:6]))
}

// Flags is IPv4 flags (the top 3 bits of the flags/fragment-offset word).
func (b IPv4Header) Flags() int {
	return (int(binary.BigEndian.Uint16(b[6:8])) & 0xe000) >> 13
}

// FragOff is fragment offset (the low 13 bits of the same word).
func (b IPv4Header) FragOff() int {
	return int(binary.BigEndian.Uint16(b[6:8])) & 0x1fff
}

// TTL is time-to-live
func (b IPv4Header) TTL() int {
	return int(b[8])
}

// Protocol is next protocol
func (b IPv4Header) Protocol() int {
	return int(b[9])
}

// Checksum is IPv4 header checksum
func (b IPv4Header) Checksum() int {
	return int(binary.BigEndian.Uint16(b[10:12]))
}

// Src is source address
func (b IPv4Header) Src() net.IP {
	return net.IP(b[12:16])
}

// Dst is destination address
func (b IPv4Header) Dst() net.IP {
	return net.IP(b[16:20])
}

// Options is extension headers.
// Returns nil when the header has no options; if the slice is shorter than
// the declared header length, whatever option bytes are present are returned.
func (b IPv4Header) Options() []byte {
	hdrlen := b.Len()
	if hdrlen > IPv4HeaderLen {
		if len(b) >= hdrlen {
			return b[IPv4HeaderLen:hdrlen]
		}
		return b[IPv4HeaderLen:]
	}
	return nil
}

// String renders all fixed IPv4 header fields for debugging/logging.
func (b IPv4Header) String() string {
	return fmt.Sprintf("ver=%d hdrlen=%d tos=%#x totallen=%d id=%#x flags=%#x fragoff=%#x ttl=%d proto=%d cksum=%#x src=%v dst=%v",
		b.Version(), b.Len(), b.TOS(), b.TotalLen(), b.ID(), b.Flags(), b.FragOff(), b.TTL(), b.Protocol(), b.Checksum(), b.Src(), b.Dst())
}

// IPv6Header represents an IPv6 base header.
// The accessors assume len(b) >= IPv6HeaderLen and do not bounds-check.
type IPv6Header []byte

// Version is protocol version
func (b IPv6Header) Version() int {
	return int(b[0]) >> 4
}

// TrafficClass is traffic class
func (b IPv6Header) TrafficClass() int {
	return int(b[0]&0x0f)<<4 | int(b[1])>>4
}

// FlowLabel is flow label
func (b IPv6Header) FlowLabel() int {
	return int(b[1]&0x0f)<<16 | int(b[2])<<8 | int(b[3])
}

// PayloadLen is payload length
func (b IPv6Header) PayloadLen() int {
	return int(binary.BigEndian.Uint16(b[4:6]))
}

// NextHeader is next header
func (b IPv6Header) NextHeader() int {
	return int(b[6])
}

// HopLimit is hop limit
func (b IPv6Header) HopLimit() int {
	return int(b[7])
}

// Src is source address
func (b IPv6Header) Src() net.IP {
	return net.IP(b[8:24])
}

// Dst is destination address
func (b IPv6Header) Dst() net.IP {
	return net.IP(b[24:40])
}

// String renders all IPv6 base header fields for debugging/logging.
func (b IPv6Header) String() string {
	return fmt.Sprintf("ver=%d tclass=%#x flowlbl=%#x payloadlen=%d nxthdr=%d hoplim=%d src=%v dst=%v",
		b.Version(), b.TrafficClass(), b.FlowLabel(), b.PayloadLen(), b.NextHeader(), b.HopLimit(), b.Src(), b.Dst())
}

// errBadPacketRead reports a packet whose header declares an impossible or
// unrecognized length/version.
var errBadPacketRead = errors.New("bad packet read")

// ReadIPPacket reads an IPv4/IPv6 packet from the io.Reader into b and
// returns the packet's total length. It first reads IPv4HeaderLen bytes
// (enough to learn the version and length of either family), then reads the
// remainder based on the declared total/payload length.
// Returns io.ErrShortBuffer if b cannot hold the header or the whole packet.
func ReadIPPacket(r io.Reader, b []byte) (int, error) {
	if len(b) < IPv4HeaderLen {
		return 0, io.ErrShortBuffer
	}
	n, err := io.ReadFull(r, b[:IPv4HeaderLen])
	if err != nil {
		return 0, err
	}
	var totalLen int
	switch GetIPPacketVersion(b) {
	case 4:
		totalLen = IPv4Header(b).TotalLen()
		if totalLen < IPv4HeaderLen {
			return 0, errBadPacketRead
		}
	case 6:
		// how to handle jumbo frame?
		totalLen = IPv6Header(b).PayloadLen() + IPv6HeaderLen
	default:
		return 0, errBadPacketRead
	}
	if totalLen > len(b) {
		return 0, io.ErrShortBuffer
	}
	_, err = io.ReadFull(r, b[n:totalLen])
	if err != nil {
		return 0, err
	}
	return totalLen, nil
}

// packetIO adapts a raw io.ReadWriteCloser (e.g. a TUN device) so that each
// Read returns exactly one whole IP packet.
type packetIO struct {
	rw io.ReadWriteCloser
}

// Read reads one complete IP packet into b.
func (p *packetIO) Read(b []byte) (int, error) {
	return ReadIPPacket(p.rw, b)
}

// Write forwards the packet bytes to the underlying writer unchanged.
func (p *packetIO) Write(b []byte) (int, error) {
	return p.rw.Write(b)
}

// Close closes the underlying ReadWriteCloser.
func (p *packetIO) Close() error {
	return p.rw.Close()
}
package main import ( "context" "encoding/json" "flag" "fmt" "log" "math" "net/http" "strconv" "sync" "time" "github.com/pkg/errors" "github.com/prometheus/client_golang/api" hetznerapi "github.com/mxschmitt/golang-hetzner-robot-metrics/pkg/api" "github.com/julienschmidt/httprouter" "github.com/prometheus/client_golang/api/prometheus/v1" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" ) var store = &Store{} type Store struct { Data *hetznerapi.LiveData sync.RWMutex } func handleGetServer(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { store.RLock() found := false if store.Data != nil { for _, server := range store.Data.Server { if strconv.Itoa(server.Key) == ps.ByName("id") { json.NewEncoder(w).Encode(server) found = true break } } } if !found { json.NewEncoder(w).Encode(nil) } store.RUnlock() } func main() { addr := flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.") flag.Parse() hetznerServersGauge := prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "hetzner_robot_servers_price", Help: "Hetzner Robot Server", }, []string{"key"}) prometheus.MustRegister(hetznerServersGauge) removeOldServers := func(data *hetznerapi.LiveData) error { client, err := api.NewClient(api.Config{ Address: "http://prometheus:9090", }) if err != nil { return errors.Wrap(err, "could not create new prometheus api client") } prometheusAPI := v1.NewAPI(client) resp, err := prometheusAPI.LabelValues(context.Background(), "key") if err != nil { return errors.Wrap(err, "could not query to prometheus") } soldServersMatches := []string{} for _, label := range resp { contains := false for _, server := range data.Server { if strconv.Itoa(server.Key) == string(label) { contains = true } } if !contains { soldServersMatches = append(soldServersMatches, fmt.Sprintf(`hetzner_robot_servers_price{key="%s"}`, label)) } } if len(soldServersMatches) > 0 { fmt.Printf("Deleting sold servers: %d\n", 
len(soldServersMatches)) if err := prometheusAPI.DeleteSeries(context.Background(), soldServersMatches, time.Unix(0, 0), time.Now()); err != nil { return errors.Wrap(err, "could not delete sold servers") } fmt.Println("Deleted sold servers successfully") if err := prometheusAPI.CleanTombstones(context.Background()); err != nil { return errors.Wrap(err, "could not clear Tombstones") } fmt.Println("Cleaned Tombstones") } return nil } go func() { for { data, err := hetznerapi.GetLiveData() if err != nil { log.Printf("could not get live data: %v", err) continue } store.Lock() store.Data = data store.Unlock() for _, server := range data.Server { price, err := strconv.ParseFloat(server.Price, 64) if err != nil { log.Printf("could not parse price: %v", err) continue } price = math.Round(price * 1.19) hetznerServersGauge.WithLabelValues(strconv.Itoa(server.Key)).Set(price) } if err := removeOldServers(data); err != nil { log.Printf("could not remove old servers: %v", err) } log.Printf("Crawled %d servers with hash %s", len(data.Server), data.Hash) // Sleep 1 minute time.Sleep(60 * time.Second) } }() log.Printf("Listening on %s", *addr) router := httprouter.New() router.Handler("GET", "/metrics", promhttp.Handler()) router.GET("/hetzner/server/:id", handleGetServer) log.Fatal(http.ListenAndServe(*addr, router)) }
package main //847. 访问所有节点的最短路径 //存在一个由 n 个节点组成的无向连通图,图中的节点按从 0 到 n - 1 编号。 // //给你一个数组 graph 表示这个图。其中,graph[i] 是一个列表,由所有与节点 i 直接相连的节点组成。 // //返回能够访问所有节点的最短路径的长度。你可以在任一节点开始和停止,也可以多次重访节点,并且可以重用边。 // // // //示例 1: // // //输入:graph = [[1,2,3],[0],[0],[0]] //输出:4 //解释:一种可能的路径为 [1,0,2,0,3] //示例 2: // // // //输入:graph = [[1],[0,2,4],[1,3,4],[2],[1,2]] //输出:4 //解释:一种可能的路径为 [0,1,4,2,3] // // //提示: // //n == graph.length //1 <= n <= 12 //0 <= graph[i].length <n //graph[i] 不包含 i //如果 graph[a] 包含 b ,那么 graph[b] 也包含 a //输入的图总是连通图 func shortestPathLength(graph [][]int) int { n := len(graph) seen := make([][]bool, n) type tuple struct{ u, mask, dist int } queue := make([]tuple, 0) for i := 0; i < n; i++ { seen[i] = make([]bool, 1<<n) seen[i][1<<i] = true queue = append(queue, tuple{i, 1 << i, 0}) } for { top := queue[0] queue = queue[1:] if top.mask == (1<<n)-1 { return top.dist } for _, v := range graph[top.u] { mask := top.mask | 1<<v if !seen[v][mask] { queue = append(queue, tuple{v, mask, top.dist + 1}) seen[v][mask] = true } } } }
// Copyright 2017 The Aiicy Team.
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.

package template

import (
	"container/list"
	"crypto/md5"
	"fmt"
	"html/template"
	"io"
	"mime"
	"path/filepath"
	"runtime"
	"strings"
	"time"

	"github.com/Aiicy/AiicyDS/modules/markdown"
	"github.com/microcosm-cc/bluemonday"
	"gopkg.in/editorconfig/editorconfig-core-go.v1"

	"github.com/Aiicy/AiicyDS/modules/base"
	"github.com/Aiicy/AiicyDS/modules/setting"
)

// NewFuncMap builds the template helper functions exposed to HTML templates:
// app/setting accessors, formatting helpers, sanitizers and small utilities.
func NewFuncMap() []template.FuncMap {
	return []template.FuncMap{map[string]interface{}{
		"GoVer": func() string {
			return strings.Title(runtime.Version())
		},
		"UseHTTPS": func() bool {
			return strings.HasPrefix(setting.AppUrl, "https")
		},
		"AppName": func() string {
			return setting.AppName
		},
		"AppSubUrl": func() string {
			return setting.AppSubUrl
		},
		"AppUrl": func() string {
			return setting.AppUrl
		},
		"AppVer": func() string {
			return setting.AppVer
		},
		"AppDomain": func() string {
			return setting.Domain
		},
		"DisableGravatar": func() bool {
			return setting.DisableGravatar
		},
		"ShowFooterTemplateLoadTime": func() bool {
			return setting.ShowFooterTemplateLoadTime
		},
		// LoadTimes renders the elapsed time since startTime in milliseconds.
		"LoadTimes": func(startTime time.Time) string {
			return fmt.Sprint(time.Since(startTime).Nanoseconds()/1e6) + "ms"
		},
		"AvatarLink":   base.AvatarLink,
		"Safe":         Safe,
		"Sanitize":     bluemonday.UGCPolicy().Sanitize,
		"Str2html":     Str2html,
		"TimeSince":    base.TimeSince,
		"RawTimeSince": base.RawTimeSince,
		"FileSize":     base.FileSize,
		"Subtract":     base.Subtract,
		// add sums the int-valued arguments, silently skipping non-ints.
		"add": func(nums ...interface{}) int {
			total := 0
			for _, num := range nums {
				if n, ok := num.(int); ok {
					total += n
				}
			}
			return total
		},
		"DateFmtLong": func(t time.Time) string {
			return t.Format(time.RFC1123Z)
		},
		"DateFmtShort": func(t time.Time) string {
			return t.Format("Jan 02, 2006")
		},
		"List": List,
		// substring returns str[start:start+length]; length == -1 means
		// "to the end", and an out-of-range end returns str unchanged.
		"substring": func(str string, start, length int) string {
			if len(str) == 0 {
				return ""
			}
			end := start + length
			if length == -1 {
				end = len(str)
			}
			if len(str) < end {
				return str
			}
			return str[start:end]
		},
		"Join":        strings.Join,
		"MD5":         base.EncodeMD5,
		"EscapePound": EscapePound,
		"ThemeColorMetaTag": func() string {
			return setting.UI.ThemeColorMetaTag
		},
		// FilenameIsImage reports whether the filename's extension maps to
		// an image/* MIME type.
		"FilenameIsImage": func(filename string) bool {
			mimeType := mime.TypeByExtension(filepath.Ext(filename))
			return strings.HasPrefix(mimeType, "image/")
		},
		// TabSizeClass picks a CSS tab-size class from the editorconfig
		// definition for filename, defaulting to 8.
		"TabSizeClass": func(ec *editorconfig.Editorconfig, filename string) string {
			if ec != nil {
				def := ec.GetDefinitionForFilename(filename)
				if def.TabWidth > 0 {
					return fmt.Sprintf("tab-size-%d", def.TabWidth)
				}
			}
			return "tab-size-8"
		},
	}}
}

// Safe marks raw as trusted HTML, bypassing template escaping.
func Safe(raw string) template.HTML {
	return template.HTML(raw)
}

// Str2html sanitizes raw with the markdown sanitizer and marks the result
// as trusted HTML.
func Str2html(raw string) template.HTML {
	return template.HTML(markdown.Sanitizer.Sanitize(raw))
}

// List streams the elements of l over a channel so templates can range
// over a container/list. The channel is closed after the last element.
func List(l *list.List) chan interface{} {
	e := l.Front()
	c := make(chan interface{})
	go func() {
		for e != nil {
			c <- e.Value
			e = e.Next()
		}
		close(c)
	}()
	return c
}

// Actioner describes a feed/activity item rendered by templates.
type Actioner interface {
	GetOpType() int
	GetActUserName() string
	GetRepoUserName() string
	GetRepoName() string
	GetRepoPath() string
	GetRepoLink() string
	GetBranch() string
	GetContent() string
	GetCreate() time.Time
	GetIssueInfos() []string
}

// EscapePound percent-encodes characters that would break URLs built in
// templates (%, #, space, ?).
func EscapePound(str string) string {
	return strings.NewReplacer("%", "%25", "#", "%23", " ", "%20", "?", "%3F").Replace(str)
}

const qiniuDomain = "http://studygolang.qiniudn.com"

// Gravatar returns the avatar URL: a Qiniu-hosted image when avatar is set,
// the default gopher image when no email is available, and otherwise a
// gravatar URL derived from the MD5 of the email.
func Gravatar(avatar string, emailI interface{}, size uint16) string {
	if avatar != "" {
		return fmt.Sprintf("%s/avatar/%s?imageView2/2/w/%d", qiniuDomain, avatar, size)
	}
	email, ok := emailI.(string)
	if !ok {
		return fmt.Sprintf("%s/avatar/gopher28.png?imageView2/2/w/%d", qiniuDomain, size)
	}
	return fmt.Sprintf("http://gravatar.duoshuo.com/avatar/%s?s=%d", Md5(email), size)
}

// Md5 returns the hex-encoded MD5 digest of text.
func Md5(text string) string {
	hashMd5 := md5.New()
	io.WriteString(hashMd5, text)
	return fmt.Sprintf("%x", hashMd5.Sum(nil))
}
package main

import (
	"math"
	"os"

	"github.com/veandco/go-sdl2/gfx"
	"github.com/veandco/go-sdl2/sdl"
)

// Point is an (x, y) coordinate pair.
type Point [2]float64

// Sphere is a 10x10 grid of sphere-surface points.
type Sphere = [10][10]Point

const (
	FRAMES_PER_SECOND = 60
	MS_PER_FRAME      = uint32(1000 / FRAMES_PER_SECOND)
)

var (
	window *sdl.Window
	render *sdl.Renderer
	right  = true  // current horizontal direction of travel
	scale  = 120.0 // ball radius in pixels
	phase  = 0.0   // rotation phase of the checker pattern, degrees
	dp     = 2.5   // phase step per frame
	dx     = 2.1   // horizontal speed in pixels per frame
	y_ang  = 0.0   // bounce angle, degrees
	px     = 320.0 // ball centre x
	py     = 0.0   // ball centre y
)

// main sets up SDL and runs the fixed-rate render loop forever.
func main() {
	sdl.Init(sdl.INIT_EVERYTHING)
	window, _ = sdl.CreateWindow("Amiga Boing Ball", sdl.WINDOWPOS_UNDEFINED, sdl.WINDOWPOS_UNDEFINED, 640, 512, sdl.WINDOW_OPENGL)
	render, _ = sdl.CreateRenderer(window, -1, sdl.RENDERER_ACCELERATED)
	for {
		listen_for_events()
		start_ticks := sdl.GetTicks()
		run_loop()
		render.Present()
		sync_framerate(start_ticks)
	}
}

// run_loop renders a single frame: physics, sphere points, shadow, grid, ball.
func run_loop() {
	clear_background()
	do_physics()
	var points = calc_points(math.Mod(phase, 22.5))
	transform(points)
	draw_shadow(points)
	draw_grid()
	fill_tiles(points, phase >= 22.5)
}

// listen_for_events quits the program on ESC or an SDL quit event.
func listen_for_events() {
	for {
		evt := sdl.PollEvent()
		escape_pressed := sdl.GetKeyboardState()[sdl.GetScancodeFromKey(sdl.K_ESCAPE)] != 0
		if escape_pressed || (evt != nil && evt.GetType() == sdl.QUIT) {
			sdl.Quit()
			os.Exit(0)
		}
		break
	}
}

// sync_framerate sleeps off the remainder of the per-frame time budget.
func sync_framerate(start_ticks uint32) {
	frame_ms := sdl.GetTicks() - start_ticks
	if frame_ms < MS_PER_FRAME {
		sdl.Delay(MS_PER_FRAME - frame_ms)
	}
}

func clear_background() {
	render.SetDrawColor(170, 170, 170, 255) // light gray
	render.Clear()
}

// do_physics advances the rotation phase, horizontal position (bouncing off
// the walls at x=135 and x=505) and the cosine-shaped vertical bounce.
func do_physics() {
	phase_shift := dp
	if right {
		phase_shift = 45.0 - dp
	}
	phase = math.Mod(phase+phase_shift, 45.0)
	if right {
		px += dx
	} else {
		px -= dx
	}
	if px >= 505 {
		right = false
	} else if px <= 135 {
		right = true
	}
	y_ang = math.Mod((y_ang + 1.5), 360.0)
	py = 350.0 - 200.0*math.Abs(math.Cos(y_ang*math.Pi/180.0))
}

// get_lat returns the latitude in degrees of ring i, phase-shifted; the
// first and last rings are pinned to the poles.
func get_lat(phase float64, i int) float64 {
	if i == 0 {
		return -90.0
	} else if i == 9 {
		return 90.0
	} else {
		return -90.0 + phase + ((float64(i) - 1.0) * 22.5)
	}
}

// calc_points computes unit-sphere surface points for the given phase.
func calc_points(phase float64) *Sphere {
	sin_lat := make([]float64, 10)
	for i := 0; i < len(sin_lat); i++ {
		lat := get_lat(phase, i)
		sin_lat[i] = math.Sin(lat * math.Pi / 180.0)
	}
	points := Sphere{}
	for j := 0; j < len(sin_lat)-1; j++ {
		lon := -90.0 + float64(j)*22.5
		y := math.Sin(lon * math.Pi / 180.0)
		l := math.Cos(lon * math.Pi / 180.0)
		for k := 0; k < len(sin_lat); k++ {
			x := sin_lat[k] * l
			points[k][j] = Point{x, y}
		}
	}
	return &points
}

// transform tilts the sphere and maps it into screen coordinates.
func transform(points *Sphere) {
	tilt_sphere(points, 17.0)
	scale_and_translate(points)
}

// tilt_sphere rotates every point by ang degrees in the screen plane.
func tilt_sphere(points *Sphere, ang float64) {
	st := math.Sin(ang * math.Pi / 180.0)
	ct := math.Cos(ang * math.Pi / 180.0)
	for i := 0; i < len(points); i++ {
		for j := 0; j < len(points); j++ {
			pt := points[i][j]
			x := pt[0]*ct - pt[1]*st
			y := pt[0]*st + pt[1]*ct
			points[i][j] = Point{x, y}
		}
	}
}

// scale_and_translate scales unit coordinates to pixels and recentres on
// the ball position (px, py).
func scale_and_translate(points *Sphere) {
	for i := 0; i < len(points); i++ {
		for j := 0; j < len(points); j++ {
			pt := points[i][j]
			x := pt[0]*scale + px
			y := pt[1]*scale + py
			points[i][j] = Point{x, y}
		}
	}
}

// draw_shadow draws the ball's outline 50px to the right in dark gray.
func draw_shadow(points *Sphere) {
	poly_x, poly_y := make([]int16, 16), make([]int16, 16)
	for i := 0; i <= 8; i++ {
		p := points[0][i]
		poly_x[i] = int16(p[0]) + 50
		poly_y[i] = int16(p[1])
	}
	// NOTE(review): this second loop starts writing at index 7, overwriting
	// the last two entries of the first loop — presumably intentional to
	// stitch the two half-outlines together; confirm.
	for i := 0; i <= 8; i++ {
		p := points[9][8-i]
		poly_x[7+i] = int16(p[0]) + 50
		poly_y[7+i] = int16(p[1])
	}
	gfx.FilledPolygonRGBA(render, poly_x, poly_y, 102, 102, 102, 255) // dark gray
}

// draw_grid draws the purple background grid with a fake floor perspective.
func draw_grid() {
	render.SetDrawColor(183, 45, 168, 255) // purple
	render.SetScale(1, 1)
	var is = make([]int, 13)
	for i := range is {
		y := int32(i * 36)
		render.DrawLine(50, y, 590, y)
	}
	is = make([]int, 16)
	for i := range is {
		x := int32(50 + i*36)
		render.DrawLine(x, 0, x, 432)
	}
	// Sloped verticals of the perspective floor strip.
	for i := range is {
		render.DrawLine(int32(50+i*36), 432, int32(float32(i)*42.66), 480)
	}
	ys := []int{442, 454, 468}
	is = make([]int, 3)
	for i := range is {
		y := ys[i]
		x1 := 50.0 - 50.0*(float32(y)-432.0)/(480.0-432.0)
		render.DrawLine(int32(x1), int32(y), int32(640-x1), int32(y))
	}
}

// fill_tiles paints the sphere's quads, alternating red and white; alter
// selects which colour the pattern starts with.
func fill_tiles(points *Sphere, alter bool) {
	poly_n := uint8(4)
	for j := 0; j < 8; j++ {
		for i := 0; i <= 8; i++ {
			p1 := points[i][j]
			p2 := points[i+1][j]
			p3 := points[i+1][j+1]
			p4 := points[i][j+1]
			poly_x, poly_y := make([]int16, poly_n), make([]int16, poly_n)
			poly_x[0] = int16(p1[0])
			poly_y[0] = int16(p1[1])
			poly_x[1] = int16(p2[0])
			poly_y[1] = int16(p2[1])
			poly_x[2] = int16(p3[0])
			poly_y[2] = int16(p3[1])
			poly_x[3] = int16(p4[0])
			poly_y[3] = int16(p4[1])
			r, g, b := uint8(255), uint8(255), uint8(255)
			if alter {
				r, g, b = uint8(255), uint8(0), uint8(0)
			}
			gfx.FilledPolygonRGBA(render, poly_x, poly_y, r, g, b, 255)
			alter = !alter
		}
	}
}
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package service_test

import (
	"bytes"
	"context"
	"reflect"
	"testing"
	"time"

	"github.com/google/gapid/core/app/auth"
	"github.com/google/gapid/core/assert"
	"github.com/google/gapid/core/event/task"
	"github.com/google/gapid/core/log"
	"github.com/google/gapid/core/net/grpcutil"
	"github.com/google/gapid/core/os/device/bind"
	"github.com/google/gapid/core/os/device/host"
	"github.com/google/gapid/gapis/api"
	"github.com/google/gapid/gapis/capture"
	gapis "github.com/google/gapid/gapis/client"
	"github.com/google/gapid/gapis/database"
	"github.com/google/gapid/gapis/replay"
	"github.com/google/gapid/gapis/server"
	"github.com/google/gapid/gapis/service"
	"github.com/google/gapid/gapis/service/path"
	"github.com/google/gapid/gapis/stringtable"
	"github.com/google/gapid/test/integration/gles/snippets"

	"google.golang.org/grpc"
)

// startServerAndGetGrpcClient starts a GAPIS gRPC server on an in-process
// pipe listener, dials it, and returns the connected client plus a shutdown
// function that closes the client and gracefully stops the server.
func startServerAndGetGrpcClient(ctx context.Context, config server.Config) (service.Service, error, func()) {
	l := grpcutil.NewPipeListener("pipe:servicetest")
	schan := make(chan *grpc.Server, 1)
	go server.NewWithListener(ctx, l, config, schan)
	svr := <-schan
	conn, err := grpcutil.Dial(ctx, "pipe:servicetest",
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(auth.ClientInterceptor(config.AuthToken)),
		grpc.WithDialer(grpcutil.GetDialer(ctx)),
	)
	if err != nil {
		return nil, log.Err(ctx, err, "Dialing GAPIS"), nil
	}
	client := gapis.Bind(conn)
	return client, nil, func() {
		client.Close()
		svr.GracefulStop()
	}
}

// setup builds the per-test context (device registry, replay manager,
// in-memory database), starts a server and returns the connected client.
// NOTE(review): the declared second result type is server.Server but the
// returned value is the gRPC client (service.Service) — presumably the
// interface is satisfied either way; confirm against the server package.
func setup(t *testing.T) (context.Context, server.Server, func()) {
	ctx := log.Testing(t)
	r := bind.NewRegistry()
	ctx = bind.PutRegistry(ctx, r)
	m := replay.New(ctx)
	ctx = replay.PutManager(ctx, m)
	ctx = database.Put(ctx, database.NewInMemory(ctx))
	r.AddDevice(ctx, bind.Host(ctx))
	client, err, shutdown := startServerAndGetGrpcClient(ctx, cfg)
	assert.For(ctx, "err").ThatError(err).Succeeded()
	return ctx, client, shutdown
}

// text wraps a plain string into a stringtable text node.
func text(text string) *stringtable.Node {
	return &stringtable.Node{Node: &stringtable.Node_Text{Text: &stringtable.Text{Text: text}}}
}

var (
	// stringtables is the fixture string table served by the test server.
	stringtables = []*stringtable.StringTable{
		&stringtable.StringTable{
			Info: &stringtable.Info{
				CultureCode: "animals",
			},
			Entries: map[string]*stringtable.Node{
				"fish": text("glub"),
				"dog":  text("barks"),
				"cat":  text("meows"),
				"fox":  text("?"),
			},
		},
	}
	// cfg is the server configuration shared by all tests.
	cfg = server.Config{
		Info: &service.ServerInfo{
			Name:         "testbot2000",
			VersionMajor: 123,
			VersionMinor: 456,
			Features:     []string{"moo", "meow", "meh"},
		},
		StringTables: stringtables,
	}
	// testCaptureData and the command indices are populated once in init().
	testCaptureData []byte
	drawCmdIndex    uint64
	swapCmdIndex    uint64
)

// init builds a small GLES capture (textured square) used as the fixture
// for the import/replay tests, and marks device scanning as already done.
func init() {
	check := func(err error) {
		if err != nil {
			panic(err)
		}
	}
	ctx := context.Background()
	deviceScanDone, onDeviceScanDone := task.NewSignal()
	onDeviceScanDone(ctx)
	cfg.DeviceScanDone = deviceScanDone
	ctx = database.Put(ctx, database.NewInMemory(ctx))
	dev := host.Instance(ctx)
	b := snippets.NewBuilder(ctx, dev)
	b.CreateContext(128, 128, false, false)
	draw, swap := b.DrawTexturedSquare(ctx)
	buf := bytes.Buffer{}
	check(capture.Export(ctx, b.Capture(ctx, "test-capture"), &buf))
	testCaptureData, drawCmdIndex, swapCmdIndex = buf.Bytes(), uint64(draw), uint64(swap)
}

// TestGetServerInfo checks the configured server info round-trips intact.
func TestGetServerInfo(t *testing.T) {
	ctx, server, shutdown := setup(t)
	defer shutdown()
	got, err := server.GetServerInfo(ctx)
	assert.For(ctx, "err").ThatError(err).Succeeded()
	assert.For(ctx, "got").That(got).DeepEquals(cfg.Info)
}

// TestGetAvailableStringTables checks the fixture table is advertised.
func TestGetAvailableStringTables(t *testing.T) {
	ctx, server, shutdown := setup(t)
	defer shutdown()
	got, err := server.GetAvailableStringTables(ctx)
	assert.For(ctx, "err").ThatError(err).Succeeded()
	assert.For(ctx, "got").ThatSlice(got).DeepEquals([]*stringtable.Info{stringtables[0].Info})
}

// TestGetStringTable checks the full fixture table can be fetched.
func TestGetStringTable(t *testing.T) {
	ctx, server, shutdown := setup(t)
	defer shutdown()
	got, err := server.GetStringTable(ctx, stringtables[0].Info)
	assert.For(ctx, "err").ThatError(err).Succeeded()
	assert.For(ctx, "got").That(got).DeepEquals(stringtables[0])
}

// TestImportCapture checks the fixture capture imports successfully.
func TestImportCapture(t *testing.T) {
	ctx, server, shutdown := setup(t)
	defer shutdown()
	got, err := server.ImportCapture(ctx, "test-capture", testCaptureData)
	assert.For(ctx, "err").ThatError(err).Succeeded()
	assert.For(ctx, "got").That(got).IsNotNil()
}

// TestGetDevices checks at least the host device is registered.
func TestGetDevices(t *testing.T) {
	ctx, server, shutdown := setup(t)
	defer shutdown()
	got, err := server.GetDevices(ctx)
	assert.For(ctx, "err").ThatError(err).Succeeded()
	assert.For(ctx, "got").ThatSlice(got).IsNotEmpty()
}

// TestGetDevicesForReplay checks a replay device exists for the capture.
func TestGetDevicesForReplay(t *testing.T) {
	ctx, server, shutdown := setup(t)
	defer shutdown()
	capture, err := server.ImportCapture(ctx, "test-capture", testCaptureData)
	assert.For(ctx, "err").ThatError(err).Succeeded()
	assert.For(ctx, "capture").That(capture).IsNotNil()
	got, err := server.GetDevicesForReplay(ctx, capture)
	assert.For(ctx, "err").ThatError(err).Succeeded()
	assert.For(ctx, "got").ThatSlice(got).IsNotEmpty()
}

// TestGetFramebufferAttachment checks the color attachment after the swap
// command can be resolved on the first available device.
func TestGetFramebufferAttachment(t *testing.T) {
	ctx, server, shutdown := setup(t)
	defer shutdown()
	capture, err := server.ImportCapture(ctx, "test-capture", testCaptureData)
	assert.For(ctx, "err").ThatError(err).Succeeded()
	assert.For(ctx, "capture").That(capture).IsNotNil()
	devices, err := server.GetDevices(ctx)
	assert.For(ctx, "err").ThatError(err).Succeeded()
	assert.For(ctx, "devices").ThatSlice(devices).IsNotEmpty()
	after := capture.Command(swapCmdIndex)
	attachment := api.FramebufferAttachment_Color0
	settings := &service.RenderSettings{}
	renderSettings := &service.ReplaySettings{Device: devices[0]}
	got, err := server.GetFramebufferAttachment(ctx, renderSettings, after, attachment, settings, nil)
	assert.For(ctx, "err").ThatError(err).Succeeded()
	assert.For(ctx, "got").That(got).IsNotNil()
}

// TestGet resolves a table of paths and checks each result's dynamic type.
func TestGet(t *testing.T) {
	ctx, server, shutdown := setup(t)
	defer shutdown()
	capture, err := server.ImportCapture(ctx, "test-capture", testCaptureData)
	assert.For(ctx, "err").ThatError(err).Succeeded()
	assert.For(ctx, "capture").That(capture).IsNotNil()
	// T builds expected types; any is a sentinel meaning "skip type check".
	T, any := reflect.TypeOf, reflect.TypeOf(struct{}{})
	for _, test := range []struct {
		path path.Node
		ty   reflect.Type
	}{
		{capture, T((*service.Capture)(nil))},
		{capture.Contexts(), T((*service.Contexts)(nil))},
		{capture.Commands(), T((*service.Commands)(nil))},
		{capture.Command(swapCmdIndex), T((*api.Command)(nil))},
		// TODO: box.go doesn't currently support serializing structs this big.
		// See bug https://github.com/google/gapid/issues/1761
		// panic: reflect.nameFrom: name too long
		// {capture.Command(swapCmdIndex).StateAfter(), any},
		{capture.Command(swapCmdIndex).MemoryAfter(0, 0x1000, 0x1000), T((*service.Memory)(nil))},
		{capture.Command(drawCmdIndex).Mesh(nil), T((*api.Mesh)(nil))},
		{capture.CommandTree(nil), T((*service.CommandTree)(nil))},
		{capture.Report(nil, nil, false), T((*service.Report)(nil))},
		{capture.Resources(), T((*service.Resources)(nil))},
	} {
		ctx = log.V{"path": test.path}.Bind(ctx)
		got, err := server.Get(ctx, test.path.Path(), nil)
		assert.For(ctx, "err").ThatError(err).Succeeded()
		if test.ty.Kind() == reflect.Interface {
			assert.For(ctx, "got").That(got).Implements(test.ty)
		} else if test.ty != any {
			assert.For(ctx, "ty").That(reflect.TypeOf(got)).Equals(test.ty)
		}
	}
}

func TestSet(t *testing.T) {
	// TODO
}

func TestFollow(t *testing.T) {
	// TODO
}

// TestProfile checks that starting/stopping profiling produces non-empty
// pprof and trace output.
func TestProfile(t *testing.T) {
	ctx, server, shutdown := setup(t)
	defer shutdown()
	pprof := &bytes.Buffer{}
	trace := &bytes.Buffer{}
	stop, err := server.Profile(ctx, pprof, trace, 1)
	if assert.For(ctx, "Profile").ThatError(err).Succeeded() {
		time.Sleep(time.Second)
		err := stop()
		if assert.For(ctx, "stop").ThatError(err).Succeeded() {
			assert.For(ctx, "pprof").That(pprof.Len()).NotEquals(0)
			assert.For(ctx, "trace").That(trace.Len()).NotEquals(0)
		}
	}
}

// TestGetPerformanceCounters checks counters can be fetched.
func TestGetPerformanceCounters(t *testing.T) {
	ctx, server, shutdown := setup(t)
	defer shutdown()
	data, err := server.GetPerformanceCounters(ctx)
	assert.For(ctx, "err").ThatError(err).Succeeded()
	assert.For(ctx, "data").That(data).IsNotNil()
}
package logger import "go.uber.org/zap" // TODO create general interface with generic fields func Debug(msg ...interface{}) { logger, _ := zap.NewProduction() defer logger.Sync() // flushes buffer, if any sugar := logger.Sugar() sugar.Debug(msg...) } func Debugf(format string, args ...interface{}) { logger, _ := zap.NewProduction() defer logger.Sync() // flushes buffer, if any sugar := logger.Sugar() sugar.Debugf(format, args...) } func Info(msg ...interface{}) { logger, _ := zap.NewProduction() defer logger.Sync() // flushes buffer, if any sugar := logger.Sugar() sugar.Info(msg...) } func Infof(format string, args ...interface{}) { logger, _ := zap.NewProduction() defer logger.Sync() // flushes buffer, if any sugar := logger.Sugar() sugar.Infof(format, args...) } func Warn(msg ...interface{}) { logger, _ := zap.NewProduction() defer logger.Sync() // flushes buffer, if any sugar := logger.Sugar() sugar.Warn(msg...) } func Warnf(format string, args ...interface{}) { logger, _ := zap.NewProduction() defer logger.Sync() // flushes buffer, if any sugar := logger.Sugar() sugar.Warnf(format, args...) } func Error(msg ...interface{}) { logger, _ := zap.NewProduction() defer logger.Sync() // flushes buffer, if any sugar := logger.Sugar() sugar.Error(msg...) } func Errorf(format string, args ...interface{}) { logger, _ := zap.NewProduction() defer logger.Sync() // flushes buffer, if any sugar := logger.Sugar() sugar.Errorf(format, args...) }
//go:build integration
// +build integration

package k8s

import (
	"testing"
	"time"

	"github.com/libopenstorage/secrets"
	"github.com/libopenstorage/secrets/test"
	"github.com/pborman/uuid"
	"github.com/portworx/sched-ops/k8s/core"
	"github.com/stretchr/testify/assert"
	kubernetes "k8s.io/client-go/kubernetes/fake"
)

const (
	secretName = "openstorage-secret-test"
	secretId   = "mysql-username"
)

// TestAll wires a fake Kubernetes client into sched-ops, pre-creates the
// secret the suite operates on, and runs the shared secrets test suite.
func TestAll(t *testing.T) {
	fakeKubeClient := kubernetes.NewSimpleClientset()
	core.SetInstance(core.New(fakeKubeClient))
	ks, err := NewK8sSecretTest(nil)
	if err != nil {
		t.Fatalf("Unable to create a Kubernetes Secret instance: %v", err)
		return
	}
	// The secret needs to be created before running the tests
	data := make(map[string][]byte)
	data[secretId] = []byte("passphrase")
	_, err = core.Instance().UpdateSecretData(secretName, "default", data)
	if err != nil {
		t.Fatalf("Failed to get secret for test: %v", err)
		return
	}
	test.Run(ks, t)
}

// k8sSecretTest adapts the k8s secrets backend to the shared test suite,
// carrying the passphrase and version written by TestPutSecret so that
// TestGetSecret can verify them.
type k8sSecretTest struct {
	s             secrets.Secrets
	passphrase    string
	secretVersion secrets.Version
}

// NewK8sSecretTest builds the suite adapter around a new k8s secrets backend.
func NewK8sSecretTest(secretConfig map[string]interface{}) (test.SecretTest, error) {
	s, err := New(secretConfig)
	if err != nil {
		return nil, err
	}
	return &k8sSecretTest{s, "", secrets.NoVersion}, nil
}

// TestPutSecret verifies PutSecret fails without a namespace and succeeds
// when the namespace key context is supplied.
func (k *k8sSecretTest) TestPutSecret(t *testing.T) error {
	secretData := make(map[string]interface{})
	k.passphrase = uuid.New()
	secretData[secretId] = k.passphrase
	// PutSecret with non-nil secretData and no namespace should fail
	_, err := k.s.PutSecret(secretName, secretData, nil)
	assert.Error(t, err, "Expected an error on PutSecret")
	keyContext := make(map[string]string)
	keyContext[SecretNamespace] = "default"
	// PutSecret with already existing secretId
	secretVersion, err := k.s.PutSecret(secretName, secretData, keyContext)
	assert.NoError(t, err, "Unexpected error on PutSecret")
	k.secretVersion = secretVersion
	return nil
}

// TestGetSecret verifies GetSecret requires a namespace and returns the
// passphrase and version stored by TestPutSecret.
func (k *k8sSecretTest) TestGetSecret(t *testing.T) error {
	secretData, secretVersion, err := k.s.GetSecret(secretName, nil)
	assert.Error(t, err, "Expected an error when no namespace is provided")
	assert.Nil(t, secretData, "Expected empty secret data")
	assert.Equal(t, secrets.NoVersion, secretVersion, "Unexpected secret version")
	keyContext := make(map[string]string)
	keyContext[SecretNamespace] = "default"
	plainText1, secretVersion, err := k.s.GetSecret(secretName, keyContext)
	assert.NoError(t, err, "Expected no error on GetSecret")
	// We have got secretData
	assert.NotNil(t, plainText1, "Invalid plainText was returned")
	v, ok := plainText1[secretId]
	assert.True(t, ok, "Unexpected plainText")
	str, ok := v.(string)
	assert.True(t, ok, "Unexpected plainText")
	assert.Equal(t, k.passphrase, str, "Unexpected passphrase")
	assert.Equal(t, k.secretVersion, secretVersion, "Unexpected secret version")
	return nil
}

// TestDeleteSecret verifies DeleteSecret requires a namespace, deletes the
// secret, and that a subsequent GetSecret fails.
func (k *k8sSecretTest) TestDeleteSecret(t *testing.T) error {
	err := k.s.DeleteSecret(secretName, nil)
	assert.Error(t, err, "Expected an error when no namespace is provided")
	keyContext := make(map[string]string)
	keyContext[SecretNamespace] = "default"
	err = k.s.DeleteSecret(secretName, keyContext)
	assert.NoError(t, err, "Unexpected an error on Delete")
	// Get of a deleted secret should fail. Sleeping for the delete to finish
	time.Sleep(2 * time.Second)
	_, _, err = k.s.GetSecret(secretName, keyContext)
	assert.Error(t, err, "Expected error on GetSecret")
	return nil
}

// TestListSecrets verifies ListSecrets reports not-supported for k8s.
// NOTE(review): assert.Error's second argument is the error under test, but
// here secrets.ErrNotSupported is passed with err.Error() as the message —
// this only checks that ErrNotSupported is non-nil; confirm the intended
// assertion (likely assert.EqualError(t, err, ...)).
func (k *k8sSecretTest) TestListSecrets(t *testing.T) error {
	ids, err := k.s.ListSecrets()
	assert.Error(t, secrets.ErrNotSupported, err.Error(), "ListSecrets is not supported for k8s secrets")
	assert.Nil(t, ids, "Ids is expected to be nil")
	return nil
}
/*
Copyright 2016 Tamás Gulácsi

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package kitloghlp contains some helpers for go-kit/kit/log.
package kitloghlp

import (
	"fmt"
	"io"

	"github.com/go-kit/kit/log"
)

// LogFunc is the Log function.
type LogFunc func(...interface{}) error

// New returns a log.Context, using Logfmt logger on w.
func New(w io.Writer) *log.Context {
	return NewContext(log.NewLogfmtLogger(w))
}

// NewContext wraps the given logger with Stringify, and adds a default ts timestamp.
func NewContext(logger log.Logger) *log.Context {
	return log.NewContext(Stringify{logger}).With("ts", log.DefaultTimestamp)
}

// With appends the given plus keyvals to the LogFunc.
// Note: plus is appended AFTER the per-call keyvals.
func With(oLog func(keyvals ...interface{}) error, plus ...interface{}) LogFunc {
	return LogFunc(func(keyvals ...interface{}) error {
		return oLog(append(keyvals, plus...)...)
	})
}

// NewTestLogger returns a Context wrapping a testing.TB.Log.
func NewTestLogger(t testLogger) *log.Context {
	return log.NewContext(
		Stringify{log.NewLogfmtLogger(testLog{t})},
	).With(
		// Caller(4) skips the go-kit wrapping frames so the test file/line
		// is reported.
		"file", log.Caller(4),
	)
}

// testLogger is the minimal subset of testing.TB used here.
type testLogger interface {
	Log(args ...interface{})
}

// testLog adapts a testLogger into an io.Writer for the logfmt logger.
type testLog struct {
	testLogger
}

// Write forwards the formatted log line to the wrapped test logger.
func (t testLog) Write(p []byte) (int, error) {
	t.Log(string(p))
	return len(p), nil
}

// Stringify stringifies every value to make it printable by logfmt.
//
// Example:
//	Logger := log.LogfmtLogger(os.Stderr)
//	Logger = log.Stringify{Logger}
type Stringify struct {
	log.Logger
}

// Log with stringifying every value.
// Only the values (odd indices) are wrapped; keys are left as-is, and so
// are strings, Stringers, Formatters and errors, which logfmt already
// knows how to render.
func (l Stringify) Log(keyvals ...interface{}) error {
	for i := 1; i < len(keyvals); i += 2 {
		switch keyvals[i].(type) {
		case string, fmt.Stringer, fmt.Formatter:
		case error:
		default:
			keyvals[i] = StringWrap{Value: keyvals[i]}
		}
	}
	return l.Logger.Log(keyvals...)
}

// Compile-time check that StringWrap satisfies fmt.Stringer.
var _ = fmt.Stringer(StringWrap{})

// StringWrap wraps the Value as a fmt.Stringer.
type StringWrap struct {
	Value interface{}
}

// String returns a string representation (%v) of the underlying Value.
func (sw StringWrap) String() string {
	return fmt.Sprintf("%v", sw.Value)
}
package app import "time" // Ad falksdjfs type Ad struct { ID int `json:"id"` CreateAt time.Time `json:"created_at"` Subject string `json:"subject"` Body string `json:"body"` Price *float64 `json:"price,omitempty"` Email string `json:"email"` }
package shared import ( "go.uber.org/zap" ) // loggingInitialized is a flag to keep track of whether logging has been set up var loggingInitialized = false // Logger is the global logger to use var Logger zap.SugaredLogger // InitLogging initializes the logger func InitLogging() { if loggingInitialized == true { return } logger, _ := zap.NewProduction() defer logger.Sync() sugar := logger.Sugar() Logger = *sugar loggingInitialized = true }
package runtime

import (
	"net/http"
	"reflect"
	"regexp"
	"testing"
)

// TestNewRoute is a table-driven test of NewRoute: it checks pattern
// validation (error cases) and, for valid patterns, that the compiled
// matcher regexp and the handler are the expected ones.
func TestNewRoute(t *testing.T) {
	noopHandlerFn := http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) {})
	type args struct {
		pattern string
		handler http.Handler
	}
	tests := []struct {
		name    string
		args    args
		want    *Route
		wantErr bool
	}{
		{"empty pattern", args{"", nil}, nil, true},
		{"missing root slash", args{"path", nil}, nil, true},
		{"missing handler", args{"/", nil}, nil, true},
		{"path: /", args{"/", noopHandlerFn}, &Route{pattern: "/", matcher: regexp.MustCompile("^/$"), handler: noopHandlerFn}, false},
		{"path: /sub/", args{"/sub/", noopHandlerFn}, &Route{pattern: "/sub/", matcher: regexp.MustCompile("^/sub/$"), handler: noopHandlerFn}, false},
		{"path: /**", args{"/**", noopHandlerFn}, &Route{pattern: "/**", matcher: regexp.MustCompile("^($|/(.*))"), handler: noopHandlerFn}, false},
		// NOTE(review): this case uses the same input as "path: /sub/**"
		// below; the name suggests the pattern "/sub" was intended — confirm.
		{"path: /sub", args{"/sub/**", noopHandlerFn}, &Route{pattern: "/sub/**", matcher: regexp.MustCompile("^/sub($|/(.*))"), handler: noopHandlerFn}, false},
		{"path: /sub/**", args{"/sub/**", noopHandlerFn}, &Route{pattern: "/sub/**", matcher: regexp.MustCompile("^/sub($|/(.*))"), handler: noopHandlerFn}, false},
		{"path: /sub/**/foo/", args{"/sub/**/foo/", noopHandlerFn}, nil, true},
		{"path: /sub/**/foo/**", args{"/sub/**/foo/**", noopHandlerFn}, nil, true},
	}
	for i, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := NewRoute(tt.args.pattern, tt.args.handler)
			if (err != nil) != tt.wantErr {
				t.Errorf("%d: NewRoute() error = %v, wantErr %v", i, err, tt.wantErr)
				return
			} else if tt.wantErr && err != nil {
				// Error cases: the returned route must match the (nil) want.
				if !reflect.DeepEqual(got, tt.want) {
					t.Errorf("%d: NewRoute() got = %v, want %v", i, got, tt.want)
				}
				return
			}
			// Handlers are funcs: compare identity via function pointers,
			// since DeepEqual does not work on funcs.
			if reflect.ValueOf(got.handler).Pointer() != reflect.ValueOf(tt.want.handler).Pointer() {
				t.Errorf("%d: NewRoute() got = %v, want %v", i, got.handler, tt.want.handler)
			}
			// Compare the compiled matchers by their source text.
			if got.matcher.String() != tt.want.matcher.String() {
				t.Errorf("%d: NewRoute() got = %v, want %v", i, got.matcher.String(), tt.want.matcher.String())
			}
		})
	}
}
// Copyright (c) 2016-2019 Uber Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package blobserver import ( "net/http" "os" "strconv" "strings" "github.com/uber/kraken/core" "github.com/uber/kraken/lib/store" "github.com/uber/kraken/utils/handler" ) // parseContentRange parses start / end integers from a Content-Range header. func parseContentRange(h http.Header) (start, end int64, err error) { contentRange := h.Get("Content-Range") if len(contentRange) == 0 { return 0, 0, handler.Errorf("no Content-Range header").Status(http.StatusBadRequest) } parts := strings.Split(contentRange, "-") if len(parts) != 2 { return 0, 0, handler.Errorf( "cannot parse Content-Range header %q: expected format \"start-end\"", contentRange). Status(http.StatusBadRequest) } start, err = strconv.ParseInt(parts[0], 10, 64) if err != nil { return 0, 0, handler.Errorf( "cannot parse start of range in Content-Range header %q: %s", contentRange, err). Status(http.StatusBadRequest) } end, err = strconv.ParseInt(parts[1], 10, 64) if err != nil { return 0, 0, handler.Errorf( "cannot parse end of range in Content-Range header %q: %s", contentRange, err). Status(http.StatusBadRequest) } // Note, no need to check for negative because the "-" would cause the // Split check to fail. return start, end, nil } // blobExists returns true if cas has a cached blob for d. 
func blobExists(cas *store.CAStore, d core.Digest) (bool, error) { if _, err := cas.GetCacheFileStat(d.Hex()); err != nil { if os.IsNotExist(err) { return false, nil } return false, handler.Errorf("cache file stat: %s", err) } return true, nil } func setUploadLocation(w http.ResponseWriter, uid string) { w.Header().Set("Location", uid) } func setContentLength(w http.ResponseWriter, n int) { w.Header().Set("Content-Length", strconv.Itoa(n)) } func setOctetStreamContentType(w http.ResponseWriter) { w.Header().Set("Content-Type", "application/octet-stream-v1") }
package main // // Notes: // // The life of the router is to pull its dataMsg channel, and feed it it // to one of a number of worker queues. // // The life of a worker is to pull messages from it's queue, and for each, // build metrics. // // A worker handles produceMetrics callbacks as follows: // On build metric; // we simply accumulate metricsAtoms // On flushMetric, we add take the tags, and the accumulated metrics, and // build a point. // On return from produceMetrics we write the batch (and dump to file if // logging diagnostics). // // Protagonists: // metricsInfluxOutputHandler // dataChanRouter (one router feeding multiple workers) // metricsInfluxOutputWorker (multiple) // import ( "bufio" "fmt" "github.com/influxdata/influxdb/client/v2" log "github.com/sirupsen/logrus" "os" "runtime" "time" ) const ( // Timeout waiting to enqueue message. If queues aren't drained in // this time (coupled with buf channel to absorb transients), then // we're toast. METRICS_INFLUX_TIMEOUT_ENQUEUE_SECONDS = 2 METRICS_INFLUX_WAIT_TO_RECONNECT_SECONDS = 2 // // An estimate of fields per point allows us to setup // slices with a capacity which minimise reallocation, // without over allocating to omuch METRICS_INFLUX_FIELDS_PER_POINT_ESTIMATE = 32 ) type metricsInfluxOutputHandler struct { influxServer string database string consistency string retention string standalone bool auth userPasswordCollector // // Workers are fed through a channel (of dataChannelDepth) // by the dataMsgRouter. lastWorker is used to track the // last worker used. workers int router *dataMsgRouter lastWorker int dataChannelDepth int // // metricsfilename allow for diagnostic dump of metrics as // exported to InfluxDB metricsfilename string // // Logging context, built once, and reused. logctx *log.Entry } // // metricsInfluxOutputWorker handles (sub)set of events and translates // them into measurement POSTs to Influxdb, working completely // autonomously from any other workers if present. 
// metricsInfluxOutputWorker consumes dataMsgs from its dedicated channel
// and writes the resulting batch points to influx (or a dump file).
type metricsInfluxOutputWorker struct {
	influxServer        string
	wkid                int
	influxOutputHandler *metricsInfluxOutputHandler
	dataChan            chan dataMsg
	metricsfilename     string
	logctx              *log.Entry
}

// metricsInfluxOutputContext accumulates field atoms for the current
// point and the batch the point will be added to.
type metricsInfluxOutputContext struct {
	name   string
	fields []*metricsAtom
	bp     client.BatchPoints
}

// worker is the long-running goroutine body for one influx worker: it
// (re)connects to influx, drains dataChan, converts each message into a
// batch of points, and writes the batch. On write/batch errors it tears
// the connection down, sleeps, and reconnects.
func (w *metricsInfluxOutputWorker) worker(m *metricsOutputModule) {
	var metricsfile *os.File
	var error_tag string
	var dump *bufio.Writer
	var influxClient client.Client
	var metadata *dataMsgMetaData
	var err error
	var bp client.BatchPoints

	w.logctx.Debug("Run worker")
	defer m.shutdownSyncPoint.Done()

	if w.metricsfilename != "" {
		// Diagnostics requested: dump every exported point to file too.
		metricsfile, dump = metricsSetupDumpfile(
			w.metricsfilename, w.logctx)
		if metricsfile != nil {
			defer metricsfile.Close()
		}
	}

	outputHandler := w.influxOutputHandler

	// Add the client batch configuration once, and reuse it
	// for every batch we add.
	batchCfg := client.BatchPointsConfig{
		Database:         outputHandler.database,
		Precision:        "ms",
		RetentionPolicy:  outputHandler.retention,
		WriteConsistency: outputHandler.consistency,
	}

	for {
		// Any failure, other than explicitly closed channels
		// indicating we're shutting down, causes us to go back to go,
		// collect £200 and try again.
		//
		// Add tls config here.
		if !outputHandler.standalone {
			var user, passw string
			user, passw, err = outputHandler.auth.getUP()
			if err == nil {
				influxClient, err = client.NewHTTPClient(client.HTTPConfig{
					Addr:     w.influxServer,
					Username: user,
					Password: passw,
				})
			}
		}

		if err != nil {
			// Wait, and come round again.
			w.logctx.WithError(err).Error("connect to influx node (will retry)")
			time.Sleep(
				time.Duration(METRICS_INFLUX_WAIT_TO_RECONNECT_SECONDS) * time.Second)
			continue
		}

		if outputHandler.standalone {
			w.logctx.Debug("Running in standalone mode (dumping points to file)")
		} else {
			w.logctx.Debug("Connected to influx node")
		}

		// Ok, we connected. Lets get into the main loop.
		for {
			msg, ok := <-w.dataChan
			if !ok {
				// Channel closed: shutdown path. Close the client and exit.
				w.logctx.Debug("Closed worker")
				if !outputHandler.standalone {
					influxClient.Close()
				}
				return
			}

			bp, err = client.NewBatchPoints(batchCfg)
			if err != nil {
				// Break out of the loop and start worker over. We
				// failed to get a new batch.
				error_tag = "failed to create batch point"
				break
			}

			metadata = msg.getMetaData()
			context := &metricsInfluxOutputContext{
				name:   metadata.Path,
				fields: nil,
				bp:     bp,
			}
			// produceMetrics drives buildMetric/flushMetric callbacks which
			// populate the batch via context.
			err = msg.produceMetrics(&m.inputSpec, m.outputHandler, context)
			if err != nil {
				metricsStatMonitor.basePathMetricsError.WithLabelValues(
					m.name, metadata.Identifier, metadata.Path,
					"produce failed").Inc()
				continue
			}

			// If no metrics produced - perfectly valid, continue.
			pts := bp.Points()
			if len(pts) == 0 {
				continue
			}

			// Dump metrics if doing diagnostics. Costly, of course.
			if dump != nil {
				dump.WriteString(fmt.Sprintf(
					"Server: [%s], wkid %d, writing %d points in db: %s\n"+
						"(prec: [%s], consistency: [%s], retention: [%s])\n",
					w.influxServer, w.wkid, len(pts), bp.Database(),
					bp.Precision(), bp.WriteConsistency(), bp.RetentionPolicy()))
				for _, pt := range pts {
					//
					// Start with a simple dump. Might need to extend a little.
					dump.WriteString(fmt.Sprintf("\t%s\n", pt.String()))
				}
			}

			if !outputHandler.standalone {
				err = influxClient.Write(context.bp)
			}
			if err != nil {
				error_tag = "failed to write batch point"
				break
			}
		}

		//
		// We would be here on error.
		metricsStatMonitor.basePathMetricsError.WithLabelValues(
			m.name, metadata.Identifier, metadata.Path, error_tag).Inc()

		//
		// Close existing client.
		if !outputHandler.standalone {
			influxClient.Close()
		}

		//
		// It might be too noisy to log error here. We may need to
		// consider dampening and relying on exported metric.
		w.logctx.WithError(err).WithFields(log.Fields{
			"error_tag": error_tag}).Error(
			"exit loop handling messages, will reconnect")

		time.Sleep(
			time.Duration(METRICS_INFLUX_WAIT_TO_RECONNECT_SECONDS) * time.Second)
	}
}

// setupWorkers creates the per-worker channels and goroutines and starts
// the router that distributes incoming dataMsgs round-robin across them.
func (o *metricsInfluxOutputHandler) setupWorkers(m *metricsOutputModule) {
	//
	// Setup as many workers with their own context as necessary. We
	// also route to the various workers using a dedicated
	// router. This will be generalised.
	//
	// Assuming we are picking up off the pub/sub bus, we could be
	// splitting the load by kafka partition already, and this
	// might be one instance of a group of pipelines operating as
	// a consumer group.
	//
	var dumpfilename string

	o.logctx.Info("Setting up workers")
	o.router = &dataMsgRouter{
		dataChanIn:   m.dataChan,
		shutdownChan: m.shutdownChan,
		dataChansOut: make([]chan dataMsg, o.workers),
		logctx:       o.logctx,
		route: func(msg dataMsg, attempts int) int {
			//
			// We start with simple round robin algorithm.
			o.lastWorker++
			o.lastWorker %= o.workers
			return o.lastWorker
		},
		handleCongested: func(
			msg dataMsg, attempts int, worker int) dataMsgRouterCongestionAction {
			metadata := msg.getMetaData()
			// Reroute to another worker until every worker was tried once.
			if attempts < o.workers {
				metricsStatMonitor.basePathMetricsError.WithLabelValues(
					m.name, metadata.Identifier, metadata.Path,
					"congested worker (rerouted)").Inc()
				return DATAMSG_ROUTER_REROUTE
			}
			//
			// Give up and drop.
			metricsStatMonitor.basePathMetricsError.WithLabelValues(
				m.name, metadata.Identifier, metadata.Path,
				"congested worker (dropped)").Inc()
			return DATAMSG_ROUTER_DROP
		},
		// We do not really use the timeout. Behaviour is currently to
		// hunt for worker which can take message or drop.
		timeout: time.Duration(METRICS_INFLUX_TIMEOUT_ENQUEUE_SECONDS) * time.Second,
	}

	//
	// Inherit channel depth for workers too.
	o.dataChannelDepth = m.dataChannelDepth
	for i := 0; i < o.workers; i++ {
		o.router.dataChansOut[i] = make(chan dataMsg, o.dataChannelDepth)
		m.shutdownSyncPoint.Add(1)
		if o.metricsfilename != "" {
			// One dump file per worker to avoid interleaved writes.
			dumpfilename = fmt.Sprintf("%s_wkid%d", o.metricsfilename, i)
		} else {
			dumpfilename = ""
		}
		w := &metricsInfluxOutputWorker{
			influxServer:        o.influxServer,
			wkid:                i,
			influxOutputHandler: o,
			dataChan:            o.router.dataChansOut[i],
			metricsfilename:     dumpfilename,
			logctx:              o.logctx.WithFields(log.Fields{"wkid": i}),
		}
		go w.worker(m)
	}

	//
	// Kick off router to start collecting and routing messages to
	// workers.
	go o.router.run()
}

//
// Simply collect the metric atoms at this stage for Influx. We use
// the flush to assemble a new point, and clear the list.
func (o *metricsInfluxOutputHandler) buildMetric(
	tags []metricsAtom, sensor metricsAtom, ts uint64,
	context metricsOutputContext) {
	c := context.(*metricsInfluxOutputContext)
	if c.fields == nil {
		c.fields = make([]*metricsAtom, 0,
			METRICS_INFLUX_FIELDS_PER_POINT_ESTIMATE)
	}

	//
	// Rewrite accursed uint64:
	//
	// fmt.Printf(" uint64 [%v] -> float64 [%v]\n",
	//	uint64(math.MaxUint64),
	//	float64(math.MaxUint64))
	//
	// uint64 [18446744073709551615] -> float64 [1.8446744073709552e+19]
	//
	switch sensor.val.(type) {
	case uint64:
		sensor.val = float64(sensor.val.(uint64))
	default:
		// nothing to do!
	}

	c.fields = append(c.fields, &sensor)
}

// flushMetric assembles one influx point from the accumulated fields and
// the supplied tag atoms, adds it to the batch, and resets the field list.
func (o *metricsInfluxOutputHandler) flushMetric(
	tag_atoms []metricsAtom, ts uint64, context metricsOutputContext) {
	c := context.(*metricsInfluxOutputContext)

	if c.fields != nil {
		fields := map[string]interface{}{}
		for _, field_atom := range c.fields {
			fields[field_atom.key] = field_atom.val
		}
		tags := map[string]string{}
		for _, tag_atom := range tag_atoms {
			tags[tag_atom.key] = fmt.Sprintf("%v", tag_atom.val)
		}
		// ts is milliseconds; convert to nanoseconds for time.Unix.
		pt, err := client.NewPoint(c.name, tags, fields,
			time.Unix(0, int64(ts)*1000*1000))
		if err == nil {
			c.bp.AddPoint(pt)
		} else {
			//
			// Unexpected failure.
			o.logctx.WithFields(
				log.Fields{"base_path": c.name}).WithError(err).Error(
				"adding point to batch")
		}

		//
		// Finish by clearing the c.fields.
		c.fields = nil
	}
}

// adaptSensorName passes sensor names through unchanged for influx.
func (o *metricsInfluxOutputHandler) adaptSensorName(name string) string {
	return name
}

// adaptTagName passes tag names through unchanged for influx.
func (o *metricsInfluxOutputHandler) adaptTagName(name string) string {
	return name
}

//
// Process the configuration to extract whatever is needed.
func metricsInfluxNew(name string, nc nodeConfig) (metricsOutputHandler, error) {
	var err error
	var authCollect userPasswordCollector
	var standalone bool

	logctx := logger.WithFields(log.Fields{
		"name":       name,
		"xport_type": "influx",
	})

	// If not set, will default to false.
	metricsfilename, _ := nc.config.GetString(name, "dump")
	influxServer, _ := nc.config.GetString(name, "influx")
	if influxServer == "" {
		if metricsfilename == "" {
			err = fmt.Errorf(
				"attribute 'influx' required for influx metric export. " +
					"Specify URL of the form " +
					"http://[ipv6-host%%zone]:port or " +
					"http://influx.example.com:port. " +
					"Alternatively specify 'dump' option to just log points.")
			logctx.WithError(err).Error("insufficient configuration")
			return nil, err
		} else {
			// No server but a dump file: run standalone (file-only).
			standalone = true
		}
	} else {
		logctx = logctx.WithFields(log.Fields{"influx": influxServer})
	}

	//
	// TODO: Add TLS support by pulling in TLS config at this point.
	database, err := nc.config.GetString(name, "database")
	if err != nil {
		logctx.WithError(err).Error(
			"attribute 'database' required for influx metric export. " +
				" For a database created with 'CREATE DATABASE <name>', " +
				"this setting would be 'database=<name>'")
		return nil, err
	}
	logctx = logctx.WithFields(log.Fields{"database": database})

	// Handle user/password.
	if !standalone {
		authCollect = influxUPCollectorFactory()
		err = authCollect.handleConfig(nc, name, influxServer)
		if err != nil {
			logctx.WithError(err).Error(
				"failed to collect credentials required for influx")
			return nil, err
		}
	}

	//
	// One could imagine templatising these in exactly the same way as
	// we templatise deriving topic in kafka. Future.
	consistency, _ := nc.config.GetString(name, "consistency")
	retention, _ := nc.config.GetString(name, "retention")

	workers, _ := nc.config.GetInt(name, "workers")
	if workers == 0 {
		workers = 1
	} else if workers > runtime.GOMAXPROCS(-1) {
		//
		// Excessive number of workers... cut back.
		workers = runtime.GOMAXPROCS(-1)
	}
	logctx = logctx.WithFields(log.Fields{"workers": workers})

	//
	// Populate the influx output context.
	out := &metricsInfluxOutputHandler{
		influxServer:    influxServer,
		auth:            authCollect,
		database:        database,
		consistency:     consistency,
		retention:       retention,
		workers:         workers,
		standalone:      standalone,
		logctx:          logctx,
		metricsfilename: metricsfilename,
	}

	return out, nil
}

var influxUPCollectorFactory userPasswordCollectorFactory

//
// We use init to setup the user/password collection function. This
// can be overwritten by test.
func init() {
	influxUPCollectorFactory = func() userPasswordCollector {
		return &cryptUserPasswordCollector{}
	}
}
//go:build (!memtrace && !js && !plan9 && !windows && !armbe && !arm64be && !ppc && !ppc64 && !mips && !mips64 && !s390x)
// +build !memtrace,!js,!plan9,!windows,!armbe,!arm64be,!ppc,!ppc64,!mips,!mips64,!s390x

package exec

import (
	"fmt"
	"reflect"
	"runtime/debug"
	"syscall"
	"unsafe"
)

// ErrLimitExceeded is returned when growing a memory would exceed its
// declared maximum size.
var ErrLimitExceeded = fmt.Errorf("memory limit exceeded")

// Memory is a WASM linear memory.
type Memory struct {
	// min, max are the memory limits in 64KiB WASM pages.
	min, max uint32
	// start is the base address of the reserved mapping; size is the
	// currently committed (readable/writable) length in bytes.
	start uintptr
	size  uintptr
}

//go:linkname mmap runtime.mmap
func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (unsafe.Pointer, int)

// NewMemory creates a new linear memory with the given limits.
func NewMemory(min, max uint32) Memory {
	// Out-of-bounds accesses hit PROT_NONE pages; convert the resulting
	// fault into a runtime panic rather than a crash.
	debug.SetPanicOnFault(true)

	m := Memory{
		min: min,
		max: max,
	}
	if max > 0 {
		// Reserve twice the maximum allocation (8Gb). This allows us to safely use 64-bit addresses
		// and unmapped pages for bounds checks.
		pages, err := mmap(nil, 1<<33, syscall.PROT_NONE, syscall.MAP_ANON|syscall.MAP_PRIVATE, 0, 0)
		if err != 0 {
			panic(syscall.Errno(uintptr(err)))
		}
		m.start = uintptr(pages)
		if err := m.grow(min); err != nil {
			panic(err)
		}
	}
	return m
}

// grow commits the reserved mapping up to the given total page count by
// remapping the tail read/write in place (MAP_FIXED over the reservation).
// NOTE(review): pages below the current size would make delta wrap
// (uintptr is unsigned) — presumably callers only ever grow; confirm.
func (m *Memory) grow(pages uint32) error {
	end := m.start + m.size
	size := uintptr(pages) * 65536
	delta := size - m.size
	if delta == 0 {
		return nil
	}
	_, err := mmap(unsafe.Pointer(end), delta, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANON|syscall.MAP_PRIVATE|syscall.MAP_FIXED, 0, 0)
	if err != 0 {
		return syscall.Errno(uintptr(err))
	}
	m.size = uintptr(pages) * 65536
	return nil
}

// Limits returns the minimum and maximum size of the memory in pages.
func (m *Memory) Limits() (min, max uint32) {
	return m.min, m.max
}

// Size returns the current size of the memory in pages.
func (m *Memory) Size() uint32 {
	return uint32(m.size / 65536)
}

// Grow grows the memory by the given number of pages. It returns the old size of the memory in pages and an error if
// growing the memory by the requested amount would exceed the memory's maximum size.
func (m *Memory) Grow(pages uint32) (uint32, error) {
	currentSize := m.Size()
	newSize := currentSize + pages
	// Reject growth beyond the declared maximum or the WASM hard cap of
	// 65536 pages (4GiB).
	if newSize > m.max || newSize > 65536 {
		return currentSize, ErrLimitExceeded
	}
	// grow takes the new total page count, not a delta.
	return currentSize, m.grow(newSize)
}

// Bytes returns the memory's bytes.
func (m *Memory) Bytes() []byte {
	// Build a slice aliasing the mapped region directly; no copy is made.
	var s []byte
	header := (*reflect.SliceHeader)(unsafe.Pointer(&s))
	header.Data = m.start
	header.Len = int(m.size)
	header.Cap = int(m.size)
	return s
}

// Start returns the base address of the memory, or 0 for a nil Memory.
func (m *Memory) Start() uintptr {
	if m == nil {
		return 0
	}
	return m.start
}

// Byte returns the byte stored at the given effective address.
func (m *Memory) Byte(base, offset uint32) byte {
	p := (*byte)(unsafe.Pointer(m.start + uintptr(base) + uintptr(offset)))
	return *p
}

// Uint8 returns the byte stored at the given effective address.
func (m *Memory) Uint8(base, offset uint32) byte {
	p := (*byte)(unsafe.Pointer(m.start + uintptr(base) + uintptr(offset)))
	return *p
}

// PutByte writes the given byte to the given effective address.
func (m *Memory) PutByte(v byte, base, offset uint32) {
	p := (*byte)(unsafe.Pointer(m.start + uintptr(base) + uintptr(offset)))
	*p = v
}

// PutUint8 writes the given byte to the given effective address.
func (m *Memory) PutUint8(v byte, base, offset uint32) {
	p := (*byte)(unsafe.Pointer(m.start + uintptr(base) + uintptr(offset)))
	*p = v
}

// Uint16 returns the uint16 stored at the given effective address.
func (m *Memory) Uint16(base, offset uint32) uint16 {
	p := (*uint16)(unsafe.Pointer(m.start + uintptr(base) + uintptr(offset)))
	return *p
}

// PutUint16 writes the given uint16 to the given effective address.
func (m *Memory) PutUint16(v uint16, base, offset uint32) {
	p := (*uint16)(unsafe.Pointer(m.start + uintptr(base) + uintptr(offset)))
	*p = v
}

// Uint32 returns the uint32 stored at the given effective address.
func (m *Memory) Uint32(base, offset uint32) uint32 {
	p := (*uint32)(unsafe.Pointer(m.start + uintptr(base) + uintptr(offset)))
	return *p
}

// PutUint32 writes the given uint32 to the given effective address.
func (m *Memory) PutUint32(v uint32, base, offset uint32) {
	p := (*uint32)(unsafe.Pointer(m.start + uintptr(base) + uintptr(offset)))
	*p = v
}

// Uint64 returns the uint64 stored at the given effective address.
func (m *Memory) Uint64(base, offset uint32) uint64 {
	p := (*uint64)(unsafe.Pointer(m.start + uintptr(base) + uintptr(offset)))
	return *p
}

// PutUint64 writes the given uint64 to the given effective address.
func (m *Memory) PutUint64(v uint64, base, offset uint32) {
	p := (*uint64)(unsafe.Pointer(m.start + uintptr(base) + uintptr(offset)))
	*p = v
}

// Float32 returns the float32 stored at the given effective address.
func (m *Memory) Float32(base, offset uint32) float32 {
	p := (*float32)(unsafe.Pointer(m.start + uintptr(base) + uintptr(offset)))
	return *p
}

// PutFloat32 writes the given float32 to the given effective address.
func (m *Memory) PutFloat32(v float32, base, offset uint32) {
	p := (*float32)(unsafe.Pointer(m.start + uintptr(base) + uintptr(offset)))
	*p = v
}

// Float64 returns the float64 stored at the given effective address.
func (m *Memory) Float64(base, offset uint32) float64 {
	p := (*float64)(unsafe.Pointer(m.start + uintptr(base) + uintptr(offset)))
	return *p
}

// PutFloat64 writes the given float64 to the given effective address.
func (m *Memory) PutFloat64(v float64, base, offset uint32) {
	p := (*float64)(unsafe.Pointer(m.start + uintptr(base) + uintptr(offset)))
	*p = v
}

// ByteAt returns the byte stored at the given offset.
func (m *Memory) ByteAt(offset uint32) byte {
	p := (*byte)(unsafe.Pointer(m.start + uintptr(offset)))
	return *p
}

// PutByteAt writes the given byte to the given offset.
func (m *Memory) PutByteAt(v byte, offset uint32) {
	p := (*byte)(unsafe.Pointer(m.start + uintptr(offset)))
	*p = v
}

// Uint8At returns the byte stored at the given offset.
func (m *Memory) Uint8At(offset uint32) byte {
	p := (*byte)(unsafe.Pointer(m.start + uintptr(offset)))
	return *p
}

// PutUint8At writes the given byte to the given offset.
func (m *Memory) PutUint8At(v byte, offset uint32) {
	p := (*byte)(unsafe.Pointer(m.start + uintptr(offset)))
	*p = v
}

// Uint16At returns the uint16 stored at the given offset.
func (m *Memory) Uint16At(offset uint32) uint16 {
	p := (*uint16)(unsafe.Pointer(m.start + uintptr(offset)))
	return *p
}

// PutUint16At writes the given uint16 to the given offset.
func (m *Memory) PutUint16At(v uint16, offset uint32) {
	p := (*uint16)(unsafe.Pointer(m.start + uintptr(offset)))
	*p = v
}

// Uint32At returns the uint32 stored at the given offset.
func (m *Memory) Uint32At(offset uint32) uint32 {
	p := (*uint32)(unsafe.Pointer(m.start + uintptr(offset)))
	return *p
}

// PutUint32At writes the given uint32 to the given offset.
func (m *Memory) PutUint32At(v uint32, offset uint32) {
	p := (*uint32)(unsafe.Pointer(m.start + uintptr(offset)))
	*p = v
}

// Uint64At returns the uint64 stored at the given offset.
func (m *Memory) Uint64At(offset uint32) uint64 {
	p := (*uint64)(unsafe.Pointer(m.start + uintptr(offset)))
	return *p
}

// PutUint64At writes the given uint64 to the given offset.
func (m *Memory) PutUint64At(v uint64, offset uint32) {
	p := (*uint64)(unsafe.Pointer(m.start + uintptr(offset)))
	*p = v
}

// Float32At returns the float32 stored at the given offset.
func (m *Memory) Float32At(offset uint32) float32 {
	p := (*float32)(unsafe.Pointer(m.start + uintptr(offset)))
	return *p
}

// PutFloat32At writes the given float32 to the given offset.
func (m *Memory) PutFloat32At(v float32, offset uint32) {
	p := (*float32)(unsafe.Pointer(m.start + uintptr(offset)))
	*p = v
}

// Float64At returns the float64 stored at the given offset.
func (m *Memory) Float64At(offset uint32) float64 {
	p := (*float64)(unsafe.Pointer(m.start + uintptr(offset)))
	return *p
}

// PutFloat64At writes the given float64 to the given offset.
func (m *Memory) PutFloat64At(v float64, offset uint32) {
	p := (*float64)(unsafe.Pointer(m.start + uintptr(offset)))
	*p = v
}
package requests

import (
	"errors"
	"fmt"
	"net/url"
	"strings"

	"github.com/google/go-querystring/query"

	"github.com/atomicjolt/canvasapi"
)

// FetchingLatestQuizStatistics This endpoint provides statistics for all quiz versions, or for a specific
// quiz version, in which case the output is guaranteed to represent the
// _latest_ and most current version of the quiz.
//
// <b>200 OK</b> response code is returned if the request was successful.
// https://canvas.instructure.com/doc/api/quiz_statistics.html
//
// Path Parameters:
// # Path.CourseID (Required) ID
// # Path.QuizID (Required) ID
//
// Query Parameters:
// # Query.AllVersions (Optional) Whether the statistics report should include all submissions attempts.
//
type FetchingLatestQuizStatistics struct {
	Path struct {
		CourseID string `json:"course_id" url:"course_id,omitempty"` // (Required)
		QuizID   string `json:"quiz_id" url:"quiz_id,omitempty"`     // (Required)
	} `json:"path"`

	Query struct {
		AllVersions bool `json:"all_versions" url:"all_versions,omitempty"` // (Optional)
	} `json:"query"`
}

// GetMethod returns the HTTP method for this request.
func (t *FetchingLatestQuizStatistics) GetMethod() string {
	return "GET"
}

// GetURLPath substitutes the path parameters into the endpoint template.
func (t *FetchingLatestQuizStatistics) GetURLPath() string {
	path := "courses/{course_id}/quizzes/{quiz_id}/statistics"
	path = strings.ReplaceAll(path, "{course_id}", fmt.Sprintf("%v", t.Path.CourseID))
	path = strings.ReplaceAll(path, "{quiz_id}", fmt.Sprintf("%v", t.Path.QuizID))
	return path
}

// GetQuery encodes the query parameters for the request URL.
func (t *FetchingLatestQuizStatistics) GetQuery() (string, error) {
	v, err := query.Values(t.Query)
	if err != nil {
		return "", err
	}
	return v.Encode(), nil
}

// GetBody returns the form body; this request has none.
func (t *FetchingLatestQuizStatistics) GetBody() (url.Values, error) {
	return nil, nil
}

// GetJSON returns the JSON body; this request has none.
func (t *FetchingLatestQuizStatistics) GetJSON() ([]byte, error) {
	return nil, nil
}

// HasErrors validates that all required parameters are set, returning a
// single error that joins every missing-parameter message.
func (t *FetchingLatestQuizStatistics) HasErrors() error {
	errs := []string{}
	if t.Path.CourseID == "" {
		errs = append(errs, "'Path.CourseID' is required")
	}
	if t.Path.QuizID == "" {
		errs = append(errs, "'Path.QuizID' is required")
	}
	if len(errs) > 0 {
		// Use errors.New rather than fmt.Errorf(joined): passing joined
		// user data as a format string is a vet error and would mangle
		// any '%' in the message.
		return errors.New(strings.Join(errs, ", "))
	}
	return nil
}

// Do executes the request against the given Canvas client and discards
// the response body.
func (t *FetchingLatestQuizStatistics) Do(c *canvasapi.Canvas) error {
	_, err := c.SendRequest(t)
	if err != nil {
		return err
	}

	return nil
}
package blockparser

import (
	"math/big"
	"time"
)

// Parse fetches the block with the given ID, parses its data, records
// request/parsing durations when metrics are configured, and finally
// publishes a copy of the block ID on the parsed channel.
func (p *Parser) Parse(block *big.Int) error {
	// Time the fetch.
	started := time.Now()

	data, err := p.queryBlockData(block)
	if err != nil {
		return err
	}
	defer data.Close()

	// Record fetch duration (metrics are optional).
	if m := p.metrics; m != nil {
		m.RequestDuration.Observe(time.Since(started).Seconds())
	}

	// Time the parse.
	started = time.Now()
	if err = p.parseBlockData(data); err != nil {
		return err
	}
	if m := p.metrics; m != nil {
		m.ParsingDuration.Observe(time.Since(started).Seconds())
	}

	// Announce completion with a defensive copy of the caller's big.Int.
	p.blockID <- new(big.Int).Set(block)
	return nil
}
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package inputs

import (
	"context"
	"strings"
	"time"

	"chromiumos/tast/ctxutil"
	"chromiumos/tast/errors"
	"chromiumos/tast/local/bundles/cros/inputs/fixture"
	"chromiumos/tast/local/bundles/cros/inputs/pre"
	"chromiumos/tast/local/bundles/cros/inputs/testserver"
	"chromiumos/tast/local/bundles/cros/inputs/util"
	"chromiumos/tast/local/chrome/ime"
	"chromiumos/tast/local/chrome/uiauto"
	"chromiumos/tast/local/chrome/uiauto/faillog"
	"chromiumos/tast/local/chrome/uiauto/nodewith"
	"chromiumos/tast/local/chrome/uiauto/role"
	"chromiumos/tast/local/chrome/uiauto/vkb"
	"chromiumos/tast/local/chrome/useractions"
	"chromiumos/tast/testing"
	"chromiumos/tast/testing/hwdep"
)

func init() {
	testing.AddTest(&testing.Test{
		Func:         VirtualKeyboardTextEditing,
		LacrosStatus: testing.LacrosVariantExists,
		Desc:         "Checks that the virtual keyboard can insert and delete text after clicking between different text fields",
		Contacts:     []string{"essential-inputs-gardener-oncall@google.com", "essential-inputs-team@google.com"},
		Attr:         []string{"group:mainline", "group:input-tools", "informational"},
		SoftwareDeps: []string{"chrome", "google_virtual_keyboard"},
		SearchFlags:  util.IMESearchFlags([]ime.InputMethod{ime.EnglishUS}),
		Timeout:      5 * time.Minute,
		Params: []testing.Param{
			{
				Fixture:           fixture.TabletVK,
				ExtraHardwareDeps: hwdep.D(pre.InputsStableModels),
			},
			{
				Name:              "informational",
				Fixture:           fixture.TabletVK,
				ExtraHardwareDeps: hwdep.D(pre.InputsUnstableModels),
			},
			{
				Name:              "lacros",
				Fixture:           fixture.LacrosTabletVK,
				ExtraHardwareDeps: hwdep.D(pre.InputsStableModels),
				ExtraSoftwareDeps: []string{"lacros_stable"},
			},
		},
	})
}

// VirtualKeyboardTextEditing types into two text areas via the virtual
// keyboard, moving the caret by clicking at character boundaries, and
// verifies the resulting field contents.
func VirtualKeyboardTextEditing(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(fixture.FixtData).Chrome
	tconn := s.FixtValue().(fixture.FixtData).TestAPIConn
	uc := s.FixtValue().(fixture.FixtData).UserContext

	// Reserve time for cleanup (faillog dump, server close) after ctx expires.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 5*time.Second)
	defer cancel()

	defer faillog.DumpUITreeWithScreenshotOnError(cleanupCtx, s.OutDir(), s.HasError, cr, "ui_tree")

	its, err := testserver.LaunchBrowser(ctx, s.FixtValue().(fixture.FixtData).BrowserType, cr, tconn)
	if err != nil {
		s.Fatal("Failed to launch inputs test server: ", err)
	}
	defer its.CloseAll(cleanupCtx)

	ui := uiauto.New(tconn)
	vkbCtx := vkb.NewContext(cr, tconn)

	inputMethod := ime.EnglishUS
	if err := inputMethod.InstallAndActivateUserAction(uc)(ctx); err != nil {
		s.Fatal("Failed to set input method: ", err)
	}
	uc.SetAttribute(useractions.AttributeInputMethod, inputMethod.Name)

	// Two target fields: one with autocorrect active, one without.
	inputField := testserver.TextAreaInputField
	noCorrectionInputField := testserver.TextAreaNoCorrectionInputField
	inputTextFinder := nodewith.Role(role.InlineTextBox).Ancestor(inputField.Finder())
	noCorrectionInputFieldFinder := nodewith.Role(role.InlineTextBox).Ancestor(noCorrectionInputField.Finder())

	// clickTextRightBound places the caret by clicking at the right edge
	// of the first endIndex characters of the matched text node.
	clickTextRightBound := func(textFinder *nodewith.Finder, endIndex int) uiauto.Action {
		return func(ctx context.Context) error {
			textBounds, err := ui.BoundsForRange(ctx, textFinder, 0, endIndex)
			if err != nil {
				return errors.Wrap(err, "failed to get text location")
			}
			return ui.MouseClickAtLocation(0, textBounds.RightCenter())(ctx)
		}
	}

	validateAction := uiauto.Combine("edit text using virtual keyboard",
		// Edit text after swapping focus between text fields.
		its.ClickFieldUntilVKShown(inputField),
		vkbCtx.TapKeys(strings.Split("Abcdfg", "")),
		its.ClickFieldUntilVKShown(noCorrectionInputField),
		vkbCtx.TapKeys(strings.Split("abd", "")),
		clickTextRightBound(inputTextFinder, 4),
		vkbCtx.TapKeys(strings.Split("de", "")),
		clickTextRightBound(noCorrectionInputFieldFinder, 2),
		vkbCtx.TapKeys(strings.Split("c", "")),
		clickTextRightBound(inputTextFinder, 5),
		vkbCtx.TapKey("backspace"),
		clickTextRightBound(noCorrectionInputFieldFinder, 2),
		vkbCtx.TapKey("space"),
		util.WaitForFieldTextToBe(tconn, inputField.Finder(), "Abcdefg"),
		util.WaitForFieldTextToBe(tconn, noCorrectionInputField.Finder(), "ab cd"),

		// Edit text while focused in text field.
		clickTextRightBound(inputTextFinder, 7),
		vkbCtx.TapKeys(strings.Split("hjij", "")),
		clickTextRightBound(inputTextFinder, 9),
		vkbCtx.TapKey("backspace"),
		clickTextRightBound(inputTextFinder, 5),
		vkbCtx.TapKey("space"),
		util.WaitForFieldTextToBe(tconn, inputField.Finder(), "Abcde fghij"),
	)

	if err := uiauto.UserAction("Edit text using virtual keyboard",
		validateAction,
		uc,
		&useractions.UserActionCfg{
			Attributes: map[string]string{
				useractions.AttributeFeature:    useractions.FeatureVKTyping,
				useractions.AttributeInputField: string(inputField),
			},
		},
	)(ctx); err != nil {
		s.Fatal("Failed to edit text using virtual keyboard: ", err)
	}
}
// Package log wraps github.com/upfluence/log with a process-wide default
// logger wired to stdout (level from LOGGER_LEVEL) and the default error
// logger sink.
package log

import (
	"context"
	"strings"
	"sync"

	"github.com/upfluence/log"
	"github.com/upfluence/log/record"
	elsink "github.com/upfluence/log/sink/error_logger"
	"github.com/upfluence/log/sink/leveled"
	"github.com/upfluence/log/sink/multi"
	"github.com/upfluence/log/sink/writer"

	"github.com/upfluence/pkg/cfg"
	"github.com/upfluence/pkg/error_logger"
)

const localPkg = "github.com/upfluence/pkg/log"

var (
	// loggerMu guards mutation of loggerOpts/Logger in
	// RegisterContextExtractor.
	// NOTE(review): reads of Logger (by the package-level helpers below)
	// are not synchronized against that rebuild — presumably all
	// RegisterContextExtractor calls happen during program init; confirm.
	loggerMu   sync.Mutex
	loggerOpts = []log.LoggerOption{
		log.WithSink(
			multi.NewSink(
				leveled.NewSink(
					fetchLevel(),
					writer.NewStdoutSink(writer.NewDefaultFormatter(localPkg)),
				),
				leveled.NewSink(
					record.Error,
					elsink.NewSink(error_logger.DefaultErrorLogger),
				),
			),
		),
	}

	// Logger is the package-wide default logger used by all helpers below.
	Logger = log.NewLogger(loggerOpts...)
)

// RegisterContextExtractor appends a context extractor and rebuilds the
// package-level Logger with the accumulated options.
func RegisterContextExtractor(ce log.ContextExtractor) {
	loggerMu.Lock()
	defer loggerMu.Unlock()

	loggerOpts = append(loggerOpts, log.WithContextExtractor(ce))
	Logger = log.NewLogger(loggerOpts...)
}

// fetchLevel maps the LOGGER_LEVEL environment setting to a record.Level,
// defaulting to Notice for unset or unrecognized values.
func fetchLevel() record.Level {
	switch strings.ToUpper(cfg.FetchString("LOGGER_LEVEL", "")) {
	case "DEBUG":
		return record.Debug
	case "INFO":
		return record.Info
	case "NOTICE":
		return record.Notice
	case "WARNING":
		return record.Warning
	case "ERROR":
		return record.Error
	case "FATAL":
		return record.Fatal
	}

	return record.Notice
}

// The helpers below forward to the package-level Logger.

func Fatal(args ...interface{}) {
	Logger.Fatal(args...)
}

func Fatalf(format string, args ...interface{}) {
	Logger.Fatalf(format, args...)
}

func Error(args ...interface{}) {
	Logger.Error(args...)
}

func Errorf(format string, args ...interface{}) {
	Logger.Errorf(format, args...)
}

func Warning(args ...interface{}) {
	Logger.Warning(args...)
}

func Warningf(format string, args ...interface{}) {
	Logger.Warningf(format, args...)
}

func Notice(args ...interface{}) {
	Logger.Notice(args...)
}

func Noticef(format string, args ...interface{}) {
	Logger.Noticef(format, args...)
}

func Info(args ...interface{}) {
	Logger.Info(args...)
}

func Infof(format string, args ...interface{}) {
	Logger.Infof(format, args...)
}

func Debug(args ...interface{}) {
	Logger.Debug(args...)
}

func Debugf(format string, args ...interface{}) {
	Logger.Debugf(format, args...)
}

func WithField(f record.Field) log.Logger {
	return Logger.WithField(f)
}

func WithFields(fs ...record.Field) log.Logger {
	return Logger.WithFields(fs...)
}

func WithContext(ctx context.Context) log.Logger {
	return Logger.WithContext(ctx)
}

func WithError(err error) log.Logger {
	return Logger.WithError(err)
}

// Field re-exports the underlying field constructor.
var Field = log.Field
// Package rdm provides a lazily initialized, process-wide Redis client.
package rdm

import (
	"sync"

	"github.com/go-redis/redis/v8"
)

// RDM is the shared Redis client. It is nil until GetRdm is first called
// (or until a caller assigns it directly, e.g. for tests).
var RDM *redis.Client

// rdmOnce guards the lazy initialization in GetRdm so concurrent first
// callers cannot race and construct multiple clients (the original
// check-then-assign on RDM was not concurrency-safe).
var rdmOnce sync.Once

// GetRdm returns the shared Redis client, creating it on first use with
// the default local address. A client assigned to RDM before the first
// call is preserved.
func GetRdm() *redis.Client {
	rdmOnce.Do(func() {
		if RDM == nil {
			RDM = redis.NewClient(&redis.Options{
				Addr: "127.0.0.1:6379",
				DB:   0,
			})
		}
	})
	return RDM
}
// Copyright 2019 The ChromiumOS Authors // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package crostini import ( "context" "time" "chromiumos/tast/local/bundles/cros/crostini/verifyapp" "chromiumos/tast/local/chrome/browser" "chromiumos/tast/local/chrome/browser/browserfixt" "chromiumos/tast/local/crostini" "chromiumos/tast/testing" ) func init() { testing.AddTest(&testing.Test{ Func: VerifyAppWayland, LacrosStatus: testing.LacrosVariantExists, Desc: "Runs a Wayland crostini application from the terminal and verifies that it renders", Contacts: []string{"clumptini+oncall@google.com"}, Attr: []string{"group:mainline"}, SoftwareDeps: []string{"chrome", "vm_host"}, Params: []testing.Param{ // Parameters generated by params_test.go. DO NOT EDIT. { Name: "buster_stable", ExtraSoftwareDeps: []string{"dlc"}, ExtraHardwareDeps: crostini.CrostiniStable, Fixture: "crostiniBuster", Timeout: 3 * time.Minute, Val: browser.TypeAsh, }, { Name: "buster_unstable", ExtraAttr: []string{"informational"}, ExtraSoftwareDeps: []string{"dlc"}, ExtraHardwareDeps: crostini.CrostiniUnstable, Fixture: "crostiniBuster", Timeout: 3 * time.Minute, Val: browser.TypeAsh, }, { Name: "bullseye_stable", ExtraSoftwareDeps: []string{"dlc"}, ExtraHardwareDeps: crostini.CrostiniStable, Fixture: "crostiniBullseye", Timeout: 3 * time.Minute, Val: browser.TypeAsh, }, { Name: "bullseye_unstable", ExtraAttr: []string{"informational"}, ExtraSoftwareDeps: []string{"dlc"}, ExtraHardwareDeps: crostini.CrostiniUnstable, Fixture: "crostiniBullseye", Timeout: 3 * time.Minute, Val: browser.TypeAsh, }, { Name: "bullseye_stable_lacros", ExtraAttr: []string{"informational"}, ExtraSoftwareDeps: []string{"dlc", "lacros"}, ExtraHardwareDeps: crostini.CrostiniStable, Fixture: "crostiniBullseyeWithLacros", Timeout: 3 * time.Minute, Val: browser.TypeLacros, }, }, }) } func VerifyAppWayland(ctx context.Context, s *testing.State) { cr := 
s.FixtValue().(crostini.FixtureData).Chrome cont := s.FixtValue().(crostini.FixtureData).Cont // Run Lacros (if specified) as non-focused browser, so that the tested Wayland apps work in this situation. _, cleanup, err := browserfixt.SetUp(ctx, cr, s.Param().(browser.Type)) if err != nil { s.Fatal("Failed to set up browser: ", err) } defer cleanup(ctx) verifyapp.RunTest(ctx, s, cr, cont, crostini.WaylandDemoConfig()) }
// Copyright 2019 The gVisor Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package lib import ( "os/exec" "path/filepath" "regexp" ) var nodejsTestRegEx = regexp.MustCompile(`^test-[^-].+\.js$`) // Location of nodejs tests relative to working dir. const nodejsTestDir = "test" // nodejsRunner implements TestRunner for NodeJS. type nodejsRunner struct{} var _ TestRunner = nodejsRunner{} // ListTests implements TestRunner.ListTests. func (nodejsRunner) ListTests() ([]string, error) { testSlice, err := Search(nodejsTestDir, nodejsTestRegEx) if err != nil { return nil, err } return testSlice, nil } // TestCmds implements TestRunner.TestCmds. func (nodejsRunner) TestCmds(tests []string) []*exec.Cmd { args := append([]string{filepath.Join("tools", "test.py"), "--timeout=180"}, tests...) return []*exec.Cmd{exec.Command("/usr/bin/python", args...)} }
package main import "golang-learning-playground/src/printer" func main() { words := []string{ "hi", "there", "johny", } multiLinePrinter := printer.MultiLinePrinter{} multiLinePrinter.Print(words) singleLinePrinter := printer.SingleLinePrinter{} singleLinePrinter.Print(words) }
package main import ( "fmt" "runtime" "strconv" "time" "github.com/go-redis/redis/v8" "gitlab.com/myikaco/msngr" "gitlab.com/myikaco/saga" ) var OpenTradeSaga saga.Saga = saga.Saga{ Steps: []saga.SagaStep{ { Transaction: calcPosSize, CompensatingTransaction: cancelCalcPosSize, }, { Transaction: submitEntryOrder, CompensatingTransaction: cancelSubmitEntryOrder, }, }, } // OpenTradeSaga T1 func calcPosSize(allArgs ...interface{}) (interface{}, error) { fmt.Println("SAGA: Running calcPosSize") transactionArgs := allArgs[0].(map[string]interface{}) funcArgs := allArgs[1].(map[string]interface{}) fmt.Printf("Inside OpenTradeSaga step, args = %v\n", funcArgs) //get acc balance for pos size calc (send msg to order-svc) msgs := []string{} msgs = append(msgs, "Calc") msgs = append(msgs, "GetBal") msgs = append(msgs, "Asset") msgs = append(msgs, "USDT") msgs = append(msgs, "BotStreamName") msgs = append(msgs, transactionArgs["botStream"].(string)) msngr.AddToStream(transactionArgs["botStream"].(string), msgs) //listen for msg resp listenArgs := make(map[string]string) listenArgs["streamName"] = transactionArgs["botStream"].(string) listenArgs["groupName"] = svcConsumerGroupName listenArgs["consumerName"] = redisConsumerID listenArgs["start"] = ">" listenArgs["count"] = "1" var bal string parserHandlers := []msngr.CommandHandler{ { Command: "Bal", HandlerMatches: []msngr.HandlerMatch{ { Matcher: func(fieldVal string) bool { return fieldVal != "" }, Handler: func(msg redis.XMessage, output *interface{}) { bal = msngr.FilterMsgVals(msg, func(k, v string) bool { return (k == "Bal" && v != "") }) if bal != "" { _, file, line, _ := runtime.Caller(0) go Log(loggingInJSON(fmt.Sprintf("OpenTradeSaga get bal = %v <%v>", bal, time.Now().UTC().Format(httpTimeFormat))), fmt.Sprintf("<%v> %v", line, file)) } }, }, }, }, } msngr.ReadAndParse(msngr.ReadStream, "OpenTradeSaga calcPosSize", msngr.ParseStream, listenArgs, parserHandlers) //calc pos size accBal, _ := strconv.ParseFloat(bal, 32) 
_, posSize := calcEntry(funcArgs["latestClosePrice"].(float64), funcArgs["slPrice"].(float64), funcArgs["accRisk"].(float64), accBal, funcArgs["leverage"].(int)) return posSize, nil } // OpenTradeSaga T-1 func cancelCalcPosSize(allArgs ...interface{}) (interface{}, error) { fmt.Println("SAGA: Running cancelCalcPosSize") // nothing to cancel return nil, nil } // OpenTradeSaga T2 func submitEntryOrder(allArgs ...interface{}) (interface{}, error) { fmt.Println("SAGA: Running submitEntryOrder") transactionArgs := allArgs[0].(map[string]interface{}) // XADD submitEntryOrderIntent msgs := []string{} msgs = append(msgs, "Action") msgs = append(msgs, "SubmitEntryOrderIntent") msgs = append(msgs, "Symbol") msgs = append(msgs, "BTCUSDT") msgs = append(msgs, "Side") msgs = append(msgs, "BUY") msgs = append(msgs, "Quantity") msgs = append(msgs, "1000") msgs = append(msgs, "Price") msgs = append(msgs, "69") msgs = append(msgs, "Timestamp") msgs = append(msgs, time.Now().Format("2006-01-02_15:04:05_-0700")) msngr.AddToStream(transactionArgs["botStream"].(string), msgs) //listen for msg resp listenArgs := make(map[string]string) listenArgs["streamName"] = transactionArgs["botStream"].(string) listenArgs["groupName"] = svcConsumerGroupName listenArgs["consumerName"] = redisConsumerID listenArgs["start"] = ">" listenArgs["count"] = "1" var order string parserHandlers := []msngr.CommandHandler{ { Command: "Entry Order", HandlerMatches: []msngr.HandlerMatch{ { Matcher: func(fieldVal string) bool { return fieldVal != "" }, Handler: func(msg redis.XMessage, output *interface{}) { order = msngr.FilterMsgVals(msg, func(k, v string) bool { return (k == "Entry Order" && v != "") }) fmt.Println(order) }, }, }, }, } msngr.ReadAndParse(msngr.ReadStream, "strat-svc submitEntryOrder consec headers retrieve", msngr.ParseStream, listenArgs, parserHandlers) //listen for consec responses msngr.ListenConsecResponses(transactionArgs, "strat-svc submitEntryOrder ListenConsecResponses", func(i int, v 
string, m redis.XMessage, isHeaderMatch bool) { fmt.Printf("Read consec header at index %v val: %s, IsMatch = %v (%s)", i, v, isHeaderMatch, m.ID) }) // order-svc: // entryOrderSubmitted, entryOrderFilled // entryOrderFailed // entryOrderSubmitted, entryOrderFilled, SLExitedTrade/TPExitedTrade _, file, line, _ := runtime.Caller(0) go Log(loggingInJSON(fmt.Sprintf("! OPENTRADE SAGA COMPLETE | args = %v", allArgs...)), fmt.Sprintf("<%v> %v", line, file)) return nil, nil } // OpenTradeSaga T-2 func cancelSubmitEntryOrder(allArgs ...interface{}) (interface{}, error) { fmt.Println("SAGA: Running cancelSubmitEntryOrder") // XADD cancelEntryOrderIntent {timestamp} // order-svc: // entryOrderCancelled return nil, nil } // stop loss and take profit (maybe partial exits), and full exit var ExitTradeSaga saga.Saga = saga.Saga{ Steps: []saga.SagaStep{ { Transaction: calcCloseSize, CompensatingTransaction: cancelCalcCloseSize, }, { Transaction: submitExitOrder, CompensatingTransaction: cancelSubmitExitOrder, }, }, } // OpenTradeSaga T1 func calcCloseSize(allArgs ...interface{}) (interface{}, error) { fmt.Println("SAGA: Running calcCloseSize") transactionArgs := allArgs[0].(map[string]interface{}) funcArgs := allArgs[1].(map[string]interface{}) fmt.Printf("Inside ExitTradeSaga step, args = %v\n", funcArgs) //get pos size for pos size calc (send msg to order-svc) msgs := []string{} msgs = append(msgs, "Calc") msgs = append(msgs, "GetPosSize") msgs = append(msgs, "Ticker") msgs = append(msgs, funcArgs["ticker"].(string)) msngr.AddToStream(transactionArgs["botStream"].(string), msgs) time.Sleep(1 * time.Second) //listen for msg resp listenArgs := make(map[string]string) listenArgs["streamName"] = transactionArgs["botStream"].(string) listenArgs["groupName"] = svcConsumerGroupName listenArgs["consumerName"] = redisConsumerID listenArgs["start"] = ">" listenArgs["count"] = "1" var posSize string for { _, msg, err := msngr.ReadStream(listenArgs, "OpenTradeSaga calcCloseSize") 
fmt.Println(colorGreen + "Finished ReadStream" + colorReset) if err != nil { _, file, line, _ := runtime.Caller(0) go Log(loggingInJSON(fmt.Sprintf("CalcPosSize saga step ReadStream err = %v", err)), fmt.Sprintf("<%v> %v", line, file)) return nil, err } if str, ok := msg.([]redis.XStream); ok { posSize = msngr.FilterMsgVals(str[0].Messages[0], func(k, v string) bool { return k == "PosSize" && v != "" }) } //calc exit size if posSize == "" { // return 0, fmt.Errorf("posSize calc result empty %v", allArgs...) fmt.Printf("posSize calc result empty %v", allArgs...) } else { break } } posSzFloat, _ := strconv.ParseFloat(posSize, 32) exitSz := (funcArgs["posPercToClose"].(float64) / 100) * posSzFloat return exitSz, nil } // OpenTradeSaga T-1 func cancelCalcCloseSize(allArgs ...interface{}) (interface{}, error) { fmt.Println("SAGA: Running cancelCalcCloseSize") // nothing to cancel return nil, nil } // OpenTradeSaga T2 func submitExitOrder(allArgs ...interface{}) (interface{}, error) { fmt.Println("SAGA: Running submitExitOrder") // XADD submitExitOrderIntent transactionArgs := allArgs[0].(map[string]interface{}) msgs := []string{} msgs = append(msgs, "Action") msgs = append(msgs, "SubmitExitOrderIntent") msgs = append(msgs, "Symbol") msgs = append(msgs, "BTCUSDT") msgs = append(msgs, "Side") msgs = append(msgs, "SELL") msgs = append(msgs, "Quantity") msgs = append(msgs, "1000") msgs = append(msgs, "Price") msgs = append(msgs, "69") msgs = append(msgs, "Timestamp") msgs = append(msgs, time.Now().Format("2006-01-02_15:04:05_-0700")) msngr.AddToStream(transactionArgs["botStream"].(string), msgs) //listen for msg resp listenArgs := make(map[string]string) listenArgs["streamName"] = transactionArgs["botStream"].(string) listenArgs["groupName"] = svcConsumerGroupName listenArgs["consumerName"] = redisConsumerID listenArgs["start"] = ">" listenArgs["count"] = "1" //listen for consec responses return msngr.ListenConsecResponses(transactionArgs, "strat-svc submitExitOrder 
ListenConsecResponses", func(i int, v string, m redis.XMessage, isHeaderMatch bool) { fmt.Printf("Read consec header at index %v val: %s, IsMatch = %v (%s)\n", i, v, isHeaderMatch, m.ID) }) // order-svc: // exitOrderSubmitted, exitOrderFilled // exitOrderFailed // exitOrderSubmitted, exitOrderFilled } // OpenTradeSaga T-2 func cancelSubmitExitOrder(allArgs ...interface{}) (interface{}, error) { fmt.Println("SAGA: Running cancelSubmitExitOrder") // XADD cancelExitOrderIntent {timestamp} // order-svc: // exitOrderCancelled return nil, nil } // edit SL/TP var EditTrade saga.Saga // OpenTradeSaga T1 func submitModifyPos(args map[string]interface{}) (interface{}, error) { // XADD submitModifyPosIntent {timestamp} // order-svc: // modifyPosSubmitted, modifyPosSuccessful // modifyPosFailed return nil, nil } // OpenTradeSaga T-1 func cancelModifyPos(args map[string]interface{}) (interface{}, error) { // modify back return nil, nil }
// Copyright 2018 NetApp, Inc. All Rights Reserved. package storageattribute import ( "fmt" ) func NewBoolOffer(offer bool) Offer { return &boolOffer{ Offer: offer, } } // Matches is a boolean offer of true matches any request; a boolean offer of false // only matches a false request. This assumes that the requested parameter // will be passed into the driver. func (o *boolOffer) Matches(r Request) bool { br, ok := r.(*boolRequest) if !ok { return false } if o.Offer { return true } return br.Request == o.Offer } func (o *boolOffer) String() string { return fmt.Sprintf("{Offer: %t}", o.Offer) } func NewBoolRequest(request bool) Request { return &boolRequest{ Request: request, } } func (r *boolRequest) Value() interface{} { return r.Request } func (r *boolRequest) GetType() Type { return boolType } func (r *boolRequest) String() string { return fmt.Sprintf("%t", r.Request) }
// +build ignore package main import ( "bytes" "context" "fmt" "math/big" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethclient" flag "github.com/spf13/pflag" ) var ( ganacheURL string txHash string account string ) func main() { flag.Parse() eth, err := ethclient.Dial(ganacheURL) if nil != err { panic(err) } defer eth.Close() // fetch the receipt to decode the contract address receipt, err := eth.TransactionReceipt(context.TODO(), common.HexToHash(txHash)) if nil != err { panic(err) } logs := receipt.Logs expect := struct { method []byte payer string amount []byte }{ crypto.Keccak256([]byte("Withdrawal(address,uint256)")), account, math.PaddedBigBytes(toWei(0.01), 32), } if got := logs[0].Topics[0].Bytes(); !bytes.Equal(got, expect.method) { panic(fmt.Sprintf("invalid method sig: got %x, expect %x", got, expect.method)) } if got := common.BytesToAddress( logs[0].Topics[1].Bytes()).Hex(); got != expect.payer { panic(fmt.Sprintf("invalid payer address: got %s, expect %s", got, expect.payer)) } if !bytes.Equal(logs[0].Data, expect.amount) { panic(fmt.Sprintf("invalid amount: got %x, expect %x", logs[0].Data, expect.amount)) } } func init() { flag.StringVarP(&ganacheURL, "ganache-url", "g", "http://127.0.0.1:7545", "receiver's address") flag.StringVar(&txHash, "tx", "", "hash of tx deploying the called contract") flag.StringVarP(&account, "account", "a", "", "the expected 'from' field within the event") } func toWei(ethers float64) *big.Int { // 1 ether = 10^18 wei orders, _ := new(big.Float).SetString("1000000000000000000") x := big.NewFloat(ethers) x.Mul(x, orders) wei, _ := x.Int(nil) return wei }
package parsefb

import (
	"github.com/PuerkitoBio/goquery"
	"strings"
	"testing"
)

// TestParseTime verifies that GetTimeStamp extracts the expected RFC 3339
// timestamp from the canned document testhtml1 (defined elsewhere in the
// package).
func TestParseTime(t *testing.T) {
	doc, err := goquery.NewDocumentFromReader(strings.NewReader(testhtml1))
	if err != nil {
		t.Error(err)
		return
	}

	timestamp, err := GetTimeStamp(doc)
	if err != nil {
		t.Error(err)
		return
	}

	// Fixed expectation baked into the testhtml1 fixture.
	if timestamp != "2017-02-16T07:00:00+08:00" {
		t.Error("bad timestamp: " + timestamp)
		return
	}
}
package flywheel import ( "encoding/json" "io" "log" "os" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/autoscaling" "github.com/aws/aws-sdk-go/service/ec2" ) // SpinINTERVAL determines how often flywheel will update its // internal state and/or check for idle timeouts const SpinINTERVAL = time.Second // Ping - HTTP requests "ping" the flywheel goroutine. This updates the idle timeout, // and returns the current status to the http request. type Ping struct { replyTo chan Pong setTimeout time.Duration requestStart bool requestStop bool noop bool } // Pong - result of the ping request type Pong struct { Status Status `json:"-"` StatusName string `json:"status"` Err error `json:"error,omitempty"` LastStarted time.Time `json:"last-started,omitempty"` LastStopped time.Time `json:"last-stopped,omitempty"` StopAt time.Time `json:"stop-due-at"` } // Flywheel struct holds all the state required by the flywheel goroutine. type Flywheel struct { config *Config running bool pings chan Ping status Status ready bool stopAt time.Time lastStarted time.Time lastStopped time.Time ec2 *ec2.EC2 autoscaling *autoscaling.AutoScaling hcInterval time.Duration idleTimeout time.Duration } // New - Create new Flywheel type func New(config *Config) *Flywheel { awsConfig := &aws.Config{Region: &config.Region} sess := session.New(awsConfig) return &Flywheel{ hcInterval: time.Duration(config.HcInterval), idleTimeout: time.Duration(config.IdleTimeout), config: config, pings: make(chan Ping), stopAt: time.Now(), ec2: ec2.New(sess), autoscaling: autoscaling.New(sess), } } // ProxyEndpoint - retrieve the reverse proxy destination func (fw *Flywheel) ProxyEndpoint(hostname string) string { vhost, ok := fw.config.Vhosts[hostname] if ok { return vhost } return fw.config.Endpoint } // Spin - Runs the main loop for the Flywheel. 
func (fw *Flywheel) Spin() { hchan := make(chan Status, 1) go fw.HealthWatcher(hchan) ticker := time.NewTicker(SpinINTERVAL) for { select { case ping := <-fw.pings: fw.RecvPing(&ping) case <-ticker.C: fw.Poll() case status := <-hchan: if fw.status != status { log.Printf("Healthcheck - status changed from %v to %v", fw.status, status) // Status may change from STARTED to UNHEALTHY to STARTED due // to things like AWS RequestLimitExceeded errors. // If there is an active timeout, keep it instead of resetting. if status == STARTED && fw.stopAt.Before(time.Now()) { fw.stopAt = time.Now().Add(fw.idleTimeout) log.Printf("Timer update. Stop scheduled for %v", fw.stopAt) } fw.status = status } } } } // RecvPing - process user ping requests and update state if needed func (fw *Flywheel) RecvPing(ping *Ping) { var pong Pong ch := ping.replyTo defer close(ch) switch fw.status { case STOPPED: if ping.requestStart { pong.Err = fw.Start() } case STARTED: if ping.noop { // Status requests, etc. Don't update idle timer } else if ping.requestStop { pong.Err = fw.Stop() } else if int64(ping.setTimeout) != 0 { fw.stopAt = time.Now().Add(ping.setTimeout) log.Printf("Timer update. Stop scheduled for %v", fw.stopAt) } else { fw.stopAt = time.Now().Add(fw.idleTimeout) log.Printf("Timer update. Stop scheduled for %v", fw.stopAt) } } pong.Status = fw.status pong.StatusName = fw.status.String() pong.LastStarted = fw.lastStarted pong.LastStopped = fw.lastStopped pong.StopAt = fw.stopAt ch <- pong } // Poll - The periodic check for starting/stopping state transitions and idle // timeouts func (fw *Flywheel) Poll() { switch fw.status { case STARTED: if time.Now().After(fw.stopAt) { fw.Stop() log.Print("Idle timeout - shutting down") fw.status = STOPPING } case STOPPING: if fw.ready { log.Print("Shutdown complete") fw.status = STOPPED } case STARTING: if fw.ready { fw.status = STARTED fw.stopAt = time.Now().Add(fw.idleTimeout) log.Printf("Startup complete. 
Stop scheduled for %v", fw.stopAt) } } } // WriteStatusFile - Before we exit the application we write the current state func (fw *Flywheel) WriteStatusFile(statusFile string) { var pong Pong fd, err := os.OpenFile(statusFile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644) if err != nil { log.Printf("Unable to write status file: %s", err) return } defer fd.Close() pong.Status = fw.status pong.StatusName = fw.status.String() pong.LastStarted = fw.lastStarted pong.LastStopped = fw.lastStopped buf, err := json.Marshal(pong) if err != nil { log.Printf("Unable to write status file: %s", err) return } _, err = fd.Write(buf) if err != nil { log.Printf("Unable to write status file: %s", err) return } } // ReadStatusFile load status from the status file func (fw *Flywheel) ReadStatusFile(statusFile string) { fd, err := os.Open(statusFile) if err != nil { if err != os.ErrNotExist { log.Printf("Unable to load status file: %v", err) } return } stat, err := fd.Stat() if err != nil { log.Printf("Unable to load status file: %v", err) return } buf := make([]byte, int(stat.Size())) _, err = io.ReadFull(fd, buf) if err != nil { log.Printf("Unable to load status file: %v", err) return } var status Pong err = json.Unmarshal(buf, &status) if err != nil { log.Printf("Unable to load status file: %v", err) return } fw.status = status.Status fw.lastStarted = status.LastStarted fw.lastStopped = status.LastStopped }
/* Copyright 2022-2023 ICS-FORTH. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Package embed is used to embed the various required scripts into the Frisbee Terminal. This allows to execute the Terminal from any path. For more info see https://zetcode.com/golang/embed/ */ package embed import ( "embed" "io/fs" "os" "path/filepath" "github.com/pkg/errors" ) //go:embed hack var Hack embed.FS // UpdateLocalFiles duplicates the structure of embedded fs into the installation dir. func UpdateLocalFiles(embeddedFS embed.FS, installationDir string) error { root := "." copyLocally := func(sourceFilePath string, hostFilePath string) error { data, err := fs.ReadFile(embeddedFS, sourceFilePath) if err != nil { return errors.Wrapf(err, "cannot read embeddedFS file '%s'", sourceFilePath) } if err := os.WriteFile(hostFilePath, data, os.ModePerm); err != nil { return errors.Wrapf(err, "cannot write file '%s'", hostFilePath) } return nil } return fs.WalkDir(embeddedFS, root, func(relPath string, d fs.DirEntry, _ error) error { if relPath == root { // ignore the root return nil } /*--------------------------------------------------- * Open and inspect embedded file. 
*---------------------------------------------------*/ embeddedFile, err := embeddedFS.Open(relPath) if err != nil { return errors.Wrapf(err, "cannot open embeddedFS file '%s'", relPath) } embeddedFileInfo, err := embeddedFile.Stat() if err != nil { return errors.Wrapf(err, "cannot stat embeddedFS file '%s'", relPath) } /*--------------------------------------------------- * Duplicate the embedded file into installation dir. *---------------------------------------------------*/ hostpath := filepath.Join(installationDir, relPath) switch { case embeddedFileInfo.Mode().IsRegular(): hostFileInfo, err := os.Stat(hostpath) if err != nil { if errors.Is(err, os.ErrNotExist) { // Copy the file locally return copyLocally(relPath, hostpath) } return errors.Wrapf(err, "cannot stat host path '%s'", hostpath) } if !hostFileInfo.Mode().IsRegular() { return errors.Errorf("expected '%s' to be a file, but it's '%s'.", relPath, hostFileInfo.Mode().Type()) } // Copy the file locally return copyLocally(relPath, hostpath) case embeddedFileInfo.IsDir(): hostFileInfo, err := os.Stat(hostpath) switch { case os.IsNotExist(err): if err := os.MkdirAll(hostpath, os.ModePerm); err != nil { return errors.Wrapf(err, "cannot create dir '%s' in the host fs", hostpath) } return nil case err != nil: return errors.Wrapf(err, "cannot stat host path '%s'", hostpath) case !hostFileInfo.IsDir(): return errors.Errorf("expected '%s' to be a dir, but it's '%s'", hostpath, hostFileInfo.Mode().Type()) default: return nil } default: return errors.Errorf("Filemode '%s' is not supported", embeddedFileInfo.Mode().Type()) } }) }
// Copyright 2014 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package kvserver

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/errors"
)

// importCmdFn is the pluggable Import implementation; the default stub
// returns an "unimplemented" error until SetImportCmd installs the real one.
var importCmdFn ImportCmdFunc = func(context.Context, batcheval.CommandArgs) (*roachpb.ImportResponse, error) {
	return &roachpb.ImportResponse{}, errors.Errorf("unimplemented command: %s", roachpb.Import)
}

// ImportCmdFunc is the type of the function that will be called as the
// implementation of the Import command.
type ImportCmdFunc func(context.Context, batcheval.CommandArgs) (*roachpb.ImportResponse, error)

// SetImportCmd allows setting the function that will be called as the
// implementation of the Import command. Only allowed to be called by Init.
func SetImportCmd(fn ImportCmdFunc) {
	// This is safe if SetImportCmd is only called at init time.
	importCmdFn = fn
}
package client import ( "fmt" "github.com/rancher/fleet/pkg/generated/controllers/fleet.cattle.io" fleetcontrollers "github.com/rancher/fleet/pkg/generated/controllers/fleet.cattle.io/v1alpha1" "github.com/rancher/wrangler/pkg/apply" "github.com/rancher/wrangler/pkg/generated/controllers/core" corev1 "github.com/rancher/wrangler/pkg/generated/controllers/core/v1" "github.com/rancher/wrangler/pkg/generated/controllers/rbac" rbaccontrollers "github.com/rancher/wrangler/pkg/generated/controllers/rbac/v1" "github.com/rancher/wrangler/pkg/kubeconfig" ) type Getter struct { Kubeconfig string Context string Namespace string } func (g *Getter) Get() (*Client, error) { if g == nil { return nil, fmt.Errorf("client is not configured, please set client getter") } return NewClient(g.Kubeconfig, g.Context, g.Namespace) } func (g *Getter) GetNamespace() string { return g.Namespace } type Client struct { Fleet fleetcontrollers.Interface Core corev1.Interface RBAC rbaccontrollers.Interface Apply apply.Apply Namespace string } func NewGetter(kubeconfig, context, namespace string) *Getter { return &Getter{ Kubeconfig: kubeconfig, Context: context, Namespace: namespace, } } func NewClient(kubeConfig, context, namespace string) (*Client, error) { cc := kubeconfig.GetNonInteractiveClientConfigWithContext(kubeConfig, context) ns, _, err := cc.Namespace() if err != nil { return nil, err } if namespace != "" { ns = namespace } restConfig, err := cc.ClientConfig() if err != nil { return nil, err } c := &Client{ Namespace: ns, } fleet, err := fleet.NewFactoryFromConfig(restConfig) if err != nil { return nil, err } c.Fleet = fleet.Fleet().V1alpha1() core, err := core.NewFactoryFromConfig(restConfig) if err != nil { return nil, err } c.Core = core.Core().V1() rbac, err := rbac.NewFactoryFromConfig(restConfig) if err != nil { return nil, err } c.RBAC = rbac.Rbac().V1() c.Apply, err = apply.NewForConfig(restConfig) if err != nil { return nil, err } if c.Namespace == "" { c.Namespace = "default" 
} c.Apply = c.Apply. WithDynamicLookup(). WithDefaultNamespace(c.Namespace). WithListerNamespace(c.Namespace). WithRestrictClusterScoped() return c, nil }
package net import "zinx/03-Request/iface" type Request struct { conn iface.IConnection data []byte len uint32 } func NewRequest(conn iface.IConnection,data []byte,len uint32) iface.IRequest{ return &Request{ conn: conn, data: data, len: nil, } } func (req *Request)GetConnection()iface.IConnection{ return req.conn } func (req Request)GetData()[]byte{ return req.data } func (req Request)GetLen() uint32{ return req.len }
package service import ( "fmt" "strings" "time" "github.com/mirzaakhena/admin/model" log "github.com/mirzaakhena/common/logger" "github.com/mirzaakhena/common/utils" ) // IAdminService is type IAdminService interface { IUserService GetOneSpace(sc model.ServiceContext) *model.Space GetAllUserSpace(sc model.ServiceContext, req model.GetAllBasicRequest) *model.GetAllSpaceResponse CreateSpace(sc model.ServiceContext, req model.CreateSpaceRequest) (*model.CreateSpaceResponse, error) // IsAdmin(sc model.ServiceContext, req model.IsAdminRequest) bool // GenerateInvitationAccount(sc model.ServiceContext, req model.GenerateInvitationAccountRequest) (*model.GenerateInvitationAccountResponse, error) // UpdateAccountStatus(sc model.ServiceContext, req model.UpdateStatusRequest) (*model.UpdateStatusResponse, error) // RemoveAccount(sc model.ServiceContext, req model.RemoveAccountRequest) (*model.RemoveAccountResponse, error) // RemoveWaitingAccount(sc model.ServiceContext, req model.RemoveWaitingAccountRequest) (*model.RemoveWaitingAccountResponse, error) // GetAllUserRolePermission(sc model.ServiceContext, req model.GetAllBasicRequest) ([]model.GetAllUserRolePermissionResponse, uint) // CreateUserRolePermission(sc model.ServiceContext, req model.CreateUserRolePermissionRequest) (*model.CreateUserRolePermissionResponse, error) // UpdateUserRolePermission(sc model.ServiceContext, req model.UpdateUserRolePermissionRequest) (*model.UpdateUserRolePermissionResponse, error) // DeleteUserRolePermission(sc model.ServiceContext, req model.DeleteUserRolePermissionRequest) (*model.DeleteUserRolePermissionResponse, error) // GetAllAccountUserRole(sc model.ServiceContext, req model.GetAllBasicRequest) ([]model.GetAllAccountUserRoleResponse, uint) // UpdateAccountUserRole(sc model.ServiceContext, req model.UpdateAccountUserRoleRequest) (*model.UpdateAccountUserRoleResponse, error) } // AdminService is type AdminService struct { UserService } // GetOneSpace is func (o *AdminService) 
GetOneSpace(sc model.ServiceContext) *model.Space { spaceID := sc["spaceId"].(string) return o.Space.GetOne(o.Trx.GetDB(false), spaceID) } // GetAllUserSpace is func (o *AdminService) GetAllUserSpace(sc model.ServiceContext, req model.GetAllBasicRequest) *model.GetAllSpaceResponse { var ss []model.Space us, count := o.UserSpace.GetAll(o.Trx.GetDB(false), req) for _, s := range us { ss = append(ss, *s.Space) } result := model.GetAllSpaceResponse{ TotalCount: count, Spaces: ss, } return &result } // CreateSpace is func (o *AdminService) CreateSpace(sc model.ServiceContext, req model.CreateSpaceRequest) (*model.CreateSpaceResponse, error) { name := strings.TrimSpace(req.Name) description := strings.TrimSpace(req.Description) if name == "" { return nil, fmt.Errorf("space name must not empty") } userID, logInfo := o.ExtractServiceContext(sc) tx := o.Trx.GetDB(true) if o.Space.IsExistName(tx, name, userID) { log.GetLog().Error(logInfo, "space with name %s is exist", name) o.Trx.RollbackTransaction(tx) return nil, utils.PrintError(model.ConstErrorUnExistingEmailAddress, "space with name %s is exist. 
", name) } var ws model.Space { ws.ID = utils.GenID() ws.Name = name ws.Description = description ws.MaxUser = 5 ws.TotalCurrentUser = 1 ws.Expired = time.Now().Add(time.Hour * 24 * 100000) o.Space.Create(tx, ws) } var wsa model.UserSpace { wsa.ID = utils.GenID() wsa.UserID = userID wsa.SpaceID = ws.ID wsa.Type = "ADMIN" wsa.Status = "ACTIVE" o.UserSpace.Create(tx, wsa) } o.Trx.CommitTransaction(tx) response := model.CreateSpaceResponse{} return &response, nil } // IsAdmin is func (o *AdminService) IsAdmin(sc model.ServiceContext, req model.IsAdminRequest) bool { userID, _ := o.ExtractServiceContext(sc) tx := o.Trx.GetDB(false) us := o.UserSpace.GetOne(tx, req.SpaceID, userID) return us.ID != "" && us.Type == "ADMIN" && us.Status == "ACTIVE" } // GenerateInvitationAccount is func (o *AdminService) GenerateInvitationAccount(sc model.ServiceContext, req model.GenerateInvitationAccountRequest) (*model.GenerateInvitationAccountResponse, error) { data := map[string]string{ "SpaceId": req.SpaceID, } token := o.Token.GenerateToken("INVITATION", "APPS", "NEWUSER", data, 24) response := model.GenerateInvitationAccountResponse{ SpaceInvitationToken: token, } return &response, nil } // UpdateAccountStatus is func (o *AdminService) UpdateAccountStatus(sc model.ServiceContext, req model.UpdateStatusRequest) (*model.UpdateStatusResponse, error) { // userID, logInfo := o.ExtractServiceContext(sc) tx := o.Trx.GetDB(true) wsa := o.UserSpace.GetOne(tx, req.SpaceID, req.UserID) wsa.Status = req.Status o.UserSpace.Update(tx, wsa.ID, wsa) o.Trx.CommitTransaction(tx) response := model.UpdateStatusResponse{} return &response, nil } // RemoveAccount is func (o *AdminService) RemoveAccount(sc model.ServiceContext, req model.RemoveAccountRequest) (*model.RemoveAccountResponse, error) { tx := o.Trx.GetDB(true) wsa := o.UserSpace.GetOne(tx, req.SpaceID, req.UserID) o.UserSpace.Delete(tx, wsa.ID) o.Trx.CommitTransaction(tx) response := model.RemoveAccountResponse{} return &response, nil } // 
GetAllAppliedPermission is func (o *AdminService) GetAllAppliedPermission(sc model.ServiceContext, req model.GetAllBasicRequest) ([]model.GetAllAppliedPermissionResponse, uint, error) { return nil, 0, nil } // GrantAppliedPermission is func (o *AdminService) GrantAppliedPermission(sc model.ServiceContext, req model.GrantAppliedPermissionRequest) (*model.GrantAppliedPermissionResponse, error) { return nil, nil } // RefuseAppliedPermission is func (o *AdminService) RefuseAppliedPermission(sc model.ServiceContext, req model.RefuseAppliedPermissionRequest) (*model.RefuseAppliedPermissionResponse, error) { return nil, nil } // GetAllUserRolePermission is func (o *AdminService) GetAllUserRolePermission(sc model.ServiceContext, req model.GetAllUserRolePermissionRequest) (*model.GetAllUserRolePermissionResponse, uint, error) { return nil, 0, nil } // CreateUserRolePermission is func (o *AdminService) CreateUserRolePermission(sc model.ServiceContext, req model.CreateUserRolePermissionRequest) (*model.CreateUserRolePermissionResponse, error) { return nil, nil } // UpdateUserRolePermission is func (o *AdminService) UpdateUserRolePermission(sc model.ServiceContext, req model.UpdateUserRolePermissionRequest) (*model.UpdateUserRolePermissionResponse, error) { return nil, nil } // DeleteUserRolePermission is func (o *AdminService) DeleteUserRolePermission(sc model.ServiceContext, req model.DeleteUserRolePermissionRequest) (*model.DeleteUserRolePermissionResponse, error) { return nil, nil } // GetAllAccountUserRole is func (o *AdminService) GetAllAccountUserRole(sc model.ServiceContext, req model.GetAllAccountUserRoleRequest) (*model.GetAllAccountUserRoleResponse, uint, error) { return nil, 0, nil } // UpdateAccountUserRole is func (o *AdminService) UpdateAccountUserRole(sc model.ServiceContext, req model.UpdateAccountUserRoleRequest) (*model.UpdateAccountUserRoleResponse, error) { return nil, nil }
package Wallet

import (
	"testing"
)

// TestWalletNode is the node id that all wallet tests in this file run against.
const TestWalletNode = "1"

// NOTE(review): these tests exercise real wallet storage for node "1", so they
// are integration tests. The previous versions printed errors with fmt.Print
// and always passed; failures are now reported through *testing.T.

// TestGetWalletByNodeId fetches a single wallet by node id and address.
func TestGetWalletByNodeId(t *testing.T) {
	ws := Wallets{}
	wallet, err := ws.GetWalletByNodeId(TestWalletNode, "1M59oANzychihR3fXJRxQGUqXxDume2KZ2")
	if err != nil {
		t.Fatalf("GetWalletByNodeId: %v", err)
	}
	t.Logf("wallet: %v", wallet)
}

// TestGetWallets loads every wallet stored for the test node.
func TestGetWallets(t *testing.T) {
	wallets, err := GetWallets(TestWalletNode)
	if err != nil {
		t.Fatalf("GetWallets: %v", err)
	}
	t.Logf("wallets: %v", wallets)
}

// TestCreateWallet creates a wallet and logs the generated address.
func TestCreateWallet(t *testing.T) {
	ws := Wallets{}
	address := ws.CreateWallet(TestWalletNode)
	t.Logf("address: %v", address)
}

// TestCreateWallets builds a wallet collection for the test node.
func TestCreateWallets(t *testing.T) {
	// The error was previously discarded with _; surface it instead.
	wallets, err := NewWallets(TestWalletNode)
	if err != nil {
		t.Fatalf("NewWallets: %v", err)
	}
	t.Logf("wallets: %v", wallets)
}
package mysql import ( "database/sql" "errors" "time" ) func CreateDB(db *sql.DB, createDB string) error { _, err := db.Exec(createDB) return err } func CreateTable(db *sql.DB, createTable string) error { _, err :=db.Exec(createTable) return err } func Insert(db *sql.DB, insert string, )
package factogo

import (
	"math/rand"
	"time"
)

const defaultCharset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"

// rng is the package-wide pseudo-random generator, seeded exactly once.
//
// The previous implementation re-seeded a brand-new generator from
// time.Now().UnixNano() on every call, so calls landing in the same clock
// tick produced identical values — ProduceString in particular could return
// one character repeated 20 times. A single shared generator restores
// independent draws.
//
// NOTE(review): *rand.Rand is not safe for concurrent use; callers invoking
// these producers from multiple goroutines need external synchronization.
var rng = rand.New(rand.NewSource(time.Now().UnixNano()))

// seededRand returns the shared, once-seeded random generator.
func seededRand() *rand.Rand {
	return rng
}

/*
ProduceString produces a random 20-character string drawn from defaultCharset.
*/
func ProduceString() string {
	length := 20
	chars := make([]byte, length)
	for i := range chars {
		chars[i] = defaultCharset[seededRand().Intn(len(defaultCharset))]
	}
	return string(chars)
}

/*
ProduceInt produces a random non-negative int number.
*/
func ProduceInt() int {
	return seededRand().Int()
}

/*
ProduceInt8 produces a random int8 number in [0, 126].
(Intn(127) is kept for compatibility: negatives and math.MaxInt8 never occur.)
*/
func ProduceInt8() int8 {
	return int8(seededRand().Intn(127))
}

/*
ProduceInt16 produces a random int16 number in [0, 32766].
*/
func ProduceInt16() int16 {
	return int16(seededRand().Intn(32767))
}

/*
ProduceInt32 produces a random non-negative int32 number.
*/
func ProduceInt32() int32 {
	return seededRand().Int31()
}

/*
ProduceInt64 produces a random non-negative int64 number.
*/
func ProduceInt64() int64 {
	return seededRand().Int63()
}

/*
ProduceUint produces a random uint number.
*/
func ProduceUint() uint {
	return uint(seededRand().Int())
}

/*
ProduceUint8 produces a random uint8 number in [0, 254].
*/
func ProduceUint8() uint8 {
	return uint8(seededRand().Intn(255))
}

/*
ProduceUint16 produces a random uint16 number in [0, 65534].
*/
func ProduceUint16() uint16 {
	return uint16(seededRand().Intn(65535))
}

/*
ProduceUint32 produces a random uint32 number.
*/
func ProduceUint32() uint32 {
	return seededRand().Uint32()
}

/*
ProduceUint64 produces a random uint64 number.
*/
func ProduceUint64() uint64 {
	return seededRand().Uint64()
}

/*
ProduceFloat32 produces a random float32 number in [0.0, 1.0).
*/
func ProduceFloat32() float32 {
	return seededRand().Float32()
}

/*
ProduceFloat64 produces a random float64 number in [0.0, 1.0).
*/
func ProduceFloat64() float64 {
	return seededRand().Float64()
}

/*
ProduceBool produces a random boolean with equal probability.
*/
func ProduceBool() bool {
	// Intn(2) yields 0 or 1. The previous Intn(1) could only ever yield 0,
	// so ProduceBool always returned false.
	return seededRand().Intn(2) == 1
}

/*
ProduceTime produces a time.Time using the time.Now() function.
*/
func ProduceTime() time.Time {
	return time.Now()
}

/*
ProduceStringPointer produces a pointer to a random string.
*/
func ProduceStringPointer() *string {
	val := ProduceString()
	return &val
}

/*
ProduceIntPointer produces a pointer to a random int.
*/
func ProduceIntPointer() *int {
	val := ProduceInt()
	return &val
}

/*
ProduceInt8Pointer produces a pointer to a random int8.
*/
func ProduceInt8Pointer() *int8 {
	val := ProduceInt8()
	return &val
}

/*
ProduceInt16Pointer produces a pointer to a random int16.
*/
func ProduceInt16Pointer() *int16 {
	val := ProduceInt16()
	return &val
}

/*
ProduceInt32Pointer produces a pointer to a random int32.
*/
func ProduceInt32Pointer() *int32 {
	val := ProduceInt32()
	return &val
}

/*
ProduceInt64Pointer produces a pointer to a random int64.
*/
func ProduceInt64Pointer() *int64 {
	val := ProduceInt64()
	return &val
}

/*
ProduceUintPointer produces a pointer to a random uint.
*/
func ProduceUintPointer() *uint {
	val := ProduceUint()
	return &val
}

/*
ProduceUint8Pointer produces a pointer to a random uint8.
*/
func ProduceUint8Pointer() *uint8 {
	val := ProduceUint8()
	return &val
}

/*
ProduceUint16Pointer produces a pointer to a random uint16.
*/
func ProduceUint16Pointer() *uint16 {
	val := ProduceUint16()
	return &val
}

/*
ProduceUint32Pointer produces a pointer to a random uint32.
*/
func ProduceUint32Pointer() *uint32 {
	val := ProduceUint32()
	return &val
}

/*
ProduceUint64Pointer produces a pointer to a random uint64.
*/
func ProduceUint64Pointer() *uint64 {
	val := ProduceUint64()
	return &val
}

/*
ProduceFloat32Pointer produces a pointer to a random float32.
*/
func ProduceFloat32Pointer() *float32 {
	val := ProduceFloat32()
	return &val
}

/*
ProduceFloat64Pointer produces a pointer to a random float64.
*/
func ProduceFloat64Pointer() *float64 {
	val := ProduceFloat64()
	return &val
}

/*
ProduceBoolPointer produces a pointer to a random bool.
*/
func ProduceBoolPointer() *bool {
	val := ProduceBool()
	return &val
}

/*
ProduceTimePointer produces a pointer to a time.Time using the time.Now() function.
*/
func ProduceTimePointer() *time.Time {
	val := ProduceTime()
	return &val
}
// Copyright 2020 The LUCI Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package updater import ( "context" "sort" "sync" "sync/atomic" "testing" "time" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/timestamppb" gerritpb "go.chromium.org/luci/common/proto/gerrit" "go.chromium.org/luci/common/retry/transient" "go.chromium.org/luci/gae/service/datastore" cfgpb "go.chromium.org/luci/cv/api/config/v2" "go.chromium.org/luci/cv/internal/changelist" "go.chromium.org/luci/cv/internal/common" "go.chromium.org/luci/cv/internal/cvtesting" "go.chromium.org/luci/cv/internal/gerrit" gf "go.chromium.org/luci/cv/internal/gerrit/gerritfake" "go.chromium.org/luci/cv/internal/gerrit/gobmap" . "github.com/smartystreets/goconvey/convey" . "go.chromium.org/luci/common/testing/assertions" ) func TestSchedule(t *testing.T) { t.Parallel() Convey("Schedule works", t, func() { ct := cvtesting.Test{} ctx, cancel := ct.SetUp() defer cancel() const lProject = "infra" const gHost = "chromium-review.example.com" const gRepo = "depot_tools" // Each Schedule() moves clock forward by 1ns. // This ensures that SortByETA returns tasks in the same order as scheduled, // and makes tests deterministic w/o having to somehow sort individual proto // messages. 
const scheduleTimeIncrement = time.Nanosecond u := New(ct.TQDispatcher, nil, nil) do := func(t *RefreshGerritCL) []proto.Message { So(u.Schedule(ctx, t), ShouldBeNil) ct.Clock.Add(scheduleTimeIncrement) return ct.TQ.Tasks().SortByETA().Payloads() } doTrans := func(t *RefreshGerritCL) []proto.Message { err := datastore.RunInTransaction(ctx, func(tctx context.Context) error { So(u.Schedule(tctx, t), ShouldBeNil) return nil }, nil) So(err, ShouldBeNil) ct.Clock.Add(scheduleTimeIncrement) return ct.TQ.Tasks().SortByETA().Payloads() } taskMinimal := &RefreshGerritCL{ LuciProject: lProject, Host: gHost, Change: 123, } Convey("Minimal task", func() { So(do(taskMinimal), ShouldResembleProto, []proto.Message{taskMinimal}) Convey("dedup works", func() { So(do(taskMinimal), ShouldResembleProto, []proto.Message{taskMinimal}) Convey("but only within blindRefreshInterval", func() { ct.Clock.Add(blindRefreshInterval - time.Second) // still within So(do(taskMinimal), ShouldResembleProto, []proto.Message{taskMinimal}) ct.Clock.Add(time.Second) // already out So(do(taskMinimal), ShouldResembleProto, []proto.Message{taskMinimal, taskMinimal}) }) }) Convey("transactional can't dedup, even with other transactional", func() { So(doTrans(taskMinimal), ShouldResembleProto, []proto.Message{taskMinimal, taskMinimal}) So(doTrans(taskMinimal), ShouldResembleProto, []proto.Message{taskMinimal, taskMinimal, taskMinimal}) }) Convey("no dedup if different", func() { taskAnother := proto.Clone(taskMinimal).(*RefreshGerritCL) Convey("project", func() { taskAnother.LuciProject = lProject + "2" So(doTrans(taskAnother), ShouldResembleProto, []proto.Message{taskMinimal, taskAnother}) }) Convey("change", func() { taskAnother.Change++ So(doTrans(taskAnother), ShouldResembleProto, []proto.Message{taskMinimal, taskAnother}) }) Convey("host", func() { taskAnother.Host = gHost + "2" So(doTrans(taskAnother), ShouldResembleProto, []proto.Message{taskMinimal, taskAnother}) }) }) }) Convey("CLID hint doesn't 
effect dedup", func() { taskWithHint := proto.Clone(taskMinimal).(*RefreshGerritCL) taskWithHint.ClidHint = 321 do(taskMinimal) So(do(taskWithHint), ShouldResembleProto, []proto.Message{taskMinimal}) }) Convey("ForceNotifyPM is never deduped", func() { taskForce := proto.Clone(taskMinimal).(*RefreshGerritCL) taskForce.ForceNotifyPm = true So(do(taskForce), ShouldResembleProto, []proto.Message{taskForce}) Convey("itself", func() { So(do(taskForce), ShouldResembleProto, []proto.Message{taskForce, taskForce}) }) Convey("task without forceNotifyPM", func() { So(do(taskMinimal), ShouldResembleProto, []proto.Message{taskForce, taskMinimal}) }) Convey("task with updatedHint", func() { taskForceUpdatedHint := proto.Clone(taskForce).(*RefreshGerritCL) taskForceUpdatedHint.UpdatedHint = &timestamppb.Timestamp{Seconds: 1531230000} So(do(taskForceUpdatedHint), ShouldResembleProto, []proto.Message{taskForce, taskForceUpdatedHint}) So(do(taskForceUpdatedHint), ShouldResembleProto, []proto.Message{taskForce, taskForceUpdatedHint, taskForceUpdatedHint}) }) }) Convey("UpdateHint is de-duped with the same UpdatedHint, only", func() { // updatedHint logically has no relationship to now, but realistically it's usually // quite recent. So, use 1 hour ago. 
updatedHintEpoch := ct.Clock.Now().Add(-time.Hour) taskU0 := proto.Clone(taskMinimal).(*RefreshGerritCL) taskU0.UpdatedHint = timestamppb.New(updatedHintEpoch) taskU1 := proto.Clone(taskMinimal).(*RefreshGerritCL) taskU1.UpdatedHint = timestamppb.New(updatedHintEpoch.Add(time.Second)) Convey("transactionally still no dedup", func() { So(doTrans(taskU0), ShouldResembleProto, []proto.Message{taskU0}) So(doTrans(taskU0), ShouldResembleProto, []proto.Message{taskU0, taskU0}) }) Convey("only non-transactionally", func() { So(do(taskU0), ShouldResembleProto, []proto.Message{taskU0}) So(do(taskU0), ShouldResembleProto, []proto.Message{taskU0}) So(do(taskU1), ShouldResembleProto, []proto.Message{taskU0, taskU1}) So(do(taskU1), ShouldResembleProto, []proto.Message{taskU0, taskU1}) So(do(taskMinimal), ShouldResembleProto, []proto.Message{taskU0, taskU1, taskMinimal}) }) }) }) } func TestRelatedChangeProcessing(t *testing.T) { t.Parallel() Convey("setGitDeps works", t, func() { ctx := context.Background() f := fetcher{ change: 111, host: "host", toUpdate: changelist.UpdateFields{ Snapshot: &changelist.Snapshot{Kind: &changelist.Snapshot_Gerrit{Gerrit: &changelist.Gerrit{}}}, }, } Convey("No related changes", func() { f.setGitDeps(ctx, nil) So(f.toUpdate.Snapshot.GetGerrit().GetGitDeps(), ShouldBeNil) f.setGitDeps(ctx, []*gerritpb.GetRelatedChangesResponse_ChangeAndCommit{}) So(f.toUpdate.Snapshot.GetGerrit().GetGitDeps(), ShouldBeNil) }) Convey("Just itself", func() { // This isn't happening today, but CV shouldn't choke if Gerrit changes. f.setGitDeps(ctx, []*gerritpb.GetRelatedChangesResponse_ChangeAndCommit{ gf.RelatedChange(111, 3, 3), // No parents. 
}) So(f.toUpdate.Snapshot.GetGerrit().GetGitDeps(), ShouldBeNil) f.setGitDeps(ctx, []*gerritpb.GetRelatedChangesResponse_ChangeAndCommit{ gf.RelatedChange(111, 3, 3, "107_2"), }) So(f.toUpdate.Snapshot.GetGerrit().GetGitDeps(), ShouldBeNil) }) Convey("Has related, but no deps", func() { f.setGitDeps(ctx, []*gerritpb.GetRelatedChangesResponse_ChangeAndCommit{ gf.RelatedChange(111, 3, 3, "107_2"), gf.RelatedChange(114, 1, 3, "111_3"), gf.RelatedChange(117, 2, 2, "114_1"), }) So(f.toUpdate.Snapshot.GetGerrit().GetGitDeps(), ShouldBeNil) }) Convey("Has related, but lacking this change crbug/1199471", func() { f.setGitDeps(ctx, []*gerritpb.GetRelatedChangesResponse_ChangeAndCommit{ gf.RelatedChange(114, 1, 3, "111_3"), gf.RelatedChange(117, 2, 2, "114_1"), }) So(f.toUpdate.Snapshot.GetErrors(), ShouldHaveLength, 1) So(f.toUpdate.Snapshot.GetErrors()[0].GetCorruptGerritMetadata(), ShouldContainSubstring, "https://crbug.com/1199471") }) Convey("Has related, and several times itself", func() { f.setGitDeps(ctx, []*gerritpb.GetRelatedChangesResponse_ChangeAndCommit{ gf.RelatedChange(111, 2, 2, "107_2"), gf.RelatedChange(111, 3, 3, "107_2"), gf.RelatedChange(114, 1, 3, "111_3"), }) So(f.toUpdate.Snapshot.GetErrors()[0].GetCorruptGerritMetadata(), ShouldContainSubstring, "https://crbug.com/1199471") }) Convey("1 parent", func() { f.setGitDeps(ctx, []*gerritpb.GetRelatedChangesResponse_ChangeAndCommit{ gf.RelatedChange(107, 1, 3, "104_2"), gf.RelatedChange(111, 3, 3, "107_1"), gf.RelatedChange(117, 2, 2, "114_1"), }) So(f.toUpdate.Snapshot.GetGerrit().GetGitDeps(), ShouldResembleProto, []*changelist.GerritGitDep{ {Change: 107, Immediate: true}, }) }) Convey("Diamond", func() { f.setGitDeps(ctx, []*gerritpb.GetRelatedChangesResponse_ChangeAndCommit{ gf.RelatedChange(103, 2, 2), gf.RelatedChange(104, 2, 2, "103_2"), gf.RelatedChange(107, 1, 3, "104_2"), gf.RelatedChange(108, 1, 3, "104_2"), gf.RelatedChange(111, 3, 3, "107_1", "108_1"), gf.RelatedChange(114, 1, 3, "111_3"), 
gf.RelatedChange(117, 2, 2, "114_1"), }) So(f.toUpdate.Snapshot.GetGerrit().GetGitDeps(), ShouldResembleProto, []*changelist.GerritGitDep{ {Change: 107, Immediate: true}, {Change: 108, Immediate: true}, {Change: 104, Immediate: false}, {Change: 103, Immediate: false}, }) }) Convey("Same revision, different changes", func() { c104 := gf.RelatedChange(104, 1, 1, "103_2") c105 := gf.RelatedChange(105, 1, 1, "103_2") c105.GetCommit().Id = c104.GetCommit().GetId() f.setGitDeps(ctx, []*gerritpb.GetRelatedChangesResponse_ChangeAndCommit{ gf.RelatedChange(103, 2, 2), c104, c105, // should be ignored, somewhat arbitrarily. gf.RelatedChange(111, 3, 3, "104_1"), }) So(f.toUpdate.Snapshot.GetGerrit().GetGitDeps(), ShouldResembleProto, []*changelist.GerritGitDep{ {Change: 104, Immediate: true}, {Change: 103, Immediate: false}, }) }) Convey("2 parents which are the same change at different revisions", func() { // Actually happened, see https://crbug.com/988309. f.setGitDeps(ctx, []*gerritpb.GetRelatedChangesResponse_ChangeAndCommit{ gf.RelatedChange(104, 1, 2, "long-ago-merged1"), gf.RelatedChange(107, 1, 1, "long-ago-merged2"), gf.RelatedChange(104, 2, 2, "107_1"), gf.RelatedChange(111, 3, 3, "104_1", "104_2"), }) So(f.toUpdate.Snapshot.GetGerrit().GetGitDeps(), ShouldResembleProto, []*changelist.GerritGitDep{ {Change: 104, Immediate: true}, {Change: 107, Immediate: false}, }) }) }) } func TestUpdateCLWorks(t *testing.T) { t.Parallel() Convey("Updating CL works", t, func() { ct := cvtesting.Test{} ctx, cancel := ct.SetUp() defer cancel() const lProject = "infra" const gHost = "chromium-review.example.com" const gHostInternal = "internal-review.example.com" const gRepo = "depot_tools" ct.Cfg.Create(ctx, lProject, singleRepoConfig(gHost, gRepo)) gobmap.Update(ctx, lProject) task := &RefreshGerritCL{ LuciProject: lProject, Host: gHost, } pm := pmMock{} rm := rmMock{} u := New(ct.TQDispatcher, &pm, &rm) Convey("No access or permission denied", func() { assertDependentMetaOnly := 
func(change int) { cl := getCL(ctx, gHost, change) So(cl.Snapshot, ShouldBeNil) So(cl.ApplicableConfig, ShouldBeNil) So(cl.DependentMeta.GetByProject()[lProject].GetUpdateTime().AsTime(), ShouldResemble, ct.Clock.Now().UTC()) } Convey("after getting error from Gerrit", func() { task.Change = 404 So(u.Refresh(ctx, task), ShouldBeNil) assertDependentMetaOnly(404) task.Change = 403 So(u.Refresh(ctx, task), ShouldBeNil) assertDependentMetaOnly(403) }) Convey("because Gerrit host isn't even watched by the LUCI project", func() { // Add a CL readable to current LUCI project. ci := gf.CI(1, gf.Project(gRepo), gf.Ref("refs/heads/main")) ct.GFake.AddFrom(gf.WithCIs(gHost, gf.ACLPublic(), ci)) client, err := gerrit.CurrentClient(ctx, gHost, lProject) So(err, ShouldBeNil) _, err = client.GetChange(ctx, &gerritpb.GetChangeRequest{Number: 1}) So(err, ShouldBeNil) // But update LUCI project config to stop watching entire host. ct.Cfg.Update(ctx, lProject, singleRepoConfig("other-"+gHost, gRepo)) gobmap.Update(ctx, lProject) task.Change = 1 So(u.Refresh(ctx, task), ShouldBeNil) assertDependentMetaOnly(1) }) }) Convey("Unhandled Gerrit error results in no CL update", func() { ci500 := gf.CI(500, gf.Project(gRepo), gf.Ref("refs/heads/main")) Convey("fail to fetch change details", func() { ct.GFake.AddFrom(gf.WithCIs(gHost, err5xx, ci500)) task.Change = 500 So(u.Refresh(ctx, task), ShouldErrLike, "boo") cl := getCL(ctx, gHost, 500) So(cl, ShouldBeNil) }) Convey("fail to get filelist", func() { ct.GFake.AddFrom(gf.WithCIs(gHost, okThenErr5xx(), ci500)) task.Change = 500 So(u.Refresh(ctx, task), ShouldErrLike, "boo") cl := getCL(ctx, gHost, 500) So(cl, ShouldBeNil) }) }) Convey("CL hint must actually exist", func() { task.Change = 123 task.ClidHint = 848484881 So(u.Refresh(ctx, task), ShouldErrLike, "clidHint 848484881 doesn't refer to an existing CL") }) Convey("Fetch for the first time", func() { ci := gf.CI(123, gf.Project(gRepo), gf.Ref("refs/heads/main"), gf.Files("a.cpp", 
"c/b.py"), gf.Desc("T.\n\nCq-Depend: 101")) ciParent := gf.CI(122, gf.Desc("Z\n\nCq-Depend: must-be-ignored:47")) ciGrandpa := gf.CI(121, gf.Desc("Z\n\nCq-Depend: must-be-ignored:46")) ct.GFake.AddFrom(gf.WithCIs(gHost, gf.ACLPublic(), ci, ciParent, ciGrandpa)) ct.GFake.SetDependsOn(gHost, ci, ciParent) ct.GFake.SetDependsOn(gHost, ciParent, ciGrandpa) task.Change = 123 So(u.Refresh(ctx, task), ShouldBeNil) cl := getCL(ctx, gHost, 123) So(cl.ApplicableConfig.HasOnlyProject(lProject), ShouldBeTrue) So(cl.Snapshot.GetGerrit().GetHost(), ShouldEqual, gHost) So(cl.Snapshot.GetGerrit().Info.GetProject(), ShouldEqual, gRepo) So(cl.Snapshot.GetGerrit().Info.GetRef(), ShouldEqual, "refs/heads/main") So(cl.Snapshot.GetGerrit().GetFiles(), ShouldResemble, []string{"a.cpp", "c/b.py"}) So(cl.Snapshot.GetLuciProject(), ShouldEqual, lProject) So(cl.Snapshot.GetExternalUpdateTime(), ShouldResembleProto, ci.GetUpdated()) So(cl.Snapshot.GetGerrit().GetGitDeps(), ShouldResembleProto, []*changelist.GerritGitDep{ {Change: 122, Immediate: true}, {Change: 121}, }) So(cl.Snapshot.GetGerrit().GetSoftDeps(), ShouldResembleProto, []*changelist.GerritSoftDep{ {Change: 101, Host: gHost}, }) // Each of the dep should have an existing CL + a task schedule. 
expectedDeps := []*changelist.Dep{ {Clid: int64(getCL(ctx, gHost, 122).ID), Kind: changelist.DepKind_HARD}, {Clid: int64(getCL(ctx, gHost, 121).ID), Kind: changelist.DepKind_SOFT}, {Clid: int64(getCL(ctx, gHost, 101).ID), Kind: changelist.DepKind_SOFT}, } sort.Slice(expectedDeps, func(i, j int) bool { return expectedDeps[i].GetClid() < expectedDeps[j].GetClid() }) So(cl.Snapshot.GetDeps(), ShouldResembleProto, expectedDeps) expectedTasks := []*RefreshGerritCL{ { LuciProject: lProject, Host: gHost, Change: 101, ClidHint: int64(getCL(ctx, gHost, 101).ID), }, { LuciProject: lProject, Host: gHost, Change: 121, ClidHint: int64(getCL(ctx, gHost, 121).ID), }, { LuciProject: lProject, Host: gHost, Change: 122, ClidHint: int64(getCL(ctx, gHost, 122).ID), }, } So(sortedRefreshTasks(ct), ShouldResembleProto, expectedTasks) So(pm.popNotifiedProjects(), ShouldResemble, []string{lProject}) // Simulate Gerrit change being updated with +1s timestamp. ct.GFake.MutateChange(gHost, 123, func(c *gf.Change) { c.Info.Updated.Seconds++ }) Convey("Notify IncompleteRuns", func() { rid1 := common.RunID("chromium/111-1-dead") rid2 := common.RunID("chromium/222-1-beef") cl.Mutate(ctx, func(cl *changelist.CL) (updated bool) { cl.IncompleteRuns = []common.RunID{rid1, rid2} return true }) So(datastore.Put(ctx, cl), ShouldBeNil) So(u.Refresh(ctx, task), ShouldBeNil) So(rm.popNotifiedRuns(), ShouldResemble, common.RunIDs{rid1, rid2}) }) Convey("Skips update with updatedHint", func() { task.UpdatedHint = cl.Snapshot.GetExternalUpdateTime() So(u.Refresh(ctx, task), ShouldBeNil) So(getCL(ctx, gHost, 123).EVersion, ShouldEqual, cl.EVersion) So(pm.popNotifiedProjects(), ShouldBeEmpty) Convey("But notifies PM if explicitly asked to do so", func() { task.ForceNotifyPm = true So(u.Refresh(ctx, task), ShouldBeNil) So(getCL(ctx, gHost, 123).EVersion, ShouldEqual, cl.EVersion) // not touched So(pm.popNotifiedProjects(), ShouldResemble, []string{lProject}) }) }) Convey("Don't update iff fetched less recent 
than updatedHint ", func() { // Set expectation that Gerrit serves change with >=+1m timestamp. task.UpdatedHint = timestamppb.New( cl.Snapshot.GetExternalUpdateTime().AsTime().Add(time.Minute), ) err := u.Refresh(ctx, task) So(err, ShouldErrLike, "stale Gerrit data") So(transient.Tag.In(err), ShouldBeTrue) So(getCL(ctx, gHost, 123).EVersion, ShouldEqual, cl.EVersion) So(pm.popNotifiedProjects(), ShouldBeEmpty) }) Convey("Heeds updatedHint and updates the CL", func() { // Set expectation that Gerrit serves change with >=+1ms timestamp. task.UpdatedHint = timestamppb.New( cl.Snapshot.GetExternalUpdateTime().AsTime().Add(time.Millisecond), ) ct.GFake.MutateChange(gHost, 123, func(c *gf.Change) { // Only ChangeInfo but not ListFiles and GetRelatedChanges RPCs should // be called. So, ensure 2+ RPCs return 5xx. c.ACLs = okThenErr5xx() }) So(u.Refresh(ctx, task), ShouldBeNil) cl2 := getCL(ctx, gHost, 123) So(cl2.EVersion, ShouldEqual, cl.EVersion+1) So(cl2.Snapshot.GetExternalUpdateTime().AsTime(), ShouldResemble, cl.Snapshot.GetExternalUpdateTime().AsTime().Add(time.Second)) So(pm.popNotifiedProjects(), ShouldResemble, []string{lProject}) Convey("New revision doesn't re-use files & related changes", func() { // Stay within the same blindRefreshInterval for de-duping refresh // tasks of dependencies. ct.Clock.Add(blindRefreshInterval - 2*time.Second) ct.GFake.MutateChange(gHost, 123, func(c *gf.Change) { c.ACLs = gf.ACLPublic() // Simulate new patchset which no longer has GerritGitDeps. gf.PS(10)(c.Info) gf.Files("z.zz")(c.Info) // 101 is from before, internal:477 is new. 
gf.Desc("T\n\nCq-Depend: 101,internal:477")(c.Info) gf.Updated(ct.Clock.Now())(c.Info) }) task.UpdatedHint = nil So(u.Refresh(ctx, task), ShouldBeNil) cl3 := getCL(ctx, gHost, 123) So(cl3.EVersion, ShouldEqual, cl2.EVersion+1) So(cl3.Snapshot.GetExternalUpdateTime().AsTime(), ShouldResemble, ct.Clock.Now().UTC()) So(cl3.Snapshot.GetGerrit().GetFiles(), ShouldResemble, []string{"z.zz"}) So(cl3.Snapshot.GetGerrit().GetGitDeps(), ShouldBeNil) So(cl3.Snapshot.GetGerrit().GetSoftDeps(), ShouldResembleProto, []*changelist.GerritSoftDep{ {Change: 101, Host: gHost}, {Change: 477, Host: gHostInternal}, }) // For each dep, a task should have been created, but 101 should have // been de-duped with an earlier one. So, only 1 new task for 477: So(sortedRefreshTasks(ct), ShouldResembleProto, append(expectedTasks, &RefreshGerritCL{ LuciProject: lProject, Host: gHostInternal, Change: 477, ClidHint: int64(getCL(ctx, gHostInternal, 477).ID), }, )) So(pm.popNotifiedProjects(), ShouldResemble, []string{lProject}) }) }) Convey("No longer watched", func() { ct.Clock.Add(time.Second) ct.Cfg.Update(ctx, lProject, singleRepoConfig(gHost, "another/repo")) gobmap.Update(ctx, lProject) So(u.Refresh(ctx, task), ShouldBeNil) cl2 := getCL(ctx, gHost, 123) So(cl2.ApplicableConfig, ShouldResembleProto, &changelist.ApplicableConfig{}) So(cl2.EVersion, ShouldEqual, cl.EVersion+1) // Snapshot is preserved (handy, if this is temporal misconfiguration). So(cl2.Snapshot, ShouldResembleProto, cl.Snapshot) // PM is still notified. So(pm.popNotifiedProjects(), ShouldResemble, []string{lProject}) }) Convey("Watched by a diff project", func() { ct.Clock.Add(time.Second) const lProject2 = "proj-2" ct.Cfg.Update(ctx, lProject, singleRepoConfig(gHost, "another repo")) ct.Cfg.Create(ctx, lProject2, singleRepoConfig(gHost, gRepo)) gobmap.Update(ctx, lProject) gobmap.Update(ctx, lProject2) // Use a hint that'd normally prevent an update. 
task.UpdatedHint = cl.Snapshot.GetExternalUpdateTime() task.LuciProject = lProject2 Convey("with access", func() { So(u.Refresh(ctx, task), ShouldBeNil) cl2 := getCL(ctx, gHost, 123) So(cl2.EVersion, ShouldEqual, cl.EVersion+1) So(cl2.Snapshot.GetLuciProject(), ShouldEqual, lProject2) So(cl2.Snapshot.GetExternalUpdateTime(), ShouldResemble, ct.GFake.GetChange(gHost, 123).Info.GetUpdated()) So(cl2.ApplicableConfig.HasOnlyProject(lProject2), ShouldBeTrue) // A different PM is notified. So(pm.popNotifiedProjects(), ShouldResemble, []string{lProject2}) }) Convey("without access", func() { ct.GFake.MutateChange(gHost, 123, func(c *gf.Change) { c.ACLs = gf.ACLRestricted("not-lProject2") }) So(u.Refresh(ctx, task), ShouldBeNil) cl2 := getCL(ctx, gHost, 123) So(cl2.EVersion, ShouldEqual, cl.EVersion+1) // Snapshot is kept as is, including its ExternalUpdateTime. So(cl2.Snapshot, ShouldResembleProto, cl.Snapshot) So(cl2.ApplicableConfig.HasOnlyProject(lProject2), ShouldBeTrue) So(cl2.DependentMeta.GetByProject()[lProject2].GetNoAccess(), ShouldBeTrue) // A different PM is notified anyway. 
So(pm.popNotifiedProjects(), ShouldResemble, []string{lProject2}) }) }) }) Convey("Fetch dep after bare CL was crated", func() { eid, err := changelist.GobID(gHost, 101) So(err, ShouldBeNil) cl, err := eid.GetOrInsert(ctx, func(cl *changelist.CL) {}) So(err, ShouldBeNil) So(cl.EVersion, ShouldEqual, 1) ci := gf.CI(101, gf.Project(gRepo), gf.Ref("refs/heads/main")) ct.GFake.AddFrom(gf.WithCIs(gHost, gf.ACLPublic(), ci)) task.Change = 101 task.ClidHint = int64(cl.ID) So(u.Refresh(ctx, task), ShouldBeNil) cl2 := getCL(ctx, gHost, 101) So(cl2.EVersion, ShouldEqual, 2) changelist.RemoveUnusedGerritInfo(ci) So(cl2.Snapshot.GetGerrit().GetInfo(), ShouldResembleProto, ci) So(pm.popNotifiedProjects(), ShouldResemble, []string{lProject}) }) }) } func getCL(ctx context.Context, host string, change int) *changelist.CL { eid, err := changelist.GobID(host, int64(change)) So(err, ShouldBeNil) cl, err := eid.Get(ctx) if err == datastore.ErrNoSuchEntity { return nil } So(err, ShouldBeNil) return cl } func singleRepoConfig(gHost string, gRepos ...string) *cfgpb.Config { projects := make([]*cfgpb.ConfigGroup_Gerrit_Project, len(gRepos)) for i, gRepo := range gRepos { projects[i] = &cfgpb.ConfigGroup_Gerrit_Project{ Name: gRepo, RefRegexp: []string{"refs/heads/main"}, } } return &cfgpb.Config{ ConfigGroups: []*cfgpb.ConfigGroup{ { Name: "main", Gerrit: []*cfgpb.ConfigGroup_Gerrit{ { Url: "https://" + gHost + "/", Projects: projects, }, }, }, }, } } func err5xx(gf.Operation, string) *status.Status { return status.New(codes.Internal, "boo") } func okThenErr5xx() gf.AccessCheck { calls := int32(0) return func(o gf.Operation, p string) *status.Status { if atomic.AddInt32(&calls, 1) == 1 { return status.New(codes.OK, "") } else { return err5xx(o, p) } } } func sortedRefreshTasks(ct cvtesting.Test) []*RefreshGerritCL { ret := make([]*RefreshGerritCL, 0, len(ct.TQ.Tasks().Payloads())) for _, m := range ct.TQ.Tasks().Payloads() { v, ok := m.(*RefreshGerritCL) if ok { ret = append(ret, v) } } 
sort.SliceStable(ret, func(i, j int) bool { return ret[i].less(ret[j]) }) return ret } func (l *RefreshGerritCL) less(r *RefreshGerritCL) bool { switch { case l.GetHost() < r.GetHost(): return true case l.GetHost() > r.GetHost(): return false case l.GetChange() < r.GetChange(): return true case l.GetChange() > r.GetChange(): return false case l.GetLuciProject() < r.GetLuciProject(): return true case l.GetLuciProject() > r.GetLuciProject(): return false default: return l.GetUpdatedHint().AsTime().Before(r.GetUpdatedHint().AsTime()) } } type pmMock struct { projects []string m sync.Mutex } func (p *pmMock) NotifyCLUpdated(ctx context.Context, project string, cl common.CLID, eversion int) error { p.m.Lock() p.projects = append(p.projects, project) p.m.Unlock() return nil } func (p *pmMock) popNotifiedProjects() (res []string) { p.m.Lock() res, p.projects = p.projects, nil p.m.Unlock() sort.Strings(res) return } type rmMock struct { runs common.RunIDs m sync.Mutex } func (r *rmMock) NotifyCLUpdated(ctx context.Context, rid common.RunID, cl common.CLID, eversion int) error { r.m.Lock() r.runs = append(r.runs, rid) r.m.Unlock() return nil } func (r *rmMock) popNotifiedRuns() (res common.RunIDs) { r.m.Lock() res, r.runs = r.runs, nil r.m.Unlock() sort.Sort(res) return res }
//给定一个整数数组 prices,其中第 i 个元素代表了第 i 天的股票价格 ;prices = [1, 3, 2, 8, 4, 9], fee = 2 //结果:能够达到的最大利润: 该怎么买进卖出 package main import ( "fmt" "math" ) func main(){ prices := []float64{1, 3, 2, 8, 4, 9} fmt.Println(maxProfit(prices,2)) } func maxProfit(prices []float64,fee float64) float64{ n := len(prices) if n<1 { return 0 } buy := -prices[0] cash :=0.0 for i:=1;i<n;i++{ cash = math.Max(cash,float64(buy+prices[i]-fee)) buy = math.Max(buy,cash-prices[i]) } return cash }
package main

import (
	"fmt"
	// "os"
	// "os/user"
	// "go-interpreter/repl"
	"go-interpreter/ast"
	"go-interpreter/token"
)

// main hand-builds a tiny AST and prints it — scratch code for exercising the
// ast/token packages (the REPL wiring below is kept but commented out).
//
// The previous version did not compile: the result of append was discarded
// ("append(...) evaluated but not used"), the identifier i was declared but
// never used, and fmt.Printf was handed a slice instead of a format string.
func main() {
	statements := []ast.Statement{}
	t := token.Token{token.EOF, ""}
	i := &ast.Identifier{t, "justRandom"}
	// Wire the identifier in as the let-statement's name — presumably the
	// original intent, since i was otherwise unused (TODO confirm against ast).
	statement1 := &ast.LetStatement{Token: t, Name: i}
	// append returns the grown slice; it must be reassigned.
	statements = append(statements, statement1)
	// %+v dumps the statements with field names.
	fmt.Printf("%+v\n", statements)

	// user, err := user.Current()
	// if err != nil {
	//	panic(err)
	// }
	// fmt.Printf("Hello %s! This is the Monkey programming language.\n", user.Username)
	// fmt.Printf("Feel free to type in commands\n")
	// repl.Start(os.Stdin, os.Stdout)
}
package tree

import (
	"log"
)

// Cluster is a group of grid Units identified by Cluster_id.
type Cluster struct {
	Cluster_id    int             // cluster identifier
	Num_of_points int             // cached point count; see NOTE in GetCenter about staleness
	ListOfUnits   map[Range]*Unit // member units, keyed by their Range
}

// GetCenter returns the centroid of all points held by the cluster's units.
func (c *Cluster) GetCenter() Point {
	var Center_vec []float64
	for _, unit := range c.ListOfUnits {
		// Lazily size the accumulator from the first point encountered;
		// assumes every point shares the same dimensionality — TODO confirm.
		if len(unit.Points) > 0 && Center_vec == nil {
			for _, p := range unit.Points {
				Center_vec = make([]float64, p.Dim())
				break
			}
		}
		// Accumulate per-coordinate sums over all points in this unit.
		for _, p := range unit.Points {
			for i := 0; i < p.Dim(); i++ {
				Center_vec[i] = Center_vec[i] + p.GetValue(i)
			}
		}
	}
	// NOTE(review): divides by the cached Num_of_points rather than the live
	// count from GetNumberOfPoints(); if the cache is stale or zero this yields
	// a wrong centroid or NaN/Inf components — verify callers keep it in sync.
	for i, _ := range Center_vec {
		Center_vec[i] = Center_vec[i] / float64(c.Num_of_points)
	}
	// With no points at all, Center_vec stays nil and the returned Point
	// carries a nil Vec.
	pc := Point{Vec: Center_vec}
	return pc
}

// GetUnits returns the cluster's unit map (shared, not a copy).
func (c *Cluster) GetUnits() map[Range]*Unit {
	return c.ListOfUnits
}

// GetNumberOfPoints recomputes the live point count by summing over all units
// (unlike the cached Num_of_points field).
func (c *Cluster) GetNumberOfPoints() int {
	tmp := 0
	for _, unit := range c.ListOfUnits {
		tmp += unit.GetNumberOfPoints()
	}
	return tmp
}

// ValidateCluster checks connectivity: every unit of a multi-unit cluster must
// have at least one neighbouring unit that also belongs to the cluster.
// On the first violation it logs the full unit/neighbour layout and returns
// false; a single-unit cluster is always valid.
func ValidateCluster(c Cluster) bool {
	ret := true
	if len(c.ListOfUnits) == 1 {
		ret = true
	} else {
		for _, unit := range c.ListOfUnits {
			neighbour_present := false
			for _, neigh_unit := range unit.GetNeighbouringUnits() {
				_, ok := c.ListOfUnits[neigh_unit.Range]
				if ok {
					neighbour_present = true
					break
				}
			}
			if neighbour_present == false {
				// Dump the whole cluster topology to aid debugging.
				log.Printf("Neighbour not present for cluster: %v \n", c.Cluster_id)
				for _, unit := range c.ListOfUnits {
					log.Printf("Unit Id: %v Range: %+v", unit.Id, unit.Range)
					for _, neigh_unit := range unit.GetNeighbouringUnits() {
						log.Printf("\tUnit Id: %v Range: %+v", neigh_unit.Id, neigh_unit.Range)
					}
					log.Printf("\n")
				}
				ret = false
				break
			}
		}
	}
	return ret
}
package pipeline

import (
	"strconv"
	"strings"

	"github.com/project-flogo/core/data/path"
)

// scopeImpl is a simple map-backed value scope for a pipeline.
type scopeImpl struct {
	values map[string]interface{}
}

// NewPipelineScope returns a scope backed by the given map, or by a fresh
// empty map when input is nil.
func NewPipelineScope(input map[string]interface{}) *scopeImpl {
	if input != nil {
		return &scopeImpl{values: input}
	}
	values := make(map[string]interface{})
	return &scopeImpl{values: values}
}

// GetValue returns the value stored under name and whether it exists.
func (s *scopeImpl) GetValue(name string) (value interface{}, exists bool) {
	val, ok := s.values[name]
	if !ok {
		return nil, false
	}
	return val, true
}

// SetValue stores value under name. A name containing a bracketed accessor
// (e.g. "a['b'][0]") is resolved as a path inside the existing structure.
// Fix: the error from path.SetValue was previously discarded, so failed
// nested assignments were silently reported as success.
func (s *scopeImpl) SetValue(name string, value interface{}) error {
	if strings.Contains(name, "[") {
		return path.SetValue(s.values, getPath(name), value)
	}
	s.values[name] = value
	return nil
}

// getPath converts a bracketed accessor such as "a['b'][0]" into the dotted
// path syntax understood by the path package (".a.b[0]"): numeric segments
// become indices, everything else becomes a dotted key.
func getPath(name string) string {
	var result string
	for _, val := range strings.Split(name, "[") {
		temp := strings.TrimFunc(val, func(r rune) bool {
			return r == '\'' || r == ']'
		})
		if _, err := strconv.Atoi(temp); err == nil {
			result = result + "[" + temp + "]"
		} else {
			result = result + "." + temp
		}
	}
	return result
}
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package validating delegates admission checks to dynamically configured
// validating webhooks.
package validating

import (
	"context"
	"fmt"
	"io"
	"sync"
	"time"

	"github.com/golang/glog"

	admissionv1alpha1 "k8s.io/api/admission/v1alpha1"
	"k8s.io/api/admissionregistration/v1alpha1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apiserver/pkg/admission"
	"k8s.io/apiserver/pkg/admission/configuration"
	genericadmissioninit "k8s.io/apiserver/pkg/admission/initializer"
	"k8s.io/apiserver/pkg/admission/plugin/webhook/config"
	"k8s.io/apiserver/pkg/admission/plugin/webhook/rules"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	corelisters "k8s.io/client-go/listers/core/v1"
)

const (
	// PluginName is the name of this admission plug-in.
	PluginName = "GenericAdmissionWebhook"
)

// Register registers the plugin with the given plugin registry under
// PluginName, constructing it from the admission config file.
func Register(plugins *admission.Plugins) {
	plugins.Register(PluginName, func(configFile io.Reader) (admission.Interface, error) {
		plugin, err := NewGenericAdmissionWebhook(configFile)
		if err != nil {
			return nil, err
		}

		return plugin, nil
	})
}

// WebhookSource can list dynamic webhook plugins.
type WebhookSource interface {
	// Run starts any background refresh of the webhook configuration and
	// returns when stopCh is closed.
	Run(stopCh <-chan struct{})
	// Webhooks returns the current validating webhook configuration.
	Webhooks() (*v1alpha1.ValidatingWebhookConfiguration, error)
}

// NewGenericAdmissionWebhook returns a generic admission webhook plugin.
func NewGenericAdmissionWebhook(configFile io.Reader) (*GenericAdmissionWebhook, error) {
	kubeconfigFile, err := config.LoadConfig(configFile)
	if err != nil {
		return nil, err
	}

	cm, err := config.NewClientManager()
	if err != nil {
		return nil, err
	}
	authInfoResolver, err := config.NewDefaultAuthenticationInfoResolver(kubeconfigFile)
	if err != nil {
		return nil, err
	}
	// Set defaults which may be overridden later.
	cm.SetAuthenticationInfoResolver(authInfoResolver)
	cm.SetServiceResolver(config.NewDefaultServiceResolver())

	return &GenericAdmissionWebhook{
		// The plugin handles mutating-capable operations only.
		Handler: admission.NewHandler(
			admission.Connect,
			admission.Create,
			admission.Delete,
			admission.Update,
		),
		clientManager: cm,
	}, nil
}

// GenericAdmissionWebhook is an implementation of admission.Interface.
type GenericAdmissionWebhook struct {
	*admission.Handler
	hookSource      WebhookSource               // provider of the current webhook configuration
	namespaceLister corelisters.NamespaceLister // cache-backed namespace lookups for selector checks
	client          clientset.Interface         // direct API access, used as a cache-miss fallback
	convertor       runtime.ObjectConvertor     // converts objects to the external version
	creator         runtime.ObjectCreater       // creates empty objects for conversion targets
	clientManager   config.ClientManager        // builds per-webhook REST clients
}

var (
	// Compile-time interface satisfaction check.
	_ = genericadmissioninit.WantsExternalKubeClientSet(&GenericAdmissionWebhook{})
)

// SetAuthenticationInfoResolverWrapper wraps the client manager's
// authentication info resolver.
// TODO find a better way wire this, but keep this pull small for now.
func (a *GenericAdmissionWebhook) SetAuthenticationInfoResolverWrapper(wrapper config.AuthenticationInfoResolverWrapper) {
	a.clientManager.SetAuthenticationInfoResolverWrapper(wrapper)
}

// SetServiceResolver sets a service resolver for the webhook admission plugin.
// Passing a nil resolver does not have an effect, instead a default one will be used.
func (a *GenericAdmissionWebhook) SetServiceResolver(sr config.ServiceResolver) {
	a.clientManager.SetServiceResolver(sr)
}

// SetScheme sets a serializer(NegotiatedSerializer) which is derived from the scheme
func (a *GenericAdmissionWebhook) SetScheme(scheme *runtime.Scheme) {
	if scheme != nil {
		a.clientManager.SetNegotiatedSerializer(serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{
			Serializer: serializer.NewCodecFactory(scheme).LegacyCodec(admissionv1alpha1.SchemeGroupVersion),
		}))
		// The scheme doubles as both convertor and creator.
		a.convertor = scheme
		a.creator = scheme
	}
}

// SetExternalKubeClientSet implements the WantsExternalKubeClientSet interface:
// it stores the client and builds the dynamic webhook-configuration source.
func (a *GenericAdmissionWebhook) SetExternalKubeClientSet(client clientset.Interface) {
	a.client = client
	a.hookSource = configuration.NewValidatingWebhookConfigurationManager(client.AdmissionregistrationV1alpha1().ValidatingWebhookConfigurations())
}

// SetExternalKubeInformerFactory implements the WantsExternalKubeInformerFactory interface.
func (a *GenericAdmissionWebhook) SetExternalKubeInformerFactory(f informers.SharedInformerFactory) {
	namespaceInformer := f.Core().V1().Namespaces()
	a.namespaceLister = namespaceInformer.Lister()
	// The plugin is not ready to admit until the namespace cache has synced.
	a.SetReadyFunc(namespaceInformer.Informer().HasSynced)
}

// ValidateInitialization implements the InitializationValidator interface.
func (a *GenericAdmissionWebhook) ValidateInitialization() error {
	if a.hookSource == nil {
		return fmt.Errorf("the GenericAdmissionWebhook admission plugin requires a Kubernetes client to be provided")
	}
	if a.namespaceLister == nil {
		return fmt.Errorf("the GenericAdmissionWebhook admission plugin requires a namespaceLister")
	}
	if err := a.clientManager.Validate(); err != nil {
		return fmt.Errorf("the GenericAdmissionWebhook.clientManager is not properly setup: %v", err)
	}
	// Start the configuration refresher; it runs for the process lifetime.
	go a.hookSource.Run(wait.NeverStop)
	return nil
}

// loadConfiguration fetches the current webhook configuration. A disabled
// configuration fails open (empty config); any other error is surfaced as a
// retryable server-timeout StatusError.
func (a *GenericAdmissionWebhook) loadConfiguration(attr admission.Attributes) (*v1alpha1.ValidatingWebhookConfiguration, error) {
	hookConfig, err := a.hookSource.Webhooks()
	// if Webhook configuration is disabled, fail open
	if err == configuration.ErrDisabled {
		return &v1alpha1.ValidatingWebhookConfiguration{}, nil
	}
	if err != nil {
		e := apierrors.NewServerTimeout(attr.GetResource().GroupResource(), string(attr.GetOperation()), 1)
		e.ErrStatus.Message = fmt.Sprintf("Unable to refresh the Webhook configuration: %v", err)
		e.ErrStatus.Reason = "LoadingConfiguration"
		e.ErrStatus.Details.Causes = append(e.ErrStatus.Details.Causes, metav1.StatusCause{
			Type:    "ValidatingWebhookConfigurationFailure",
			Message: "An error has occurred while refreshing the ValidatingWebhook configuration, no resources can be created/updated/deleted/connected until a refresh succeeds.",
		})
		return nil, e
	}
	return hookConfig, nil
}

// versionedAttributes overlays externally-versioned objects on top of the
// original admission attributes, so webhooks see the external version.
// TODO: move this object to a common package
type versionedAttributes struct {
	admission.Attributes
	oldObject runtime.Object
	object    runtime.Object
}

// GetObject returns the externally-versioned object.
func (v versionedAttributes) GetObject() runtime.Object {
	return v.object
}

// GetOldObject returns the externally-versioned old object.
func (v versionedAttributes) GetOldObject() runtime.Object {
	return v.oldObject
}

// convertToGVK converts obj to the given group/version/kind via the stored
// creator/convertor.
// TODO: move this method to a common package
func (a *GenericAdmissionWebhook) convertToGVK(obj runtime.Object, gvk schema.GroupVersionKind) (runtime.Object, error) {
	// Unlike other resources, custom resources do not have internal version, so
	// if obj is a custom resource, it should not need conversion.
	if obj.GetObjectKind().GroupVersionKind() == gvk {
		return obj, nil
	}
	out, err := a.creator.New(gvk)
	if err != nil {
		return nil, err
	}
	err = a.convertor.Convert(obj, out, nil)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// Admit makes an admission decision based on the request attributes.
// Matching webhooks are called concurrently; the first rejection (or
// internal failure with FailurePolicy != Ignore) fails the request.
func (a *GenericAdmissionWebhook) Admit(attr admission.Attributes) error {
	hookConfig, err := a.loadConfiguration(attr)
	if err != nil {
		return err
	}
	hooks := hookConfig.Webhooks
	ctx := context.TODO()

	// Select the webhooks whose rules and namespace selector match.
	var relevantHooks []*v1alpha1.Webhook
	for i := range hooks {
		call, err := a.shouldCallHook(&hooks[i], attr)
		if err != nil {
			return err
		}
		if call {
			relevantHooks = append(relevantHooks, &hooks[i])
		}
	}

	if len(relevantHooks) == 0 {
		// no matching hooks
		return nil
	}

	// convert the object to the external version before sending it to the webhook
	versionedAttr := versionedAttributes{
		Attributes: attr,
	}
	if oldObj := attr.GetOldObject(); oldObj != nil {
		out, err := a.convertToGVK(oldObj, attr.GetKind())
		if err != nil {
			return apierrors.NewInternalError(err)
		}
		versionedAttr.oldObject = out
	}
	if obj := attr.GetObject(); obj != nil {
		out, err := a.convertToGVK(obj, attr.GetKind())
		if err != nil {
			return apierrors.NewInternalError(err)
		}
		versionedAttr.object = out
	}

	// Call all relevant webhooks in parallel; errCh is buffered so no
	// goroutine blocks on send.
	wg := sync.WaitGroup{}
	errCh := make(chan error, len(relevantHooks))
	wg.Add(len(relevantHooks))
	for i := range relevantHooks {
		go func(hook *v1alpha1.Webhook) {
			defer wg.Done()
			t := time.Now()
			err := a.callHook(ctx, hook, versionedAttr)
			admission.Metrics.ObserveWebhook(time.Since(t), err != nil, hook, attr)
			if err == nil {
				return
			}
			ignoreClientCallFailures := hook.FailurePolicy != nil && *hook.FailurePolicy == v1alpha1.Ignore
			if callErr, ok := err.(*config.ErrCallingWebhook); ok {
				if ignoreClientCallFailures {
					glog.Warningf("Failed calling webhook, failing open %v: %v", hook.Name, callErr)
					utilruntime.HandleError(callErr)
					// Since we are failing open to begin with, we do not send an error down the channel
					return
				}
				glog.Warningf("Failed calling webhook, failing closed %v: %v", hook.Name, err)
				errCh <- apierrors.NewInternalError(err)
				return
			}
			glog.Warningf("rejected by webhook %q: %#v", hook.Name, err)
			errCh <- err
		}(relevantHooks[i])
	}
	wg.Wait()
	close(errCh)

	var errs []error
	for e := range errCh {
		errs = append(errs, e)
	}
	if len(errs) == 0 {
		return nil
	}
	if len(errs) > 1 {
		for i := 1; i < len(errs); i++ {
			// TODO: merge status errors; until then, just return the first one.
			utilruntime.HandleError(errs[i])
		}
	}
	return errs[0]
}

// getNamespaceLabels returns the labels of the request's namespace.
// TODO: move this method to a common package
func (a *GenericAdmissionWebhook) getNamespaceLabels(attr admission.Attributes) (map[string]string, error) {
	// If the request itself is creating or updating a namespace, then get the
	// labels from attr.Object, because namespaceLister doesn't have the latest
	// namespace yet.
	//
	// However, if the request is deleting a namespace, then get the label from
	// the namespace in the namespaceLister, because a delete request is not
	// going to change the object, and attr.Object will be a DeleteOptions
	// rather than a namespace object.
	if attr.GetResource().Resource == "namespaces" &&
		len(attr.GetSubresource()) == 0 &&
		(attr.GetOperation() == admission.Create || attr.GetOperation() == admission.Update) {
		accessor, err := meta.Accessor(attr.GetObject())
		if err != nil {
			return nil, err
		}
		return accessor.GetLabels(), nil
	}
	namespaceName := attr.GetNamespace()
	namespace, err := a.namespaceLister.Get(namespaceName)
	if err != nil && !apierrors.IsNotFound(err) {
		return nil, err
	}
	if apierrors.IsNotFound(err) {
		// in case of latency in our caches, make a call direct to storage to verify that it truly exists or not
		namespace, err = a.client.Core().Namespaces().Get(namespaceName, metav1.GetOptions{})
		if err != nil {
			return nil, err
		}
	}
	return namespace.Labels, nil
}

// exemptedByNamespaceSelector reports
// whether the request is exempted by the webhook because of the
// namespaceSelector of the webhook.
// TODO: move this method to a common package
func (a *GenericAdmissionWebhook) exemptedByNamespaceSelector(h *v1alpha1.Webhook, attr admission.Attributes) (bool, *apierrors.StatusError) {
	namespaceName := attr.GetNamespace()
	if len(namespaceName) == 0 && attr.GetResource().Resource != "namespaces" {
		// If the request is about a cluster scoped resource, and it is not a
		// namespace, it is exempted from all webhooks for now.
		// TODO: figure out a way selective exempt cluster scoped resources.
		// Also update the comment in types.go
		return true, nil
	}
	namespaceLabels, err := a.getNamespaceLabels(attr)
	// this means the namespace is not found, for backwards compatibility,
	// return a 404
	if apierrors.IsNotFound(err) {
		status, ok := err.(apierrors.APIStatus)
		if !ok {
			return false, apierrors.NewInternalError(err)
		}
		return false, &apierrors.StatusError{status.Status()}
	}
	if err != nil {
		return false, apierrors.NewInternalError(err)
	}
	// TODO: adding an LRU cache to cache the translation
	selector, err := metav1.LabelSelectorAsSelector(h.NamespaceSelector)
	if err != nil {
		return false, apierrors.NewInternalError(err)
	}
	// Exempt when the namespace does NOT match the webhook's selector.
	return !selector.Matches(labels.Set(namespaceLabels)), nil
}

// shouldCallHook reports whether the webhook's rules match the request and
// the namespace selector does not exempt it.
// TODO: move this method to a common package
func (a *GenericAdmissionWebhook) shouldCallHook(h *v1alpha1.Webhook, attr admission.Attributes) (bool, *apierrors.StatusError) {
	var matches bool
	for _, r := range h.Rules {
		m := rules.Matcher{Rule: r, Attr: attr}
		if m.Matches() {
			matches = true
			break
		}
	}
	if !matches {
		return false, nil
	}

	excluded, err := a.exemptedByNamespaceSelector(h, attr)
	if err != nil {
		return false, err
	}
	return !excluded, nil
}

// callHook posts an AdmissionReview to the webhook and maps a disallowed
// response to a StatusError. Transport-level failures are wrapped in
// ErrCallingWebhook so the caller can apply the failure policy.
func (a *GenericAdmissionWebhook) callHook(ctx context.Context, h *v1alpha1.Webhook, attr admission.Attributes) error {
	// Make the webhook request
	request := createAdmissionReview(attr)
	client, err := a.clientManager.HookClient(h)
	if err != nil {
		return &config.ErrCallingWebhook{WebhookName: h.Name, Reason: err}
	}
	response := &admissionv1alpha1.AdmissionReview{}
	if err := client.Post().Context(ctx).Body(&request).Do().Into(response); err != nil {
		return &config.ErrCallingWebhook{WebhookName: h.Name, Reason: err}
	}

	if response.Status.Allowed {
		return nil
	}

	return toStatusErr(h.Name, response.Status.Result)
}

// toStatusErr returns a StatusError with information about the webhook controller
// TODO: move this function to a common package
func toStatusErr(name string, result *metav1.Status) *apierrors.StatusError {
	deniedBy := fmt.Sprintf("admission webhook %q denied the request", name)
	const noExp = "without explanation"

	if result == nil {
		result = &metav1.Status{Status: metav1.StatusFailure}
	}

	// Prefix the webhook's own message/reason, or note the lack of one.
	switch {
	case len(result.Message) > 0:
		result.Message = fmt.Sprintf("%s: %s", deniedBy, result.Message)
	case len(result.Reason) > 0:
		result.Message = fmt.Sprintf("%s: %s", deniedBy, result.Reason)
	default:
		result.Message = fmt.Sprintf("%s %s", deniedBy, noExp)
	}

	return &apierrors.StatusError{
		ErrStatus: *result,
	}
}
// Copyright © Copyright 2020 Orion Labs, Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//        http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
	"fmt"
	"github.com/orion-labs/certinator/pkg/certinator"
	"github.com/spf13/cobra"
	"log"
)

func init() {
	CertCmd.AddCommand(CertListCmd)
}

// CertListCmd lists the certificates issued by a CA. The CA name comes from
// the caName flag or, when unset, from the first positional argument.
var CertListCmd = &cobra.Command{
	Use:   "list",
	Short: "List Certificates",
	Long: `
List Certificates
`,
	Run: func(cmd *cobra.Command, args []string) {
		c, err := certinator.NewCertinator(verbose)
		if err != nil {
			log.Fatalf("Error creating Certinator: %s", err)
		}

		// Fall back to the first positional argument when the flag is empty.
		if len(args) > 0 {
			if caName == "" {
				caName = args[0]
			}
		}

		// Warn (but do not stop) when running under the Vault root token.
		roottoken, err := c.UsingRootToken()
		if err != nil {
			log.Fatalf("failed checking own token: %s", err)
		}

		if roottoken {
			fmt.Print("You are currently using the root token.  You should not be doing this unless it's really necessary.\n\n")
		}

		certs, err := c.ListCerts(caName)
		if err != nil {
			log.Fatalf("error listing certificates on CA %s: %s", caName, err)
		}

		if len(certs) == 0 {
			fmt.Printf("No Certificates created in CA %s\n", caName)
			return
		}

		fmt.Printf("Certificates in CA %s:\n", caName)
		for _, c := range certs {
			fmt.Printf("  %s\n", c)
		}
	},
}
package tydaapi

import (
	"fmt"
	"net/url"
	"strings"

	"github.com/PuerkitoBio/goquery"
)

// Languages is a map from short to long form of languages
var Languages = map[string]string{
	"sv": "Svenska",
	"en": "Engelska",
	"fr": "Franska",
	"de": "Tyska",
	"es": "Spanska",
	"la": "Latin",
	"nb": "Norska",
}

// Search makes a search on the tyda.se webpage and parses the result
// into the Response structure. Returns a response and nil, or nil and an error.
// Languages parameter uses the short form, eg. sv, en es.
//
// Fixes: the error from BuildURL was previously shadowed and never checked,
// and a fetch failure called log.Fatal (terminating the whole process from
// library code) instead of returning the error to the caller.
func Search(query string, languages []string) (*Response, error) {
	u, err := BuildURL(query, languages)
	if err != nil {
		return nil, err
	}

	// Fetch and parse the search result page.
	doc, err := goquery.NewDocument(u.String())
	if err != nil {
		return nil, err
	}

	// Limit selection on page
	res := doc.Find(".box-searchresult").First()

	ret := Parse(res)

	return ret, nil
}

// Parse takes a response and a goquery selection and parses data from the
// selection into the response structure.
func Parse(doc *goquery.Selection) *Response {
	response := &Response{}

	setSearchTermAndPronunciationURL(response, doc)
	setLanguage(response, doc)
	setConjugations(response, doc)
	setWordClass(response, doc)
	setTranslations(response, doc)
	setSynonyms(response, doc)

	return response
}

// BuildURL retuns a URL for tyda.se with correct format
func BuildURL(query string, languages []string) (*url.URL, error) {
	u, err := url.Parse("http://tyda.se/search/" + query)
	if err != nil {
		return nil, err
	}

	// Encode each requested language as lang[i]=<code>.
	q := u.Query()
	for i, v := range languages {
		q.Set(fmt.Sprintf("lang[%d]", i), v)
	}
	u.RawQuery = q.Encode()

	return u, nil
}

// setLanguage derives the result language from the id of the first h2
// element (e.g. "sv-...") via the Languages map.
func setLanguage(ret *Response, res *goquery.Selection) {
	id, _ := res.Find("h2").First().Attr("id")
	spl := strings.Split(id, "-")
	ret.Language = Languages[spl[0]]
}

// setSearchTermAndPronunciationURL extracts the searched word and, when a
// speaker link is present, an absolute pronunciation URL.
func setSearchTermAndPronunciationURL(ret *Response, res *goquery.Selection) {
	h2 := res.Find("h2").First()
	s := h2.Find("b").First()
	ret.SearchTerm = strings.TrimSpace(s.Text())

	p, _ := h2.Find(".speaker").First().Attr("href")
	if p != "" {
		ret.PronunciationURL = "http://tyda.se" + p
	}
}

// setConjugations collects all non-missing conjugation entries.
func setConjugations(ret *Response, res *goquery.Selection) {
	conjugations := res.Find(".conjugation")
	conjugations.Each(func(i int, s *goquery.Selection) {
		if !s.HasClass("missing") {
			ret.Conjugations = append(ret.Conjugations, strings.TrimSpace(s.Text()))
		}
	})
}

// setWordClass extracts the word class (noun, verb, ...) of the result.
func setWordClass(ret *Response, res *goquery.Selection) {
	w := res.Find(".word-class").First()
	ret.WordClass = strings.TrimSpace(w.Text())
}

// setTranslations walks the translation lists, grouping words under the
// language announced by each preceding item-title element.
func setTranslations(ret *Response, res *goquery.Selection) {
	var translations []Translation
	var t Translation

	res.Find(".capsulated-content").Each(func(i int, c *goquery.Selection) {
		c.Find(".list-translations").Each(func(i int, tr *goquery.Selection) {
			tr.Find(".item").Each(func(i int, s *goquery.Selection) {
				if s.HasClass("item-title") {
					// A new language section starts; flush the previous one.
					if t.Language != "" {
						d := c.Find(".description").First()
						t.Description = strings.TrimSpace(d.Text())
						translations = append(translations, t)
					}
					t = Translation{Language: strings.TrimSpace(s.Text()), Words: make([]Word, 0)}
				} else {
					w := Word{Value: s.Find("a").First().Text()}
					w.Context = strings.Trim(s.Find(".trans-desc").First().Text(), " \n\t[]")
					p, _ := s.Find(".speaker").First().Attr("href")
					if p != "" {
						w.PronunciationURL = "http://tyda.se" + p
					}
					w.DictionaryURL, _ = s.Find(".mm").First().Attr("href")
					t.Words = append(t.Words, w)
				}
			})
		})
	})
	// Flush the final in-progress section.
	if t.Language != "" {
		translations = append(translations, t)
	}

	ret.Translations = translations
}

// setSynonyms collects the synonym list, including each word's context.
func setSynonyms(ret *Response, res *goquery.Selection) {
	sy := res.Find(".list-synonyms").First()

	var synonyms []Word
	sy.Find(".item").Each(func(i int, s *goquery.Selection) {
		w := Word{Value: s.Find("a").First().Text()}
		w.Context = strings.Trim(s.Find(".syn-desc").First().Text(), " \n\t[]")
		synonyms = append(synonyms, w)
	})

	ret.Synonyms = synonyms
}
package leetcode_go func twoSum(nums []int, target int) []int { m := make(map[int]int) for idx, num := range nums { if anotherIdx, ok := m[num]; ok { return []int{idx, anotherIdx} } m[target-num] = idx } return []int{} }
package utils

import (
	"log"
	"regexp"
)

// Str2Uint parses a string consisting solely of decimal digits into a
// non-negative int. Any non-digit character aborts the program via
// log.Fatal.
func Str2Uint(str string) int {
	n := 0
	for _, c := range str {
		if c < '0' || c > '9' {
			log.Fatal("this string is not a pure num")
			return 0
		}
		n = n*10 + int(c-'0')
	}
	return n
}

// RegFind matches pattern against targetString and returns the submatch
// groups selected by index (0 is the full match). When the pattern does not
// match, an empty (non-nil) slice is returned.
func RegFind(pattern, targetString string, index ...int) []string {
	out := make([]string, 0)
	groups := regexp.MustCompile(pattern).FindStringSubmatch(targetString)
	if len(groups) == 0 {
		return out
	}
	for _, i := range index {
		out = append(out, groups[i])
	}
	return out
}
package prov

import (
	"fmt"
	"io"
)

// AccessedPath is one fact row describing a path touched by a run, coming
// either from an execution or from a file open.
type AccessedPath struct {
	AccessID  string // id produced by E() for execs or O() for opens
	RunID     int64
	Path      string // path with the working-directory prefix trimmed
	PathIndex int64
	PathRole  string
}

// GetAccessedPaths flattens executions and file opens into AccessedPath
// rows. NOTE(review): the error from PathIndex is deliberately ignored in
// both loops — confirm PathIndex cannot fail for recorded names.
func GetAccessedPaths(executed []Execution, opens []FileOpen) []AccessedPath {
	var accessed []AccessedPath
	for _, e := range executed {
		fileIndex, _ := PathIndex(e.Name)
		runID := e.RunID
		path := TrimWorkingDirPrefix(e.Name)
		role := Role(path)
		f := AccessedPath{E(e.ExecID), runID, path, fileIndex, role}
		accessed = append(accessed, f)
	}
	for _, o := range opens {
		fileIndex, _ := PathIndex(o.Name)
		runID := o.RunID
		path := TrimWorkingDirPrefix(o.Name)
		role := Role(path)
		f := AccessedPath{O(o.OpenID), runID, path, fileIndex, role}
		accessed = append(accessed, f)
	}
	return accessed
}

// WriteAccessedPathFacts writes a header row followed by one
// wt_accessed_path fact per accessed path.
func WriteAccessedPathFacts(w io.Writer, accessed []AccessedPath) {
	printRowHeader(w, "wt_accessed_path(AccessID, RunId, Path, PathIndex, PathRole).")
	for _, f := range accessed {
		fmt.Fprintln(w, f)
	}
}

// String renders the row as a wt_accessed_path/5 Prolog-style fact, using
// the R/Q/I helpers to format run id, quoted path, and index.
func (f AccessedPath) String() string {
	return fmt.Sprintf("wt_accessed_path(%s, %s, %s, %s, %s).", f.AccessID, R(f.RunID), Q(f.Path), I(f.PathIndex), f.PathRole)
}
package content

import (
	"io"

	"github.com/webnice/transport/v3/data"

	"golang.org/x/text/encoding"
)

// Interface is an interface of package
type Interface interface {
	io.WriterTo

	// Transcode is an transcoding content from the specified encoding to UTF-8
	Transcode(e encoding.Encoding) Interface

	// Transform is an transforming content using a custom function
	Transform(fn TransformFunc) Interface

	// String Return content as string
	String() (string, error)

	// Bytes Return content as []byte
	Bytes() ([]byte, error)

	// UnmarshalJSON Decoding content like JSON
	UnmarshalJSON(o interface{}) error

	// UnmarshalXML Decoding content like XML
	UnmarshalXML(o interface{}) error

	// UnTar Unpack the content as a TAR archive
	UnTar() Interface

	// UnZip Unpack the content as a ZIP archive (only the first file is extracted)
	UnZip() Interface

	// UnGzip Decompress the content with GZIP
	UnGzip() Interface

	// UnFlate Decompress the content with FLATE
	UnFlate() Interface

	// BackToBegin Returns the content reading pointer to the beginning
	// This allows you to repeat the work with content
	BackToBegin() error
}

// TransformFunc is an func for streaming content conversion
type TransformFunc func(r io.Reader) (io.Reader, error)

// impl is an implementation of package
type impl struct {
	esence    data.ReadAtSeekerWriteToCloser // Underlying content data
	rdc       io.ReadCloser                  // Reader interface over the content
	transcode encoding.Encoding              // When non-nil, content is transcoded on the fly from this encoding
	transform TransformFunc                  // Streaming content transformation function
	unzip     bool                           // =true - content is decompressed with the ZIP algorithm, the first file in the archive is returned
	untar     bool                           // =true - content is unpacked with the TAR algorithm
	ungzip    bool                           // =true - content is decompressed with the GZIP algorithm
	unflate   bool                           // =true - content is decompressed with the FLATE algorithm
}
package main

import (
	"log"

	"github.com/gin-gonic/gin"
)

// main starts a gin server with a single GET /gin endpoint that returns a
// greeting as JSON.
func main() {
	g := gin.Default()
	g.GET("/gin", func(context *gin.Context) {
		context.JSON(200, gin.H{"message": "hello gin"})
	})
	// Fix: g.Run's error was silently discarded; a failed bind (e.g. port
	// already in use) now terminates with a diagnostic.
	if err := g.Run(); err != nil {
		log.Fatalf("server exited: %v", err)
	}
}
package mail

import (
	"fmt"
	"gopkg.in/gomail.v2"
	"time"
)

// SMTPConfig holds connection settings for an SMTP server.
type SMTPConfig struct {
	Host     string
	Port     int
	Username string
	Password string
	SSL      bool // NOTE(review): not referenced by Send in this file — confirm whether it should configure the dialer
}

// SMTPSender sends messages over a reused SMTP session.
type SMTPSender struct {
	config SMTPConfig
}

// NewSMTPSender returns a sender configured with the given SMTP settings.
func NewSMTPSender(config SMTPConfig) *SMTPSender {
	return &SMTPSender{config}
}

// Send consumes messages from the channel in a background goroutine, sends
// each over SMTP, and reports per-message outcomes on results. The session
// is re-dialed after any send error and every 20 messages; results is closed
// once the input channel is drained. The returned error is always nil.
func (m *SMTPSender) Send(messages <-chan Message, results chan<- MessageProcessingResult) error {
	go func() {
		d := gomail.NewDialer(m.config.Host, m.config.Port, m.config.Username, m.config.Password)

		s, err := d.Dial()
		if err != nil {
			// NOTE(review): on dial failure s is nil but the loop below still
			// calls gomail.Send(s, ...) — sends will fail (or panic) until a
			// reconnect succeeds. Consider aborting or retrying here.
			fmt.Printf("error on connect %v", err)
		}

		counter := 0
		for msg := range messages {
			//fmt.Printf("msg: %+v\n", msg)
			err := gomail.Send(s, gomailMessage(msg))
			results <- MessageProcessingResult{
				msg.MailingID,
				msg.To,
				err,
			}

			counter++

			// Re-dial after an error, and periodically every 20 messages, to
			// avoid keeping a stale SMTP session.
			if err != nil || counter%20 == 0 {
				fmt.Printf("reconnect ...\n")
				s.Close()
				time.Sleep(time.Second)
				d = gomail.NewDialer(m.config.Host, m.config.Port, m.config.Username, m.config.Password)
				s, err = d.Dial()
				if err != nil {
					fmt.Printf("error on reconnect %v\n", err)
				}
			}

			// Throttle outgoing messages.
			time.Sleep(150 * time.Millisecond)
		}

		close(results)
		s.Close()
	}()

	return nil
}

// gomailMessage converts a Message into a *gomail.Message, setting the
// plain-text body and, when present, an HTML alternative.
func gomailMessage(message Message) *gomail.Message {
	m := gomail.NewMessage()
	m.SetHeader("From", message.From)
	m.SetHeader("To", message.To)
	m.SetHeader("Subject", message.Subject)
	if message.TextBody != "" {
		m.SetBody("text/plain", message.TextBody)
	}
	if message.HtmlBody != "" {
		m.AddAlternative("text/html", message.HtmlBody)
	}
	return m
}
package routers

import (
	"github.com/gin-gonic/gin"
	"go-admin-starter/controllers/api"
	"go-admin-starter/controllers/api/v1"
	"go-admin-starter/middleware/authrole"
	"go-admin-starter/middleware/jwt"
)

// initApiRouter wires up the /api route tree: public auth endpoints, the
// JWT-protected /api/v1 resource routes, and the role-checked /api/admin
// routes. Returns the same engine for chaining.
func initApiRouter(r *gin.Engine) *gin.Engine {
	apiGroup := r.Group("/api")
	{
		apiGroup.POST("/auth", api.GetAuth)
		apiGroup.POST("/register", api.Register)
		apiGroup.POST("/refreshToken", api.RefreshToken) // shared by front-end and admin

		apiv1 := apiGroup.Group("/v1")
		apiv1.Use(jwt.JWTAuth())
		{
			// list tags
			apiv1.GET("/tags", v1.GetTags)
			// create a tag
			apiv1.POST("/tags", v1.AddTag)
			// update the given tag
			apiv1.PUT("/tags/:id", v1.EditTag)
			// delete the given tag
			apiv1.DELETE("/tags/:id", v1.DeleteTag)

			// list articles
			apiv1.GET("/articles", v1.GetArticles)
			// get a single article
			apiv1.GET("/articles/:id", v1.GetArticle)
			// create an article
			apiv1.POST("/articles", v1.AddArticle)
			// update the given article
			apiv1.PUT("/articles/:id", v1.EditArticle)
			// delete the given article
			apiv1.DELETE("/articles/:id", v1.DeleteArticle)
		}

		// admin management API
		admin := apiGroup.Group("/admin")
		admin.POST("/auth", api.AdminGetAuth)
		admin.Use(jwt.JWTAuth())
		admin.Use(authrole.AuthCheckRole())
		{
			admin.POST("/addrole", v1.AddCasbin) // add a role permission (casbin)
		}
	}

	return r
}
package server

import (
	"net/http"
	pprof "net/http/pprof"

	"github.com/cnrancher/autok3s/pkg/server/ui"

	"github.com/gorilla/mux"
	responsewriter "github.com/rancher/apiserver/pkg/middleware"
	"github.com/rancher/apiserver/pkg/server"
	"github.com/rancher/apiserver/pkg/store/apiroot"
	"github.com/rancher/apiserver/pkg/types"
)

// Start builds and returns the HTTP handler for the autok3s API server: the
// schema-driven API routes, the embedded UI under /ui/, and the pprof
// profiling endpoints under /debug/pprof/.
//
// Fix: the net/http/pprof import was commented out while the code below
// references pprof.Index, pprof.Handler, etc., which does not compile.
func Start() http.Handler {
	s := server.DefaultAPIServer()
	initMutual(s.Schemas)
	initProvider(s.Schemas)
	initCluster(s.Schemas)
	initCredential(s.Schemas)
	initKubeconfig(s.Schemas)
	initLogs(s.Schemas)
	initTemplates(s.Schemas)
	apiroot.Register(s.Schemas, []string{"v1"})
	router := mux.NewRouter()
	router.UseEncodedPath()
	router.StrictSlash(true)

	middleware := responsewriter.Chain{
		responsewriter.Gzip,
		responsewriter.DenyFrameOptions,
		responsewriter.CacheMiddleware("json", "js", "css", "svg", "png", "woff", "woff2"),
		ui.ServeNotFound,
	}

	// UI assets, with / redirecting to /ui/.
	router.PathPrefix("/ui/").Handler(middleware.Handler(http.StripPrefix("/ui/", ui.Serve())))
	router.Path("/").HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		http.Redirect(rw, req, "/ui/", http.StatusFound)
	})

	// profiling handlers for pprof under /debug/pprof
	router.HandleFunc("/debug/pprof/", pprof.Index)
	router.HandleFunc("/debug/pprof/trace", pprof.Trace)
	router.HandleFunc("/debug/pprof/profile", pprof.Profile)
	router.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
	router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)

	// Manually add support for paths linked to by index page at /debug/pprof/
	router.Handle("/debug/pprof/goroutine", pprof.Handler("goroutine"))
	router.Handle("/debug/pprof/heap", pprof.Handler("heap"))
	router.Handle("/debug/pprof/threadcreate", pprof.Handler("threadcreate"))
	router.Handle("/debug/pprof/block", pprof.Handler("block"))
	router.Handle("/debug/pprof/mutex", pprof.Handler("mutex"))

	// Schema-driven API routes; unknown paths fall through to the apiRoot
	// handler.
	router.Path("/{prefix}/{type}").Handler(s)
	router.Path("/{prefix}/{type}/{name}").Queries("link", "{link}").Handler(s)
	router.Path("/{prefix}/{type}/{name}").Queries("action", "{action}").Handler(s)
	router.Path("/{prefix}/{type}/{name}").Handler(s)
	router.NotFoundHandler = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		s.Handle(&types.APIRequest{
			Request:   r,
			Response:  rw,
			Type:      "apiRoot",
			URLPrefix: "v1",
		})
	})

	return router
}
package store // PlanRepository specifies the possible interactions with Plan records type PlanRepository interface { Get(id int) (Plan, error) } // SimplePlanRepository implements a simple, in-memory PlanRepository type SimplePlanRepository struct { store *MemoryStore } // NewSimplePlanRepository is a constructor for SimplePlanRepository func NewSimplePlanRepository(store *MemoryStore) PlanRepository { return &SimplePlanRepository{store: store} } // Get retrieves a Plan by ID or returns an empty Plan and an error if it can't be found func (r *SimplePlanRepository) Get(id int) (Plan, error) { return r.store.Plans.get(id) }
package main

import (
	"fmt"
	"log"
	"os"
)

// main reads text.txt and prints it twice: first as the raw byte values,
// then decoded as a string, illustrating the difference between the two
// representations.
func main() {
	// os.ReadFile replaces the deprecated io/ioutil.ReadFile (Go 1.16+);
	// behavior is identical.
	fileBytes, err := os.ReadFile("text.txt")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(fileBytes)

	fileString := string(fileBytes)

	fmt.Println(fileString)
}
// Copyright 2020 The Moov Authors // Use of this source code is governed by an Apache License // license that can be found in the LICENSE file. package pacs_v09 import ( "reflect" "github.com/moov-io/iso20022/pkg/utils" ) // Must be at least 1 items long type ExternalAccountIdentification1Code string func (r ExternalAccountIdentification1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalAccountIdentification1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalCashAccountType1Code string func (r ExternalCashAccountType1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalCashAccountType1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalCashClearingSystem1Code string func (r ExternalCashClearingSystem1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 3 { return utils.NewErrTextLengthInvalid("ExternalCashClearingSystem1Code", 1, 3) } return nil } // Must be at least 1 items long type ExternalCategoryPurpose1Code string func (r ExternalCategoryPurpose1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalCategoryPurpose1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalClearingSystemIdentification1Code string func (r ExternalClearingSystemIdentification1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 5 { return utils.NewErrTextLengthInvalid("ExternalClearingSystemIdentification1Code", 1, 5) } return nil } // Must be at least 1 items long type ExternalCreditorAgentInstruction1Code string func (r ExternalCreditorAgentInstruction1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalCreditorAgentInstruction1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalDiscountAmountType1Code string func 
(r ExternalDiscountAmountType1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalDiscountAmountType1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalDocumentLineType1Code string func (r ExternalDocumentLineType1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalDocumentLineType1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalFinancialInstitutionIdentification1Code string func (r ExternalFinancialInstitutionIdentification1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalFinancialInstitutionIdentification1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalGarnishmentType1Code string func (r ExternalGarnishmentType1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalGarnishmentType1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalLocalInstrument1Code string func (r ExternalLocalInstrument1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 35 { return utils.NewErrTextLengthInvalid("ExternalLocalInstrument1Code", 1, 35) } return nil } // Must be at least 1 items long type ExternalMandateSetupReason1Code string func (r ExternalMandateSetupReason1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalMandateSetupReason1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalOrganisationIdentification1Code string func (r ExternalOrganisationIdentification1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalOrganisationIdentification1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalPersonIdentification1Code string func (r 
ExternalPersonIdentification1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalPersonIdentification1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalProxyAccountType1Code string func (r ExternalProxyAccountType1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalProxyAccountType1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalPurpose1Code string func (r ExternalPurpose1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalPurpose1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalServiceLevel1Code string func (r ExternalServiceLevel1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalServiceLevel1Code", 1, 4) } return nil } // Must be at least 1 items long type ExternalTaxAmountType1Code string func (r ExternalTaxAmountType1Code) Validate() error { if len(string(r)) < 1 || len(string(r)) > 4 { return utils.NewErrTextLengthInvalid("ExternalTaxAmountType1Code", 1, 4) } return nil } // May be one of RADM, RPIN, FXDR, DISP, PUOR, SCOR type DocumentType3Code string func (r DocumentType3Code) Validate() error { for _, vv := range []string{ "RADM", "RPIN", "FXDR", "DISP", "PUOR", "SCOR", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("DocumentType3Code") } // May be one of MSIN, CNFA, DNFA, CINV, CREN, DEBN, HIRI, SBIN, CMCN, SOAC, DISP, BOLD, VCHR, AROI, TSUT, PUOR type DocumentType6Code string func (r DocumentType6Code) Validate() error { for _, vv := range []string{ "MSIN", "CNFA", "DNFA", "CINV", "CREN", "DEBN", "HIRI", "SBIN", "CMCN", "SOAC", "DISP", "BOLD", "VCHR", "AROI", "TSUT", "PUOR", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("DocumentType6Code") 
} // May be one of YEAR, MNTH, QURT, MIAN, WEEK, DAIL, ADHO, INDA, FRTN type Frequency6Code string func (r Frequency6Code) Validate() error { for _, vv := range []string{ "YEAR", "MNTH", "QURT", "MIAN", "WEEK", "DAIL", "ADHO", "INDA", "FRTN", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("Frequency6Code") } // May be one of PHOA, TELA type Instruction4Code string func (r Instruction4Code) Validate() error { for _, vv := range []string{ "PHOA", "TELA", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("Instruction4Code") } // May be one of LETT, MAIL, PHON, FAXX, CELL type PreferredContactMethod1Code string func (r PreferredContactMethod1Code) Validate() error { for _, vv := range []string{ "LETT", "MAIL", "PHON", "FAXX", "CELL", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("PreferredContactMethod1Code") } // May be one of HIGH, NORM type Priority2Code string func (r Priority2Code) Validate() error { for _, vv := range []string{ "HIGH", "NORM", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("Priority2Code") } // May be one of URGT, HIGH, NORM type Priority3Code string func (r Priority3Code) Validate() error { for _, vv := range []string{ "URGT", "HIGH", "NORM", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("Priority3Code") } // May be one of CRED, DEBT, BOTH type RegulatoryReportingType1Code string func (r RegulatoryReportingType1Code) Validate() error { for _, vv := range []string{ "CRED", "DEBT", "BOTH", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("RegulatoryReportingType1Code") } // May be one of FAXI, EDIC, URID, EMAL, POST, SMSM type RemittanceLocationMethod2Code string func (r RemittanceLocationMethod2Code) Validate() error { for _, vv := range []string{ "FAXI", "EDIC", "URID", "EMAL", "POST", "SMSM", } { if 
reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("RemittanceLocationMethod2Code") } // May be one of INDA, INGA, COVE, CLRG type SettlementMethod1Code string func (r SettlementMethod1Code) Validate() error { for _, vv := range []string{ "INDA", "INGA", "COVE", "CLRG", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("SettlementMethod1Code") } // May be one of MM01, MM02, MM03, MM04, MM05, MM06, MM07, MM08, MM09, MM10, MM11, MM12, QTR1, QTR2, QTR3, QTR4, HLF1, HLF2 type TaxRecordPeriod1Code string func (r TaxRecordPeriod1Code) Validate() error { for _, vv := range []string{ "MM01", "MM02", "MM03", "MM04", "MM05", "MM06", "MM07", "MM08", "MM09", "MM10", "MM11", "MM12", "QTR1", "QTR2", "QTR3", "QTR4", "HLF1", "HLF2", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("TaxRecordPeriod1Code") } // May be one of DEBT, CRED, SHAR, SLEV type ChargeBearerType1Code string func (r ChargeBearerType1Code) Validate() error { for _, vv := range []string{ "DEBT", "CRED", "SHAR", "SLEV", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("ChargeBearerType1Code") } // May be one of RTGS, RTNS, MPNS, BOOK type ClearingChannel2Code string func (r ClearingChannel2Code) Validate() error { for _, vv := range []string{ "RTGS", "RTNS", "MPNS", "BOOK", } { if reflect.DeepEqual(string(r), vv) { return nil } } return utils.NewErrValueInvalid("ClearingChannel2Code") }