text
stringlengths
11
4.05M
//************************************************************************// // RightScale API client // // Generated with: // $ praxisgen -metadata=ca/cac/docs/api -output=ca/cac -pkg=cac -target=1.0 -client=API // // The content of this file is auto-generated, DO NOT MODIFY //************************************************************************// package cac import ( "encoding/json" "fmt" "io/ioutil" "time" "github.com/rightscale/rsc/metadata" "github.com/rightscale/rsc/rsapi" ) // API Version const APIVersion = "1.0" // An Href contains the relative path to a resource or resource collection, // e.g. "/api/servers/123" or "/api/servers". type Href string // ActionPath computes the path to the given resource action. For example given the href // "/api/servers/123" calling ActionPath with resource "servers" and action "clone" returns the path // "/api/servers/123/clone" and verb POST. // The algorithm consists of extracting the variables from the href by looking up a matching // pattern from the resource metadata. The variables are then substituted in the action path. // If there are more than one pattern that match the href then the algorithm picks the one that can // substitute the most variables. func (r *Href) ActionPath(rName, aName string) (*metadata.ActionPath, error) { res, ok := GenMetadata[rName] if !ok { return nil, fmt.Errorf("No resource with name '%s'", rName) } var action *metadata.Action for _, a := range res.Actions { if a.Name == aName { action = a break } } if action == nil { return nil, fmt.Errorf("No action with name '%s' on %s", aName, rName) } vars, err := res.ExtractVariables(string(*r)) if err != nil { return nil, err } return action.URL(vars) } /****** Account ******/ // Accounts act as a container for clouds credentials and other RightScale concepts such as // Deployments or ServerArrays. Users with the `enterprise_manager` permission in an account can create // child accounts. This resource is not included in the public docs. 
type Account struct { } //===== Locator // AccountLocator exposes the Account resource actions. type AccountLocator struct { Href api *API } // AccountLocator builds a locator from the given href. func (api *API) AccountLocator(href string) *AccountLocator { return &AccountLocator{Href(href), api} } //===== Actions // POST /api/accounts // // Create a new child account. func (loc *AccountLocator) Create(options rsapi.APIParams) (*AccountLocator, error) { var res *AccountLocator var params rsapi.APIParams var p rsapi.APIParams p = rsapi.APIParams{} var dunnoOpt = options["dunno"] if dunnoOpt != nil { p["dunno"] = dunnoOpt } uri, err := loc.ActionPath("Account", "create") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } location := resp.Header.Get("Location") if len(location) == 0 { return res, fmt.Errorf("Missing location header in response") } else { return &AccountLocator{Href(location), loc.api}, nil } } // GET /api/accounts // // List all accounts. 
func (loc *AccountLocator) Index(options rsapi.APIParams) ([]*Account, error) { var res []*Account var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("Account", "index") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // GET /api/accounts/:id // // Show a specific account. func (loc *AccountLocator) Show(options rsapi.APIParams) (*Account, error) { var res *Account var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("Account", "show") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } /****** AnalysisSnapshot ******/ // AnalysisSnapshots can be used to generate unique links to share data 
using filters over a date range. type AnalysisSnapshot struct { } //===== Locator // AnalysisSnapshotLocator exposes the AnalysisSnapshot resource actions. type AnalysisSnapshotLocator struct { Href api *API } // AnalysisSnapshotLocator builds a locator from the given href. func (api *API) AnalysisSnapshotLocator(href string) *AnalysisSnapshotLocator { return &AnalysisSnapshotLocator{Href(href), api} } //===== Actions // POST /api/analysis_snapshots // // Create a new AnalysisSnapshot. func (loc *AnalysisSnapshotLocator) Create(endTime *time.Time, granularity string, startTime *time.Time, options rsapi.APIParams) (*AnalysisSnapshotLocator, error) { var res *AnalysisSnapshotLocator if granularity == "" { return res, fmt.Errorf("granularity is required") } var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams p = rsapi.APIParams{ "end_time": endTime, "granularity": granularity, "start_time": startTime, } var excludedTagTypesOpt = options["excluded_tag_types"] if excludedTagTypesOpt != nil { p["excluded_tag_types"] = excludedTagTypesOpt } var filtersOpt = options["filters"] if filtersOpt != nil { p["filters"] = filtersOpt } var isComparisonOpt = options["is_comparison"] if isComparisonOpt != nil { p["is_comparison"] = isComparisonOpt } var metricsOpt = options["metrics"] if metricsOpt != nil { p["metrics"] = metricsOpt } var moduleStatesOpt = options["module_states"] if moduleStatesOpt != nil { p["module_states"] = moduleStatesOpt } uri, err := loc.ActionPath("AnalysisSnapshot", "create") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = 
": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } location := resp.Header.Get("Location") if len(location) == 0 { return res, fmt.Errorf("Missing location header in response") } else { return &AnalysisSnapshotLocator{Href(location), loc.api}, nil } } // GET /api/analysis_snapshots/:uuid // // Show a specific AnalysisSnapshot. func (loc *AnalysisSnapshotLocator) Show(options rsapi.APIParams) (*AnalysisSnapshot, error) { var res *AnalysisSnapshot var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("AnalysisSnapshot", "show") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } /****** BudgetAlert ******/ // Enable you to set a monthly spend budget and be alerted via email when this is exceeded, // based on either actual or forecasted spend. These emails include links to AnalysisSnapshots, which are // generated automatically by us. type BudgetAlert struct { } //===== Locator // BudgetAlertLocator exposes the BudgetAlert resource actions. type BudgetAlertLocator struct { Href api *API } // BudgetAlertLocator builds a locator from the given href. func (api *API) BudgetAlertLocator(href string) *BudgetAlertLocator { return &BudgetAlertLocator{Href(href), api} } //===== Actions // POST /api/budget_alerts // // Create a new BudgetAlert. 
func (loc *BudgetAlertLocator) Create(budget *BudgetStruct, frequency string, name string, type_ string, options rsapi.APIParams) (*BudgetAlertLocator, error) { var res *BudgetAlertLocator if budget == nil { return res, fmt.Errorf("budget is required") } if frequency == "" { return res, fmt.Errorf("frequency is required") } if name == "" { return res, fmt.Errorf("name is required") } if type_ == "" { return res, fmt.Errorf("type_ is required") } var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams p = rsapi.APIParams{ "budget": budget, "frequency": frequency, "name": name, "type": type_, } var additionalEmailsOpt = options["additional_emails"] if additionalEmailsOpt != nil { p["additional_emails"] = additionalEmailsOpt } var attachCsvOpt = options["attach_csv"] if attachCsvOpt != nil { p["attach_csv"] = attachCsvOpt } var filtersOpt = options["filters"] if filtersOpt != nil { p["filters"] = filtersOpt } uri, err := loc.ActionPath("BudgetAlert", "create") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } location := resp.Header.Get("Location") if len(location) == 0 { return res, fmt.Errorf("Missing location header in response") } else { return &BudgetAlertLocator{Href(location), loc.api}, nil } } // GET /api/budget_alerts // // List all BudgetAlerts. 
func (loc *BudgetAlertLocator) Index(options rsapi.APIParams) ([]*BudgetAlert, error) { var res []*BudgetAlert var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("BudgetAlert", "index") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // GET /api/budget_alerts/:id // // Show a specific BudgetAlert. 
func (loc *BudgetAlertLocator) Show(options rsapi.APIParams) (*BudgetAlert, error) { var res *BudgetAlert var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("BudgetAlert", "show") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // PATCH /api/budget_alerts/:id // // Update the provided attributes of a BudgetAlert. 
func (loc *BudgetAlertLocator) Update(options rsapi.APIParams) (*BudgetAlert, error) { var res *BudgetAlert var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams p = rsapi.APIParams{} var additionalEmailsOpt = options["additional_emails"] if additionalEmailsOpt != nil { p["additional_emails"] = additionalEmailsOpt } var attachCsvOpt = options["attach_csv"] if attachCsvOpt != nil { p["attach_csv"] = attachCsvOpt } var budgetOpt = options["budget"] if budgetOpt != nil { p["budget"] = budgetOpt } var frequencyOpt = options["frequency"] if frequencyOpt != nil { p["frequency"] = frequencyOpt } var nameOpt = options["name"] if nameOpt != nil { p["name"] = nameOpt } var type_Opt = options["type"] if type_Opt != nil { p["type"] = type_Opt } uri, err := loc.ActionPath("BudgetAlert", "update") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // DELETE /api/budget_alerts/:id // // Delete a BudgetAlert. 
func (loc *BudgetAlertLocator) Destroy() error { var params rsapi.APIParams var p rsapi.APIParams uri, err := loc.ActionPath("BudgetAlert", "destroy") if err != nil { return err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return err } resp, err := loc.api.PerformRequest(req) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return fmt.Errorf("invalid response %s%s", resp.Status, sr) } return nil } /****** CloudBill ******/ // Enables you to get details about cloud bills. Only Amazon Web Services is supported for now. type CloudBill struct { } //===== Locator // CloudBillLocator exposes the CloudBill resource actions. type CloudBillLocator struct { Href api *API } // CloudBillLocator builds a locator from the given href. func (api *API) CloudBillLocator(href string) *CloudBillLocator { return &CloudBillLocator{Href(href), api} } //===== Actions // GET /api/cloud_bills/actions/filter_options // // Gets the filter options which can be used for filtering the cloud bill breakdown calls. 
func (loc *CloudBillLocator) FilterOptions(endTime *time.Time, filterTypes []string, startTime *time.Time, options rsapi.APIParams) (*Filter, error) { var res *Filter if len(filterTypes) == 0 { return res, fmt.Errorf("filterTypes is required") } var params rsapi.APIParams params = rsapi.APIParams{ "end_time": endTime, "filter_types[]": filterTypes, "start_time": startTime, } var cloudBillFiltersOpt = options["cloud_bill_filters"] if cloudBillFiltersOpt != nil { params["cloud_bill_filters[]"] = cloudBillFiltersOpt } var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("CloudBill", "filter_options") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } /****** CloudBillMetric ******/ // Enables you to get breakdowns of your cloud bill costs. Only Amazon Web Services is supported for now. type CloudBillMetric struct { } //===== Locator // CloudBillMetricLocator exposes the CloudBillMetric resource actions. type CloudBillMetricLocator struct { Href api *API } // CloudBillMetricLocator builds a locator from the given href. 
func (api *API) CloudBillMetricLocator(href string) *CloudBillMetricLocator {
	return &CloudBillMetricLocator{Href(href), api}
}

//===== Actions

// GET /api/cloud_bill_metrics/actions/grouped_time_series
//
// Calculates the time series of costs for cloud bills in a time period grouped into monthly
// time buckets and groups them into specified breakdown categories, e.g. show me cost of my
// cloud bills per month during the last year grouped by product.
func (loc *CloudBillMetricLocator) GroupedTimeSeries(endTime *time.Time, group [][]string, startTime *time.Time, options rsapi.APIParams) (*TimeSeriesMetricsResult, error) {
	var res *TimeSeriesMetricsResult
	if len(group) == 0 {
		return res, fmt.Errorf("group is required")
	}
	var params rsapi.APIParams
	params = rsapi.APIParams{
		"end_time":   endTime,
		"group[]":    group,
		"start_time": startTime,
	}
	var cloudBillFiltersOpt = options["cloud_bill_filters"]
	if cloudBillFiltersOpt != nil {
		params["cloud_bill_filters[]"] = cloudBillFiltersOpt
	}
	var viewOpt = options["view"]
	if viewOpt != nil {
		params["view"] = viewOpt
	}
	var p rsapi.APIParams
	uri, err := loc.ActionPath("CloudBillMetric", "grouped_time_series")
	if err != nil {
		return res, err
	}
	req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p)
	if err != nil {
		return res, err
	}
	resp, err := loc.api.PerformRequest(req)
	if err != nil {
		return res, err
	}
	// Single deferred Close (the generated code deferred it twice).
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		respBody, _ := ioutil.ReadAll(resp.Body)
		sr := string(respBody)
		if sr != "" {
			sr = ": " + sr
		}
		return res, fmt.Errorf("invalid response %s%s", resp.Status, sr)
	}
	respBody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return res, err
	}
	err = json.Unmarshal(respBody, &res)
	return res, err
}

/****** CurrentUser ******/

// Represents the currently logged-in user. This resource is not included in the public docs.
type CurrentUser struct { } //===== Locator // CurrentUserLocator exposes the CurrentUser resource actions. type CurrentUserLocator struct { Href api *API } // CurrentUserLocator builds a locator from the given href. func (api *API) CurrentUserLocator(href string) *CurrentUserLocator { return &CurrentUserLocator{Href(href), api} } //===== Actions // GET /api/current_user // // Show the user's details. func (loc *CurrentUserLocator) Show(options rsapi.APIParams) (*CurrentUser, error) { var res *CurrentUser var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("CurrentUser", "show") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // PATCH /api/current_user // // Update the user's details. 
func (loc *CurrentUserLocator) Update(password string, options rsapi.APIParams) (*CurrentUser, error) { var res *CurrentUser if password == "" { return res, fmt.Errorf("password is required") } var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams p = rsapi.APIParams{ "password": password, } var companyOpt = options["company"] if companyOpt != nil { p["company"] = companyOpt } var emailOpt = options["email"] if emailOpt != nil { p["email"] = emailOpt } var firstNameOpt = options["first_name"] if firstNameOpt != nil { p["first_name"] = firstNameOpt } var lastNameOpt = options["last_name"] if lastNameOpt != nil { p["last_name"] = lastNameOpt } var newPasswordOpt = options["new_password"] if newPasswordOpt != nil { p["new_password"] = newPasswordOpt } var phoneOpt = options["phone"] if phoneOpt != nil { p["phone"] = phoneOpt } var timezoneOpt = options["timezone"] if timezoneOpt != nil { p["timezone"] = timezoneOpt } uri, err := loc.ActionPath("CurrentUser", "update") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // POST /api/current_user/actions/cloud_accounts // // Creates a cloud account in the first available child account, // or the account used to login if there are no available child accounts. 
func (loc *CurrentUserLocator) CloudAccounts(awsAccessKeyId string, awsAccountNumber string, awsSecretAccessKey string, cloudVendorName string) error { if awsAccessKeyId == "" { return fmt.Errorf("awsAccessKeyId is required") } if awsAccountNumber == "" { return fmt.Errorf("awsAccountNumber is required") } if awsSecretAccessKey == "" { return fmt.Errorf("awsSecretAccessKey is required") } if cloudVendorName == "" { return fmt.Errorf("cloudVendorName is required") } var params rsapi.APIParams var p rsapi.APIParams p = rsapi.APIParams{ "aws_access_key_id": awsAccessKeyId, "aws_account_number": awsAccountNumber, "aws_secret_access_key": awsSecretAccessKey, "cloud_vendor_name": cloudVendorName, } uri, err := loc.ActionPath("CurrentUser", "cloud_accounts") if err != nil { return err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return err } resp, err := loc.api.PerformRequest(req) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return fmt.Errorf("invalid response %s%s", resp.Status, sr) } return nil } // GET /api/current_user/actions/onboarding_status // // Gets the onboarding status of the user. 
func (loc *CurrentUserLocator) OnboardingStatus(options rsapi.APIParams) (*UserOnboardingStatus, error) { var res *UserOnboardingStatus var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("CurrentUser", "onboarding_status") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // GET /api/current_user/actions/environment // // Gets various environment settings. func (loc *CurrentUserLocator) Environment() (*UserEnvironment, error) { var res *UserEnvironment var params rsapi.APIParams var p rsapi.APIParams uri, err := loc.ActionPath("CurrentUser", "environment") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } /****** Instance ******/ // Enables you to get instance details, including the cost of individual instances. 
type Instance struct { } //===== Locator // InstanceLocator exposes the Instance resource actions. type InstanceLocator struct { Href api *API } // InstanceLocator builds a locator from the given href. func (api *API) InstanceLocator(href string) *InstanceLocator { return &InstanceLocator{Href(href), api} } //===== Actions // GET /api/instances // // Gets instances that overlap with the requested time period. func (loc *InstanceLocator) Index(endTime *time.Time, startTime *time.Time, options rsapi.APIParams) ([]*Instance, error) { var res []*Instance var params rsapi.APIParams params = rsapi.APIParams{ "end_time": endTime, "start_time": startTime, } var instanceFiltersOpt = options["instance_filters"] if instanceFiltersOpt != nil { params["instance_filters[]"] = instanceFiltersOpt } var limitOpt = options["limit"] if limitOpt != nil { params["limit"] = limitOpt } var offsetOpt = options["offset"] if offsetOpt != nil { params["offset"] = offsetOpt } var orderOpt = options["order"] if orderOpt != nil { params["order[]"] = orderOpt } var timezoneOpt = options["timezone"] if timezoneOpt != nil { params["timezone"] = timezoneOpt } var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("Instance", "index") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // GET /api/instances/actions/count // // Gets the count of instances that overlap 
with the requested time period. func (loc *InstanceLocator) Count(endTime *time.Time, startTime *time.Time, options rsapi.APIParams) (string, error) { var res string var params rsapi.APIParams params = rsapi.APIParams{ "end_time": endTime, "start_time": startTime, } var instanceFiltersOpt = options["instance_filters"] if instanceFiltersOpt != nil { params["instance_filters[]"] = instanceFiltersOpt } var timezoneOpt = options["timezone"] if timezoneOpt != nil { params["timezone"] = timezoneOpt } var p rsapi.APIParams uri, err := loc.ActionPath("Instance", "count") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } res = string(respBody) return res, err } // GET /api/instances/actions/exist // // Checks if any instances overlap with the requested time period. 
func (loc *InstanceLocator) Exist(options rsapi.APIParams) (string, error) { var res string var params rsapi.APIParams params = rsapi.APIParams{} var endTimeOpt = options["end_time"] if endTimeOpt != nil { params["end_time"] = endTimeOpt } var instanceFiltersOpt = options["instance_filters"] if instanceFiltersOpt != nil { params["instance_filters[]"] = instanceFiltersOpt } var startTimeOpt = options["start_time"] if startTimeOpt != nil { params["start_time"] = startTimeOpt } var timezoneOpt = options["timezone"] if timezoneOpt != nil { params["timezone"] = timezoneOpt } var p rsapi.APIParams uri, err := loc.ActionPath("Instance", "exist") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } res = string(respBody) return res, err } // GET /api/instances/actions/export // // Exports the instances that overlap with the requested time period in CSV format. 
func (loc *InstanceLocator) Export(endTime *time.Time, startTime *time.Time, options rsapi.APIParams) (string, error) { var res string var params rsapi.APIParams params = rsapi.APIParams{ "end_time": endTime, "start_time": startTime, } var instanceFiltersOpt = options["instance_filters"] if instanceFiltersOpt != nil { params["instance_filters[]"] = instanceFiltersOpt } var limitOpt = options["limit"] if limitOpt != nil { params["limit"] = limitOpt } var offsetOpt = options["offset"] if offsetOpt != nil { params["offset"] = offsetOpt } var orderOpt = options["order"] if orderOpt != nil { params["order[]"] = orderOpt } var timezoneOpt = options["timezone"] if timezoneOpt != nil { params["timezone"] = timezoneOpt } var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("Instance", "export") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } res = string(respBody) return res, err } // GET /api/instances/actions/filter_options // // Gets the filter options for instances that overlap with the requested time period. 
func (loc *InstanceLocator) FilterOptions(endTime *time.Time, filterTypes []string, startTime *time.Time, options rsapi.APIParams) (*Filter, error) { var res *Filter if len(filterTypes) == 0 { return res, fmt.Errorf("filterTypes is required") } var params rsapi.APIParams params = rsapi.APIParams{ "end_time": endTime, "filter_types[]": filterTypes, "start_time": startTime, } var instanceFiltersOpt = options["instance_filters"] if instanceFiltersOpt != nil { params["instance_filters[]"] = instanceFiltersOpt } var limitOpt = options["limit"] if limitOpt != nil { params["limit"] = limitOpt } var offsetOpt = options["offset"] if offsetOpt != nil { params["offset"] = offsetOpt } var orderOpt = options["order"] if orderOpt != nil { params["order[]"] = orderOpt } var searchTermOpt = options["search_term"] if searchTermOpt != nil { params["search_term"] = searchTermOpt } var timezoneOpt = options["timezone"] if timezoneOpt != nil { params["timezone"] = timezoneOpt } var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("Instance", "filter_options") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } /****** InstanceCombination ******/ // InstanceCombinations represent instances that make-up a Scenario. // Note that, when making create and update calls, a Pattern can only be applied to an InstanceCombination once. 
type InstanceCombination struct { } //===== Locator // InstanceCombinationLocator exposes the InstanceCombination resource actions. type InstanceCombinationLocator struct { Href api *API } // InstanceCombinationLocator builds a locator from the given href. func (api *API) InstanceCombinationLocator(href string) *InstanceCombinationLocator { return &InstanceCombinationLocator{Href(href), api} } //===== Actions // POST /api/scenarios/:scenario_id/instance_combinations // // Create a new InstanceCombination. func (loc *InstanceCombinationLocator) Create(cloudName string, cloudVendorName string, instanceTypeName string, monthlyUsageOption string, platform string, quantity int, options rsapi.APIParams) (*InstanceCombinationLocator, error) { var res *InstanceCombinationLocator if cloudName == "" { return res, fmt.Errorf("cloudName is required") } if cloudVendorName == "" { return res, fmt.Errorf("cloudVendorName is required") } if instanceTypeName == "" { return res, fmt.Errorf("instanceTypeName is required") } if monthlyUsageOption == "" { return res, fmt.Errorf("monthlyUsageOption is required") } if platform == "" { return res, fmt.Errorf("platform is required") } var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams p = rsapi.APIParams{ "cloud_name": cloudName, "cloud_vendor_name": cloudVendorName, "instance_type_name": instanceTypeName, "monthly_usage_option": monthlyUsageOption, "platform": platform, "quantity": quantity, } var datacenterNameOpt = options["datacenter_name"] if datacenterNameOpt != nil { p["datacenter_name"] = datacenterNameOpt } var monthlyUsageHoursOpt = options["monthly_usage_hours"] if monthlyUsageHoursOpt != nil { p["monthly_usage_hours"] = monthlyUsageHoursOpt } var patternsOpt = options["patterns"] if patternsOpt != nil { p["patterns"] = patternsOpt } uri, err := loc.ActionPath("InstanceCombination", "create") if err != nil { return res, err } 
req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } location := resp.Header.Get("Location") if len(location) == 0 { return res, fmt.Errorf("Missing location header in response") } else { return &InstanceCombinationLocator{Href(location), loc.api}, nil } } // GET /api/scenarios/:scenario_id/instance_combinations/:id // // Show a specific InstanceCombination. func (loc *InstanceCombinationLocator) Show(options rsapi.APIParams) (*InstanceCombination, error) { var res *InstanceCombination var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("InstanceCombination", "show") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // PATCH /api/scenarios/:scenario_id/instance_combinations/:id // // Update the provided attributes of an InstanceCombination. 
func (loc *InstanceCombinationLocator) Update(options rsapi.APIParams) (*InstanceCombination, error) { var res *InstanceCombination var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams p = rsapi.APIParams{} var cloudNameOpt = options["cloud_name"] if cloudNameOpt != nil { p["cloud_name"] = cloudNameOpt } var cloudVendorNameOpt = options["cloud_vendor_name"] if cloudVendorNameOpt != nil { p["cloud_vendor_name"] = cloudVendorNameOpt } var datacenterNameOpt = options["datacenter_name"] if datacenterNameOpt != nil { p["datacenter_name"] = datacenterNameOpt } var instanceTypeNameOpt = options["instance_type_name"] if instanceTypeNameOpt != nil { p["instance_type_name"] = instanceTypeNameOpt } var monthlyUsageHoursOpt = options["monthly_usage_hours"] if monthlyUsageHoursOpt != nil { p["monthly_usage_hours"] = monthlyUsageHoursOpt } var monthlyUsageOptionOpt = options["monthly_usage_option"] if monthlyUsageOptionOpt != nil { p["monthly_usage_option"] = monthlyUsageOptionOpt } var patternsOpt = options["patterns"] if patternsOpt != nil { p["patterns"] = patternsOpt } var platformOpt = options["platform"] if platformOpt != nil { p["platform"] = platformOpt } var quantityOpt = options["quantity"] if quantityOpt != nil { p["quantity"] = quantityOpt } uri, err := loc.ActionPath("InstanceCombination", "update") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = 
json.Unmarshal(respBody, &res) return res, err } // DELETE /api/scenarios/:scenario_id/instance_combinations/:id // // Delete an InstanceCombination. func (loc *InstanceCombinationLocator) Destroy() error { var params rsapi.APIParams var p rsapi.APIParams uri, err := loc.ActionPath("InstanceCombination", "destroy") if err != nil { return err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return err } resp, err := loc.api.PerformRequest(req) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return fmt.Errorf("invalid response %s%s", resp.Status, sr) } return nil } // GET /api/scenarios/:scenario_id/instance_combinations/:id/actions/reserved_instance_prices // // Returns pricing details for the various reserved instances that can be purchased for this InstanceCombination. func (loc *InstanceCombinationLocator) ReservedInstancePrices(options rsapi.APIParams) (*ReservedInstancePurchase, error) { var res *ReservedInstancePurchase var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("InstanceCombination", "reserved_instance_prices") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } 
/****** InstanceMetric ******/ // Enables you to get aggregated metrics from instances, such as total_cost or lowest_instance_count. type InstanceMetric struct { } //===== Locator // InstanceMetricLocator exposes the InstanceMetric resource actions. type InstanceMetricLocator struct { Href api *API } // InstanceMetricLocator builds a locator from the given href. func (api *API) InstanceMetricLocator(href string) *InstanceMetricLocator { return &InstanceMetricLocator{Href(href), api} } //===== Actions // GET /api/instance_metrics/actions/overall // // Calculates the overall metrics for instance usages in a time period, e.g. show me the // total cost of all my instances during the last month. func (loc *InstanceMetricLocator) Overall(endTime *time.Time, metrics []string, startTime *time.Time, options rsapi.APIParams) (*MetricsResult, error) { var res *MetricsResult if len(metrics) == 0 { return res, fmt.Errorf("metrics is required") } var params rsapi.APIParams params = rsapi.APIParams{ "end_time": endTime, "metrics[]": metrics, "start_time": startTime, } var instanceFiltersOpt = options["instance_filters"] if instanceFiltersOpt != nil { params["instance_filters[]"] = instanceFiltersOpt } var timezoneOpt = options["timezone"] if timezoneOpt != nil { params["timezone"] = timezoneOpt } var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("InstanceMetric", "overall") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err 
!= nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // GET /api/instance_metrics/actions/grouped_overall // // Calculates the overall metrics for instance usages in a time period and groups them into // specified breakdown categories, e.g. show me the total cost of all my instances during the // last month grouped by different accounts. func (loc *InstanceMetricLocator) GroupedOverall(endTime *time.Time, group []string, metrics []string, startTime *time.Time, options rsapi.APIParams) (*MetricsResult, error) { var res *MetricsResult if len(group) == 0 { return res, fmt.Errorf("group is required") } if len(metrics) == 0 { return res, fmt.Errorf("metrics is required") } var params rsapi.APIParams params = rsapi.APIParams{ "end_time": endTime, "group[]": group, "metrics[]": metrics, "start_time": startTime, } var instanceFiltersOpt = options["instance_filters"] if instanceFiltersOpt != nil { params["instance_filters[]"] = instanceFiltersOpt } var limitOpt = options["limit"] if limitOpt != nil { params["limit"] = limitOpt } var offsetOpt = options["offset"] if offsetOpt != nil { params["offset"] = offsetOpt } var orderOpt = options["order"] if orderOpt != nil { params["order[]"] = orderOpt } var timezoneOpt = options["timezone"] if timezoneOpt != nil { params["timezone"] = timezoneOpt } var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("InstanceMetric", "grouped_overall") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, 
err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // GET /api/instance_metrics/actions/time_series // // Calculates the metrics time series for instance usages in a time period allowing different // time buckets (hour, 3 days, month, etc.), e.g. show me the lowest instance count of my // instances per day during the last month. func (loc *InstanceMetricLocator) TimeSeries(endTime *time.Time, granularity string, metrics []string, startTime *time.Time, options rsapi.APIParams) (*TimeSeriesMetricsResult, error) { var res *TimeSeriesMetricsResult if granularity == "" { return res, fmt.Errorf("granularity is required") } if len(metrics) == 0 { return res, fmt.Errorf("metrics is required") } var params rsapi.APIParams params = rsapi.APIParams{ "end_time": endTime, "granularity": granularity, "metrics[]": metrics, "start_time": startTime, } var instanceFiltersOpt = options["instance_filters"] if instanceFiltersOpt != nil { params["instance_filters[]"] = instanceFiltersOpt } var intervalOpt = options["interval"] if intervalOpt != nil { params["interval"] = intervalOpt } var timezoneOpt = options["timezone"] if timezoneOpt != nil { params["timezone"] = timezoneOpt } var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("InstanceMetric", "time_series") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = 
json.Unmarshal(respBody, &res) return res, err } // GET /api/instance_metrics/actions/grouped_time_series // // Calculates the metrics time series for instance usages in a time period allowing different // time buckets (hour, 3 days, month, etc.) and groups them into specified breakdown // categories, e.g. show me the lowest instance count of my instances per day during the last // month grouped by accounts. func (loc *InstanceMetricLocator) GroupedTimeSeries(endTime *time.Time, granularity string, group []string, metrics []string, startTime *time.Time, options rsapi.APIParams) (*TimeSeriesMetricsResult, error) { var res *TimeSeriesMetricsResult if granularity == "" { return res, fmt.Errorf("granularity is required") } if len(group) == 0 { return res, fmt.Errorf("group is required") } if len(metrics) == 0 { return res, fmt.Errorf("metrics is required") } var params rsapi.APIParams params = rsapi.APIParams{ "end_time": endTime, "granularity": granularity, "group[]": group, "metrics[]": metrics, "start_time": startTime, } var instanceFiltersOpt = options["instance_filters"] if instanceFiltersOpt != nil { params["instance_filters[]"] = instanceFiltersOpt } var intervalOpt = options["interval"] if intervalOpt != nil { params["interval"] = intervalOpt } var limitOpt = options["limit"] if limitOpt != nil { params["limit"] = limitOpt } var offsetOpt = options["offset"] if offsetOpt != nil { params["offset"] = offsetOpt } var orderOpt = options["order"] if orderOpt != nil { params["order[]"] = orderOpt } var timezoneOpt = options["timezone"] if timezoneOpt != nil { params["timezone"] = timezoneOpt } var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("InstanceMetric", "grouped_time_series") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { 
return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // GET /api/instance_metrics/actions/current_count // // Returns the count of currently running instances. func (loc *InstanceMetricLocator) CurrentCount(options rsapi.APIParams) (string, error) { var res string var params rsapi.APIParams params = rsapi.APIParams{} var instanceFiltersOpt = options["instance_filters"] if instanceFiltersOpt != nil { params["instance_filters[]"] = instanceFiltersOpt } var p rsapi.APIParams uri, err := loc.ActionPath("InstanceMetric", "current_count") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } res = string(respBody) return res, err } /****** InstanceUsagePeriod ******/ // Enables you to get usage period details from instances. An instance can have many usage periods, which can // be caused by stop/start actions or changes to the instance type etc. InstanceUsagePeriods are used internally to // calculate aggregate InstanceMetrics. type InstanceUsagePeriod struct { } //===== Locator // InstanceUsagePeriodLocator exposes the InstanceUsagePeriod resource actions. 
type InstanceUsagePeriodLocator struct { Href api *API } // InstanceUsagePeriodLocator builds a locator from the given href. func (api *API) InstanceUsagePeriodLocator(href string) *InstanceUsagePeriodLocator { return &InstanceUsagePeriodLocator{Href(href), api} } //===== Actions // GET /api/instance_usage_periods // // Gets the instance usage periods of instances. func (loc *InstanceUsagePeriodLocator) Index(instanceUsagePeriodFilters []*Filter, options rsapi.APIParams) ([]*InstanceUsagePeriod, error) { var res []*InstanceUsagePeriod if len(instanceUsagePeriodFilters) == 0 { return res, fmt.Errorf("instanceUsagePeriodFilters is required") } var params rsapi.APIParams params = rsapi.APIParams{ "instance_usage_period_filters[]": instanceUsagePeriodFilters, } var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("InstanceUsagePeriod", "index") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } /****** Pattern ******/ // Patterns describe operations in usage, and can be applied to InstanceCombinations in Scenarios to model changes in the cost. // A pattern can only be applied to an InstanceCombination once. type Pattern struct { } //===== Locator // PatternLocator exposes the Pattern resource actions. type PatternLocator struct { Href api *API } // PatternLocator builds a locator from the given href. 
func (api *API) PatternLocator(href string) *PatternLocator {
	return &PatternLocator{Href(href), api}
}

//===== Actions

// Create creates a new Pattern and returns a locator built from the response
// Location header. months, name, operation, type_ and years are required.
//
// POST /api/patterns
func (loc *PatternLocator) Create(months string, name string, operation string, type_ string, value float64, years string, options rsapi.APIParams) (*PatternLocator, error) {
	var res *PatternLocator
	if months == "" {
		return res, fmt.Errorf("months is required")
	}
	if name == "" {
		return res, fmt.Errorf("name is required")
	}
	if operation == "" {
		return res, fmt.Errorf("operation is required")
	}
	if type_ == "" {
		return res, fmt.Errorf("type_ is required")
	}
	if years == "" {
		return res, fmt.Errorf("years is required")
	}
	// Optional query string parameters.
	params := rsapi.APIParams{}
	if v := options["view"]; v != nil {
		params["view"] = v
	}
	// Request payload: required attributes plus optional summary.
	p := rsapi.APIParams{
		"months":    months,
		"name":      name,
		"operation": operation,
		"type":      type_,
		"value":     value,
		"years":     years,
	}
	if v := options["summary"]; v != nil {
		p["summary"] = v
	}
	uri, err := loc.ActionPath("Pattern", "create")
	if err != nil {
		return res, err
	}
	req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p)
	if err != nil {
		return res, err
	}
	resp, err := loc.api.PerformRequest(req)
	if err != nil {
		return res, err
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		respBody, _ := ioutil.ReadAll(resp.Body)
		sr := string(respBody)
		if sr != "" {
			sr = ": " + sr
		}
		return res, fmt.Errorf("invalid response %s%s", resp.Status, sr)
	}
	location := resp.Header.Get("Location")
	if len(location) == 0 {
		return res, fmt.Errorf("Missing location header in response")
	}
	return &PatternLocator{Href(location), loc.api}, nil
}

// GET /api/patterns
//
// List all Patterns.
func (loc *PatternLocator) Index(options rsapi.APIParams) ([]*Pattern, error) { var res []*Pattern var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("Pattern", "index") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // GET /api/patterns/:id // // Show a specific Pattern. func (loc *PatternLocator) Show(options rsapi.APIParams) (*Pattern, error) { var res *Pattern var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("Pattern", "show") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // PATCH /api/patterns/:id // // Update the provided attributes of a Pattern. 
func (loc *PatternLocator) Update(options rsapi.APIParams) (*Pattern, error) { var res *Pattern var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams p = rsapi.APIParams{} var monthsOpt = options["months"] if monthsOpt != nil { p["months"] = monthsOpt } var nameOpt = options["name"] if nameOpt != nil { p["name"] = nameOpt } var operationOpt = options["operation"] if operationOpt != nil { p["operation"] = operationOpt } var summaryOpt = options["summary"] if summaryOpt != nil { p["summary"] = summaryOpt } var type_Opt = options["type"] if type_Opt != nil { p["type"] = type_Opt } var valueOpt = options["value"] if valueOpt != nil { p["value"] = valueOpt } var yearsOpt = options["years"] if yearsOpt != nil { p["years"] = yearsOpt } uri, err := loc.ActionPath("Pattern", "update") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // DELETE /api/patterns/:id // // Delete a Pattern. 
func (loc *PatternLocator) Destroy() error { var params rsapi.APIParams var p rsapi.APIParams uri, err := loc.ActionPath("Pattern", "destroy") if err != nil { return err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return err } resp, err := loc.api.PerformRequest(req) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return fmt.Errorf("invalid response %s%s", resp.Status, sr) } return nil } // POST /api/patterns/actions/create_defaults // // Create the following commonly used default Patterns: Increase by 2% every month, // Increase by 5% every month, Increase by 10% every month, Increase by 15% every month, // Increase by 500% during Nov - Dec, Increase by 200% during Jan - Feb, Decrease by 2% every month, // Decrease by 5% every month, Decrease by 10% every month, Decrease by 15% every month, Add 1 every month. 
func (loc *PatternLocator) CreateDefaults(options rsapi.APIParams) (*Pattern, error) { var res *Pattern var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("Pattern", "create_defaults") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } /****** ReservedInstance ******/ // Enables you to get details of existing AWS ReservedInstances and some metrics about their utilization. type ReservedInstance struct { } //===== Locator // ReservedInstanceLocator exposes the ReservedInstance resource actions. type ReservedInstanceLocator struct { Href api *API } // ReservedInstanceLocator builds a locator from the given href. func (api *API) ReservedInstanceLocator(href string) *ReservedInstanceLocator { return &ReservedInstanceLocator{Href(href), api} } //===== Actions // GET /api/reserved_instances // // Gets Reserved Instances that overlap with the requested time period. 
func (loc *ReservedInstanceLocator) Index(endTime *time.Time, startTime *time.Time, options rsapi.APIParams) ([]*ReservedInstance, error) { var res []*ReservedInstance var params rsapi.APIParams params = rsapi.APIParams{ "end_time": endTime, "start_time": startTime, } var limitOpt = options["limit"] if limitOpt != nil { params["limit"] = limitOpt } var offsetOpt = options["offset"] if offsetOpt != nil { params["offset"] = offsetOpt } var orderOpt = options["order"] if orderOpt != nil { params["order[]"] = orderOpt } var reservedInstanceFiltersOpt = options["reserved_instance_filters"] if reservedInstanceFiltersOpt != nil { params["reserved_instance_filters[]"] = reservedInstanceFiltersOpt } var timezoneOpt = options["timezone"] if timezoneOpt != nil { params["timezone"] = timezoneOpt } var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("ReservedInstance", "index") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // GET /api/reserved_instances/actions/count // // Gets the count of Reserved Instances that overlap with the requested time period. 
func (loc *ReservedInstanceLocator) Count(endTime *time.Time, startTime *time.Time, options rsapi.APIParams) (string, error) { var res string var params rsapi.APIParams params = rsapi.APIParams{ "end_time": endTime, "start_time": startTime, } var reservedInstanceFiltersOpt = options["reserved_instance_filters"] if reservedInstanceFiltersOpt != nil { params["reserved_instance_filters[]"] = reservedInstanceFiltersOpt } var timezoneOpt = options["timezone"] if timezoneOpt != nil { params["timezone"] = timezoneOpt } var p rsapi.APIParams uri, err := loc.ActionPath("ReservedInstance", "count") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } res = string(respBody) return res, err } // GET /api/reserved_instances/actions/exist // // Checks if any Reserved Instances overlap with the requested time period. 
func (loc *ReservedInstanceLocator) Exist(options rsapi.APIParams) (string, error) { var res string var params rsapi.APIParams params = rsapi.APIParams{} var endTimeOpt = options["end_time"] if endTimeOpt != nil { params["end_time"] = endTimeOpt } var reservedInstanceFiltersOpt = options["reserved_instance_filters"] if reservedInstanceFiltersOpt != nil { params["reserved_instance_filters[]"] = reservedInstanceFiltersOpt } var startTimeOpt = options["start_time"] if startTimeOpt != nil { params["start_time"] = startTimeOpt } var timezoneOpt = options["timezone"] if timezoneOpt != nil { params["timezone"] = timezoneOpt } var p rsapi.APIParams uri, err := loc.ActionPath("ReservedInstance", "exist") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } res = string(respBody) return res, err } // GET /api/reserved_instances/actions/export // // Exports the Reserved Instances that overlap with the requested time period in CSV format. 
func (loc *ReservedInstanceLocator) Export(endTime *time.Time, startTime *time.Time, options rsapi.APIParams) (string, error) { var res string var params rsapi.APIParams params = rsapi.APIParams{ "end_time": endTime, "start_time": startTime, } var limitOpt = options["limit"] if limitOpt != nil { params["limit"] = limitOpt } var offsetOpt = options["offset"] if offsetOpt != nil { params["offset"] = offsetOpt } var orderOpt = options["order"] if orderOpt != nil { params["order[]"] = orderOpt } var reservedInstanceFiltersOpt = options["reserved_instance_filters"] if reservedInstanceFiltersOpt != nil { params["reserved_instance_filters[]"] = reservedInstanceFiltersOpt } var timezoneOpt = options["timezone"] if timezoneOpt != nil { params["timezone"] = timezoneOpt } var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("ReservedInstance", "export") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } res = string(respBody) return res, err } // GET /api/reserved_instances/actions/filter_options // // Gets the filter options for Reserved Instances that overlap with the requested time period. 
func (loc *ReservedInstanceLocator) FilterOptions(endTime *time.Time, startTime *time.Time, options rsapi.APIParams) (*Filter, error) { var res *Filter var params rsapi.APIParams params = rsapi.APIParams{ "end_time": endTime, "start_time": startTime, } var filterTypesOpt = options["filter_types"] if filterTypesOpt != nil { params["filter_types[]"] = filterTypesOpt } var limitOpt = options["limit"] if limitOpt != nil { params["limit"] = limitOpt } var offsetOpt = options["offset"] if offsetOpt != nil { params["offset"] = offsetOpt } var orderOpt = options["order"] if orderOpt != nil { params["order[]"] = orderOpt } var reservedInstanceFiltersOpt = options["reserved_instance_filters"] if reservedInstanceFiltersOpt != nil { params["reserved_instance_filters[]"] = reservedInstanceFiltersOpt } var searchTermOpt = options["search_term"] if searchTermOpt != nil { params["search_term"] = searchTermOpt } var timezoneOpt = options["timezone"] if timezoneOpt != nil { params["timezone"] = timezoneOpt } var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("ReservedInstance", "filter_options") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } /****** ReservedInstancePurchase ******/ // ReservedInstancePurchases can be applied to InstanceCombinations in Scenarios to model changes in the cost. 
These are not actually purchased in the cloud and are only used for cost simulation purposes. type ReservedInstancePurchase struct { } //===== Locator // ReservedInstancePurchaseLocator exposes the ReservedInstancePurchase resource actions. type ReservedInstancePurchaseLocator struct { Href api *API } // ReservedInstancePurchaseLocator builds a locator from the given href. func (api *API) ReservedInstancePurchaseLocator(href string) *ReservedInstancePurchaseLocator { return &ReservedInstancePurchaseLocator{Href(href), api} } //===== Actions // POST /api/scenarios/:scenario_id/instance_combinations/:instance_combination_id/reserved_instance_purchases // // Create a new ReservedInstancePurchase. This is not actually purchased in the cloud and is only used for cost simulation purposes. func (loc *ReservedInstancePurchaseLocator) Create(autoRenew bool, duration int, offeringType string, quantity int, startDate *time.Time, options rsapi.APIParams) (*ReservedInstancePurchaseLocator, error) { var res *ReservedInstancePurchaseLocator if offeringType == "" { return res, fmt.Errorf("offeringType is required") } var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams p = rsapi.APIParams{ "auto_renew": autoRenew, "duration": duration, "offering_type": offeringType, "quantity": quantity, "start_date": startDate, } uri, err := loc.ActionPath("ReservedInstancePurchase", "create") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } location := resp.Header.Get("Location") if 
len(location) == 0 { return res, fmt.Errorf("Missing location header in response") } else { return &ReservedInstancePurchaseLocator{Href(location), loc.api}, nil } } // GET /api/scenarios/:scenario_id/instance_combinations/:instance_combination_id/reserved_instance_purchases // // List all ReservedInstancePurchases for the InstanceCombination. func (loc *ReservedInstancePurchaseLocator) Index(options rsapi.APIParams) ([]*ReservedInstancePurchase, error) { var res []*ReservedInstancePurchase var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("ReservedInstancePurchase", "index") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // GET /api/scenarios/:scenario_id/instance_combinations/:instance_combination_id/reserved_instance_purchases/:id // // Show a specific ReservedInstancePurchase. 
func (loc *ReservedInstancePurchaseLocator) Show(options rsapi.APIParams) (*ReservedInstancePurchase, error) { var res *ReservedInstancePurchase var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("ReservedInstancePurchase", "show") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // PATCH /api/scenarios/:scenario_id/instance_combinations/:instance_combination_id/reserved_instance_purchases/:id // // Update the provided attributes of a ReservedInstancePurchase. 
func (loc *ReservedInstancePurchaseLocator) Update(options rsapi.APIParams) (*ReservedInstancePurchase, error) { var res *ReservedInstancePurchase var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams p = rsapi.APIParams{} var autoRenewOpt = options["auto_renew"] if autoRenewOpt != nil { p["auto_renew"] = autoRenewOpt } var durationOpt = options["duration"] if durationOpt != nil { p["duration"] = durationOpt } var offeringTypeOpt = options["offering_type"] if offeringTypeOpt != nil { p["offering_type"] = offeringTypeOpt } var quantityOpt = options["quantity"] if quantityOpt != nil { p["quantity"] = quantityOpt } var startDateOpt = options["start_date"] if startDateOpt != nil { p["start_date"] = startDateOpt } uri, err := loc.ActionPath("ReservedInstancePurchase", "update") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // DELETE /api/scenarios/:scenario_id/instance_combinations/:instance_combination_id/reserved_instance_purchases/:id // // Delete a ReservedInstancePurchase. This is not actually deleted in the cloud and is only used for cost simulation purposes. 
func (loc *ReservedInstancePurchaseLocator) Destroy() error { var params rsapi.APIParams var p rsapi.APIParams uri, err := loc.ActionPath("ReservedInstancePurchase", "destroy") if err != nil { return err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return err } resp, err := loc.api.PerformRequest(req) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return fmt.Errorf("invalid response %s%s", resp.Status, sr) } return nil } /****** Scenario ******/ // Scenarios can be used to model changes in cloud usage to forecast costs over a 3-year period. // Use the forecast action to generate the results after you create a Scenario and add your InstanceCombinations, // ReservedInstancePurchases and Patterns. type Scenario struct { } //===== Locator // ScenarioLocator exposes the Scenario resource actions. type ScenarioLocator struct { Href api *API } // ScenarioLocator builds a locator from the given href. func (api *API) ScenarioLocator(href string) *ScenarioLocator { return &ScenarioLocator{Href(href), api} } //===== Actions // POST /api/scenarios // // Create a new Scenario. 
func (loc *ScenarioLocator) Create(snapshotTimestamp *time.Time, options rsapi.APIParams) (*ScenarioLocator, error) { var res *ScenarioLocator var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams p = rsapi.APIParams{ "snapshot_timestamp": snapshotTimestamp, } var filtersOpt = options["filters"] if filtersOpt != nil { p["filters"] = filtersOpt } var isBlankOpt = options["is_blank"] if isBlankOpt != nil { p["is_blank"] = isBlankOpt } var isPersistedOpt = options["is_persisted"] if isPersistedOpt != nil { p["is_persisted"] = isPersistedOpt } var nameOpt = options["name"] if nameOpt != nil { p["name"] = nameOpt } var privateCloudInstanceCountOpt = options["private_cloud_instance_count"] if privateCloudInstanceCountOpt != nil { p["private_cloud_instance_count"] = privateCloudInstanceCountOpt } uri, err := loc.ActionPath("Scenario", "create") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } location := resp.Header.Get("Location") if len(location) == 0 { return res, fmt.Errorf("Missing location header in response") } else { return &ScenarioLocator{Href(location), loc.api}, nil } } // GET /api/scenarios // // List all Scenarios. 
func (loc *ScenarioLocator) Index(options rsapi.APIParams) ([]*Scenario, error) { var res []*Scenario var params rsapi.APIParams params = rsapi.APIParams{} var includeNonPersistedOpt = options["include_non_persisted"] if includeNonPersistedOpt != nil { params["include_non_persisted"] = includeNonPersistedOpt } var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("Scenario", "index") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // GET /api/scenarios/:id // // Show a specific Scenario. 
func (loc *ScenarioLocator) Show(options rsapi.APIParams) (*Scenario, error) { var res *Scenario var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("Scenario", "show") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // PATCH /api/scenarios/:id // // Update the provided attributes of a Scenario. func (loc *ScenarioLocator) Update(options rsapi.APIParams) (*Scenario, error) { var res *Scenario var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams p = rsapi.APIParams{} var isPersistedOpt = options["is_persisted"] if isPersistedOpt != nil { p["is_persisted"] = isPersistedOpt } var nameOpt = options["name"] if nameOpt != nil { p["name"] = nameOpt } var privateCloudInstanceCountOpt = options["private_cloud_instance_count"] if privateCloudInstanceCountOpt != nil { p["private_cloud_instance_count"] = privateCloudInstanceCountOpt } var snapshotTimestampOpt = options["snapshot_timestamp"] if snapshotTimestampOpt != nil { p["snapshot_timestamp"] = snapshotTimestampOpt } uri, err := loc.ActionPath("Scenario", "update") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := 
loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // DELETE /api/scenarios/:id // // Delete a Scenario. func (loc *ScenarioLocator) Destroy() error { var params rsapi.APIParams var p rsapi.APIParams uri, err := loc.ActionPath("Scenario", "destroy") if err != nil { return err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return err } resp, err := loc.api.PerformRequest(req) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return fmt.Errorf("invalid response %s%s", resp.Status, sr) } return nil } // GET /api/scenarios/:id/actions/forecast // // Run a simulation to generate a 3-year forecast showing the `average_instance_count`, `instance_upfront_cost`, // `instance_usage_cost` and `instance_recurring_cost` metrics. This call might get major changes so it's best to avoid using it currently. // If there are missing prices for any of the InstanceCombinations then these metrics will be excluded from the results for that InstanceCombination. 
func (loc *ScenarioLocator) Forecast(options rsapi.APIParams) (*TimeSeriesMetricsResult, error) { var res *TimeSeriesMetricsResult var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("Scenario", "forecast") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } /****** ScheduledReport ******/ // ScheduledReports are emailed to you, and include usage, cost, and the change from the previous reporting period. // These emails include links to AnalysisSnapshots, which are generated automatically by us. type ScheduledReport struct { } //===== Locator // ScheduledReportLocator exposes the ScheduledReport resource actions. type ScheduledReportLocator struct { Href api *API } // ScheduledReportLocator builds a locator from the given href. func (api *API) ScheduledReportLocator(href string) *ScheduledReportLocator { return &ScheduledReportLocator{Href(href), api} } //===== Actions // POST /api/scheduled_reports // // Create a new ScheduledReport. 
func (loc *ScheduledReportLocator) Create(frequency string, name string, options rsapi.APIParams) (*ScheduledReportLocator, error) { var res *ScheduledReportLocator if frequency == "" { return res, fmt.Errorf("frequency is required") } if name == "" { return res, fmt.Errorf("name is required") } var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams p = rsapi.APIParams{ "frequency": frequency, "name": name, } var additionalEmailsOpt = options["additional_emails"] if additionalEmailsOpt != nil { p["additional_emails"] = additionalEmailsOpt } var attachCsvOpt = options["attach_csv"] if attachCsvOpt != nil { p["attach_csv"] = attachCsvOpt } var filtersOpt = options["filters"] if filtersOpt != nil { p["filters"] = filtersOpt } uri, err := loc.ActionPath("ScheduledReport", "create") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } location := resp.Header.Get("Location") if len(location) == 0 { return res, fmt.Errorf("Missing location header in response") } else { return &ScheduledReportLocator{Href(location), loc.api}, nil } } // GET /api/scheduled_reports // // List all ScheduledReports. 
func (loc *ScheduledReportLocator) Index(options rsapi.APIParams) ([]*ScheduledReport, error) { var res []*ScheduledReport var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("ScheduledReport", "index") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // GET /api/scheduled_reports/:id // // Show a specific ScheduledReport. 
func (loc *ScheduledReportLocator) Show(options rsapi.APIParams) (*ScheduledReport, error) { var res *ScheduledReport var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("ScheduledReport", "show") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // PATCH /api/scheduled_reports/:id // // Update the provided attributes of a ScheduledReport. 
func (loc *ScheduledReportLocator) Update(options rsapi.APIParams) (*ScheduledReport, error) { var res *ScheduledReport var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams p = rsapi.APIParams{} var additionalEmailsOpt = options["additional_emails"] if additionalEmailsOpt != nil { p["additional_emails"] = additionalEmailsOpt } var attachCsvOpt = options["attach_csv"] if attachCsvOpt != nil { p["attach_csv"] = attachCsvOpt } var frequencyOpt = options["frequency"] if frequencyOpt != nil { p["frequency"] = frequencyOpt } var nameOpt = options["name"] if nameOpt != nil { p["name"] = nameOpt } uri, err := loc.ActionPath("ScheduledReport", "update") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // DELETE /api/scheduled_reports/:id // // Delete a ScheduledReport. 
func (loc *ScheduledReportLocator) Destroy() error { var params rsapi.APIParams var p rsapi.APIParams uri, err := loc.ActionPath("ScheduledReport", "destroy") if err != nil { return err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return err } resp, err := loc.api.PerformRequest(req) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return fmt.Errorf("invalid response %s%s", resp.Status, sr) } return nil } // POST /api/scheduled_reports/actions/create_defaults // // Create the default Scheduled Report: a weekly report with no filters func (loc *ScheduledReportLocator) CreateDefaults(options rsapi.APIParams) (*ScheduledReport, error) { var res *ScheduledReport var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("ScheduledReport", "create_defaults") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } /****** TempInstancePrice ******/ // This is a temporary API call that can be used by the Cloud Analytics UI until the // Pricing Service is live, at which point this API call will be deleted. This is not included in the public docs. 
type TempInstancePrice struct { } //===== Locator // TempInstancePriceLocator exposes the TempInstancePrice resource actions. type TempInstancePriceLocator struct { Href api *API } // TempInstancePriceLocator builds a locator from the given href. func (api *API) TempInstancePriceLocator(href string) *TempInstancePriceLocator { return &TempInstancePriceLocator{Href(href), api} } //===== Actions // GET /api/temp_instance_prices // // Returns a JSON blob with all prices for Scenario Builder. func (loc *TempInstancePriceLocator) Index() (string, error) { var res string var params rsapi.APIParams var p rsapi.APIParams uri, err := loc.ActionPath("TempInstancePrice", "index") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } res = string(respBody) return res, err } /****** User ******/ // Users can have various permissions on multiple accounts. Users with admin permissions in an account // can modify that account's users. This resource is not included in the public docs. type User struct { } //===== Locator // UserLocator exposes the User resource actions. type UserLocator struct { Href api *API } // UserLocator builds a locator from the given href. func (api *API) UserLocator(href string) *UserLocator { return &UserLocator{Href(href), api} } //===== Actions // POST /api/users // // Create a new user with the requested permissions in the requested accounts, and emails // them the login details. Returns an error if the user already exists. 
func (loc *UserLocator) Create(accounts []*UserAccounts, email string, options rsapi.APIParams) (*UserLocator, error) { var res *UserLocator if len(accounts) == 0 { return res, fmt.Errorf("accounts is required") } if email == "" { return res, fmt.Errorf("email is required") } var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams p = rsapi.APIParams{ "accounts": accounts, "email": email, } uri, err := loc.ActionPath("User", "create") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } location := resp.Header.Get("Location") if len(location) == 0 { return res, fmt.Errorf("Missing location header in response") } else { return &UserLocator{Href(location), loc.api}, nil } } // GET /api/users // // List all users. 
func (loc *UserLocator) Index(options rsapi.APIParams) ([]*User, error) { var res []*User var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("User", "index") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // GET /api/users/:id // // Show a specific user. func (loc *UserLocator) Show(options rsapi.APIParams) (*User, error) { var res *User var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("User", "show") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // PATCH /api/users/:id // // Update a specific user's account permissions. 
// This cannot be used to update other user parameters such as their name or password. func (loc *UserLocator) Update(options rsapi.APIParams) (*User, error) { var res *User var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams p = rsapi.APIParams{} var accountsOpt = options["accounts"] if accountsOpt != nil { p["accounts"] = accountsOpt } uri, err := loc.ActionPath("User", "update") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // POST /api/users/actions/invite // // Invites a user to the requested account and gives them the required permissions // so they can add/edit cloud credentials, the user is created if they don't already exist. // This is used during new user onboarding as the user who signs-up might not be the person who has // the cloud credentials required to connect their clouds to RightScale. 
func (loc *UserLocator) Invite(options rsapi.APIParams) (*User, error) { var res *User var params rsapi.APIParams var p rsapi.APIParams p = rsapi.APIParams{} var accountIdOpt = options["account_id"] if accountIdOpt != nil { p["account_id"] = accountIdOpt } var emailOpt = options["email"] if emailOpt != nil { p["email"] = emailOpt } var messageOpt = options["message"] if messageOpt != nil { p["message"] = messageOpt } uri, err := loc.ActionPath("User", "invite") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } /****** UserSetting ******/ // Used by the Cloud Analytics UI to keep track of various UI states. type UserSetting struct { } //===== Locator // UserSettingLocator exposes the UserSetting resource actions. type UserSettingLocator struct { Href api *API } // UserSettingLocator builds a locator from the given href. func (api *API) UserSettingLocator(href string) *UserSettingLocator { return &UserSettingLocator{Href(href), api} } //===== Actions // GET /api/user_settings // // List the UserSettings. 
func (loc *UserSettingLocator) Show(options rsapi.APIParams) (*UserSetting, error) { var res *UserSetting var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams uri, err := loc.ActionPath("UserSetting", "show") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } // PATCH /api/user_settings // // Update the provided attributes of UserSettings. 
func (loc *UserSettingLocator) Update(options rsapi.APIParams) (*UserSetting, error) { var res *UserSetting var params rsapi.APIParams params = rsapi.APIParams{} var viewOpt = options["view"] if viewOpt != nil { params["view"] = viewOpt } var p rsapi.APIParams p = rsapi.APIParams{} var dateRangeOpt = options["date_range"] if dateRangeOpt != nil { p["date_range"] = dateRangeOpt } var dismissedDialogsOpt = options["dismissed_dialogs"] if dismissedDialogsOpt != nil { p["dismissed_dialogs"] = dismissedDialogsOpt } var excludedTagTypesOpt = options["excluded_tag_types"] if excludedTagTypesOpt != nil { p["excluded_tag_types"] = excludedTagTypesOpt } var filtersOpt = options["filters"] if filtersOpt != nil { p["filters"] = filtersOpt } var granularityOpt = options["granularity"] if granularityOpt != nil { p["granularity"] = granularityOpt } var mainMenuVisibilityOpt = options["main_menu_visibility"] if mainMenuVisibilityOpt != nil { p["main_menu_visibility"] = mainMenuVisibilityOpt } var metricsOpt = options["metrics"] if metricsOpt != nil { p["metrics"] = metricsOpt } var moduleStatesOpt = options["module_states"] if moduleStatesOpt != nil { p["module_states"] = moduleStatesOpt } var onboardingStatusOpt = options["onboarding_status"] if onboardingStatusOpt != nil { p["onboarding_status"] = onboardingStatusOpt } var selectedCloudVendorNamesOpt = options["selected_cloud_vendor_names"] if selectedCloudVendorNamesOpt != nil { p["selected_cloud_vendor_names"] = selectedCloudVendorNamesOpt } var sortingOpt = options["sorting"] if sortingOpt != nil { p["sorting"] = sortingOpt } var tableColumnVisibilityOpt = options["table_column_visibility"] if tableColumnVisibilityOpt != nil { p["table_column_visibility"] = tableColumnVisibilityOpt } uri, err := loc.ActionPath("UserSetting", "update") if err != nil { return res, err } req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p) if err != nil { return res, err } resp, err := loc.api.PerformRequest(req) 
if err != nil { return res, err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 299 { respBody, _ := ioutil.ReadAll(resp.Body) sr := string(respBody) if sr != "" { sr = ": " + sr } return res, fmt.Errorf("invalid response %s%s", resp.Status, sr) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { return res, err } err = json.Unmarshal(respBody, &res) return res, err } /****** Data Types ******/ type AccountParam struct { CloudAccounts []*CloudAccount `json:"cloud_accounts,omitempty"` CloudAnalyticsEnabled bool `json:"cloud_analytics_enabled,omitempty"` EnterpriseId int `json:"enterprise_id,omitempty"` EnterpriseName string `json:"enterprise_name,omitempty"` ExpiresIn int `json:"expires_in,omitempty"` Href string `json:"href,omitempty"` Id int `json:"id,omitempty"` IsCloudAnalyticsBlockedByBillingAdminOnly bool `json:"is_cloud_analytics_blocked_by_billing_admin_only,omitempty"` IsEnterpriseParent bool `json:"is_enterprise_parent,omitempty"` Kind string `json:"kind,omitempty"` Name string `json:"name,omitempty"` OwnerId int `json:"owner_id,omitempty"` ParentAccountId int `json:"parent_account_id,omitempty"` ParentAccountName string `json:"parent_account_name,omitempty"` PlanCode string `json:"plan_code,omitempty"` ShardId int `json:"shard_id,omitempty"` UserHasActor bool `json:"user_has_actor,omitempty"` UserHasAdmin bool `json:"user_has_admin,omitempty"` UserHasEnterpriseManager bool `json:"user_has_enterprise_manager,omitempty"` UsesIpWhitelisting bool `json:"uses_ip_whitelisting,omitempty"` } type AnalysisSnapshotParam struct { CreatedAt *time.Time `json:"created_at,omitempty"` CreatedBy string `json:"created_by,omitempty"` EndTime *time.Time `json:"end_time,omitempty"` ExcludedTagTypes []string `json:"excluded_tag_types,omitempty"` Filters []*Filter `json:"filters,omitempty"` Granularity string `json:"granularity,omitempty"` Href string `json:"href,omitempty"` IsComparison bool 
`json:"is_comparison,omitempty"` Kind string `json:"kind,omitempty"` Metrics []string `json:"metrics,omitempty"` MissingAccessToSomeAccounts bool `json:"missing_access_to_some_accounts,omitempty"` ModuleStates []*ModuleState `json:"module_states,omitempty"` StartTime *time.Time `json:"start_time,omitempty"` UpdatedAt *time.Time `json:"updated_at,omitempty"` Uuid string `json:"uuid,omitempty"` } type BudgetAlertParam struct { AdditionalEmails []string `json:"additional_emails,omitempty"` AttachCsv bool `json:"attach_csv,omitempty"` Budget *ReturnBudgetStruct `json:"budget,omitempty"` CreatedAt *time.Time `json:"created_at,omitempty"` Filters []*Filter `json:"filters,omitempty"` Frequency string `json:"frequency,omitempty"` Href string `json:"href,omitempty"` Id int `json:"id,omitempty"` Kind string `json:"kind,omitempty"` Name string `json:"name,omitempty"` Type_ string `json:"type,omitempty"` UpdatedAt *time.Time `json:"updated_at,omitempty"` } type BudgetStruct struct { Amount float64 `json:"amount,omitempty"` Period string `json:"period,omitempty"` } type CloudAccount struct { CloudId int `json:"cloud_id,omitempty"` CloudName string `json:"cloud_name,omitempty"` CloudType string `json:"cloud_type,omitempty"` Kind string `json:"kind,omitempty"` } type CurrentUserParam struct { Company string `json:"company,omitempty"` CreatedAt *time.Time `json:"created_at,omitempty"` Email string `json:"email,omitempty"` FirstName string `json:"first_name,omitempty"` Id int `json:"id,omitempty"` Kind string `json:"kind,omitempty"` LastName string `json:"last_name,omitempty"` Phone string `json:"phone,omitempty"` Timezone string `json:"timezone,omitempty"` UpdatedAt *time.Time `json:"updated_at,omitempty"` } type DateRangeStruct struct { EndTime *time.Time `json:"end_time,omitempty"` IsComparison bool `json:"is_comparison,omitempty"` StartTime *time.Time `json:"start_time,omitempty"` Type_ string `json:"type,omitempty"` } type Filter struct { Kind string `json:"kind,omitempty"` 
Label string `json:"label,omitempty"` TagResourceType string `json:"tag_resource_type,omitempty"` Type_ string `json:"type,omitempty"` Value string `json:"value,omitempty"` } type InstanceCombinationLinks struct { Scenario *ScenarioParam `json:"scenario,omitempty"` } type InstanceCombinationParam struct { CloudName string `json:"cloud_name,omitempty"` CloudVendorName string `json:"cloud_vendor_name,omitempty"` CreatedAt *time.Time `json:"created_at,omitempty"` DatacenterName string `json:"datacenter_name,omitempty"` Href string `json:"href,omitempty"` Id int `json:"id,omitempty"` InstanceTypeName string `json:"instance_type_name,omitempty"` Kind string `json:"kind,omitempty"` Links *InstanceCombinationLinks `json:"links,omitempty"` MonthlyUsageHours int `json:"monthly_usage_hours,omitempty"` MonthlyUsageOption string `json:"monthly_usage_option,omitempty"` Patterns []*PatternParam `json:"patterns,omitempty"` Platform string `json:"platform,omitempty"` Quantity int `json:"quantity,omitempty"` ReservedInstancePurchases []*ReservedInstancePurchaseParam `json:"reserved_instance_purchases,omitempty"` Scenario *ScenarioParam `json:"scenario,omitempty"` UpdatedAt *time.Time `json:"updated_at,omitempty"` } type InstanceParam struct { AccountId int `json:"account_id,omitempty"` AccountName string `json:"account_name,omitempty"` CloudId int `json:"cloud_id,omitempty"` CloudName string `json:"cloud_name,omitempty"` CloudVendorName string `json:"cloud_vendor_name,omitempty"` DatacenterKey string `json:"datacenter_key,omitempty"` DatacenterName string `json:"datacenter_name,omitempty"` DeploymentId int `json:"deployment_id,omitempty"` DeploymentName string `json:"deployment_name,omitempty"` EstimatedCostForPeriod float64 `json:"estimated_cost_for_period,omitempty"` EstimatedManagedRcuCountForPeriod float64 `json:"estimated_managed_rcu_count_for_period,omitempty"` IncarnatorId int `json:"incarnator_id,omitempty"` IncarnatorType string `json:"incarnator_type,omitempty"` 
InstanceEndAt *time.Time `json:"instance_end_at,omitempty"` InstanceKey string `json:"instance_key,omitempty"` InstanceName string `json:"instance_name,omitempty"` InstanceRsid string `json:"instance_rsid,omitempty"` InstanceStartAt *time.Time `json:"instance_start_at,omitempty"` InstanceTypeKey string `json:"instance_type_key,omitempty"` InstanceTypeName string `json:"instance_type_name,omitempty"` InstanceUid string `json:"instance_uid,omitempty"` Kind string `json:"kind,omitempty"` Platform string `json:"platform,omitempty"` ProvisionedByUserEmail string `json:"provisioned_by_user_email,omitempty"` ProvisionedByUserId int `json:"provisioned_by_user_id,omitempty"` ServerTemplateId int `json:"server_template_id,omitempty"` ServerTemplateName string `json:"server_template_name,omitempty"` State string `json:"state,omitempty"` Tags []*Tag `json:"tags,omitempty"` TotalUsageHours float64 `json:"total_usage_hours,omitempty"` } type InstanceUsagePeriodParam struct { EstimatedCost float64 `json:"estimated_cost,omitempty"` EstimatedManagedRcuCount float64 `json:"estimated_managed_rcu_count,omitempty"` HourlyPrice float64 `json:"hourly_price,omitempty"` InstanceKey string `json:"instance_key,omitempty"` InstanceTypeName string `json:"instance_type_name,omitempty"` Kind string `json:"kind,omitempty"` PricingType string `json:"pricing_type,omitempty"` RcuRate float64 `json:"rcu_rate,omitempty"` ReservationUid string `json:"reservation_uid,omitempty"` UsageEndAt *time.Time `json:"usage_end_at,omitempty"` UsageStartAt *time.Time `json:"usage_start_at,omitempty"` } type Metrics struct { AverageInstanceCount float64 `json:"average_instance_count,omitempty"` HighestInstanceCount float64 `json:"highest_instance_count,omitempty"` InstanceUsageCost float64 `json:"instance_usage_cost,omitempty"` Kind string `json:"kind,omitempty"` LowestInstanceCount float64 `json:"lowest_instance_count,omitempty"` ManagedInstanceHours float64 `json:"managed_instance_hours,omitempty"` 
ManagedInstanceRcuCount float64 `json:"managed_instance_rcu_count,omitempty"` ReservedInstanceRecurringCost float64 `json:"reserved_instance_recurring_cost,omitempty"` ReservedInstanceUpfrontCost float64 `json:"reserved_instance_upfront_cost,omitempty"` TotalCost float64 `json:"total_cost,omitempty"` UnmanagedInstanceHours float64 `json:"unmanaged_instance_hours,omitempty"` UnmanagedInstanceRcuCount float64 `json:"unmanaged_instance_rcu_count,omitempty"` WastedReservedInstanceCost float64 `json:"wasted_reserved_instance_cost,omitempty"` } type MetricsResult struct { BreakdownMetricsResults []*MetricsResult `json:"breakdown_metrics_results,omitempty"` Group map[string]interface{} `json:"group,omitempty"` Kind string `json:"kind,omitempty"` Metrics *Metrics `json:"metrics,omitempty"` } type ModuleState struct { Active bool `json:"active,omitempty"` Expanded bool `json:"expanded,omitempty"` Kind string `json:"kind,omitempty"` SortKey string `json:"sort_key,omitempty"` Type_ string `json:"type,omitempty"` } type PatternParam struct { CreatedAt *time.Time `json:"created_at,omitempty"` Href string `json:"href,omitempty"` Id int `json:"id,omitempty"` Kind string `json:"kind,omitempty"` Months string `json:"months,omitempty"` Name string `json:"name,omitempty"` Operation string `json:"operation,omitempty"` Scenarios []*ScenarioParam `json:"scenarios,omitempty"` Summary string `json:"summary,omitempty"` Type_ string `json:"type,omitempty"` UpdatedAt *time.Time `json:"updated_at,omitempty"` Value float64 `json:"value,omitempty"` Years string `json:"years,omitempty"` } type ReservedInstanceParam struct { AccountId int `json:"account_id,omitempty"` AccountName string `json:"account_name,omitempty"` CloudId int `json:"cloud_id,omitempty"` CloudName string `json:"cloud_name,omitempty"` CloudVendorName string `json:"cloud_vendor_name,omitempty"` CostSaved float64 `json:"cost_saved,omitempty"` DatacenterKey string `json:"datacenter_key,omitempty"` DatacenterName string 
`json:"datacenter_name,omitempty"` Duration int `json:"duration,omitempty"` EndTime *time.Time `json:"end_time,omitempty"` InstanceCount int `json:"instance_count,omitempty"` InstanceTypeKey string `json:"instance_type_key,omitempty"` InstanceTypeName string `json:"instance_type_name,omitempty"` Kind string `json:"kind,omitempty"` OfferingType string `json:"offering_type,omitempty"` Platform string `json:"platform,omitempty"` ReservationUid string `json:"reservation_uid,omitempty"` StartTime *time.Time `json:"start_time,omitempty"` State string `json:"state,omitempty"` Tenancy string `json:"tenancy,omitempty"` UnusedRecurringCost float64 `json:"unused_recurring_cost,omitempty"` UtilizationPercentage float64 `json:"utilization_percentage,omitempty"` } type ReservedInstancePurchaseLinks struct { InstanceCombination *InstanceCombinationParam `json:"instance_combination,omitempty"` } type ReservedInstancePurchaseParam struct { AutoRenew bool `json:"auto_renew,omitempty"` CreatedAt *time.Time `json:"created_at,omitempty"` Duration int `json:"duration,omitempty"` Href string `json:"href,omitempty"` Id int `json:"id,omitempty"` InstanceCombination *InstanceCombinationParam `json:"instance_combination,omitempty"` Kind string `json:"kind,omitempty"` Links *ReservedInstancePurchaseLinks `json:"links,omitempty"` OfferingType string `json:"offering_type,omitempty"` Quantity int `json:"quantity,omitempty"` StartDate *time.Time `json:"start_date,omitempty"` UpdatedAt *time.Time `json:"updated_at,omitempty"` } type ReturnBudgetStruct struct { Amount float64 `json:"amount,omitempty"` Period string `json:"period,omitempty"` } type ReturnCurrentUserStruct struct { BetaEnabled bool `json:"beta_enabled,omitempty"` CanSeeCostAndRcuMetrics bool `json:"can_see_cost_and_rcu_metrics,omitempty"` CanSeeManagedRcus bool `json:"can_see_managed_rcus,omitempty"` CanSeeUnmanagedRcus bool `json:"can_see_unmanaged_rcus,omitempty"` Company string `json:"company,omitempty"` Email string 
`json:"email,omitempty"` FirstLoginAt *time.Time `json:"first_login_at,omitempty"` FirstName string `json:"first_name,omitempty"` HasAdminOnAnyAccount bool `json:"has_admin_on_any_account,omitempty"` HasCloudAnalyticsEnabledAccounts bool `json:"has_cloud_analytics_enabled_accounts,omitempty"` HasNonIpWhitelistedAccountsWithAdmin bool `json:"has_non_ip_whitelisted_accounts_with_admin,omitempty"` HasOnlyExpiredAccounts bool `json:"has_only_expired_accounts,omitempty"` Id int `json:"id,omitempty"` IsCloudAnalyticsOnly bool `json:"is_cloud_analytics_only,omitempty"` IsRightscaleEmployee bool `json:"is_rightscale_employee,omitempty"` IsSelfserviceUser bool `json:"is_selfservice_user,omitempty"` IsTeamUser bool `json:"is_team_user,omitempty"` LastName string `json:"last_name,omitempty"` NotificationMessage string `json:"notification_message,omitempty"` Phone string `json:"phone,omitempty"` SelfserviceUrl string `json:"selfservice_url,omitempty"` Timezone string `json:"timezone,omitempty"` TimezoneOffsetSeconds int `json:"timezone_offset_seconds,omitempty"` TrialEndDate *time.Time `json:"trial_end_date,omitempty"` } type ReturnGoogleAnalyticsStruct struct { AccountId string `json:"account_id,omitempty"` DomainName string `json:"domain_name,omitempty"` } type ReturnUserSettingsDateRangeStruct struct { EndTime *time.Time `json:"end_time,omitempty"` IsComparison bool `json:"is_comparison,omitempty"` StartTime *time.Time `json:"start_time,omitempty"` Type_ string `json:"type,omitempty"` } type ScenarioParam struct { CreatedAt *time.Time `json:"created_at,omitempty"` Filters []*Filter `json:"filters,omitempty"` HistoricMetricsResults []*TimeSeriesMetricsResult `json:"historic_metrics_results,omitempty"` Href string `json:"href,omitempty"` Id int `json:"id,omitempty"` InstanceCombinations []*InstanceCombinationParam `json:"instance_combinations,omitempty"` IsPersisted bool `json:"is_persisted,omitempty"` Kind string `json:"kind,omitempty"` Name string `json:"name,omitempty"` 
PrivateCloudInstanceCount int `json:"private_cloud_instance_count,omitempty"` SnapshotTimestamp *time.Time `json:"snapshot_timestamp,omitempty"` UpdatedAt *time.Time `json:"updated_at,omitempty"` } type ScheduledReportParam struct { AdditionalEmails []string `json:"additional_emails,omitempty"` AttachCsv bool `json:"attach_csv,omitempty"` CreatedAt *time.Time `json:"created_at,omitempty"` Filters []*Filter `json:"filters,omitempty"` Frequency string `json:"frequency,omitempty"` Href string `json:"href,omitempty"` Id int `json:"id,omitempty"` Kind string `json:"kind,omitempty"` Name string `json:"name,omitempty"` UpdatedAt *time.Time `json:"updated_at,omitempty"` } type Tag struct { Key string `json:"key,omitempty"` Kind string `json:"kind,omitempty"` ResourceType string `json:"resource_type,omitempty"` Value string `json:"value,omitempty"` } type TimeSeriesMetricsResult struct { Kind string `json:"kind,omitempty"` Results []*MetricsResult `json:"results,omitempty"` Timestamp *time.Time `json:"timestamp,omitempty"` } type UserAccounts struct { AccountId int `json:"account_id,omitempty"` AccountName string `json:"account_name,omitempty"` BillingAdminOnly bool `json:"billing_admin_only,omitempty"` CloudAnalyticsAccountSettingEnabled bool `json:"cloud_analytics_account_setting_enabled,omitempty"` CloudAnalyticsEnabled bool `json:"cloud_analytics_enabled,omitempty"` Kind string `json:"kind,omitempty"` Permissions []string `json:"permissions,omitempty"` } type UserEnvironment struct { AnalyticsUiSha string `json:"analytics_ui_sha,omitempty"` CloudManagementUrl string `json:"cloud_management_url,omitempty"` CurrentUser *ReturnCurrentUserStruct `json:"current_user,omitempty"` Environment string `json:"environment,omitempty"` GoogleAnalytics *ReturnGoogleAnalyticsStruct `json:"google_analytics,omitempty"` Kind string `json:"kind,omitempty"` UseLocalAssets bool `json:"use_local_assets,omitempty"` UserSettings *UserSettingParam `json:"user_settings,omitempty"` } type 
UserOnboardingStatus struct { AccountIdToAddFirstCloud int `json:"account_id_to_add_first_cloud,omitempty"` AccountNames []string `json:"account_names,omitempty"` Kind string `json:"kind,omitempty"` Status string `json:"status,omitempty"` UrlToAddFirstCloud string `json:"url_to_add_first_cloud,omitempty"` } type UserParam struct { Accounts []*UserAccounts `json:"accounts,omitempty"` Email string `json:"email,omitempty"` HasAnyExpiredAccounts bool `json:"has_any_expired_accounts,omitempty"` HasAnyIpWhitelistedAccountsWithAdmin bool `json:"has_any_ip_whitelisted_accounts_with_admin,omitempty"` Href string `json:"href,omitempty"` Id int `json:"id,omitempty"` Kind string `json:"kind,omitempty"` } type UserSettingParam struct { DateRange *ReturnUserSettingsDateRangeStruct `json:"date_range,omitempty"` DismissedDialogs map[string]interface{} `json:"dismissed_dialogs,omitempty"` ExcludedTagTypes []string `json:"excluded_tag_types,omitempty"` Filters []*Filter `json:"filters,omitempty"` Granularity string `json:"granularity,omitempty"` Kind string `json:"kind,omitempty"` MainMenuVisibility string `json:"main_menu_visibility,omitempty"` Metrics []string `json:"metrics,omitempty"` ModuleStates []*ModuleState `json:"module_states,omitempty"` OnboardingStatus string `json:"onboarding_status,omitempty"` SelectedCloudVendorNames map[string]interface{} `json:"selected_cloud_vendor_names,omitempty"` Sorting map[string]interface{} `json:"sorting,omitempty"` TableColumnVisibility map[string]interface{} `json:"table_column_visibility,omitempty"` }
package dto

// FindUserDto is the request payload used to look up a user. The Username
// field maps to the JSON key "usernameOrId", so it accepts either a
// username or an id.
type FindUserDto struct {
	Email        string `json:"email"`
	Username     string `json:"usernameOrId"`
	IsSubscribed bool   `json:"isSubscribed"`
	// Role is a numeric role code; its enum values are defined elsewhere —
	// TODO confirm against the consumer of this DTO.
	Role int `json:"role"`
}
package main

import (
	"testing"

	"github.com/jackytck/projecteuler/tools"
)

// TestP108 checks solve against two fixed input/output pairs
// (presumably Project Euler problem 108, given the name — the expected
// outputs are taken as ground truth here).
func TestP108(t *testing.T) {
	cases := []tools.TestCase{
		{In: 2, Out: 4},
		{In: 1000, Out: 180180},
	}
	tools.TestIntInt(t, cases, solve, "P108")
}
package work

import (
	"log"
	"time"
)

// createMonitor returns the Monitor implementation for the given type name.
// "free" is served by the disk monitor; "uptime" is recognized but not
// implemented; any other unknown type is logged and yields nil.
func createMonitor(monitorType string) Monitor {
	switch monitorType {
	case "cpu":
		return &Cpu{}
	case "disk", "free":
		// "free" reuses the disk monitor. The stray debug print
		// (log.Println(111)) that used to live here has been removed.
		return &Disk{}
	case "uptime":
		// Recognized but not implemented yet.
		return nil
	default:
		log.Println("不受支持的监控类型")
		return nil
	}
}

// Run samples the monitor of the given type every circle seconds until a
// true value is received on stop. It returns immediately when the monitor
// type is unsupported. (Parameter renamed from the misspelled "monitotType".)
func Run(monitorType string, circle int64, stop chan bool) {
	monitor := createMonitor(monitorType)
	if monitor == nil {
		return
	}
	for {
		monitor.SetMonitorData()
		log.Println(monitor)
		// Non-blocking check for a stop signal between samples.
		select {
		case flag := <-stop:
			if flag {
				return
			}
		default:
		}
		time.Sleep(time.Duration(circle) * time.Second)
	}
}
// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.

// Package gluetidb provides a glue.Glue implementation backed by an
// embedded TiDB session, used by BR for backup/restore DDL execution.
package gluetidb

import (
	"bytes"
	"context"
	"strings"
	"time"

	"github.com/pingcap/errors"
	"github.com/pingcap/log"
	"github.com/pingcap/tidb/br/pkg/glue"
	"github.com/pingcap/tidb/br/pkg/gluetikv"
	"github.com/pingcap/tidb/br/pkg/logutil"
	"github.com/pingcap/tidb/config"
	"github.com/pingcap/tidb/ddl"
	"github.com/pingcap/tidb/domain"
	"github.com/pingcap/tidb/executor"
	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/meta/autoid"
	"github.com/pingcap/tidb/parser/model"
	"github.com/pingcap/tidb/parser/mysql"
	"github.com/pingcap/tidb/session"
	"github.com/pingcap/tidb/sessionctx"
	pd "github.com/tikv/pd/client"
	"go.uber.org/zap"
)

// Asserting Glue implements glue.ConsoleGlue and glue.Glue at compile time.
var (
	_ glue.ConsoleGlue = Glue{}
	_ glue.Glue        = Glue{}
)

const (
	// Initial buffer capacities for rendered CREATE TABLE / CREATE DATABASE
	// statements, to avoid repeated growth.
	defaultCapOfCreateTable    = 512
	defaultCapOfCreateDatabase = 64
	// brComment is prepended to every statement BR issues so they are
	// identifiable in logs and processlists.
	brComment = `/*from(br)*/`
)

// New makes a new tidb glue.
func New() Glue {
	log.Debug("enabling no register config")
	config.UpdateGlobal(func(conf *config.Config) {
		conf.SkipRegisterToDashboard = true
		conf.Log.EnableSlowLog.Store(false)
		conf.TiKVClient.CoprReqTimeout = 1800 * time.Second
	})
	return Glue{}
}

// Glue is an implementation of glue.Glue using a new TiDB session.
type Glue struct {
	glue.StdIOGlue

	tikvGlue gluetikv.Glue
}

// tidbSession wraps a TiDB session to implement glue.Session.
type tidbSession struct {
	se session.Session
}

// GetDomain implements glue.Glue.
func (Glue) GetDomain(store kv.Storage) (*domain.Domain, error) {
	// Probe for an already-created domain (nil store); the error is
	// deliberately discarded — only existence matters here.
	existDom, _ := session.GetDomain(nil)
	initStatsSe, err := session.CreateSession(store)
	if err != nil {
		return nil, errors.Trace(err)
	}
	se, err := session.CreateSession(store)
	if err != nil {
		return nil, errors.Trace(err)
	}
	dom, err := session.GetDomain(store)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Only perform one-time initialization when no domain existed before.
	if existDom == nil {
		err = session.InitMDLVariable(store)
		if err != nil {
			return nil, err
		}
		// create stats handler for backup and restore.
		err = dom.UpdateTableStatsLoop(se, initStatsSe)
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	return dom, nil
}

// CreateSession implements glue.Glue.
func (Glue) CreateSession(store kv.Storage) (glue.Session, error) {
	se, err := session.CreateSession(store)
	if err != nil {
		return nil, errors.Trace(err)
	}
	tiSession := &tidbSession{
		se: se,
	}
	return tiSession, nil
}

// Open implements glue.Glue. It delegates storage opening to the TiKV glue.
func (g Glue) Open(path string, option pd.SecurityOption) (kv.Storage, error) {
	return g.tikvGlue.Open(path, option)
}

// OwnsStorage implements glue.Glue. This glue always owns the storage.
func (Glue) OwnsStorage() bool {
	return true
}

// StartProgress implements glue.Glue.
func (g Glue) StartProgress(ctx context.Context, cmdName string, total int64, redirectLog bool) glue.Progress {
	return g.tikvGlue.StartProgress(ctx, cmdName, total, redirectLog)
}

// Record implements glue.Glue.
func (g Glue) Record(name string, value uint64) {
	g.tikvGlue.Record(name, value)
}

// GetVersion implements glue.Glue.
func (g Glue) GetVersion() string {
	return g.tikvGlue.GetVersion()
}

// UseOneShotSession implements glue.Glue. It runs fn with a session that is
// closed as soon as fn returns; when closeDomain is set, the backing domain
// is closed as well.
func (g Glue) UseOneShotSession(store kv.Storage, closeDomain bool, fn func(glue.Session) error) error {
	se, err := session.CreateSession(store)
	if err != nil {
		return errors.Trace(err)
	}
	glueSession := &tidbSession{
		se: se,
	}
	defer func() {
		se.Close()
		log.Info("one shot session closed")
	}()
	// dom will be created during session.CreateSession.
	dom, err := session.GetDomain(store)
	if err != nil {
		return errors.Trace(err)
	}
	if err = session.InitMDLVariable(store); err != nil {
		return errors.Trace(err)
	}

	// because domain was created during the whole program exists.
	// and it will register br info to info syncer.
	// we'd better close it as soon as possible.
	if closeDomain {
		defer func() {
			dom.Close()
			log.Info("one shot domain closed")
		}()
	}
	err = fn(glueSession)
	if err != nil {
		return errors.Trace(err)
	}
	return nil
}

// GetSessionCtx implements glue.Glue
func (gs *tidbSession) GetSessionCtx() sessionctx.Context {
	return gs.se
}

// Execute implements glue.Session.
func (gs *tidbSession) Execute(ctx context.Context, sql string) error {
	return gs.ExecuteInternal(ctx, sql)
}

// ExecuteInternal runs sql with BR's internal source type attached to ctx.
func (gs *tidbSession) ExecuteInternal(ctx context.Context, sql string, args ...interface{}) error {
	ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnBR)
	rs, err := gs.se.ExecuteInternal(ctx, sql, args...)
	if err != nil {
		return errors.Trace(err)
	}
	// Clear the cached InfoSchema on exit so stale schema is not reused.
	defer func() {
		vars := gs.se.GetSessionVars()
		vars.TxnCtxMu.Lock()
		vars.TxnCtx.InfoSchema = nil
		vars.TxnCtxMu.Unlock()
	}()
	// Some of SQLs (like ADMIN RECOVER INDEX) may lazily take effect
	// when we are polling the result set.
	// At least call `next` once for triggering theirs side effect.
	// (Maybe we'd better drain all returned rows?)
	if rs != nil {
		//nolint: errcheck
		defer rs.Close()
		c := rs.NewChunk(nil)
		// A failed Next is logged and deliberately swallowed: the statement
		// itself already succeeded; only the side-effect poll failed.
		if err := rs.Next(ctx, c); err != nil {
			log.Warn("Error during draining result of internal sql.",
				logutil.Redact(zap.String("sql", sql)), logutil.ShortError(err))
			return nil
		}
	}
	return nil
}

// CreateDatabase implements glue.Session.
func (gs *tidbSession) CreateDatabase(ctx context.Context, schema *model.DBInfo) error {
	d := domain.GetDomain(gs.se).DDL()
	query, err := gs.showCreateDatabase(schema)
	if err != nil {
		return errors.Trace(err)
	}
	gs.se.SetValue(sessionctx.QueryString, query)
	// Clone before mutating so the caller's DBInfo is left untouched.
	schema = schema.Clone()
	if len(schema.Charset) == 0 {
		schema.Charset = mysql.DefaultCharset
	}
	return d.CreateSchemaWithInfo(gs.se, schema, ddl.OnExistIgnore)
}

// CreatePlacementPolicy implements glue.Session.
// CreatePlacementPolicy implements glue.Session.
func (gs *tidbSession) CreatePlacementPolicy(ctx context.Context, policy *model.PolicyInfo) error {
	d := domain.GetDomain(gs.se).DDL()
	gs.se.SetValue(sessionctx.QueryString, gs.showCreatePlacementPolicy(policy))
	// the default behaviour is ignoring duplicated policy during restore.
	return d.CreatePlacementPolicyWithInfo(gs.se, policy, ddl.OnExistIgnore)
}

// SplitBatchCreateTable provide a way to split batch into small batch when batch size is large than 6 MB.
// The raft entry has limit size of 6 MB, a batch of CreateTables may hit this limitation
// TODO: shall query string be set for each split batch create, it looks does not matter if we set once for all.
func (gs *tidbSession) SplitBatchCreateTable(schema model.CIStr, infos []*model.TableInfo, cs ...ddl.CreateTableWithInfoConfigurier) error {
	var err error
	d := domain.GetDomain(gs.se).DDL()
	err = d.BatchCreateTableWithInfo(gs.se, schema, infos, append(cs, ddl.OnExistIgnore)...)
	if kv.ErrEntryTooLarge.Equal(err) {
		log.Info("entry too large, split batch create table", zap.Int("num table", len(infos)))
		// A single table that is itself too large cannot be split further;
		// surface the error to the caller.
		if len(infos) == 1 {
			return err
		}
		// Binary split: recurse on each half until every batch fits.
		mid := len(infos) / 2
		err = gs.SplitBatchCreateTable(schema, infos[:mid], cs...)
		if err != nil {
			return err
		}
		err = gs.SplitBatchCreateTable(schema, infos[mid:], cs...)
		if err != nil {
			return err
		}
		return nil
	}
	return err
}

// CreateTables implements glue.BatchCreateTableSession.
func (gs *tidbSession) CreateTables(_ context.Context, tables map[string][]*model.TableInfo, cs ...ddl.CreateTableWithInfoConfigurier) error {
	var dbName model.CIStr

	// Disable foreign key check when batch create tables.
	gs.se.GetSessionVars().ForeignKeyChecks = false
	for db, tablesInDB := range tables {
		dbName = model.NewCIStr(db)
		queryBuilder := strings.Builder{}
		cloneTables := make([]*model.TableInfo, 0, len(tablesInDB))
		for _, table := range tablesInDB {
			query, err := gs.showCreateTable(table)
			if err != nil {
				return errors.Trace(err)
			}

			queryBuilder.WriteString(query)
			queryBuilder.WriteString(";")

			table = table.Clone()
			// Clone() does not clone partitions yet :(
			if table.Partition != nil {
				newPartition := *table.Partition
				newPartition.Definitions = append([]model.PartitionDefinition{}, table.Partition.Definitions...)
				table.Partition = &newPartition
			}
			cloneTables = append(cloneTables, table)
		}
		gs.se.SetValue(sessionctx.QueryString, queryBuilder.String())
		if err := gs.SplitBatchCreateTable(dbName, cloneTables, cs...); err != nil {
			//It is possible to failure when TiDB does not support model.ActionCreateTables.
			//In this circumstance, BatchCreateTableWithInfo returns errno.ErrInvalidDDLJob,
			//we fall back to old way that creating table one by one
			// NOTE(review): the fallback described above is not implemented
			// here — the error is logged and propagated. Confirm whether the
			// per-table fallback was intentionally removed.
			log.Warn("batch create table from tidb failure", zap.Error(err))
			return err
		}
	}
	return nil
}

// CreateTable implements glue.Session.
func (gs *tidbSession) CreateTable(_ context.Context, dbName model.CIStr, table *model.TableInfo, cs ...ddl.CreateTableWithInfoConfigurier) error {
	d := domain.GetDomain(gs.se).DDL()
	query, err := gs.showCreateTable(table)
	if err != nil {
		return errors.Trace(err)
	}
	gs.se.SetValue(sessionctx.QueryString, query)
	// Disable foreign key check when batch create tables.
	gs.se.GetSessionVars().ForeignKeyChecks = false
	// Clone() does not clone partitions yet :(
	table = table.Clone()
	if table.Partition != nil {
		newPartition := *table.Partition
		newPartition.Definitions = append([]model.PartitionDefinition{}, table.Partition.Definitions...)
		table.Partition = &newPartition
	}

	return d.CreateTableWithInfo(gs.se, dbName, table, append(cs, ddl.OnExistIgnore)...)
}

// Close implements glue.Session.
func (gs *tidbSession) Close() { gs.se.Close() } // GetGlobalVariables implements glue.Session. func (gs *tidbSession) GetGlobalVariable(name string) (string, error) { return gs.se.GetSessionVars().GlobalVarsAccessor.GetTiDBTableValue(name) } // showCreateTable shows the result of SHOW CREATE TABLE from a TableInfo. func (gs *tidbSession) showCreateTable(tbl *model.TableInfo) (string, error) { table := tbl.Clone() table.AutoIncID = 0 result := bytes.NewBuffer(make([]byte, 0, defaultCapOfCreateTable)) // this can never fail. _, _ = result.WriteString(brComment) if err := executor.ConstructResultOfShowCreateTable(gs.se, tbl, autoid.Allocators{}, result); err != nil { return "", errors.Trace(err) } return result.String(), nil } // showCreateDatabase shows the result of SHOW CREATE DATABASE from a dbInfo. func (gs *tidbSession) showCreateDatabase(db *model.DBInfo) (string, error) { result := bytes.NewBuffer(make([]byte, 0, defaultCapOfCreateDatabase)) // this can never fail. _, _ = result.WriteString(brComment) if err := executor.ConstructResultOfShowCreateDatabase(gs.se, db, true, result); err != nil { return "", errors.Trace(err) } return result.String(), nil } func (gs *tidbSession) showCreatePlacementPolicy(policy *model.PolicyInfo) string { return executor.ConstructResultOfShowCreatePlacementPolicy(policy) } // mockSession is used for test. type mockSession struct { se session.Session globalVars map[string]string } // GetSessionCtx implements glue.Glue func (s *mockSession) GetSessionCtx() sessionctx.Context { return s.se } // Execute implements glue.Session. func (s *mockSession) Execute(ctx context.Context, sql string) error { return s.ExecuteInternal(ctx, sql) } func (s *mockSession) ExecuteInternal(ctx context.Context, sql string, args ...interface{}) error { ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnBR) rs, err := s.se.ExecuteInternal(ctx, sql, args...) 
if err != nil { return err } // Some of SQLs (like ADMIN RECOVER INDEX) may lazily take effect // when we are polling the result set. // At least call `next` once for triggering theirs side effect. // (Maybe we'd better drain all returned rows?) if rs != nil { //nolint: errcheck defer rs.Close() c := rs.NewChunk(nil) if err := rs.Next(ctx, c); err != nil { return nil } } return nil } // CreateDatabase implements glue.Session. func (*mockSession) CreateDatabase(_ context.Context, _ *model.DBInfo) error { log.Fatal("unimplemented CreateDatabase for mock session") return nil } // CreatePlacementPolicy implements glue.Session. func (*mockSession) CreatePlacementPolicy(_ context.Context, _ *model.PolicyInfo) error { log.Fatal("unimplemented CreateDatabase for mock session") return nil } // CreateTables implements glue.BatchCreateTableSession. func (*mockSession) CreateTables(_ context.Context, _ map[string][]*model.TableInfo, _ ...ddl.CreateTableWithInfoConfigurier) error { log.Fatal("unimplemented CreateDatabase for mock session") return nil } // CreateTable implements glue.Session. func (*mockSession) CreateTable(_ context.Context, _ model.CIStr, _ *model.TableInfo, _ ...ddl.CreateTableWithInfoConfigurier) error { log.Fatal("unimplemented CreateDatabase for mock session") return nil } // Close implements glue.Session. func (s *mockSession) Close() { s.se.Close() } // GetGlobalVariables implements glue.Session. func (s *mockSession) GetGlobalVariable(name string) (string, error) { if ret, ok := s.globalVars[name]; ok { return ret, nil } return "True", nil } // MockGlue only used for test type MockGlue struct { se session.Session GlobalVars map[string]string } func (m *MockGlue) SetSession(se session.Session) { m.se = se } // GetDomain implements glue.Glue. func (*MockGlue) GetDomain(store kv.Storage) (*domain.Domain, error) { return nil, nil } // CreateSession implements glue.Glue. 
// CreateSession returns a mockSession wrapping the injected session and vars.
func (m *MockGlue) CreateSession(store kv.Storage) (glue.Session, error) {
	glueSession := &mockSession{
		se:         m.se,
		globalVars: m.GlobalVars,
	}
	return glueSession, nil
}

// Open implements glue.Glue.
func (*MockGlue) Open(path string, option pd.SecurityOption) (kv.Storage, error) {
	return nil, nil
}

// OwnsStorage implements glue.Glue.
func (*MockGlue) OwnsStorage() bool {
	return true
}

// StartProgress implements glue.Glue.
func (*MockGlue) StartProgress(ctx context.Context, cmdName string, total int64, redirectLog bool) glue.Progress {
	return nil
}

// Record implements glue.Glue. The mock intentionally records nothing.
func (*MockGlue) Record(name string, value uint64) {
}

// GetVersion implements glue.Glue.
func (*MockGlue) GetVersion() string {
	return "mock glue"
}

// UseOneShotSession implements glue.Glue.
// Note: globalVars is deliberately not propagated here; closeDomain is ignored.
func (m *MockGlue) UseOneShotSession(store kv.Storage, closeDomain bool, fn func(glue.Session) error) error {
	glueSession := &mockSession{
		se: m.se,
	}
	return fn(glueSession)
}
package runner // This file defines an interface for task queues used by the runner import ( "context" "os" "regexp" "strings" "time" "github.com/go-stack/stack" "github.com/karlmutch/errors" ) // convert types take an int and return a string value. type MsgHandler func(ctx context.Context, project string, subscription string, credentials string, data []byte) (resource *Resource, ack bool) type TaskQueue interface { // Refresh is used to scan the catalog of queues work could arrive on and pass them back to the caller Refresh(qNameMatch *regexp.Regexp, timeout time.Duration) (known map[string]interface{}, err errors.Error) // Process a unit of work after it arrives on a queue Work(ctx context.Context, qTimeout time.Duration, subscription string, handler MsgHandler) (msgs uint64, resource *Resource, err errors.Error) // Check that the specified queue exists Exists(ctx context.Context, subscription string) (exists bool, err errors.Error) } // NewTaskQueue is used to initiate processing for any of the types of queues // the runner supports. It also performs some lazy initialization. // func NewTaskQueue(project string, creds string) (tq TaskQueue, err errors.Error) { // The Google creds will come down as .json files, AWS will be a number of credential and config file names switch { case strings.HasSuffix(creds, ".json"): return NewPubSub(project, creds) case strings.HasPrefix(project, "amqp://"): return NewRabbitMQ(project, creds) default: files := strings.Split(creds, ",") for _, file := range files { _, errGo := os.Stat(file) if errGo != nil { return nil, errors.Wrap(errGo).With("stack", stack.Trace().TrimRuntime()).With("file", file).With("project", project) } } return NewSQS(project, creds) } }
package RegularExpressions

import (
	"fmt"
	"testing"
)

// TestTask4 exercises the matcher against the regular expression (a(|b))*
// built as an explicit syntax tree, printing expected match results.
// NOTE(review): this "test" only prints — it never fails via t; the expected
// values live in the trailing comments.
func TestTask4(t *testing.T) {
	// syntax abstract tree of this regular expression: (a(|b))*
	regularTree := Repeat{Pattern: Concatenate{
		Left: Literal{Character: 'a'},
		Right: Choose{
			Left:  Empty{},
			Right: Literal{Character: 'b'},
		},
	}}

	handler := Handler{}
	fmt.Println(handler.Matches(regularTree, ""))      //true
	fmt.Println(handler.Matches(regularTree, "a"))     //true
	fmt.Println(handler.Matches(regularTree, "ab"))    //true
	fmt.Println(handler.Matches(regularTree, "aba"))   //true
	fmt.Println(handler.Matches(regularTree, "abab"))  //true
	fmt.Println(handler.Matches(regularTree, "abaab")) //true
	fmt.Println(handler.Matches(regularTree, "abba"))  //false
	fmt.Println(handler.Matches(regularTree, "ba"))    //false
}
package main

import (
	"net"
	"time"
)

// main keeps a TCP connection to the server alive, sending a heartbeat
// message every 10 seconds and reconnecting whenever a write fails.
func main() {
	conn := getConn() // establish the initial connection
	for {
		_, err := conn.Write([]byte("Hello World!")) // send data to the server
		if err != nil {
			// Close the broken connection before dialing a new one;
			// previously the old fd was leaked on every reconnect.
			conn.Close()
			conn = getConn()
		}
		time.Sleep(10 * time.Second) // wait 10 seconds between sends
	}
}

// getConn dials the server, retrying once per second until it succeeds.
// It never returns an error; it blocks until a connection is made.
func getConn() net.Conn {
	for {
		conn, err := net.Dial("tcp", "23.106.153.177:1234") // establish connection
		if err != nil { // on failure, wait and retry
			time.Sleep(1 * time.Second)
			continue
		}
		return conn
	}
}
package ginja

import (
	"errors"
	"os"
	"testing"

	. "github.com/smartystreets/goconvey/convey"
)

// TestError verifies that Error satisfies the standard error interface and
// that Error() returns the Title verbatim.
func TestError(t *testing.T) {
	Convey("Error implements Error interface", t, func() {
		err := Error{Title: "Test error"}

		So(err, ShouldImplement, (*error)(nil))
		So(err.Error(), ShouldNotBeBlank)
		So(err.Error(), ShouldHaveSameTypeAs, "test")
		So(err.Error(), ShouldEqual, "Test error")
	})
}

// _testStringer is a fixture proving NewError accepts fmt.Stringer sources.
type _testStringer struct{}

func (_ _testStringer) String() string {
	return "Stringer error"
}

// TestNewError verifies NewError's conversion from strings, errors,
// Stringers, and unknown types (which fall back to a generic message).
func TestNewError(t *testing.T) {
	Convey("Error handles different sources", t, func() {
		err := NewError("Test error")
		So(err.Error(), ShouldEqual, "Test error")

		err = NewError(errors.New("Test error"))
		So(err.Error(), ShouldEqual, "Test error")

		err = NewError(os.ErrInvalid)
		So(err.Error(), ShouldEqual, "invalid argument")

		err = NewError(_testStringer{})
		So(err.Error(), ShouldEqual, "Stringer error")

		err = NewError(struct{}{})
		So(err.Error(), ShouldEqual, "Unknown error occurred")
	})
}
package controllers

import (
	"alta-store/lib/database"
	"alta-store/models"
	"net/http"
	"strconv"

	"github.com/labstack/echo"
)

// GetProductsController returns all products as JSON.
func GetProductsController(c echo.Context) error {
	products, err := database.GetProducts()
	if err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, err.Error())
	}
	return c.JSON(http.StatusOK, map[string]interface{}{
		"status": "success",
		"users":  products,
	})
}

// GetProductController returns a single product by its numeric path id.
func GetProductController(c echo.Context) error {
	id, err := strconv.Atoi(c.Param("id"))
	if err != nil {
		return c.JSON(http.StatusBadRequest, map[string]interface{}{
			"status": "invalid id",
		})
	}
	product, err := database.GetProduct(id)
	if err != nil {
		// Bug fix: err.Error was previously passed as an uncalled method
		// value, serializing a function reference instead of the message.
		return echo.NewHTTPError(http.StatusBadRequest, err.Error())
	}
	return c.JSON(http.StatusOK, map[string]interface{}{
		"status": "success",
		"users":  product,
	})
}

// CreateProductController creates a product from the request body.
func CreateProductController(c echo.Context) error {
	product, err := database.CreateProduct(c)
	if err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, err.Error())
	}
	return c.JSON(http.StatusOK, map[string]interface{}{
		"status": "success",
		"users":  product,
	})
}

// UpdateProductController updates the product identified by the path id
// with the JSON payload bound into models.Product.
func UpdateProductController(c echo.Context) error {
	var product models.Product
	id, err := strconv.Atoi(c.Param("id"))
	if err != nil {
		return c.JSON(http.StatusBadRequest, map[string]interface{}{
			"status": "invalid id",
		})
	}
	// Bug fix: the Bind error was previously ignored, silently updating
	// with a zero-valued product on malformed payloads.
	if err := c.Bind(&product); err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, err.Error())
	}
	update_product, err := database.UpdateProduct(id, product)
	if err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, err.Error())
	}
	return c.JSON(http.StatusOK, map[string]interface{}{
		"status": "success",
		"users":  update_product,
	})
}

// DeleteProductController deletes the product identified by the path id.
func DeleteProductController(c echo.Context) error {
	id, err := strconv.Atoi(c.Param("id"))
	if err != nil {
		return c.JSON(http.StatusBadRequest, map[string]interface{}{
			"status": "invalid id",
		})
	}
	product, err := database.DeleteProduct(id)
	if err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, err.Error())
	}
	return c.JSON(http.StatusOK, map[string]interface{}{
		"status": "success",
		"data":   product,
	})
}
// Package handlers contains the request handler functions.
package handlers

import "github.com/spazbite187/sensornet/app"

// Data wraps the application-level data store shared by the handlers.
type Data struct {
	// Data is the backing app.Data instance handlers read from and write to.
	Data *app.Data
}
package rsync

import (
	"github.com/cpusoft/goutil/belogs"
	"github.com/cpusoft/goutil/ginserver"
	"github.com/gin-gonic/gin"

	model "rpstir2-model"
)

// RsyncRequest starts an asynchronous rsync triggered by the sync service.
// It validates the JSON body into model.SyncUrls, kicks off rsyncRequest in a
// goroutine, and replies immediately — the caller does not wait for the sync.
// start to rsync from sync
func RsyncRequest(c *gin.Context) {
	belogs.Debug("RsyncRequest(): start")

	syncUrls := model.SyncUrls{}
	err := c.ShouldBindJSON(&syncUrls)
	belogs.Info("RsyncRequest(): syncUrls:", syncUrls, err)
	if err != nil {
		belogs.Error("RsyncRequest(): ShouldBindJSON:", err)
		ginserver.ResponseFail(c, err, nil)
		return
	}
	// Fire-and-forget: result is reported through the sync pipeline, not here.
	go rsyncRequest(&syncUrls)

	ginserver.ResponseOk(c, nil)
}

// LocalRsyncRequest runs a local rsync synchronously and returns its result
// in the response body.
// start local rsync
func LocalRsyncRequest(c *gin.Context) {
	belogs.Debug("LocalRsyncRequest(): start")

	syncUrls := model.SyncUrls{}
	err := c.ShouldBindJSON(&syncUrls)
	belogs.Info("LocalRsyncRequest(): syncUrls:", syncUrls, err)
	if err != nil {
		belogs.Error("LocalRsyncRequest(): ShouldBindJSON:", err)
		ginserver.ResponseFail(c, err, nil)
		return
	}
	rsyncResult, err := localRsyncRequest(&syncUrls)
	if err != nil {
		belogs.Error("LocalRsyncRequest(): localRsyncRequest:", err)
		ginserver.ResponseFail(c, err, nil)
		return
	}
	belogs.Debug("LocalRsyncRequest(): rsyncResult:", rsyncResult)
	ginserver.ResponseOk(c, rsyncResult)
}
package nsnet

import (
	"fmt"
	"net"
	"time"
)

// WaitForNetwork polls the network interfaces until more than one exists
// (i.e. something beyond loopback, meaning netsetgo has created the
// host/container tunnel), checking up to three times one second apart.
// It returns an error if the tunnel never appears or interfaces cannot
// be listed.
func WaitForNetwork() error {
	const (
		maxAttempts   = 3
		checkInterval = time.Second
	)

	for attempt := 0; attempt < maxAttempts; attempt++ {
		interfaces, err := net.Interfaces()
		if err != nil {
			return err
		}

		// More than just loopback means the tunnel interface is up.
		if len(interfaces) > 1 {
			return nil
		}

		// Don't sleep after the final attempt — there is nothing left to wait for.
		if attempt < maxAttempts-1 {
			time.Sleep(checkInterval)
		}
	}

	// Grammar fix of the original message ("too much attempts").
	return fmt.Errorf("too many attempts waiting for network")
}
//+build linux,arm

package main

import (
	"fmt"
	"time"

	"github.com/zyxar/berry/core"
	"github.com/zyxar/berry/device/ds1307"
)

var (
	// clock is the DS1307 RTC handle; nil when initClock failed or was not run.
	clock *ds1307.Clock
	// addrid is the DS1307's I2C address, busid the I2C bus number.
	addrid uint = 0x68
	busid  uint = 0x01
)

// initClock opens the DS1307 RTC on the configured I2C bus/address and
// stores the handle in the package-level clock variable.
func initClock() (err error) {
	clock, err = ds1307.New(addrid, busid)
	return
}

// clockRoutine continuously refreshes the package-level `now` value, either
// from the hardware RTC (if available) or from the system clock, sleeping
// `step` between updates. It never returns.
// NOTE(review): `now`, `step` and `printToLcd` are declared elsewhere in
// this package — presumably `now` is a shared time.Time; verify there is no
// unsynchronized concurrent access.
func clockRoutine() {
	if clock != nil {
		var err error
		for {
			if now, err = clock.Get(); err != nil {
				printToLcd(fmt.Sprintf("err in read clock: %v", err))
			}
			core.Delay(step)
		}
	} else {
		// No RTC present: fall back to the system clock.
		for {
			now = time.Now()
			core.Delay(step)
		}
	}
}
package validation

import "errors"

// Verror is an error that occurs during validation; it is safe to
// return its message to the end user.
type Verror struct {
	error
}

// Payload is the value we process.
type Payload struct {
	Name string `json:"name"`
	Age  int    `json:"age"`
}

// ValidatePayload is 1 implementation of the closure in our controller.
// It checks that Name is non-empty and Age lies strictly between 0 and 120,
// returning a Verror describing the first violation found.
func ValidatePayload(p *Payload) error {
	switch {
	case p.Name == "":
		return Verror{errors.New("name is required")}
	case p.Age <= 0, p.Age >= 120:
		return Verror{errors.New("age is required and must be a value greater than 0 and less than 120")}
	default:
		return nil
	}
}
package runners

import (
	"errors"
	"fmt"
	"time"

	"github.com/hyperpilotio/go-utils/log"
	"github.com/hyperpilotio/workload-profiler/clients"
	"github.com/hyperpilotio/workload-profiler/db"
	"github.com/hyperpilotio/workload-profiler/jobs"
	"github.com/hyperpilotio/workload-profiler/models"
)

// ProfileRun carries the shared state of one profiling run: the clients it
// talks to, the deployment it targets, its log, and its lifecycle state.
type ProfileRun struct {
	Id                        string
	DeployerClient            *clients.DeployerClient
	BenchmarkControllerClient *clients.BenchmarkControllerClient
	SlowCookerClient          *clients.SlowCookerClient
	DeploymentId              string
	MetricsDB                 *db.MetricsDB
	ApplicationConfig         *models.ApplicationConfig
	ProfileLog                *log.FileLog
	State                     string
	Created                   time.Time
	SkipUnreserveOnFailure    bool
	DirectJob                 bool
}

// IsDirectJob reports whether this run bypasses the job queue.
func (run *ProfileRun) IsDirectJob() bool {
	return run.DirectJob
}

// GetId returns the run's identifier.
func (run *ProfileRun) GetId() string {
	return run.Id
}

// GetApplicationConfig returns the application under profile.
func (run *ProfileRun) GetApplicationConfig() *models.ApplicationConfig {
	return run.ApplicationConfig
}

// GetLog returns the run's file logger.
func (run *ProfileRun) GetLog() *log.FileLog {
	return run.ProfileLog
}

// GetState returns the run's current lifecycle state.
func (run *ProfileRun) GetState() string {
	return run.State
}

// SetState updates the run's lifecycle state.
func (run *ProfileRun) SetState(state string) {
	run.State = state
}

// GetSummary builds the jobs.JobSummary view of this run.
func (run *ProfileRun) GetSummary() jobs.JobSummary {
	return jobs.JobSummary{
		DeploymentId: run.DeploymentId,
		RunId:        run.Id,
		Status:       run.State,
		Create:       run.Created,
	}
}

// GetJobDeploymentConfig returns an empty deployment config; profile runs
// do not request special placement.
func (run *ProfileRun) GetJobDeploymentConfig() jobs.JobDeploymentConfig {
	return jobs.JobDeploymentConfig{}
}

// IsSkipUnreserveOnFailure reports whether cluster unreserve is skipped
// when the run fails.
func (run *ProfileRun) IsSkipUnreserveOnFailure() bool {
	return run.SkipUnreserveOnFailure
}

// GetColocatedAgentUrls resolves the URLs of the given agent colocated with
// either the load tester ("loadtester") or the profiled service ("service"),
// as selected by placementHost.
func (run *ProfileRun) GetColocatedAgentUrls(agent string, service string, placementHost string) ([]string, error) {
	var colocatedService string
	switch placementHost {
	case "loadtester":
		colocatedService = run.ApplicationConfig.LoadTester.Name
	case "service":
		colocatedService = service
	default:
		return nil, errors.New("Unknown placement host for benchmark agent: " + placementHost)
	}

	// NOTE(review): Logger.Info is given printf-style verbs here; if this is
	// a logrus-like logger, Infof is probably intended — confirm.
	run.ProfileLog.Logger.Info("Getting %s url for colocated service %s from deployer client %+v", agent, colocatedService, *run.DeployerClient)
	agentUrls, err :=
		run.DeployerClient.GetColocatedServiceUrls(run.DeploymentId, colocatedService, agent, run.ProfileLog.Logger)
	if err != nil {
		message := fmt.Sprintf(
			"Unable to get service %s url located next to %s: %s", agent, colocatedService, err.Error())
		run.ProfileLog.Logger.Warningf(message)
		return nil, errors.New(message)
	}

	return agentUrls, nil
}

// ProfileResults aggregates the per-stage outcomes of one profiling run.
type ProfileResults struct {
	Id           string
	StageResults []StageResult
}

// StageResult records the timing of a single profiling stage.
type StageResult struct {
	Id        string
	StartTime string
	EndTime   string
}
/* Copyright 2020 The Qmgo Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package operator // define the update operators // refer: https://docs.mongodb.com/manual/reference/operator/update/ const ( // Fields CurrentDate = "$currentDate" Inc = "$inc" Min = "$min" Max = "$max" Mul = "$mul" Rename = "$rename" Set = "$set" SetOnInsert = "$setOnInsert" Unset = "$unset" // Array Operators AddToSet = "$addToSet" Pop = "$pop" Pull = "$pull" Push = "$push" PullAll = "$pullAll" // Array modifiers Each = "$each" Position = "$position" Sort = "$sort" // Array bitwise Bit = "$bit" )
package trie

import (
	"testing"

	"github.com/google/btree"
	"github.com/openacid/low/mathext/zipf"
)

// KVElt is a string-keyed btree item carrying an int32 payload.
type KVElt struct {
	Key string
	Val int32
}

// Less orders KVElt items by Key, satisfying btree.Item.
func (kv *KVElt) Less(than btree.Item) bool {
	o := than.(*KVElt)
	return kv.Key < o.Key
}

// makeKVElts pairs srcKeys with srcVals into btree items.
// Assumes len(srcVals) >= len(srcKeys).
func makeKVElts(srcKeys []string, srcVals []int32) []*KVElt {
	elts := make([]*KVElt, len(srcKeys))
	for i, k := range srcKeys {
		elts[i] = &KVElt{Key: k, Val: srcVals[i]}
	}
	return elts
}

// OutputBtree is a sink that keeps the benchmark loop from being
// dead-code-eliminated by the compiler.
var OutputBtree int

// Benchmark_btree measures Get latency on a google/btree populated with the
// shared big key set, using zipf-distributed (skewed) access patterns.
// benchBigKeySet and makeI32s are defined elsewhere in this package.
func Benchmark_btree(b *testing.B) {
	benchBigKeySet(b, func(b *testing.B, typ string, keys []string) {
		values := makeI32s(len(keys))
		bt := btree.New(32)

		elts := makeKVElts(keys, values)
		for _, v := range elts {
			bt.ReplaceOrInsert(v)
		}

		// Precompute the access sequence so index generation is outside
		// the timed region.
		accesses := zipf.Accesses(2, 1.5, len(keys), b.N, nil)

		b.ResetTimer()

		var id int32
		for i := 0; i < b.N; i++ {
			idx := accesses[i]
			itm := &KVElt{Key: keys[idx], Val: values[idx]}
			ee := bt.Get(itm)
			id += ee.(*KVElt).Val
		}
		OutputBtree = int(id)
	})
}
package pain

import (
	"encoding/xml"

	"github.com/thought-machine/finance-messaging/iso20022"
)

// Document00800101 is the XML document wrapper for the ISO 20022
// pain.008.001.01 (CustomerDirectDebitInitiation) message.
type Document00800101 struct {
	XMLName xml.Name                          `xml:"urn:iso:std:iso:20022:tech:xsd:pain.008.001.01 Document"`
	Message *CustomerDirectDebitInitiationV01 `xml:"pain.008.001.01"`
}

// AddMessage allocates the message payload and returns it for population.
func (d *Document00800101) AddMessage() *CustomerDirectDebitInitiationV01 {
	d.Message = new(CustomerDirectDebitInitiationV01)
	return d.Message
}

// Scope
// The CustomerDirectDebitInitiation message is sent by the initiating party to the forwarding agent or creditor agent. It is used to request single or bulk collection(s) of funds from one or various debtor's account(s) for a creditor.
// Usage
// The CustomerDirectDebitInitiation message can contain one or more direct debit instructions.
// The message can be used in a direct or a relay scenario:
// - In a direct scenario, the message is sent directly to the creditor agent. The creditor agent is the account servicer of the creditor.
// - In a relay scenario, the message is sent to a forwarding agent. The forwarding agent acts as a concentrating financial institution. It will forward the CustomerDirectDebitInitiation message to the creditor agent.
// The message can also be used by an initiating party that has authority to send the message on behalf of the creditor. This caters for example for the scenario of a payments factory initiating all payments on behalf of a large corporate.
// The CustomerDirectDebitInitiation message can be used in domestic and cross-border scenarios.
// The CustomerDirectDebitInitiation may or may not contain mandate related information, i.e. extracts from a mandate, such as MandateIdentification or DateOfSignature. The CustomerDirectDebitInitiation message must not be considered as a mandate.
// The CustomerDirectDebitInitiation message must not be used by the creditor agent to execute the direct debit instruction(s). The FIToFICustomerDirectDebit message must be used instead.
// If it is agreed to include the payment information related to the credit side only once (i.e. Grouped mode), the PaymentInformation block will be present only once. If it is agreed to repeat the payment information related to the credit side (i.e. Single mode), the PaymentInformation block must be present once per occurrence of the DirectDebitTransactionInformation block. The CustomerDirectDebitInitiation message also allows for a Mixed mode where the PaymentInformation block can be repeated and each PaymentInformation block can contain one or several DirectDebitTransactionInformation block(s).
// Single
// When grouping is set to Single, information for each individual instruction is included separately. This means the
// PaymentInformation block is repeated, and present for each occurrence of the Direct Debit TransactionInformation block.
// Grouped
// When grouping is set to Grouped, the PaymentInformation block will be present once and the Direct Debit
// TransactionInformation block will be repeated.
// Mixed
// When grouping is set to Mixed, the PaymentInformation block may be present once or may be repeated. Each sequence
// of the PaymentInformation block may contain one or several Direct Debit TransactionInformation block(s).
type CustomerDirectDebitInitiationV01 struct {

	// Set of characteristics shared by all individual transactions included in the message.
	GroupHeader *iso20022.GroupHeader1 `xml:"GrpHdr"`

	// Set of characteristics that apply to the credit side of the payment transactions included in the direct debit transaction initiation.
	PaymentInformation []*iso20022.PaymentInstructionInformation2 `xml:"PmtInf"`
}

// AddGroupHeader allocates the group header and returns it for population.
func (c *CustomerDirectDebitInitiationV01) AddGroupHeader() *iso20022.GroupHeader1 {
	c.GroupHeader = new(iso20022.GroupHeader1)
	return c.GroupHeader
}

// AddPaymentInformation appends a new, empty PaymentInformation block and
// returns it for population.
func (c *CustomerDirectDebitInitiationV01) AddPaymentInformation() *iso20022.PaymentInstructionInformation2 {
	newValue := new(iso20022.PaymentInstructionInformation2)
	c.PaymentInformation = append(c.PaymentInformation, newValue)
	return newValue
}
package gannettApi

import (
	"fmt"
	"net/url"
)

// Use for querying for the list of articles
var GannettApiSearchRoot = "http://api.gannett-cdn.com/prod/Search/v4/assets/proxy"

// Use for getting the article content
var GannettApiPresentationRoot = "http://api.gannett-cdn.com/presentation/v4/assets"

// GetDefaultSearchValues builds the default query parameters for a Gannett
// search request: published-only results for the given site code, JSON
// output, 100 rows, and the supplied search API key.
func GetDefaultSearchValues(siteCode string, gannettSearchAPIKey string) url.Values {
	vals := url.Values{}
	// url.Values.Set is order-independent, so populate from a literal map.
	for name, value := range map[string]string{
		"q":       "statusname:published",
		"fq":      fmt.Sprintf("sitecode:%s", siteCode),
		"sc":      siteCode,
		"apiKey":  "newsfetch",
		"format":  "json",
		"rows":    "100",
		"api_key": gannettSearchAPIKey,
	} {
		vals.Set(name, value)
	}
	return vals
}
package utils

import (
	"KServer/library/kiface/iutils"
	"encoding/json"

	"github.com/golang/protobuf/proto"
)

// ByteTool wraps a raw byte payload and decodes it as protobuf, JSON,
// string, or raw bytes.
type ByteTool struct {
	// Embedded protobuf codec used by ProtoBuf().
	// NOTE(review): NewIByte leaves this nil; calling ProtoBuf on a
	// NewIByte-created value looks like a nil-pointer dereference unless
	// Protobuf's methods tolerate a nil receiver — confirm.
	*Protobuf
	// Data is the raw payload all conversions read from.
	Data []byte
}

// NewIByte returns an empty ByteTool as the iutils.IByte interface.
func NewIByte() iutils.IByte {
	return &ByteTool{}
}

// ProtoBuf decodes Data into the given protobuf message.
func (b *ByteTool) ProtoBuf(value proto.Message) error {
	return b.Protobuf.Decode(b.Data, value)
}

// String returns Data as a string.
func (b *ByteTool) String() string {
	return string(b.Data)
}

// Json unmarshals Data as JSON into value.
func (b *ByteTool) Json(value interface{}) error {
	return json.Unmarshal(b.Data, value)
}

// Bytes returns the raw payload.
func (b *ByteTool) Bytes() []byte {
	return b.Data
}

// SetData replaces the payload. The slice is retained, not copied.
func (b *ByteTool) SetData(data []byte) {
	b.Data = data
}
package main

import (
	"html/template"
	"net/http"

	"github.com/satori/go.uuid"
)

// t holds the parsed page template, loaded once at startup.
var t *template.Template

func init() {
	// Must panics at startup if the template file is missing or invalid.
	t = template.Must(template.ParseFiles("96files.gohtml"))
}

// main serves the index page on :8080 and swallows favicon requests.
// NOTE(review): ListenAndServe's error is ignored; a startup failure
// (e.g. port in use) exits silently.
func main() {
	http.HandleFunc("/", index)
	http.Handle("/favicon.ico", http.NotFoundHandler())
	http.ListenAndServe(":8080", nil)
}

// index renders the template with the visitor's cookie value.
func index(w http.ResponseWriter, r *http.Request) {
	c := getcookie(w, r)
	// NOTE(review): template execution error is ignored.
	t.ExecuteTemplate(w, "96files.gohtml", c.Value)
}

// getcookie returns the visitor's "photo-cookie", creating and setting a
// new UUID-valued cookie when none is present on the request.
func getcookie(w http.ResponseWriter, r *http.Request) *http.Cookie {
	c, err := r.Cookie("photo-cookie")
	if err != nil {
		// No cookie yet: mint a fresh session id.
		id := uuid.NewV4()
		c = &http.Cookie{
			Name:  "photo-cookie",
			Value: id.String(),
		}
		http.SetCookie(w, c)
	}
	return c
}
package server

import (
	"encoding/json"
	"fmt"
	"net/http"
	"strconv"

	"github.com/idena-network/idena-indexer/log"
)

// Response is the uniform JSON envelope returned by all API endpoints.
type Response struct {
	Result interface{} `json:"result,omitempty"`
	Error  *RespError  `json:"error,omitempty"`
}

// RespError carries a user-facing error message inside a Response.
type RespError struct {
	Message string `json:"message"`
}

// WriteErrorResponse writes err as a JSON error envelope.
func WriteErrorResponse(w http.ResponseWriter, err error, logger log.Logger) {
	WriteResponse(w, nil, err, logger)
}

// WriteResponse writes either result or err as a JSON envelope; encoding
// failures are logged, not surfaced to the client.
func WriteResponse(w http.ResponseWriter, result interface{}, err error, logger log.Logger) {
	w.Header().Set("Content-Type", "application/json")
	err = json.NewEncoder(w).Encode(getResponse(result, err))
	if err != nil {
		logger.Error(fmt.Sprintf("Unable to write API response: %v", err))
		return
	}
}

// getResponse builds the envelope, preferring the error if one is present.
func getResponse(result interface{}, err error) Response {
	if err != nil {
		return getErrorResponse(err)
	}
	return Response{
		Result: result,
	}
}

func getErrorResponse(err error) Response {
	return getErrorMsgResponse(err.Error())
}

func getErrorMsgResponse(errMsg string) Response {
	return Response{
		Error: &RespError{
			Message: errMsg,
		},
	}
}

// ToUint parses vars[name] as an unsigned integer, returning a descriptive
// error on failure.
func ToUint(vars map[string]string, name string) (uint64, error) {
	value, err := strconv.ParseUint(vars[name], 10, 64)
	if err != nil {
		// Simplified from errors.New(fmt.Sprintf(...)) (staticcheck S1028).
		return 0, fmt.Errorf("wrong value %s=%v", name, vars[name])
	}
	return value, nil
}

// ReadPaginatorParams extracts the "skip" and "limit" pagination values.
func ReadPaginatorParams(vars map[string]string) (uint64, uint64, error) {
	startIndex, err := ToUint(vars, "skip")
	if err != nil {
		return 0, 0, err
	}
	count, err := ToUint(vars, "limit")
	if err != nil {
		return 0, 0, err
	}
	return startIndex, count, nil
}
// Copyright © 2017 NAME HERE <EMAIL ADDRESS>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"sync"

	"github.com/gosuri/uiprogress"
	"github.com/spf13/cobra"

	"net"
	"time"

	"github.com/dutchcoders/goftp"
	"github.com/mythay/anet"
)

// Command-line flag values: FTP credentials, remote directory, local file.
var fUser, fPasswd, fDestdir, fLocalfile string

// progress renders one bar per target IP during parallel uploads.
var progress = uiprogress.New()

// putCmd represents the put command
var putCmd = &cobra.Command{
	Use:   "put [ip]...",
	Short: "upload file to ftp server",
	Long: `upload file to multiple ftp server at the same time.
For example: aftp put 192.168.1.1 192.168.1.2`,
	RunE: func(cmd *cobra.Command, args []string) error {
		// TODO: Work your own magic here
		nargs := len(args)
		if nargs == 0 {
			return errors.New("at least one ip is needed")
		}
		// Expand each argument (possibly an IP range) into concrete IPs.
		ips := make([]net.IP, 0, 255)
		for _, arg := range args {
			subips, err := anet.ParseIPRange(arg)
			if err != nil {
				return err
			}
			ips = append(ips, subips...)
		}
		nips := len(ips)
		if _, err := os.Stat(fLocalfile); err != nil {
			return fmt.Errorf("local file '%s' not exist", fLocalfile)
		}
		destfile := filepath.Base(fLocalfile)

		tsk := &batchUpload{user: fUser, passwd: fPasswd, destdir: fDestdir, localfile: fLocalfile, destfile: destfile, verbose: true}
		progress.Start()
		// Upload to every target concurrently, collecting failures on a
		// buffered channel sized so no sender can block.
		wg := sync.WaitGroup{}
		wg.Add(nips)
		errmsg := make(chan error, nips)
		for _, ip := range ips {
			go func(nip net.IP) {
				var err error
				defer wg.Done()
				err = tsk.upload(nip)
				if err != nil {
					errmsg <- err
				}
			}(ip)
		}
		wg.Wait()
		progress.Stop()
		errcount := 0
		close(errmsg)
		for msg := range errmsg {
			fmt.Println(msg)
			errcount++
		}
		fmt.Printf("\nSTATISTIC: %d/%d success\n", nips-errcount, nips)
		return nil
	},
}

func init() {
	RootCmd.AddCommand(putCmd)
	putCmd.Flags().StringVarP(&fUser, "username", "u", "pcfactory", "A valid ftp user name")
	putCmd.Flags().StringVarP(&fPasswd, "password", "p", "pcfactory", "Correspoding ftp password")
	putCmd.Flags().StringVarP(&fDestdir, "directory", "d", "/fw", "Ftp server directory to store the file")
	putCmd.Flags().StringVarP(&fLocalfile, "localfile", "l", "App2.out", "local file path to be uploaded")
	progress.Width = 40
}

// batchUpload bundles the parameters shared by all per-IP uploads.
type batchUpload struct {
	user      string
	passwd    string
	destdir   string
	localfile string
	destfile  string
	verbose   bool
}

// pFile wraps an open file and advances a progress bar as it is read.
type pFile struct {
	*os.File
	offset int
	bar    *uiprogress.Bar
}

// newPFile opens fpath and attaches a progress bar sized to the file.
func newPFile(fpath string) (*pFile, error) {
	f, err := os.Open(fpath)
	if err != nil {
		return nil, err
	}
	size := fileSize(fpath)
	bar := progress.AddBar(size).AppendCompleted().PrependElapsed()
	return &pFile{f, 0, bar}, nil
}

// Read forwards to the underlying file and moves the bar to the new offset.
// NOTE(review): Set(offset-1)+Incr() advances the bar by net one past the
// previous offset — presumably Incr is used to force a repaint; confirm
// against the uiprogress API.
func (f *pFile) Read(b []byte) (n int, err error) {
	n, err = f.File.Read(b)
	f.offset += n
	f.bar.Set(f.offset - 1)
	f.bar.Incr()
	return n, err
}

// fileSize returns the size of path in bytes, or 0 if it cannot be stat'ed.
func fileSize(path string) int {
	fileInfo, err := os.Stat(path)
	if err != nil {
		return 0
	}
	fileSize := fileInfo.Size() // get the file size
	return int(fileSize)
}

// upload sends the batch's local file to the FTP server at destip,
// updating the per-IP progress bar with ERROR/DONE status.
func (batch *batchUpload) upload(destip net.IP) error {
	var err error
	iscomplete := false
	connstr := fmt.Sprintf("%s:21", destip.String())
	// because ftp lib has no time, try to connect it just for test
	try, err := net.DialTimeout("tcp", connstr, time.Second*1)
	if err != nil {
		return err
	}
	try.Close()

	f, err := newPFile(batch.localfile)
	if err != nil {
		return err
	}
	defer f.Close()
	// The bar's prefix reflects the latest state via the captured err and
	// iscomplete variables.
	f.bar.PrependFunc(func(b *uiprogress.Bar) string {
		if err != nil {
			return fmt.Sprintf("%s :ERROR", destip)
		}
		if iscomplete {
			return fmt.Sprintf("%s :DONE ", destip)
		}
		return fmt.Sprintf("%s :     ", destip)
	})
	defer f.bar.Incr()
	ftp, err := goftp.Connect(fmt.Sprintf("%s:21", destip))
	if err != nil {
		return err
	}
	defer ftp.Quit()

	if err = ftp.Login(batch.user, batch.passwd); err != nil {
		return err
	}
	if err = ftp.Cwd(batch.destdir); err != nil {
		return err
	}
	if err = ftp.Stor(batch.destfile, f); err != nil {
		return err
	}
	iscomplete = true
	return nil
}
package phases import ( "mobingi/ocean/pkg/config" configstorage "mobingi/ocean/pkg/storage" ) func Init(cfg *config.Config) (configstorage.Cluster, error) { storage := configstorage.NewStorage() err := storage.Init(cfg) if err != nil { return nil, err } return storage, nil }
package client

import (
	"net/http"

	"github.com/go-osin/session"
)

// TODO: deprecated with cookie
const (
	// SessKeyUser is the session key under which the *User is stored.
	SessKeyUser  = "user"
	// SessKeyToken is the session key for the auth token.
	SessKeyToken = "token"
)

var (
	// SessionIDCookieName is the cookie carrying the session id.
	SessionIDCookieName = "_sess"
)

func init() {
	// Default to an in-memory store; callers may swap it via SetupSessionStore.
	SetupSessionStore(session.NewInMemStore())
}

// SetupSessionStore replaces the global session manager with a cookie
// manager backed by store. The previous global manager is closed first.
func SetupSessionStore(store session.Store) {
	session.Global.Close()
	session.Global = session.NewCookieManagerOptions(store, &session.CookieMngrOptions{
		SessIDCookieName: SessionIDCookieName,
		AllowHTTP:        true,
	})
}

// SessionLoad returns the request's session, creating a new one when the
// request carries none.
func SessionLoad(r *http.Request) session.Session {
	sess := session.Global.Load(r)
	if sess == nil {
		sess = session.NewSession()
	}
	return sess
}

// SessionSave persists sess and writes its cookie to the response.
func SessionSave(sess session.Session, w http.ResponseWriter) {
	session.Global.Save(sess, w)
}

// UserFromSession extracts the stored *User; ok is false when the session
// holds no user (or a value of another type).
func UserFromSession(sess session.Session) (u *User, ok bool) {
	u, ok = sess.Get(SessKeyUser).(*User)
	return
}
package reporting

import (
	"fmt"
	"log"

	"github.com/streadway/amqp"
)

// Publisher sends encoded reports to a RabbitMQ topic exchange.
type Publisher struct {
	channel      *amqp.Channel
	connexion    *amqp.Connection
	exchangeName string
	routingKey   string
}

// Init dials RabbitMQ at params["url"], opens a channel, and declares the
// durable topic exchange named by params["exchange_name"]. Any failure is
// fatal (the process exits).
func (pub *Publisher) Init(params map[string]string) {
	var err error
	pub.connexion, err = amqp.Dial(params["url"])
	failOnError(err, "Failed to connect to RabbitMQ")

	pub.channel, err = pub.connexion.Channel()
	failOnError(err, "Failed to open a channel")

	pub.exchangeName = params["exchange_name"]
	pub.routingKey = params["routing_key"]

	err = pub.channel.ExchangeDeclare(
		pub.exchangeName, // name
		"topic",          // type
		true,             // durable
		false,            // auto-deleted
		false,            // internal
		false,            // no-wait
		nil,              // arguments
	)
	failOnError(err, "Failed to declare an exchange")
}

// Close releases the AMQP resources.
func (pub *Publisher) Close() {
	// Bug fix: close the channel before its parent connection; the original
	// closed the connection first, invalidating the channel it then closed.
	pub.channel.Close()
	pub.connexion.Close()
}

// Send encodes report and publishes it to the configured exchange.
// Errors are logged and the report is dropped (best effort, no retry).
func (pub *Publisher) Send(report Report) {
	marshal, err := report.Encode()
	if err != nil {
		fmt.Println("Error encoding report", err.Error())
		return
	}

	// Bug fix: the Publish error was previously discarded.
	err = pub.channel.Publish(
		pub.exchangeName, // exchange
		pub.routingKey,   // routing key
		false,            // mandatory
		false,            // immediate
		amqp.Publishing{
			ContentType: "application/json",
			Body:        []byte(marshal),
		})
	if err != nil {
		fmt.Println("Error publishing report", err.Error())
	}
}

// failOnError terminates the process with msg when err is non-nil.
func failOnError(err error, msg string) {
	if err != nil {
		// log.Fatalf exits the process; the original's trailing
		// time.Sleep/panic were unreachable dead code and were removed.
		log.Fatalf("%s: %s", msg, err)
	}
}
package Plugins

import (
	"fmt"
	"strings"
	"sync"
	"time"

	"github.com/go-redis/redis"

	"../Misc"
	"../Parse"
)

// Redis makes one authentication attempt against info.Host:info.Port with
// info.Password. On success it bumps the shared `success` counter, prints
// the hit and optionally appends it to the output file; on failure it
// prints only when info.ErrShow is set. wg/ch implement the caller's
// bounded-concurrency scheme.
func Redis(info Misc.HostInfo, ch chan int, wg *sync.WaitGroup) {
	ip := fmt.Sprintf("%s:%d", info.Host, info.Port)
	client := redis.NewClient(&redis.Options{
		Addr:     ip,
		Password: info.Password,
		DB:       0,
	})
	// FIX: the original only closed the client on the success path, leaking
	// a connection for every failed attempt. Always release it.
	defer client.Close()

	pong, err := client.Ping().Result()
	if err == nil && strings.Contains(pong, "PONG") {
		success += 1
		info.PrintSuccess()
		if info.Output != "" {
			info.OutputTXT()
		}
	} else if info.ErrShow {
		info.PrintFail()
	}
	wg.Done()
	<-ch
}

// RedisConn expands the host/password lists from info, then fans out one
// Redis goroutine per (host, password) pair, throttled by ch, and reports
// totals when all attempts complete.
func RedisConn(info *Misc.HostInfo, ch chan int) {
	var hosts, passwords []string
	var err error
	var wg = sync.WaitGroup{}
	stime := time.Now()

	if info.Ports == "" {
		info.Port = REDISPORT
	} else {
		// Only the first parsed port is used; parse errors fall back to
		// whatever ParsePort returned.
		p, _ := Parse.ParsePort(info.Ports)
		info.Port = p[0]
	}
	hosts, err = Parse.ParseIP(info.Host)
	Misc.CheckErr(err)
	passwords, err = Parse.ParsePass(info)
	Misc.CheckErr(err)

	wg.Add(len(hosts) * len(passwords))
	Misc.InfoPrinter.Println("Total length", len(hosts)*len(passwords))
	for _, host := range hosts {
		for _, pass := range passwords {
			info.Host = host
			info.Password = pass
			go Redis(*info, ch, &wg)
			ch <- 1
		}
	}
	wg.Wait()
	end := time.Since(stime)
	Misc.InfoPrinter.Println("All Done")
	Misc.InfoPrinter.Println("Number of successes:", success)
	Misc.InfoPrinter.Println("Time consumed:", end)
}
package minnow

import (
	"fmt"
	"log"
	"os"
	"sync"
	"time"
)

// ProcessorRegistry maintains the live set of ProcessorPools, keyed by
// ProcessorId, built by scanning processor definition directories on disk.
// All reads/writes of processorPools go through mutex.
type ProcessorRegistry struct {
	definitionsPath Path
	processorPools  map[ProcessorId]*ProcessorPool
	mutex           *sync.RWMutex
	logger          *log.Logger
}

// NewProcessorRegistry creates a registry rooted at definitionsPath and
// performs an initial scan; it fails if that first BuildProcessorMap errors.
func NewProcessorRegistry(definitionsPath Path) (*ProcessorRegistry, error) {
	processorPools := make(map[ProcessorId]*ProcessorPool)
	mutex := new(sync.RWMutex)
	logger := log.New(os.Stdout, "ProcessorRegistry: ", 0)
	registry := &ProcessorRegistry{definitionsPath, processorPools, mutex, logger}
	err := registry.BuildProcessorMap()
	if err != nil {
		return nil, err
	}
	return registry, nil
}

// Run rebuilds the processor map every 5 minutes, forever. Rebuild errors
// are logged and the loop continues.
func (registry *ProcessorRegistry) Run() {
	for range time.Tick(time.Duration(5) * time.Minute) {
		err := registry.BuildProcessorMap()
		if err != nil {
			registry.logger.Print(err.Error())
		}
	}
}

// BuildProcessorMap rescans the definitions directory, builds a fresh pool
// map outside the lock, then swaps it in under the write lock after stopping
// the old pools so they accept no more data.
func (registry *ProcessorRegistry) BuildProcessorMap() error {
	definitionPaths, err := registry.definitionsPath.Glob("*")
	if err != nil {
		return err
	}

	// Filter out any non-directories
	definitionDirPaths := make([]Path, 0)
	for _, definitionPath := range definitionPaths {
		if definitionPath.IsDir() {
			definitionDirPaths = append(definitionDirPaths, definitionPath)
		}
	}

	processorPools := make(map[ProcessorId]*ProcessorPool)
	for _, definitionDirPath := range definitionDirPaths {
		processor, err := NewProcessor(definitionDirPath)
		if err != nil {
			// A bad definition skips that processor; the scan continues.
			registry.logger.Print(err.Error())
			continue
		}
		processorPool := NewProcessorPool(processor, processor.GetPoolSize())
		processorPools[processor.GetId()] = processorPool
		registry.logger.Printf("Registered processor %s with pool_size %d", processor.GetId(), processor.GetPoolSize())
	}

	registry.mutex.Lock()
	defer registry.mutex.Unlock()

	// stop old ProcessorPool queues so they don't get more data
	for _, processorPool := range registry.processorPools {
		processorPool.Stop()
	}
	registry.processorPools = processorPools

	if len(processorPools) == 0 {
		// NOTE(review): the empty map has already been installed at this
		// point even though an error is returned — confirm that is intended.
		return fmt.Errorf("No processors found in %s", registry.definitionsPath)
	}
	return nil
}

// MatchingProcessorIds returns the ids of all registered processors whose
// hook matches the given metadata. Read-locked.
func (registry *ProcessorRegistry) MatchingProcessorIds(metadata Properties) []ProcessorId {
	registry.mutex.RLock()
	defer registry.mutex.RUnlock()

	matchingProcessorIds := make([]ProcessorId, 0)
	for _, processorPool := range registry.processorPools {
		if processorPool.ProcessorHookMatches(metadata) {
			matchingProcessorIds = append(matchingProcessorIds, processorPool.GetProcessorId())
		}
	}
	return matchingProcessorIds
}

// SendToProcessorId forwards runRequest to the pool registered under
// processorId, or errors if no such pool exists.
func (registry *ProcessorRegistry) SendToProcessorId(processorId ProcessorId, runRequest RunRequest) error {
	registry.mutex.RLock()
	defer registry.mutex.RUnlock()

	if processorPool, found := registry.processorPools[processorId]; found {
		processorPool.Run(runRequest)
		return nil
	}
	return fmt.Errorf("Could not send RunRequest to ProcessorId %s", processorId)
}

// ProcessorNameForId resolves a ProcessorId to its human-readable name.
func (registry *ProcessorRegistry) ProcessorNameForId(processorId ProcessorId) (string, error) {
	registry.mutex.RLock()
	defer registry.mutex.RUnlock()

	if processorPool, found := registry.processorPools[processorId]; found {
		return processorPool.GetProcessorName(), nil
	}
	return "", fmt.Errorf("Could not find name for ProcessorId %s", processorId)
}
package main

import "flag"

// Standard parameters.
var (
	// Print the version number and exit.
	paramVersion = flag.Bool("version", false, "Show version")
	// Working mode.
	paramMode = flag.String("mode", "v2", "v2")
	// Export in parallel for speed; log output may interleave.
	paramPara      = flag.Bool("para", false, "parallel export by your cpu count")
	paramLanguage  = flag.String("lan", "en_us", "set output language")
	paramPath      = flag.String("path", "Config", "path of files")
	paramClientOut = flag.String("c", "Client", "path of client lua")
	paramServerOut = flag.String("s", "Server", "path of server out")
)

// Per-format export parameters.
var (
	paramPackageName       = flag.String("package", "", "override the package name in table @Types")
	paramCombineStructName = flag.String("combinename", "Table", "combine struct name, code struct flagstr")
	paramProtoOut          = flag.String("proto_out", "", "output protobuf define (*.proto)")
	paramPbtOut            = flag.String("pbt_out", "", "output proto text format (*.pbt)")
	paramLuaOut            = flag.String("lua_out", "", "output lua code (*.lua)")
	paramJsonOut           = flag.String("json_out", "", "output json format (*.json)")
	paramCSharpOut         = flag.String("csharp_out", "", "output c# class and deserialize code (*.cs)")
	paramGoOut             = flag.String("go_out", "", "output golang code (*.go)")
	paramBinaryOut         = flag.String("binary_out", "", "output binary format(*.bin)")
	paramTypeOut           = flag.String("type_out", "", "output table types(*.json)")
	paramCppOut            = flag.String("cpp_out", "", "output c++ format (*.cpp)")
)
package errors

import "net/http"

// ResourceNotFound writes a 404 status to w when err is non-nil; a nil err
// leaves the response untouched.
func ResourceNotFound(err error, w http.ResponseWriter) {
	if err == nil {
		return
	}
	w.WriteHeader(http.StatusNotFound)
}

// InternalServerError writes a 500 status to w when err is non-nil; a nil
// err leaves the response untouched.
func InternalServerError(err error, w http.ResponseWriter) {
	if err == nil {
		return
	}
	w.WriteHeader(http.StatusInternalServerError)
}
package device // #cgo CFLAGS: -g -Wall // #cgo LDFLAGS: -lSoapySDR // #include <stdlib.h> // #include <stddef.h> // #include <SoapySDR/Device.h> // #include <SoapySDR/Formats.h> // #include <SoapySDR/Types.h> import "C" import "unsafe" // ListSensors gets a list of the available global readable sensors. // // Return a list of available sensor string names func (dev *SDRDevice) ListSensors() []string { length := C.size_t(0) info := C.SoapySDRDevice_listSensors(dev.device, &length) defer stringArrayClear(info, length) return stringArray2Go(info, length) } // GetSensorInfo gets meta-information about a sensor. // // Params: // - key: the ID name of an available sensor // // Return meta-information about a sensor func (dev *SDRDevice) GetSensorInfo(key string) SDRArgInfo { cKey := C.CString(key) defer C.free(unsafe.Pointer(cKey)) info := C.SoapySDRDevice_getSensorInfo(dev.device, cKey) defer argInfoClear(info) return argInfo2Go(&info) } // ReadSensor reads a global sensor given the name. The value returned is a string which can represent // a boolean ("true"/"false"), an integer, or float. // // Params: // - key: the ID name of an available sensor // // Return the current value of the sensor func (dev *SDRDevice) ReadSensor(key string) string { cKey := C.CString(key) defer C.free(unsafe.Pointer(cKey)) val := (*C.char)(C.SoapySDRDevice_readSensor(dev.device, cKey)) defer C.free(unsafe.Pointer(val)) return C.GoString(val) } // ListChannelSensors gets a list of the available channel readable sensors. 
//
// Params:
//  - direction: the channel direction RX or TX
//  - channel: an available channel on the device
//
// Return a list of available sensor string names
func (dev *SDRDevice) ListChannelSensors(direction Direction, channel uint) []string {
	length := C.size_t(0)
	info := C.SoapySDRDevice_listChannelSensors(dev.device, C.int(direction), C.size_t(channel), &length)
	// Copy the C string array into Go strings, then free the C side.
	defer stringArrayClear(info, length)
	return stringArray2Go(info, length)
}

// GetChannelSensorInfo gets meta-information about a channel sensor.
//
// Params:
//  - direction: the channel direction RX or TX
//  - channel: an available channel on the device
//  - key: the ID name of an available sensor
//
// Return meta-information about a sensor
func (dev *SDRDevice) GetChannelSensorInfo(direction Direction, channel uint, key string) SDRArgInfo {
	// key must be copied into C memory for the call and freed afterwards.
	cKey := C.CString(key)
	defer C.free(unsafe.Pointer(cKey))

	info := C.SoapySDRDevice_getChannelSensorInfo(dev.device, C.int(direction), C.size_t(channel), cKey)
	defer argInfoClear(info)
	return argInfo2Go(&info)
}

// ReadChannelSensor reads a channel sensor given the name. The value returned is a string which can represent
// a boolean ("true"/"false"), an integer, or float.
//
// Params:
//  - direction: the channel direction RX or TX
//  - channel: an available channel on the device
//  - key: the ID name of an available sensor
//
// Return the current value of the sensor
func (dev *SDRDevice) ReadChannelSensor(direction Direction, channel uint, key string) string {
	cKey := C.CString(key)
	defer C.free(unsafe.Pointer(cKey))

	// The returned C string is owned by us and must be freed after copying.
	val := (*C.char)(C.SoapySDRDevice_readChannelSensor(dev.device, C.int(direction), C.size_t(channel), cKey))
	defer C.free(unsafe.Pointer(val))
	return C.GoString(val)
}
// +build i2c,!spi

package main

import (
	// Imported for side effects only: registers the I2C subsystem driver.
	_ "github.com/djthorpe/gopi-hw/sys/i2c"
)

const (
	// MODULE_NAME selects the BME680 sensor module over the I2C bus.
	MODULE_NAME = "sensors/bme680/i2c"
)
package saucecloud

import (
	"archive/zip"
	"context"
	"os"
	"testing"
	"time"

	"github.com/jarcoal/httpmock"
	"github.com/rs/zerolog/log"
	"github.com/stretchr/testify/assert"

	"github.com/saucelabs/saucectl/internal/config"
	"github.com/saucelabs/saucectl/internal/cypress"
	"github.com/saucelabs/saucectl/internal/job"
	"github.com/saucelabs/saucectl/internal/mocks"
)

// TestPreliminarySteps_Basic: a project carrying a cypress version passes the
// preliminary version check.
func TestPreliminarySteps_Basic(t *testing.T) {
	runner := CypressRunner{Project: cypress.Project{Cypress: cypress.Cypress{Version: "5.6.2"}}}
	assert.Nil(t, runner.checkCypressVersion())
}

// TestPreliminarySteps_NoCypressVersion: a missing cypress version yields the
// documented error message.
func TestPreliminarySteps_NoCypressVersion(t *testing.T) {
	want := "missing cypress version. Check available versions here: https://docs.staging.saucelabs.net/testrunner-toolkit#supported-frameworks-and-browsers"
	runner := CypressRunner{}
	err := runner.checkCypressVersion()
	assert.NotNil(t, err)
	assert.Equal(t, want, err.Error())
}

// TestRunSuite: a single job run through fake starter/reader/writer mocks
// reports passed and carries the fake job id.
func TestRunSuite(t *testing.T) {
	httpmock.Activate()
	defer httpmock.DeactivateAndReset()
	// Fake JobStarter
	starter := mocks.FakeJobStarter{
		StartJobFn: func(ctx context.Context, opts job.StartOptions) (jobID string, isRDC bool, err error) {
			return "fake-job-id", false, nil
		},
	}
	reader := mocks.FakeJobReader{
		PollJobFn: func(ctx context.Context, id string, interval time.Duration) (job.Job, error) {
			return job.Job{ID: id, Passed: true}, nil
		},
	}
	writer := mocks.FakeJobWriter{
		UploadAssetFn: func(jobID string, fileName string, contentType string, content []byte) error {
			return nil
		},
	}
	runner := CypressRunner{
		CloudRunner: CloudRunner{
			JobStarter: &starter,
			JobReader:  &reader,
			JobWriter:  &writer,
		},
	}

	opts := job.StartOptions{}
	j, skipped, err := runner.runJob(opts)

	assert.Nil(t, err)
	assert.False(t, skipped)
	assert.Equal(t, j.ID, "fake-job-id")
}

// TestRunSuites: the full suite loop (start, poll, fetch assets, download
// artifacts) succeeds with concurrency 1 against mocks.
func TestRunSuites(t *testing.T) {
	httpmock.Activate()
	defer httpmock.DeactivateAndReset()
	// Fake JobStarter
	starter := mocks.FakeJobStarter{
		StartJobFn: func(ctx context.Context, opts job.StartOptions) (jobID string, isRDC bool, err error) {
			return "fake-job-id", false, nil
		},
	}
	reader := mocks.FakeJobReader{
		PollJobFn: func(ctx context.Context, id string, interval time.Duration) (job.Job, error) {
			return job.Job{ID: id, Passed: true}, nil
		},
		GetJobAssetFileNamesFn: func(ctx context.Context, jobID string) ([]string, error) {
			return []string{"file1", "file2"}, nil
		},
		GetJobAssetFileContentFn: func(ctx context.Context, jobID, fileName string) ([]byte, error) {
			return []byte("file content"), nil
		},
	}
	writer := mocks.FakeJobWriter{
		UploadAssetFn: func(jobID string, fileName string, contentType string, content []byte) error {
			return nil
		},
	}
	downloader := &mocks.FakeArifactDownloader{
		DownloadArtifactFn: func(jobID string) {
		},
	}
	ccyReader := mocks.CCYReader{ReadAllowedCCYfn: func(ctx context.Context) (int, error) {
		return 1, nil
	}}
	runner := CypressRunner{
		CloudRunner: CloudRunner{
			JobStarter:         &starter,
			JobReader:          &reader,
			JobWriter:          &writer,
			CCYReader:          ccyReader,
			ArtifactDownloader: downloader,
		},
		Project: cypress.Project{
			Suites: []cypress.Suite{
				{Name: "dummy-suite"},
			},
			Sauce: config.SauceConfig{
				Concurrency: 1,
			},
		},
	}
	ret := runner.runSuites("dummy-file-id")
	assert.True(t, ret)
}

// TestArchiveProject: archiving the e2e fixture produces a zip that contains
// cypress.json with the expected "{}\n" content.
func TestArchiveProject(t *testing.T) {
	os.Mkdir("./test-arch/", 0755)
	defer func() {
		os.RemoveAll("./test-arch/")
	}()
	runner := CypressRunner{
		Project: cypress.Project{
			RootDir: "../../tests/e2e/",
			Cypress: cypress.Cypress{
				ConfigFile: "cypress.json",
			},
		},
	}
	wd, _ := os.Getwd()
	log.Info().Msg(wd)

	z, err := runner.archiveProject(runner.Project, "./test-arch/", runner.Project.RootDir, "")
	if err != nil {
		t.Fail()
	}
	zipFile, _ := os.Open(z)
	defer func() {
		zipFile.Close()
	}()
	zipInfo, _ := zipFile.Stat()
	zipStream, _ := zip.NewReader(zipFile, zipInfo.Size())

	var cypressConfig *zip.File
	for _, f := range zipStream.File {
		if f.Name == "cypress.json" {
			cypressConfig = f
			break
		}
	}
	assert.NotNil(t, cypressConfig)
	rd, _ := cypressConfig.Open()
	b := make([]byte, 3)
	n, err := rd.Read(b)
	assert.Equal(t, 3, n)
	assert.Equal(t, []byte("{}\n"), b)
}

// TestUploadProject: the fake uploader returns "fake-id" on success and an
// error (with empty id) on failure.
func TestUploadProject(t *testing.T) {
	uploader := &mocks.FakeProjectUploader{
		UploadSuccess: true,
	}
	runner := CypressRunner{
		CloudRunner: CloudRunner{
			ProjectUploader: uploader,
		},
	}

	id, err := runner.uploadProject("/my-dummy-project.zip", "project")
	assert.Equal(t, "fake-id", id)
	assert.Nil(t, err)

	uploader.UploadSuccess = false
	id, err = runner.uploadProject("/my-dummy-project.zip", "project")
	assert.Equal(t, "", id)
	assert.NotNil(t, err)
}

// TestRunProject: end-to-end happy path through RunProject with every
// collaborator mocked; expects a zero exit count.
func TestRunProject(t *testing.T) {
	os.Mkdir("./test-arch/", 0755)
	httpmock.Activate()
	defer func() {
		os.RemoveAll("./test-arch/")
		httpmock.DeactivateAndReset()
	}()
	// Fake JobStarter
	starter := mocks.FakeJobStarter{
		StartJobFn: func(ctx context.Context, opts job.StartOptions) (jobID string, isRDC bool, err error) {
			return "fake-job-id", false, nil
		},
	}
	reader := mocks.FakeJobReader{
		PollJobFn: func(ctx context.Context, id string, interval time.Duration) (job.Job, error) {
			return job.Job{ID: id, Passed: true}, nil
		},
		GetJobAssetFileNamesFn: func(ctx context.Context, jobID string) ([]string, error) {
			return []string{"file1", "file2"}, nil
		},
		GetJobAssetFileContentFn: func(ctx context.Context, jobID, fileName string) ([]byte, error) {
			return []byte("file content"), nil
		},
	}
	writer := mocks.FakeJobWriter{
		UploadAssetFn: func(jobID string, fileName string, contentType string, content []byte) error {
			return nil
		},
	}
	downloader := mocks.FakeArifactDownloader{
		DownloadArtifactFn: func(jobID string) {},
	}
	ccyReader := mocks.CCYReader{ReadAllowedCCYfn: func(ctx context.Context) (int, error) {
		return 1, nil
	}}
	uploader := &mocks.FakeProjectUploader{
		UploadSuccess: true,
	}
	runner := CypressRunner{
		CloudRunner: CloudRunner{
			JobStarter:         &starter,
			JobReader:          &reader,
			JobWriter:          &writer,
			CCYReader:          ccyReader,
			ProjectUploader:    uploader,
			ArtifactDownloader: &downloader,
		},
		Project: cypress.Project{
			RootDir: ".",
			Cypress: cypress.Cypress{
				Version:    "5.6.0",
				ConfigFile: "../../tests/e2e/cypress.json",
			},
			Suites: []cypress.Suite{
				{Name: "dummy-suite"},
			},
			Sauce: config.SauceConfig{
				Concurrency: 1,
			},
		},
	}
	cnt, err := runner.RunProject()
	assert.Nil(t, err)
	assert.Equal(t, cnt, 0)
}

// TestCypress_GetSuiteNames: suite names are joined with ", ".
func TestCypress_GetSuiteNames(t *testing.T) {
	runner := &CypressRunner{
		Project: cypress.Project{
			Suites: []cypress.Suite{
				{Name: "suite1"},
				{Name: "suite2"},
				{Name: "suite3"},
			},
		},
	}
	assert.Equal(t, "suite1, suite2, suite3", runner.getSuiteNames())
}
package main

import (
	"fmt"
	"log"
	"math"
	"net"

	"google.golang.org/grpc"

	"mid/calc/calcpb"
)

// Server implements the CalcService gRPC server.
type Server struct {
	calcpb.UnimplementedCalcServiceServer
}

// main starts the CalcService gRPC server on port 50051 and blocks serving
// requests until the listener fails.
func main() {
	l, err := net.Listen("tcp", "0.0.0.0:50051")
	if err != nil {
		log.Fatalf("Failed to listen:%v", err)
	}
	s := grpc.NewServer()
	calcpb.RegisterCalcServiceServer(s, &Server{})
	log.Println("Server is running on port:50051")
	if err := s.Serve(l); err != nil {
		log.Fatalf("failed to serve:%v", err)
	}
}

// Calculate streams the prime factorization of the request's number, one
// factor per response message, in non-decreasing order.
//
// FIX: the original called log.Fatalf on a failed stream.Send, which killed
// the entire server process when a single client stream broke. Send errors
// are now returned to the gRPC runtime instead.
func (s *Server) Calculate(req *calcpb.CalculateRequest, stream calcpb.CalcService_CalculateServer) error {
	fmt.Printf("GreetManyTimes function was invoked with %v \n", req)
	number := req.GetCalc().GetNumber()

	// Strip out all factors of 2 first so the trial division below only
	// needs odd candidates.
	for number%2 == 0 {
		if err := stream.Send(&calcpb.CalculateResponse{Res: 2}); err != nil {
			return fmt.Errorf("sending result: %w", err)
		}
		number /= 2
	}
	// Trial-divide by odd candidates up to sqrt(number); number shrinks as
	// factors are emitted, so the bound is re-evaluated each iteration.
	for i := int64(3); float64(i) <= math.Sqrt(float64(number)); i += 2 {
		for number%i == 0 {
			if err := stream.Send(&calcpb.CalculateResponse{Res: i}); err != nil {
				return fmt.Errorf("sending result: %w", err)
			}
			number /= i
		}
	}
	// Whatever remains above 2 is itself prime.
	if number > 2 {
		if err := stream.Send(&calcpb.CalculateResponse{Res: number}); err != nil {
			return fmt.Errorf("sending result: %w", err)
		}
	}
	return nil
}
package apocalisp

import (
	"apocalisp/core"
	"fmt"
	"os"
	"path/filepath"
	"runtime/debug"

	"github.com/peterh/liner"
)

// withLiner runs handler with a fresh liner.State that is closed on return.
// NOTE(review): this helper is not used by Repl below, which builds its own
// liner — presumably it is used elsewhere in the package.
func withLiner(handler func(*liner.State)) {
	state := liner.NewLiner()
	defer state.Close()
	state.SetCtrlCAborts(false)
	handler(state)
}

// Repl either runs the script named in os.Args[1] or starts an interactive
// read-eval-print loop with line editing and persistent history, using the
// given eval function and parser.
func Repl(eval func(*core.Type, *core.Environment) (*core.Type, error), parser core.Parser) {
	// decrease max stack size to make TCO-related tests useful
	debug.SetMaxStack(1 * 1024 * 1024)

	cwd, err := os.Getwd()
	if err != nil {
		fmt.Print("Error while calling 'os.Getwd()'.")
		os.Exit(1)
	}
	// History lives next to the current working directory.
	historyFilePath := filepath.Join(cwd, ".apocalisp_history")

	line := liner.NewLiner()
	defer line.Close()
	line.SetCtrlCAborts(true)

	// read/write history
	if f, err := os.Open(historyFilePath); err == nil {
		line.ReadHistory(f)
		f.Close()
	}
	defer func() {
		if f, err := os.Create(historyFilePath); err == nil {
			line.WriteHistory(f)
			f.Close()
		}
	}()

	// environment
	environment := DefaultEnvironment(parser, eval)

	// *ARGV* holds everything after the script name (os.Args[2:]).
	argv := core.NewList()
	for i := range os.Args {
		if i > 1 {
			argv.Append(core.Type{String: &os.Args[i]})
		}
	}
	environment.Set("*ARGV*", *argv)
	environment.Set("*host-language*", *core.NewString("apocalisp"))

	// Bootstrap core forms written in the language itself; errors are
	// deliberately ignored for these trusted definitions.
	_, _ = Rep(`(def! not (fn* (a) (if a false true)))`, environment, eval, parser)
	_, _ = Rep(`(def! load-file (fn* (f) (eval (read-string (str "(do " (slurp f) "\nnil)")))))`, environment, eval, parser)
	_, _ = Rep(`(defmacro! cond (fn* (& xs) (if (> (count xs) 0) (list 'if (first xs) (if (> (count xs) 1) (nth xs 1) (throw "odd number of forms to cond")) (cons 'cond (rest (rest xs)))))))`, environment, eval, parser)

	if len(os.Args) >= 2 {
		// Script mode: load and run the file, then exit.
		_, _ = Rep(fmt.Sprintf(`(load-file "%s")`, os.Args[1]), environment, eval, parser)
	} else {
		// Interactive mode: banner, then prompt until EOF/interrupt.
		_, _ = Rep(`(println (str "Mal [" *host-language* "]"))`, environment, eval, parser)
		for {
			if sexpr, err := line.Prompt("user> "); err == nil {
				line.AppendHistory(sexpr)
				if output, err := Rep(sexpr, environment, eval, parser); err == nil {
					if len(output) > 0 {
						fmt.Println(output)
					}
				} else {
					fmt.Println(err.Error())
				}
			} else {
				fmt.Println("\nFarewell!")
				break
			}
		}
	}
}
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package grumpy

import (
	"bytes"
	"fmt"
	"reflect"
	"unicode"
	"unicode/utf8"
)

var (
	// UnicodeType is the object representing the Python 'unicode' type.
	UnicodeType = newBasisType("unicode", reflect.TypeOf(Unicode{}), toUnicodeUnsafe, BaseStringType)
)

// Unicode represents Python 'unicode' objects. The string value is stored as
// utf-32 data.
type Unicode struct {
	Object
	value []rune
}

// NewUnicode returns a new Unicode holding the given string value. value is
// assumed to be a valid utf-8 string.
func NewUnicode(value string) *Unicode {
	return NewUnicodeFromRunes(bytes.Runes([]byte(value)))
}

// NewUnicodeFromRunes returns a new Unicode holding the given runes.
func NewUnicodeFromRunes(value []rune) *Unicode {
	return &Unicode{Object{typ: UnicodeType}, value}
}

// toUnicodeUnsafe reinterprets o as *Unicode without any type check; callers
// must already know o's type is unicode.
func toUnicodeUnsafe(o *Object) *Unicode {
	return (*Unicode)(o.toPointer())
}

// Encode translates the runes in s into a str with the given encoding.
//
// NOTE: If s contains surrogates (e.g. U+D800), Encode will raise
// UnicodeDecodeError consistent with CPython 3.x but different than 2.x.
func (s *Unicode) Encode(f *Frame, encoding, errors string) (*Str, *BaseException) {
	// TODO: Support custom encodings and error handlers.
	normalized := normalizeEncoding(encoding)
	if normalized != "utf8" {
		return nil, f.RaiseType(LookupErrorType, fmt.Sprintf("unknown encoding: %s", encoding))
	}
	buf := bytes.Buffer{}
	for i, r := range s.Value() {
		switch {
		case utf8.ValidRune(r):
			buf.WriteRune(r)
		case errors == EncodeIgnore:
			// Do nothing
		case errors == EncodeReplace:
			buf.WriteRune(unicode.ReplacementChar)
		case errors == EncodeStrict:
			format := "'%s' codec can't encode character %s in position %d"
			return nil, f.RaiseType(UnicodeEncodeErrorType, fmt.Sprintf(format, encoding, escapeRune(r), i))
		default:
			format := "unknown error handler name '%s'"
			return nil, f.RaiseType(LookupErrorType, fmt.Sprintf(format, errors))
		}
	}
	return NewStr(buf.String()), nil
}

// ToObject upcasts s to an Object.
func (s *Unicode) ToObject() *Object {
	return &s.Object
}

// Value returns the underlying string value held by s.
func (s *Unicode) Value() []rune {
	return s.value
}

// unicodeAdd implements unicode.__add__: concatenation, coercing w to
// unicode first.
func unicodeAdd(f *Frame, v, w *Object) (*Object, *BaseException) {
	unicodeV := toUnicodeUnsafe(v)
	unicodeW, raised := unicodeCoerce(f, w)
	if raised != nil {
		return nil, raised
	}
	lenV := len(unicodeV.Value())
	newLen := lenV + len(unicodeW.Value())
	if newLen < 0 {
		// int overflow in the length sum.
		return nil, f.RaiseType(OverflowErrorType, errResultTooLarge)
	}
	value := make([]rune, newLen)
	copy(value, unicodeV.Value())
	copy(value[lenV:], unicodeW.Value())
	return NewUnicodeFromRunes(value).ToObject(), nil
}

// unicodeContains implements unicode.__contains__ via a naive substring scan
// over rune slices.
func unicodeContains(f *Frame, o *Object, value *Object) (*Object, *BaseException) {
	lhs := toUnicodeUnsafe(o).Value()
	s, raised := unicodeCoerce(f, value)
	if raised != nil {
		return nil, raised
	}
	rhs := s.Value()
	lhsLen, rhsLen := len(lhs), len(rhs)
	maxOffset := lhsLen - rhsLen
	for offset := 0; offset <= maxOffset; offset++ {
		if runeSliceCmp(lhs[offset:offset+rhsLen], rhs) == 0 {
			return True.ToObject(), nil
		}
	}
	return False.ToObject(), nil
}

// unicodeEncode implements unicode.encode(encoding=..., errors=...), with
// both optional arguments defaulting to the module-wide defaults.
func unicodeEncode(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
	// TODO: Accept unicode for encoding and errors args.
	expectedTypes := []*Type{UnicodeType, StrType, StrType}
	argc := len(args)
	if argc >= 1 && argc < 3 {
		expectedTypes = expectedTypes[:argc]
	}
	if raised := checkMethodArgs(f, "encode", args, expectedTypes...); raised != nil {
		return nil, raised
	}
	encoding := EncodeDefault
	if argc > 1 {
		encoding = toStrUnsafe(args[1]).Value()
	}
	errors := EncodeStrict
	if argc > 2 {
		errors = toStrUnsafe(args[2]).Value()
	}
	ret, raised := toUnicodeUnsafe(args[0]).Encode(f, encoding, errors)
	if raised != nil {
		return nil, raised
	}
	return ret.ToObject(), nil
}

func unicodeEq(f *Frame, v, w *Object) (*Object, *BaseException) {
	return unicodeCompareEq(f, toUnicodeUnsafe(v), w, true)
}

func unicodeGE(f *Frame, v, w *Object) (*Object, *BaseException) {
	return unicodeCompare(f, toUnicodeUnsafe(v), w, False, True, True)
}

// unicodeGetItem returns a slice of string depending on whether index is an
// integer or a slice. If index is neither of those types then a TypeError is
// returned.
func unicodeGetItem(f *Frame, o, key *Object) (*Object, *BaseException) {
	s := toUnicodeUnsafe(o).Value()
	switch {
	case key.typ.slots.Index != nil:
		index, raised := seqCheckedIndex(f, len(s), toIntUnsafe(key).Value())
		if raised != nil {
			return nil, raised
		}
		return NewUnicodeFromRunes([]rune{s[index]}).ToObject(), nil
	case key.isInstance(SliceType):
		slice := toSliceUnsafe(key)
		start, stop, step, sliceLen, raised := slice.calcSlice(f, len(s))
		if raised != nil {
			return nil, raised
		}
		if step == 1 {
			// Contiguous slice: share the backing array directly.
			return NewUnicodeFromRunes(s[start:stop]).ToObject(), nil
		}
		result := make([]rune, 0, sliceLen)
		for j := start; j < stop; j += step {
			result = append(result, s[j])
		}
		return NewUnicodeFromRunes([]rune(result)).ToObject(), nil
	}
	return nil, f.RaiseType(TypeErrorType, fmt.Sprintf("unicode indices must be integers or slice, not %s", key.typ.Name()))
}

// unicodeGetNewArgs implements unicode.__getnewargs__, returning a 1-tuple
// holding the receiver (used by pickling).
func unicodeGetNewArgs(f *Frame, args Args, _ KWArgs) (*Object, *BaseException) {
	if raised := checkMethodArgs(f, "__getnewargs__", args, UnicodeType); raised != nil {
		return nil, raised
	}
	return NewTuple1(args[0]).ToObject(), nil
}

func unicodeGT(f *Frame, v, w *Object) (*Object, *BaseException) {
	return unicodeCompare(f, toUnicodeUnsafe(v), w, False, False, True)
}

// unicodeHash mirrors CPython's string hash: h = h*1000003 ^ c over the
// runes, seeded with the first rune, xor'd with the length, with -1 mapped
// to -2 (since -1 signals an error in CPython).
func unicodeHash(f *Frame, o *Object) (*Object, *BaseException) {
	s := toUnicodeUnsafe(o).Value()
	l := len(s)
	if l == 0 {
		return NewInt(0).ToObject(), nil
	}
	h := int(s[0]) << 7
	for _, r := range s {
		h = (1000003 * h) ^ int(r)
	}
	h ^= l
	if h == -1 {
		h = -2
	}
	return NewInt(h).ToObject(), nil
}

// unicodeJoin implements unicode.join over any sequence.
func unicodeJoin(f *Frame, args Args, _ KWArgs) (*Object, *BaseException) {
	if raised := checkMethodArgs(f, "join", args, UnicodeType, ObjectType); raised != nil {
		return nil, raised
	}
	var result *Object
	raised := seqApply(f, args[1], func(parts []*Object, _ bool) (raised *BaseException) {
		result, raised = unicodeJoinParts(f, toUnicodeUnsafe(args[0]), parts)
		return raised
	})
	if raised != nil {
		return nil, raised
	}
	return result, nil
}

func unicodeLE(f *Frame, v, w *Object) (*Object, *BaseException) {
	return unicodeCompare(f, toUnicodeUnsafe(v), w, True, True, False)
}

func unicodeLen(f *Frame, o *Object) (*Object, *BaseException) {
	return NewInt(len(toUnicodeUnsafe(o).Value())).ToObject(), nil
}

func unicodeLT(f *Frame, v, w *Object) (*Object, *BaseException) {
	return unicodeCompare(f, toUnicodeUnsafe(v), w, True, False, False)
}

// unicodeMul implements unicode.__mul__ (string repetition).
func unicodeMul(f *Frame, v, w *Object) (*Object, *BaseException) {
	value := toUnicodeUnsafe(v).Value()
	numChars := len(value)
	n, ok, raised := strRepeatCount(f, numChars, w)
	if raised != nil {
		return nil, raised
	}
	if !ok {
		return NotImplemented, nil
	}
	newLen := numChars * n
	newValue := make([]rune, newLen)
	for i := 0; i < newLen; i += numChars {
		copy(newValue[i:], value)
	}
	return NewUnicodeFromRunes(newValue).ToObject(), nil
}

func unicodeNative(f *Frame, o *Object) (reflect.Value, *BaseException) {
	// Encode to utf-8 when passing data out to Go.
	s, raised := toUnicodeUnsafe(o).Encode(f, EncodeDefault, EncodeStrict)
	if raised != nil {
		return reflect.Value{}, raised
	}
	return reflect.ValueOf(s.Value()), nil
}

func unicodeNE(f *Frame, v, w *Object) (*Object, *BaseException) {
	return unicodeCompareEq(f, toUnicodeUnsafe(v), w, false)
}

// unicodeNew implements unicode.__new__, handling subtypes, the no-arg case,
// conversion of arbitrary objects (via __unicode__, __str__ or repr) and
// decoding of str arguments with optional encoding/errors.
func unicodeNew(f *Frame, t *Type, args Args, _ KWArgs) (ret *Object, raised *BaseException) {
	// TODO: Accept keyword arguments: string, encoding, errors.
	if t != UnicodeType {
		// Allocate a plain unicode then copy its value into an object
		// of the unicode subtype.
		s, raised := unicodeNew(f, UnicodeType, args, nil)
		if raised != nil {
			return nil, raised
		}
		result := toUnicodeUnsafe(newObject(t))
		result.value = toUnicodeUnsafe(s).Value()
		return result.ToObject(), nil
	}
	expectedTypes := []*Type{ObjectType, StrType, StrType}
	argc := len(args)
	if argc < 3 {
		expectedTypes = expectedTypes[:argc]
	}
	if raised := checkMethodArgs(f, "__new__", args, expectedTypes...); raised != nil {
		return nil, raised
	}
	if argc == 0 {
		return NewUnicodeFromRunes(nil).ToObject(), nil
	}
	arg0 := args[0]
	if argc == 1 {
		if unicode := arg0.typ.slots.Unicode; unicode != nil {
			ret, raised = unicode.Fn(f, arg0)
		} else if arg0.typ == UnicodeType {
			ret = toUnicodeUnsafe(arg0).ToObject()
		} else if arg0.isInstance(UnicodeType) {
			// Return a unicode object (not a subtype).
			ret = NewUnicodeFromRunes(toUnicodeUnsafe(arg0).Value()).ToObject()
		} else if str := arg0.typ.slots.Str; str != nil {
			ret, raised = str.Fn(f, arg0)
		} else {
			var s *Str
			if s, raised = Repr(f, arg0); raised == nil {
				ret = s.ToObject()
			}
		}
		if raised != nil {
			return nil, raised
		}
		u, raised := unicodeCoerce(f, ret)
		if raised != nil {
			return nil, raised
		}
		return u.ToObject(), nil
	}
	// With an explicit encoding, the first argument must be a str to decode.
	if !arg0.isInstance(StrType) {
		format := "coercing to Unicode: need str, %s found"
		return nil, f.RaiseType(TypeErrorType, fmt.Sprintf(format, arg0.typ.Name()))
	}
	encoding := toStrUnsafe(args[1]).Value()
	errors := "strict"
	if argc > 2 {
		errors = toStrUnsafe(args[2]).Value()
	}
	s, raised := toStrUnsafe(arg0).Decode(f, encoding, errors)
	if raised != nil {
		return nil, raised
	}
	return s.ToObject(), nil
}

// unicodeRepr renders u'...' with printable ASCII kept literal and
// everything else escaped.
func unicodeRepr(_ *Frame, o *Object) (*Object, *BaseException) {
	buf := bytes.Buffer{}
	buf.WriteString("u'")
	for _, r := range toUnicodeUnsafe(o).Value() {
		if escape, ok := escapeMap[r]; ok {
			buf.WriteString(escape)
		} else if r <= unicode.MaxASCII && unicode.IsPrint(r) {
			buf.WriteRune(r)
		} else {
			buf.Write(escapeRune(r))
		}
	}
	buf.WriteRune('\'')
	return NewStr(buf.String()).ToObject(), nil
}

// unicodeStr implements str(u): encode with the default codec, strict errors.
func unicodeStr(f *Frame, o *Object) (*Object, *BaseException) {
	ret, raised := toUnicodeUnsafe(o).Encode(f, EncodeDefault, EncodeStrict)
	if raised != nil {
		return nil, raised
	}
	return ret.ToObject(), nil
}

// unicodeStrip implements unicode.strip([chars]): trims whitespace by
// default, or any rune in chars when provided.
func unicodeStrip(f *Frame, args Args, _ KWArgs) (*Object, *BaseException) {
	expectedTypes := []*Type{UnicodeType, ObjectType}
	argc := len(args)
	if argc == 1 {
		expectedTypes = expectedTypes[:argc]
	}
	if raised := checkMethodArgs(f, "strip", args, expectedTypes...); raised != nil {
		return nil, raised
	}
	s := toUnicodeUnsafe(args[0])
	charsArg := None
	if argc > 1 {
		charsArg = args[1]
	}
	matchFunc := unicode.IsSpace
	if charsArg != None {
		chars, raised := unicodeCoerce(f, charsArg)
		if raised != nil {
			return nil, raised
		}
		matchFunc = func(r rune) bool {
			for _, c := range chars.Value() {
				if r == c {
					return true
				}
			}
			return false
		}
	}
	runes := s.Value()
	numRunes := len(runes)
	// Advance lindex past leading matches, retreat rindex past trailing ones.
	lindex := 0
	for ; lindex < numRunes; lindex++ {
		if !matchFunc(runes[lindex]) {
			break
		}
	}
	rindex := numRunes
	for ; rindex > lindex; rindex-- {
		if !matchFunc(runes[rindex-1]) {
			break
		}
	}
	result := make([]rune, rindex-lindex)
	copy(result, runes[lindex:rindex])
	return NewUnicodeFromRunes(result).ToObject(), nil
}

// initUnicodeType installs the unicode type's methods and slots.
func initUnicodeType(dict map[string]*Object) {
	dict["__getnewargs__"] = newBuiltinFunction("__getnewargs__", unicodeGetNewArgs).ToObject()
	dict["encode"] = newBuiltinFunction("encode", unicodeEncode).ToObject()
	dict["join"] = newBuiltinFunction("join", unicodeJoin).ToObject()
	dict["strip"] = newBuiltinFunction("strip", unicodeStrip).ToObject()
	UnicodeType.slots.Add = &binaryOpSlot{unicodeAdd}
	UnicodeType.slots.Contains = &binaryOpSlot{unicodeContains}
	UnicodeType.slots.Eq = &binaryOpSlot{unicodeEq}
	UnicodeType.slots.GE = &binaryOpSlot{unicodeGE}
	UnicodeType.slots.GetItem = &binaryOpSlot{unicodeGetItem}
	UnicodeType.slots.GT = &binaryOpSlot{unicodeGT}
	UnicodeType.slots.Hash = &unaryOpSlot{unicodeHash}
	UnicodeType.slots.LE = &binaryOpSlot{unicodeLE}
	UnicodeType.slots.Len = &unaryOpSlot{unicodeLen}
	UnicodeType.slots.LT = &binaryOpSlot{unicodeLT}
	UnicodeType.slots.Mul = &binaryOpSlot{unicodeMul}
	UnicodeType.slots.NE = &binaryOpSlot{unicodeNE}
	UnicodeType.slots.New = &newSlot{unicodeNew}
	UnicodeType.slots.Native = &nativeSlot{unicodeNative}
	UnicodeType.slots.RMul = &binaryOpSlot{unicodeMul}
	UnicodeType.slots.Repr = &unaryOpSlot{unicodeRepr}
	UnicodeType.slots.Str = &unaryOpSlot{unicodeStr}
}

// unicodeCompare is the shared three-way comparison used by the ordering
// slots; callers pass the objects to return for <, == and > respectively.
// A str rhs is strictly decoded, so this can raise UnicodeDecodeError.
func unicodeCompare(f *Frame, v *Unicode, w *Object, ltResult, eqResult, gtResult *Int) (*Object, *BaseException) {
	rhs := []rune(nil)
	if w.isInstance(UnicodeType) {
		rhs = toUnicodeUnsafe(w).Value()
	} else if w.isInstance(StrType) {
		ret, raised := toStrUnsafe(w).Decode(f, EncodeDefault, EncodeStrict)
		if raised != nil {
			return nil, raised
		}
		rhs = ret.Value()
	} else {
		return NotImplemented, nil
	}
	switch runeSliceCmp(v.Value(), rhs) {
	case -1:
		return ltResult.ToObject(), nil
	case 0:
		return eqResult.ToObject(), nil
	default:
		return gtResult.ToObject(), nil
	}
}

// runeSliceCmp lexicographically compares two rune slices, returning
// -1, 0 or 1.
func runeSliceCmp(lhs []rune, rhs []rune) int {
	lhsLen, rhsLen := len(lhs), len(rhs)
	minLen := lhsLen
	if rhsLen < lhsLen {
		minLen = rhsLen
	}
	for i := 0; i < minLen; i++ {
		if lhs[i] < rhs[i] {
			return -1
		}
		if lhs[i] > rhs[i] {
			return 1
		}
	}
	if lhsLen < rhsLen {
		return -1
	}
	if lhsLen > rhsLen {
		return 1
	}
	return 0
}

// unicodeCompareEq returns the result of comparing whether v and w are equal
// (when eq is true) or unequal (when eq is false). It differs from
// unicodeCompare in that it will safely decode w if it has type str and
// therefore will not raise UnicodeDecodeError.
func unicodeCompareEq(f *Frame, v *Unicode, w *Object, eq bool) (*Object, *BaseException) {
	if w.isInstance(UnicodeType) {
		// Do the standard comparison knowing that we won't raise
		// UnicodeDecodeError for w.
		return unicodeCompare(f, v, w, GetBool(!eq), GetBool(eq), GetBool(!eq))
	}
	if !w.isInstance(StrType) {
		return NotImplemented, nil
	}
	lhs := v.Value()
	lhsLen := len(lhs)
	i := 0
	// Decode w as utf-8.
	for _, r := range toStrUnsafe(w).Value() {
		// lhs[i] should never be RuneError so the second part of the
		// condition should catch that case.
		if i >= lhsLen || lhs[i] != r {
			return GetBool(!eq).ToObject(), nil
		}
		i++
	}
	return GetBool((i == lhsLen) == eq).ToObject(), nil
}

// unicodeCoerce converts o to *Unicode: str is strictly decoded, unicode is
// passed through, anything else raises TypeError.
func unicodeCoerce(f *Frame, o *Object) (*Unicode, *BaseException) {
	switch {
	case o.isInstance(StrType):
		return toStrUnsafe(o).Decode(f, EncodeDefault, EncodeStrict)
	case o.isInstance(UnicodeType):
		return toUnicodeUnsafe(o), nil
	default:
		format := "coercing to Unicode: need string, %s found"
		return nil, f.RaiseType(TypeErrorType, fmt.Sprintf(format, o.typ.Name()))
	}
}

// unicodeJoinParts concatenates parts with s as separator into a single
// pre-sized buffer; each part is coerced to unicode first.
func unicodeJoinParts(f *Frame, s *Unicode, parts []*Object) (*Object, *BaseException) {
	numParts := len(parts)
	if numParts == 0 {
		return NewUnicode("").ToObject(), nil
	}
	sep := s.Value()
	sepLen := len(sep)
	unicodeParts := make([]*Unicode, numParts)
	// Calculate the size of the required buffer.
	numRunes := (numParts - 1) * len(sep)
	for i, part := range parts {
		s, raised := unicodeCoerce(f, part)
		if raised != nil {
			return nil, raised
		}
		unicodeParts[i] = s
		numRunes += len(s.Value())
	}
	// Piece together the result string into buf.
	buf := make([]rune, numRunes)
	offset := 0
	for i, part := range unicodeParts {
		if i > 0 {
			copy(buf[offset:offset+sepLen], sep)
			offset += sepLen
		}
		s := part.Value()
		l := len(s)
		copy(buf[offset:offset+l], s)
		offset += l
	}
	return NewUnicodeFromRunes(buf).ToObject(), nil
}
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * Author: Mahmoud Abdelsalam <scroveez@gmail.com> * */ package el import ( "bytes" "crypto/aes" _cipher "crypto/cipher" "crypto/rand" "github.com/golang/snappy" ) type elCipher struct { block _cipher.Block } const cipherBlockSize = 16 func newElCipher(key []byte) (*elCipher, error) { s := new(elCipher) key = PKCS5Padding(key, cipherBlockSize) block, err := aes.NewCipher(key) if err != nil { return nil, err } s.block = block return s, nil } func (s *elCipher) encrypt(msg []byte) []byte { cmsg := make([]byte, snappy.MaxEncodedLen(len(msg))) cmsg = snappy.Encode(cmsg, msg) pmsg := PKCS5Padding(cmsg, cipherBlockSize) buf := make([]byte, len(pmsg)+cipherBlockSize) iv := buf[:cipherBlockSize] rand.Read(iv) encrypter := _cipher.NewCBCEncrypter(s.block, iv) encrypter.CryptBlocks(buf[cipherBlockSize:], pmsg) return buf } func (s *elCipher) decrypt(iv []byte, ctext []byte) []byte { defer func() { if err := recover(); err != nil { logger.Error("%v", err) } }() decrypter := _cipher.NewCBCDecrypter(s.block, iv) buf := make([]byte, len(ctext)) decrypter.CryptBlocks(buf, ctext) cmsg := PKCS5UnPadding(buf) msg, _ := snappy.Decode(nil, cmsg) return msg } func PKCS5Padding(ciphertext []byte, blockSize int) []byte { padding := blockSize - len(ciphertext)%blockSize padtext := bytes.Repeat([]byte{byte(padding)}, padding) return append(ciphertext, padtext...) 
} func PKCS5UnPadding(origData []byte) []byte { length := len(origData) unpadding := int(origData[length-1]) return origData[:(length - unpadding)] }
package main import ( "fmt" ) const target = 347991 const size = 1001 const half = size / 2 //dir 0 = right 1 = up 2 = left 3 = down func main() { var spiral [size][size]int spiral[half][half] = 1 dir := 3 row,col := half,half step := 1 found := false for !found { for i := 0; i < 2 && !found; i++ { dir = (dir + 1) % 4 for j := 0; j < step ; j++ { col,row = moveDir(dir,row,col) if row >= size || dir >= size { found = true break } spiral[row][col] = sumAdj(spiral,row,col) if spiral[row][col] > target { fmt.Println(spiral[row][col]) return } } } step++ } //fmt.Println(spiral) //build array //find entry //compute distance } func sumAdj(a [size][size]int, row,col int) (int) { return a[row-1][col-1]+ a[row][col-1] + a[row+1][col-1] + a[row+1][col] + a[row+1][col+1] + a[row][col+1] + a[row-1][col+1] + a[row-1][col] } func moveDir(dir,col,row int) (x,y int) { switch dir { case 0: col++ break case 1: row-- break case 2: col-- break case 3: row++ break } return row, col }
package main

import (
	"context"

	"github.com/signaux-faibles/keycloakUpdater/v2/logger"
	"github.com/signaux-faibles/libwekan"
)

// BoardsMembers maps a Wekan board slug to the users expected on that board.
type BoardsMembers map[libwekan.BoardSlug]Users

// manageBoardsMembers reconciles board membership: every user listed in
// fromConfig is enrolled on the boards it references, and all boards of the
// Wekan domain are covered (boards absent from the config get an empty
// expected-member set, which deactivates their current members).
func manageBoardsMembers(wekan libwekan.Wekan, fromConfig Users) error {
	fields := logger.DataForMethod("manageBoardsMembers")

	// Scope: board membership derived from the configuration.
	wekanBoardsMembers := fromConfig.inferBoardsMember()
	domainBoards, err := wekan.SelectDomainBoards(context.Background())
	if err != nil {
		return err
	}
	// wekanBoardsMembers is non-nil here, so addBoards mutates it in place;
	// the returned value can safely be ignored.
	wekanBoardsMembers.addBoards(domainBoards)

	logger.Info("> inscrit les utilisateurs dans les tableaux", fields)
	for boardSlug, boardMembers := range wekanBoardsMembers {
		err := updateBoardMembers(wekan, boardSlug, boardMembers)
		if err != nil {
			return err
		}
	}
	return nil
}

// updateBoardMembers makes the membership of one board match boardMembers:
// expected users are (re)activated, unexpected current members are
// deactivated, and the admin user is kept as board admin.
func updateBoardMembers(wekan libwekan.Wekan, boardSlug libwekan.BoardSlug, boardMembers Users) error {
	fields := logger.DataForMethod("updateBoardMembers")
	fields.AddAny("board", boardSlug)
	board, err := wekan.GetBoardFromSlug(context.Background(), boardSlug)
	if err != nil {
		return err
	}

	currentUsersMap, currentUsersIDs, err := fetchCurrentWekanBoardMembers(wekan, board)
	if err != nil {
		return err
	}
	expectedUsersMap, expectedUsersIDs, err := fetchExpectedWekanBoardMembers(wekan, boardMembers)
	if err != nil {
		return err
	}

	alreadyBoardMembers, expectedInactiveBoardMembers, newBoardMembers := intersect(currentUsersIDs, expectedUsersIDs)

	logger.Debug(">> examine les nouvelles inscriptions", fields)
	for _, userID := range append(alreadyBoardMembers, newBoardMembers...) {
		if err := ensureUserIsActiveBoardMember(wekan, expectedUsersMap[userID], board); err != nil {
			return err
		}
	}

	logger.Debug(">> examine les radiations", fields)
	for _, userID := range expectedInactiveBoardMembers {
		if _, ok := currentUsersMap[userID]; ok {
			if err := ensureUserIsInactiveBoardMember(wekan, currentUsersMap[userID], board); err != nil {
				return err
			}
		}
	}

	// The admin user is administrator of every board; enforce that rule.
	logger.Debug(">> vérifie la participation de l'admin", fields)
	modified, err := wekan.EnsureUserIsBoardAdmin(context.Background(), board.ID, wekan.AdminID())
	if modified {
		fields.AddAny("username", wekan.AdminUsername())
		logger.Info(">>> donne les privilèges à l'admin", fields)
	}
	return err
}

// ensureUserIsActiveBoardMember enrolls (or reactivates) user on board,
// logging only when a change was actually made.
func ensureUserIsActiveBoardMember(wekan libwekan.Wekan, user libwekan.User, board libwekan.Board) error {
	fields := logger.DataForMethod("ensureUserIsActiveBoardMember")
	fields.AddAny("username", user.Username)
	fields.AddAny("board", board.Slug)
	logger.Debug(">>> examine l'utilisateur", fields)
	modified, err := wekan.EnsureUserIsActiveBoardMember(context.Background(), board.ID, user.ID)
	if err != nil {
		return err
	}
	if modified {
		logger.Info(">>> inscrit l'utilisateur", fields)
	}
	return nil
}

// ensureUserIsInactiveBoardMember deactivates user's membership on board,
// logging only when a change was actually made.
func ensureUserIsInactiveBoardMember(wekan libwekan.Wekan, user libwekan.User, board libwekan.Board) error {
	fields := logger.DataForMethod("ensureUserIsInactiveBoardMember")
	fields.AddAny("username", user.Username)
	fields.AddAny("board", board.Slug)
	logger.Debug(">>> vérifie la non-participation", fields)
	modified, err := wekan.EnsureUserIsInactiveBoardMember(context.Background(), board.ID, user.ID)
	if err != nil {
		return err
	}
	if modified {
		logger.Info(">>> désinscrit l'utilisateur", fields)
	}
	return nil
}

// fetchCurrentWekanBoardMembers lists the users currently present on the
// board (active or not), filtered down to "genuine" users, returning both the
// id→user map and the matching id list.
func fetchCurrentWekanBoardMembers(wekan libwekan.Wekan, board libwekan.Board) (map[libwekan.UserID]libwekan.User, []libwekan.UserID, error) {
	currentMembersIDs := mapSlice(board.Members, func(member libwekan.BoardMember) libwekan.UserID { return member.UserID })
	currentMembers, err := wekan.GetUsersFromIDs(context.Background(), currentMembersIDs)
	if err != nil {
		return nil, nil, err
	}
	currentUserMap := mapifySlice(currentMembers, libwekan.User.GetID)
	currentGenuineUserMap := selectMapByValue(currentUserMap, selectGenuineUserFunc(wekan))
	// BUG FIX: the ID list must come from the filtered (genuine) map, not
	// from the unfiltered one — the previous version could return IDs that
	// were absent from the map it returned alongside them.
	currentGenuineUserIDs := keys(currentGenuineUserMap)
	return currentGenuineUserMap, currentGenuineUserIDs, nil
}

// fetchExpectedWekanBoardMembers resolves the usernames we want active on the
// board into Wekan users, returning the id→user map and the id list.
func fetchExpectedWekanBoardMembers(wekan libwekan.Wekan, boardMembers Users) (map[libwekan.UserID]libwekan.User, []libwekan.UserID, error) {
	// Usernames to keep or (re)activate on the board.
	wantedMembersUsernames := []libwekan.Username{}
	// The admin user is a member of every board; add it here so it is never
	// forgotten in the configured users.
	wantedMembersUsernames = append(wantedMembersUsernames, wekan.AdminUsername())
	for username := range boardMembers {
		wantedMembersUsernames = append(wantedMembersUsernames, libwekan.Username(username))
	}
	wantedMembers, err := wekan.GetUsersFromUsernames(context.Background(), wantedMembersUsernames)
	if err != nil {
		return nil, nil, err
	}
	wantedUserMap := mapifySlice(wantedMembers, libwekan.User.GetID)
	return wantedUserMap, keys(wantedUserMap), err
}

// inferBoardsMember inverts the per-user board lists into a per-board user
// map, skipping empty board slugs.
func (users Users) inferBoardsMember() BoardsMembers {
	wekanBoardsUserSlice := make(map[libwekan.BoardSlug][]User)
	for _, user := range users {
		for _, boardSlug := range user.boards {
			if boardSlug != "" {
				boardSlug := libwekan.BoardSlug(boardSlug)
				wekanBoardsUserSlice[boardSlug] = append(wekanBoardsUserSlice[boardSlug], user)
			}
		}
	}
	wekanBoardsUsers := make(BoardsMembers)
	for boardSlug, userSlice := range wekanBoardsUserSlice {
		wekanBoardsUsers[boardSlug] = mapifySlice(userSlice, func(user User) Username { return user.email })
	}
	return wekanBoardsUsers
}

// addBoards guarantees an entry (possibly empty) for every given board and
// returns the (possibly freshly allocated) map.
func (boardsMembers BoardsMembers) addBoards(boards []libwekan.Board) BoardsMembers {
	if boardsMembers == nil {
		boardsMembers = make(BoardsMembers)
	}
	for _, b := range boards {
		if _, ok := boardsMembers[b.Slug]; !ok {
			boardsMembers[b.Slug] = make(Users)
		}
	}
	return boardsMembers
}
package main import ( "flag" "fmt" "io/ioutil" "log" "os" "path" "strings" ) var source = flag.String("source", "./tsconfig.lib.json", "Source tsconfig json file") var destination = flag.String("destination", "./tsconfig.lib.json", "Destination tsconfig json file") var libs = flag.String("libs", "./libs", "the folder to iterate") func main() { fn := os.Args[1] flag.Parse() switch { case fn == "tsconfig": tsconfig() case fn == "index": index() default: log.Fatal("Please provide one of tsconfig,index") } if fn != "" { fmt.Println(fn) return } } func index() { folders := readFolders(*libs) for _, folder := range folders { if !strings.Contains(folder.Name(), ".") { // move the file folderName := folder.Name() oldpath := path.Join(*libs, folderName, "index.ts") newpath := path.Join(*libs, folderName, "src", "index.ts") fmt.Println(oldpath, newpath) err := os.Rename(oldpath, newpath) if err != nil { fmt.Println("An error occurred") log.Fatal(err) } // update the contents fmt.Println("Updating index.ts contents") lines := readSource(newpath) replacer := strings.NewReplacer("/src", "") for i, line := range lines { if strings.Contains(line, "/src") { lines[i] = replacer.Replace(line) } } output := strings.Join(lines, "\n") ioutil.WriteFile(newpath, []byte(output), 0644) } } } func tsconfig() { lines := readSource(*source) folders := readFolders(*libs) modified := []string{} for _, folder := range folders { if !strings.Contains(folder.Name(), ".") { folderName := folder.Name() modified = append(modified, folderName) for i, line := range lines { if strings.Contains(line, "outDir") { lines[i] = ` "outDir": "../../dist/out-tsc/libs/` + folderName + `",` } } output := strings.Join(lines, "\n") newpath := path.Join(*libs, folder.Name(), *destination) ioutil.WriteFile(newpath, []byte(output), 0644) } } fmt.Println(modified, "created") } func readSource(path string) []string { f, err := ioutil.ReadFile(path) if err != nil { log.Fatal(err) } return strings.Split(string(f), "\n") } 
func readFolders(path string) (folders []os.FileInfo) { folders, err := ioutil.ReadDir(path) if err != nil { log.Fatal(err) } return folders }
package day18 func IsPalindrome(input string) bool { stack := Stack{}.NewStack() queue := Queue{}.NewQueue() for _, char := range input { stack.Push(char) queue.EnQueue(char) } for stack.Pop() == queue.DeQueue() && stack.Len()/2 > 0 { } return stack.Len()/2 == 0 }
package models

import (
	orm "go-admin/global"
	"go-admin/tools"
)

// Cust is the customer master-data model, persisted in table rcm_cust.
type Cust struct {
	Id             int    `json:"id" gorm:"type:int;primary_key"` //
	SysCode        string `json:"sysCode" gorm:"type:varchar(50);"`       // system code
	CustName       string `json:"custName" gorm:"type:varchar(128);"`     // customer name
	SimpleName     string `json:"simpleName" gorm:"type:varchar(128);"`   // mnemonic name
	IndustryType   string `json:"industryType" gorm:"type:varchar(255);"` // industry sector
	CustType       string `json:"custType" gorm:"type:varchar(255);"`     // customer type
	CustStatus     string `json:"custStatus" gorm:"type:varchar(255);"`   // customer status
	CustLevel      string `json:"custLevel" gorm:"type:varchar(255);"`    // customer star rating
	Origin         string `json:"origin" gorm:"type:varchar(255);"`       // customer source
	Location       string `json:"location" gorm:"type:varchar(255);"`     // region
	Address        string `json:"address" gorm:"type:varchar(255);"`      // detailed address
	EnterpriseSize string `json:"enterpriseSize" gorm:"type:varchar(255);"` // enterprise size
	ParentCust     string `json:"parentCust" gorm:"type:int;"`            // parent customer — NOTE(review): Go type is string but the column type is int; confirm
	Remark         string `json:"remark" gorm:"type:varchar(255);"`       // remarks
	CreateBy       string `json:"createBy" gorm:"type:varchar(128);"`     //
	UpdateBy       string `json:"updateBy" gorm:"type:varchar(128);"`     //
	DataScope      string `json:"dataScope" gorm:"-"` // data-permission scope (not persisted)
	Params         string `json:"params" gorm:"-"`    // extra query params (not persisted)
	BaseModel
}

// TableName returns the database table backing Cust.
func (Cust) TableName() string {
	return "rcm_cust"
}

// Create inserts e and returns the stored record.
func (e *Cust) Create() (Cust, error) {
	var doc Cust
	result := orm.Eloquent.Table(e.TableName()).Create(&e)
	if result.Error != nil {
		err := result.Error
		return doc, err
	}
	doc = *e
	return doc, nil
}

// Get fetches the first Cust matching e's non-zero filter fields
// (id, name, industry, type, status, level, origin).
func (e *Cust) Get() (Cust, error) {
	var doc Cust
	table := orm.Eloquent.Table(e.TableName())
	if e.Id != 0 {
		table = table.Where("id = ?", e.Id)
	}
	if e.CustName != "" {
		table = table.Where("cust_name = ?", e.CustName)
	}
	if e.IndustryType != "" {
		table = table.Where("industry_type = ?", e.IndustryType)
	}
	if e.CustType != "" {
		table = table.Where("cust_type = ?", e.CustType)
	}
	if e.CustStatus != "" {
		table = table.Where("cust_status = ?", e.CustStatus)
	}
	if e.CustLevel != "" {
		table = table.Where("cust_level = ?", e.CustLevel)
	}
	if e.Origin != "" {
		table = table.Where("origin = ?", e.Origin)
	}
	if err := table.First(&doc).Error; err != nil {
		return doc, err
	}
	return doc, nil
}

// GetPage fetches a page of Custs matching e's non-zero filter fields and
// returns the page, the total (unpaginated) count, and any error.
func (e *Cust) GetPage(pageSize int, pageIndex int) ([]Cust, int, error) {
	var doc []Cust

	table := orm.Eloquent.Select("*").Table(e.TableName())
	if e.CustName != "" {
		table = table.Where("cust_name = ?", e.CustName)
	}
	if e.IndustryType != "" {
		table = table.Where("industry_type = ?", e.IndustryType)
	}
	if e.CustType != "" {
		table = table.Where("cust_type = ?", e.CustType)
	}
	if e.CustStatus != "" {
		table = table.Where("cust_status = ?", e.CustStatus)
	}
	if e.CustLevel != "" {
		table = table.Where("cust_level = ?", e.CustLevel)
	}
	if e.Origin != "" {
		table = table.Where("origin = ?", e.Origin)
	}

	// Data-permission scoping (remove this block if data permissions are
	// not needed).
	dataPermission := new(DataPermission)
	dataPermission.UserId, _ = tools.StringToInt(e.DataScope)
	table, err := dataPermission.GetDataScope(e.TableName(), table)
	if err != nil {
		return nil, 0, err
	}

	var count int
	if err := table.Offset((pageIndex - 1) * pageSize).Limit(pageSize).Find(&doc).Error; err != nil {
		return nil, 0, err
	}
	// Count the full (unpaginated) result set of live rows.
	table.Where("`deleted_at` IS NULL").Count(&count)
	return doc, count, nil
}

// Update loads the record with the given id and applies e's non-zero fields.
func (e *Cust) Update(id int) (update Cust, err error) {
	if err = orm.Eloquent.Table(e.TableName()).Where("id = ?", id).First(&update).Error; err != nil {
		return
	}
	// Arg 1 (Model) is the record being modified; arg 2 (Updates) carries
	// the new values.
	if err = orm.Eloquent.Table(e.TableName()).Model(&update).Updates(&e).Error; err != nil {
		return
	}
	return
}

// Delete removes the Cust with the given id.
func (e *Cust) Delete(id int) (success bool, err error) {
	if err = orm.Eloquent.Table(e.TableName()).Where("id = ?", id).Delete(&Cust{}).Error; err != nil {
		success = false
		return
	}
	success = true
	return
}

// BatchDelete removes every Cust whose id is in the given list.
func (e *Cust) BatchDelete(id []int) (Result bool, err error) {
	if err = orm.Eloquent.Table(e.TableName()).Where("id in (?)", id).Delete(&Cust{}).Error; err != nil {
		return
	}
	Result = true
	return
}
/* * Copyright (c) 2020. Ant Group. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 */ package snapshot import ( "context" "fmt" "github.com/containerd/containerd/log" "github.com/containerd/containerd/snapshots" "github.com/containerd/containerd/snapshots/storage" "github.com/pkg/errors" ) type WalkFunc = func(snapshots.Info) bool func GetSnapshotInfo(ctx context.Context, ms *storage.MetaStore, key string) (string, snapshots.Info, snapshots.Usage, error) { ctx, t, err := ms.TransactionContext(ctx, false) if err != nil { return "", snapshots.Info{}, snapshots.Usage{}, err } defer t.Rollback() id, info, usage, err := storage.GetInfo(ctx, key) if err != nil { return "", snapshots.Info{}, snapshots.Usage{}, err } return id, info, usage, nil } func GetSnapshot(ctx context.Context, ms *storage.MetaStore, key string) (*storage.Snapshot, error) { ctx, t, err := ms.TransactionContext(ctx, false) if err != nil { return nil, err } s, err := storage.GetSnapshot(ctx, key) if err != nil { return nil, errors.Wrap(err, "failed to get active mount") } err = t.Rollback() if err != nil { return nil, errors.Wrap(err, "failed to rollback transaction") } return &s, nil } func FindSnapshot(ctx context.Context, ms *storage.MetaStore, key string, fn WalkFunc) (string, snapshots.Info, error) { ctx, t, err := ms.TransactionContext(ctx, false) if err != nil { return "", snapshots.Info{}, err } defer t.Rollback() for cKey := key; cKey != ""; { id, info, _, err := storage.GetInfo(ctx, cKey) if err != nil { log.G(ctx).WithError(err).Warnf("failed to get info of %q", cKey) return "", snapshots.Info{}, err } if fn(info) { return id, info, nil } else { log.G(ctx).Infof("id %s is data layer, continue to check parent layer", id) } cKey = info.Parent } return "", snapshots.Info{}, fmt.Errorf("failed to find meta layer of key %s", key) } func UpdateSnapshotInfo(ctx context.Context, ms *storage.MetaStore, info snapshots.Info, fieldPaths ...string) (snapshots.Info, error) { ctx, t, err := 
ms.TransactionContext(ctx, true) if err != nil { return snapshots.Info{}, err } info, err = storage.UpdateInfo(ctx, info, fieldPaths...) if err != nil { t.Rollback() return snapshots.Info{}, err } if err := t.Commit(); err != nil { return snapshots.Info{}, err } return info, nil }
package catm import ( "encoding/xml" "github.com/thought-machine/finance-messaging/iso20022" ) type Document00300105 struct { XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:catm.003.001.05 Document"` Message *AcceptorConfigurationUpdateV05 `xml:"AccptrCfgtnUpd"` } func (d *Document00300105) AddMessage() *AcceptorConfigurationUpdateV05 { d.Message = new(AcceptorConfigurationUpdateV05) return d.Message } // Update of the acceptor configuration to be downloaded by the terminal management system. type AcceptorConfigurationUpdateV05 struct { // Set of characteristics related to the transfer of the acceptor parameters. Header *iso20022.Header27 `xml:"Hdr"` // Acceptor configuration to be downloaded from the terminal management system. AcceptorConfiguration *iso20022.AcceptorConfiguration5 `xml:"AccptrCfgtn"` // Trailer of the message containing a MAC or a digital signature. SecurityTrailer *iso20022.ContentInformationType12 `xml:"SctyTrlr,omitempty"` } func (a *AcceptorConfigurationUpdateV05) AddHeader() *iso20022.Header27 { a.Header = new(iso20022.Header27) return a.Header } func (a *AcceptorConfigurationUpdateV05) AddAcceptorConfiguration() *iso20022.AcceptorConfiguration5 { a.AcceptorConfiguration = new(iso20022.AcceptorConfiguration5) return a.AcceptorConfiguration } func (a *AcceptorConfigurationUpdateV05) AddSecurityTrailer() *iso20022.ContentInformationType12 { a.SecurityTrailer = new(iso20022.ContentInformationType12) return a.SecurityTrailer }
package sdl2

import (
	"fmt"
	"io/ioutil"

	"github.com/veandco/go-sdl2/mix"

	"github.com/evelritual/goose/audio"
)

// SDL_mixer volume range: 0 is silent, 128 is maximum.
const (
	maxVol = 128
	minVol = 0
)

// Player wraps needed methods for the audio.Player interface.
type Player struct {
	currVol int // current mixer volume in SDL units (minVol..maxVol)
}

// Sound holds SDL chunk data for playback in use with the audio.Sound interface.
type Sound struct {
	chunk *mix.Chunk
}

// NewAudioPlayer initializes the SDL mixer and returns a default Player.
// Player must be closed manually.
func (s *SDL2) NewAudioPlayer() (audio.Player, error) {
	// 44.1 kHz stereo with a 4096-sample buffer.
	err := mix.OpenAudio(44100, mix.DEFAULT_FORMAT, 2, 4096)
	if err != nil {
		return nil, fmt.Errorf("error opening sdl mix: %v", err)
	}

	return &Player{
		currVol: maxVol, // start at full volume
	}, nil
}

// SetVolume sets the volume across the entire SDL player. This will not hold
// true if new mixing channels are allocated beyond the default.
// volume is a fraction in [0.0, 1.0] mapped onto the SDL range.
func (p *Player) SetVolume(volume float32) error {
	if volume < 0.0 || volume > 1.0 {
		return fmt.Errorf("volume out of range")
	}
	v := int(float32(maxVol) * volume)

	// paranoia about floating point error
	if v < minVol {
		v = minVol
	} else if v > maxVol {
		v = maxVol
	}

	p.currVol = v
	// Channel -1 applies the volume to all mixing channels at once.
	mix.Volume(-1, v)
	return nil
}

// NewSound loads a WAV file into memory and converts it into a chunk. Sound
// must be closed manually.
// NOTE(review): mix.QuickLoadWAV wraps the raw bytes in d — confirm the chunk
// keeps that memory alive for as long as the Sound is playable.
func (p *Player) NewSound(soundPath string) (audio.Sound, error) {
	d, err := ioutil.ReadFile(soundPath)
	if err != nil {
		return nil, fmt.Errorf("error loading sound file: %v", err)
	}
	c, err := mix.QuickLoadWAV(d)
	if err != nil {
		return nil, fmt.Errorf("error loading wav: %v", err)
	}
	return &Sound{
		chunk: c,
	}, nil
}

// Close releases audio player resources and stops all playback.
func (p *Player) Close() error {
	mix.CloseAudio()
	return nil
}

// Play begins playback of the Sound through the sdl mixer.
// Channel -1 lets the mixer pick the first free channel; loops=0 plays once.
func (s *Sound) Play() error {
	_, err := s.chunk.Play(-1, 0)
	if err != nil {
		return fmt.Errorf("error playing sound: %v", err)
	}
	return nil
}

// Close frees the sdl chunk resource.
func (s *Sound) Close() error {
	s.chunk.Free()
	return nil
}
/* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package primitive import ( "context" ) // Invoker finish a message invoke on producer/consumer. type Invoker func(ctx context.Context, req, reply interface{}) error // Interceptor intercepts the invoke of a producer/consumer on messages. // In PushConsumer call, the req is []*MessageExt type and the reply is ConsumeResultHolder, // use type assert to get real type. type Interceptor func(ctx context.Context, req, reply interface{}, next Invoker) error func ChainInterceptors(interceptors ...Interceptor) Interceptor { if len(interceptors) == 0 { return nil } if len(interceptors) == 1 { return interceptors[0] } return func(ctx context.Context, req, reply interface{}, invoker Invoker) error { return interceptors[0](ctx, req, reply, getChainedInterceptor(interceptors, 0, invoker)) } } func getChainedInterceptor(interceptors []Interceptor, cur int, finalInvoker Invoker) Invoker { if cur == len(interceptors)-1 { return finalInvoker } return func(ctx context.Context, req, reply interface{}) error { return interceptors[cur+1](ctx, req, reply, getChainedInterceptor(interceptors, cur+1, finalInvoker)) } }
package server import "github.com/majgis/htmls/token" // A section of a HTMLTemplate used for final rendering type templateSection struct { htmlToken token.HTMLToken bytes []byte ch chan responseChunk }
package mandrill import ( "log" "testing" ) func TestUsersInfo(t *testing.T) { u := UsersAPI{} info, err := u.GetInfo("NjixlbCzdB14TazGCnYyEQ") if err != nil { log.Println("UsersAPI GetInfo Error") log.Print(err) } log.Println("UsersAPI GetInfo Successful") log.Printf("UsersAPI GetInfo Results: %#v\n\n", info) } func TestUsersPing(t *testing.T) { u := UsersAPI{} ping, err := u.Ping("NjixlbCzdB14TazGCnYyEQ") if err != nil { log.Println("UsersAPI Ping Error") log.Print(err) } log.Println("UsersAPI Ping Successful") log.Printf("UsersAPI Ping Results: %#v\n\n", ping) } func TestUsersSenders(t *testing.T) { u := UsersAPI{} senders, err := u.Senders("NjixlbCzdB14TazGCnYyEQ") if err != nil { log.Println("UsersAPI Senders Error") log.Print(err) } log.Println("UsersAPI Senders Successful") log.Printf("UsersAPI Senders Results: %#v\n\n", senders) }
package nut import ( "time" "github.com/gin-gonic/gin" "github.com/go-pg/pg" ) func (p *AdminPlugin) indexLinks(l string, c *gin.Context) (interface{}, error) { var items []Link err := p.DB.Model(&items). Where("lang = ?", l). Order("loc ASC").Order("sort_order ASC").Select() return items, err } func (p *AdminPlugin) showLink(l string, c *gin.Context) (interface{}, error) { var item Link err := p.DB.Model(&item). Where("id = ?", c.Param("id")). Limit(1).Select() return item, err } type fmLink struct { Href string `json:"href" binding:"required"` Label string `json:"label" binding:"required"` Loc string `json:"loc" binding:"required"` SortOrder int `json:"sortOrder"` } func (p *AdminPlugin) createLink(l string, c *gin.Context) (interface{}, error) { var fm fmLink if err := c.BindJSON(&fm); err != nil { return nil, err } err := p.DB.RunInTransaction(func(tx *pg.Tx) error { return tx.Insert(&Link{ Href: fm.Href, Label: fm.Label, Loc: fm.Loc, SortOrder: fm.SortOrder, Lang: l, UpdatedAt: time.Now(), }) }) return gin.H{}, err } func (p *AdminPlugin) updateLink(l string, c *gin.Context) (interface{}, error) { var fm fmLink if err := c.BindJSON(&fm); err != nil { return nil, err } err := p.DB.RunInTransaction(func(tx *pg.Tx) error { _, err := tx.Model(&Link{ Href: fm.Href, Label: fm.Label, Loc: fm.Loc, SortOrder: fm.SortOrder, Lang: l, UpdatedAt: time.Now(), }). Column("href", "label", "loc", "sort_order", "lang", "updated_at"). Where("id = ?", c.Param("id")). Update() return err }) return gin.H{}, err } func (p *AdminPlugin) destroyLink(l string, c *gin.Context) (interface{}, error) { _, err := p.DB.Model(&Link{}).Where("id = ?", c.Param("id")).Delete() return gin.H{}, err }
package main import "fmt" func main() { a := make([]int, 5) fmt.Println(a, cap(a), len(a)) b := make([]int, 5, 1000) fmt.Println(b, len(b), cap(b)) }
package main import ( "fmt" ) func main() { p1 := struct { firstName string lastName string }{ firstName: "James", lastName: "Bond", } fmt.Println("p1 :: ", p1) fmt.Println("Individual Details :: ") fmt.Println("\t First Name :: ", p1.firstName) fmt.Println("\t Last Name :: ", p1.lastName) }
package tpl

import (
	"bytes"
	"encoding/xml"
	"fmt"
	"html/template"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"strconv"
	"strings"

	"github.com/anihouse/bot/config"
	"github.com/bwmarrin/discordgo"
)

var (
	// tpls holds every template found under config.Bot.Templates, keyed by
	// its path relative to that directory.
	tpls *template.Template
)

// Init loads every file under the configured templates directory into tpls,
// aborting the program on any error.
func Init() {
	tpls = template.New("").Funcs(funcs)

	fmt.Println("Loading templates:")
	err := filepath.Walk(config.Bot.Templates, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() {
			return nil
		}
		data, err := ioutil.ReadFile(path)
		if err != nil {
			return err
		}
		// Template names are paths relative to the templates root.
		name, err := filepath.Rel(config.Bot.Templates, path)
		if err != nil {
			return err
		}
		_, err = tpls.New(name).Parse(string(data))
		return err
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, tpl := range tpls.Templates() {
		fmt.Println("Load", tpl.Name())
	}
}

// get executes template name with data and decodes the XML it produced into
// a schema value.
func get(name string, data interface{}) (*schema, error) {
	buf := bytes.NewBufferString("")
	err := tpls.ExecuteTemplate(buf, name, data)
	if err != nil {
		return nil, err
	}
	var result schema
	s := bytes.NewBuffer(normalizeSpaces(buf.Bytes()))
	if err := xml.NewDecoder(s).Decode(&result); err != nil {
		return nil, err
	}
	return &result, nil
}

// buildEmbed converts the parsed embed section of m (if any) into a discordgo
// embed, or returns nil when the template declared none. Extracted so ToSend
// and ToEdit share one implementation — previously the ~30 lines below were
// duplicated verbatim in both functions.
func buildEmbed(m *schema) (*discordgo.MessageEmbed, error) {
	embed := m.Embed
	if embed == nil {
		return nil, nil
	}
	result := new(discordgo.MessageEmbed)
	if title := embed.Title; title != nil {
		result.Title = *title
	}
	if color := embed.Color; color != nil {
		// Colors must be "#rrggbb"; discord wants the integer value.
		if len([]rune(*color)) != 7 {
			return nil, fmt.Errorf("Unexpected color format: '%s'", *color)
		}
		c, err := strconv.ParseInt((*color)[1:], 16, 32)
		if err != nil {
			return nil, err
		}
		result.Color = int(c)
	}
	if description := strings.TrimSpace(embed.Description); description != "" {
		result.Description = description
	}
	if footer := embed.Footer; footer != nil {
		result.Footer = new(discordgo.MessageEmbedFooter)
		result.Footer.Text = *footer
	}
	if fields := embed.Fields; fields != nil {
		result.Fields = make([]*discordgo.MessageEmbedField, 0)
		for _, field := range *fields {
			result.Fields = append(result.Fields, &discordgo.MessageEmbedField{
				Inline: field.Inline,
				Name:   strings.TrimSpace(field.Name),
				Value:  strings.TrimSpace(field.Value),
			})
		}
	}
	return result, nil
}

// ToSend renders template name with data into a message suitable for sending.
func ToSend(name string, data interface{}) (*discordgo.MessageSend, error) {
	m, err := get(name, data)
	if err != nil {
		return nil, err
	}
	result := new(discordgo.MessageSend)
	if content := strings.TrimSpace(m.Content); content != "" {
		result.Content = content
	}
	embed, err := buildEmbed(m)
	if err != nil {
		return nil, err
	}
	result.Embed = embed
	return result, nil
}

// ToEdit renders template name with data into a message-edit payload.
func ToEdit(name string, data interface{}) (*discordgo.MessageEdit, error) {
	m, err := get(name, data)
	if err != nil {
		return nil, err
	}
	result := new(discordgo.MessageEdit)
	if content := strings.TrimSpace(m.Content); content != "" {
		result.Content = &content
	}
	embed, err := buildEmbed(m)
	if err != nil {
		return nil, err
	}
	result.Embed = embed
	return result, nil
}
package iris

import (
	"bytes"
	"net/http"
	"net/url"
	"strings"
)

// PathParameter is a single named path parameter: a Key and its Value.
type PathParameter struct {
	Key   string
	Value string
}

// PathParameters is a slice of PathParameter.
// It's a slice of PathParameter type rather than a map because iterating a
// short slice is faster than a map lookup for the typical handful of
// parameters.
type PathParameters []PathParameter

// Get returns the value for key inside these Parameters.
// If no parameter with this key is present it returns an empty string.
func (params PathParameters) Get(key string) string {
	for _, p := range params {
		if p.Key == key {
			return p.Value
		}
	}
	return ""
}

// Set appends a PathParameter to the PathParameters.
// BUG FIX: the receiver must be a pointer — with the previous value receiver
// the append mutated a copy of the slice header and the caller never saw the
// new element.
func (params *PathParameters) Set(key string, value string) {
	*params = append(*params, PathParameter{key, value})
}

// String returns a string representation of all parameters this
// PathParameters object keeps; it has the form key1=value1,key2=value2...
func (params PathParameters) String() string {
	var buff bytes.Buffer
	for i := 0; i < len(params); i++ {
		buff.WriteString(params[i].Key)
		buff.WriteString("=")
		buff.WriteString(params[i].Value)
		if i < len(params)-1 {
			buff.WriteString(",")
		}
	}
	return buff.String()
}

// ParseParams receives a string and returns PathParameters (a slice of
// PathParameter). The received string must have the form
// key1=value1,key2=value2... — any segment without '=' rejects the whole
// input and nil is returned.
func ParseParams(str string) PathParameters {
	_paramsstr := strings.Split(str, ",")
	if len(_paramsstr) == 0 {
		return nil
	}
	params := make(PathParameters, 0) // PathParameters{}
	for i := 0; i < len(_paramsstr); i++ {
		idxOfEq := strings.IndexRune(_paramsstr[i], '=')
		if idxOfEq == -1 {
			// malformed segment — reject the whole input
			return nil
		}
		key := _paramsstr[i][:idxOfEq]
		val := _paramsstr[i][idxOfEq+1:]
		params = append(params, PathParameter{key, val})
	}
	return params
}

// URLParams returns the URL get parameters from the request's url query.
// URL.Query() is a complete function; we don't have to do anything else here.
func URLParams(req *http.Request) url.Values {
	return req.URL.Query()
}

// URLParam returns the named get parameter from a request, if any.
func URLParam(req *http.Request, key string) string {
	return req.URL.Query().Get(key)
}
package main import ( "fmt" "os" "jvmgo_c/ch8/cmd" "jvmgo_c/ch8/classpath" "jvmgo_c/ch8/rtda/heap" "strings" "jvmgo_c/ch8/interpreter" ) func main() { cmd := cmd.ParseCmd() if cmd.VersionFlag { fmt.Println("version 0.0.1") }else if cmd.HelpFlag { fmt.Printf("Usage: %s [-option] class [args...]\n",os.Args[0]) } else{ startJVM(cmd) } } func startJVM(cmd *cmd.Cmd) { cp:=classpath.Parse(cmd.XjreOption,cmd.CpOption) classLoader:= heap.NewClassLoader(cp,cmd.VerboseClassFlag) className := strings.Replace(cmd.Class,".","/",-1) mainClass := classLoader.LoadClass(className) mainMethod := mainClass.GetMainMethod() if mainMethod != nil { interpreter.Interpret(mainMethod,cmd.VerboseInstFlag,cmd.Args) } else { fmt.Printf("Main method not found in class %s\n",cmd.Class) } }
package git import ( "context" "encoding/json" "fmt" "net/http" "os" "github.com/google/go-github/v39/github" "github.com/labstack/echo/v4" "golang.org/x/oauth2" ) type ( handler struct { confPath string } Config struct { Owner string Repo string Token string } ) func NewHandler(confPath string) *handler { return &handler{confPath: confPath} } func (h *handler) Register(g *echo.Group) { g.GET("/commit/:hash", h.getCommit) } func (h *handler) getCommit(c echo.Context) error { cfg, err := h.readConfig() if err != nil { return echo.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf("failed to read config: %v", err)) } client := github.NewClient(oauth2.NewClient(oauth2.NoContext, oauth2.StaticTokenSource(&oauth2.Token{AccessToken: cfg.Token}))) commit, _, err := client.Git.GetCommit(context.Background(), cfg.Owner, cfg.Repo, c.Param("hash")) if err != nil { return echo.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf("failed to get commit: %v", err)) } return c.JSON(http.StatusOK, commit) } func (h *handler) readConfig() (*Config, error) { f, err := os.Open(h.confPath) if err != nil { return nil, fmt.Errorf("failed to open config file: %w", err) } defer f.Close() var cfg Config if err := json.NewDecoder(f).Decode(&cfg); err != nil { return nil, fmt.Errorf("failed to decode config file: %w", err) } return &cfg, nil }
package main import ( "fmt" ) func obterNota(nota float64) string { if(nota >= 6) { return "Aprovado" } return "Reprovado" // return "aprovado" ? nota >= 6 : "reporvado" // Não existe operador ternário igual ao C } func main() { fmt.Println(obterNota(6.2)) }
package cmd import ( "fmt" "log" cups "github.com/jbpratt78/go-cups" "github.com/spf13/cobra" ) var optionsCmd = &cobra.Command{ Use: "options", Short: "Options from the printer's attribute list", Run: func(cmd *cobra.Command, args []string) { if len(args) < 1 { log.Fatal("not enough args") } conn := cups.NewDefaultConnection() currDest := conn.Dests[0] arg := args[0] if arg == "all" { fmt.Println("Current options for ", currDest.Name) for k, v := range currDest.GetOptions() { fmt.Printf("\t%s = %s\n", k, v) } } else { //fmt.Println("Current option: ", arg, currDest.GetOption(arg)) option, err := currDest.GetOption(arg) if err != nil { log.Fatal(err) } fmt.Println("Current option for", arg, option) } }, } func init() { rootCmd.AddCommand(optionsCmd) }
package main import ( "log" "github.com/petar/GoLLRB/llrb" ) type Num struct { num int } func NewNum(num int) *Num { return &Num{num: num} } func (n *Num) Less(than llrb.Item) bool { return n.num < than.(*Num).num } func Iterator(item llrb.Item) bool { log.Println("item: ", item) return true } func main() { tree := llrb.New() tree.InsertNoReplace(NewNum(1)) tree.InsertNoReplace(NewNum(1)) tree.InsertNoReplace(NewNum(3)) tree.InsertNoReplace(NewNum(3)) tree.InsertNoReplace(NewNum(2)) log.Printf("len: %v", tree.Len()) tree.AscendGreaterOrEqual(NewNum(1), Iterator) deleted := tree.Delete(NewNum(3)) log.Println("deleted: ", deleted) len := tree.Len() for i := 0; i < len; i++ { itemMin := tree.Min() itemMax := tree.Max() log.Printf("min %v max %v", itemMin, itemMax) tree.DeleteMin() } }
package keys import ( "github.com/spf13/cobra" "github.com/foundriesio/fioctl/client" "github.com/foundriesio/fioctl/subcommands" ) var ( api *client.Api ) var cmd = &cobra.Command{ Use: "keys", Short: "Manage keys in use by your factory fleet", PersistentPreRun: func(cmd *cobra.Command, args []string) { api = subcommands.Login(cmd) }, } var caCmd = &cobra.Command{ Use: "ca", Short: "Manage Public Key Infrastructure for your device gateway", Long: `Every factory can have its own dedicated device gateway. This allows customers to own the PKI infrastructure of their factory. This infrastructure is used to manage mutual TLS between your devices and the Foundries.io device gateway.`, } func NewCommand() *cobra.Command { subcommands.RequireFactory(caCmd) cmd.AddCommand(caCmd) return cmd }
/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package endpointslice import ( "sync" discovery "k8s.io/api/discovery/v1beta1" "k8s.io/apimachinery/pkg/types" ) // endpointSliceResourceVersions tracks expected EndpointSlice resource versions // by EndpointSlice name. type endpointSliceResourceVersions map[string]string // endpointSliceTracker tracks EndpointSlices and their associated resource // versions to help determine if a change to an EndpointSlice has been processed // by the EndpointSlice controller. type endpointSliceTracker struct { // lock protects resourceVersionsByService. lock sync.Mutex // resourceVersionsByService tracks the list of EndpointSlices and // associated resource versions expected for a given Service. resourceVersionsByService map[types.NamespacedName]endpointSliceResourceVersions } // newEndpointSliceTracker creates and initializes a new endpointSliceTracker. func newEndpointSliceTracker() *endpointSliceTracker { return &endpointSliceTracker{ resourceVersionsByService: map[types.NamespacedName]endpointSliceResourceVersions{}, } } // Has returns true if the endpointSliceTracker has a resource version for the // provided EndpointSlice. 
func (est *endpointSliceTracker) Has(endpointSlice *discovery.EndpointSlice) bool { est.lock.Lock() defer est.lock.Unlock() rrv := est.relatedResourceVersions(endpointSlice) _, ok := rrv[endpointSlice.Name] return ok } // Stale returns true if this endpointSliceTracker does not have a resource // version for the provided EndpointSlice or it does not match the resource // version of the provided EndpointSlice. func (est *endpointSliceTracker) Stale(endpointSlice *discovery.EndpointSlice) bool { est.lock.Lock() defer est.lock.Unlock() rrv := est.relatedResourceVersions(endpointSlice) return rrv[endpointSlice.Name] != endpointSlice.ResourceVersion } // Update adds or updates the resource version in this endpointSliceTracker for // the provided EndpointSlice. func (est *endpointSliceTracker) Update(endpointSlice *discovery.EndpointSlice) { est.lock.Lock() defer est.lock.Unlock() rrv := est.relatedResourceVersions(endpointSlice) rrv[endpointSlice.Name] = endpointSlice.ResourceVersion } // Delete removes the resource version in this endpointSliceTracker for the // provided EndpointSlice. func (est *endpointSliceTracker) Delete(endpointSlice *discovery.EndpointSlice) { est.lock.Lock() defer est.lock.Unlock() rrv := est.relatedResourceVersions(endpointSlice) delete(rrv, endpointSlice.Name) } // relatedResourceVersions returns the set of resource versions tracked for the // Service corresponding to the provided EndpointSlice. If no resource versions // are currently tracked for this service, an empty set is initialized. func (est *endpointSliceTracker) relatedResourceVersions(endpointSlice *discovery.EndpointSlice) endpointSliceResourceVersions { serviceNN := getServiceNN(endpointSlice) vers, ok := est.resourceVersionsByService[serviceNN] if !ok { vers = endpointSliceResourceVersions{} est.resourceVersionsByService[serviceNN] = vers } return vers } // getServiceNN returns a namespaced name for the Service corresponding to the // provided EndpointSlice. 
func getServiceNN(endpointSlice *discovery.EndpointSlice) types.NamespacedName { serviceName, _ := endpointSlice.Labels[discovery.LabelServiceName] return types.NamespacedName{Name: serviceName, Namespace: endpointSlice.Namespace} } // managedByChanged returns true if one of the provided EndpointSlices is // managed by the EndpointSlice controller while the other is not. func managedByChanged(endpointSlice1, endpointSlice2 *discovery.EndpointSlice) bool { return managedByController(endpointSlice1) != managedByController(endpointSlice2) } // managedByController returns true if the controller of the provided // EndpointSlices is the EndpointSlice controller. func managedByController(endpointSlice *discovery.EndpointSlice) bool { managedBy, _ := endpointSlice.Labels[discovery.LabelManagedBy] return managedBy == controllerName }
package dto import ( "github.com/d-d-j/ddj_master/common" "fmt" ) //Task is internal master structure that is used to match given input with result and control data processing. //All tasks are managed by TaskManager. type Task struct { Id int64 Type int32 AggregationType int32 Data Dto DataSize int32 ResponseChan chan *RestResponse // channel for sending response to (REST API) client ResultChan chan *Result // channel for sending result to worker } //This structure is used to get task with given Id. Data will be returned on BackChan type GetTaskRequest struct { TaskId int64 BackChan chan *Task } //This is Task constructor func NewTask(id int64, request RestRequest, resultChan chan *Result) *Task { t := new(Task) t.Id = id t.Type = request.Type t.AggregationType = common.CONST_UNINITIALIZED t.Data = request.Data t.DataSize = int32(request.Data.Size()) t.ResponseChan = request.Response t.ResultChan = resultChan if t.Type == common.TASK_SELECT { if query, ok := t.Data.(*Query); ok { t.AggregationType = query.AggregationType } else { panic("Type mismatch. TaskType select can be used only with Query data") } } return t } //This method create new request that contains current task. Task will be handle by specific deviceId func (t *Task) MakeRequest(deviceId int32) *Request { return NewRequest(t.Id, t.Type, t.DataSize, t.Data, deviceId) } //This method create new Request that will be handle by all node's devices func (t *Task) MakeRequestForAllGpus() *Request { return NewRequest(t.Id, t.Type, t.DataSize, t.Data, common.ALL_GPUs) } func (t *Task) String() string { return fmt.Sprintf("Task #%d, type: %d, size: %d", t.Id, t.Type, t.DataSize) }
package triplestore import ( "bytes" "fmt" "io/ioutil" "log" "net/http" "github.com/UFOKN/nabu/internal/graph" ) //BlazeUpdateNQ updates the blaze triple store func BlazeUpdateNQ(s []byte, sue string) ([]byte, error) { nt, g, err := graph.NQToNTCtx(string(s)) if err != nil { log.Printf("nqToNTCtx err: %s triple: %s", err, string(s)) } p := "INSERT DATA { " pab := []byte(p) gab := []byte(fmt.Sprintf(" graph <%s> { ", g)) u := " } }" uab := []byte(u) pab = append(pab, gab...) pab = append(pab, []byte(nt)...) pab = append(pab, uab...) // fmt.Println(string(pab)) //su := "INSERT DATA {" + s + "}" req, err := http.NewRequest("POST", sue, bytes.NewBuffer(pab)) if err != nil { log.Println(err) } req.Header.Set("Content-Type", "application/sparql-update") client := &http.Client{} resp, err := client.Do(req) if err != nil { log.Println(err) } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { log.Println("response Body:", string(body)) log.Println("response Status:", resp.Status) log.Println("response Headers:", resp.Header) } return body, err } // BlazeUpateNT TODO rename to jenaUpdateNQ(s []bytes, graph string) need a jenaUpateNT(s []bytes) func BlazeUpateNT(s []byte, sue string) ([]byte, error) { p := "INSERT DATA { " u := " }" pab := []byte(p) uab := []byte(u) pab = append(pab, s...) pab = append(pab, uab...) // fmt.Println(string(pab)) //su := "INSERT DATA {" + s + "}" req, err := http.NewRequest("POST", sue, bytes.NewBuffer(pab)) if err != nil { log.Println(err) } req.Header.Set("Content-Type", "application/sparql-update") client := &http.Client{} resp, err := client.Do(req) if err != nil { log.Println(err) } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { log.Println("response Body:", string(body)) log.Println("response Status:", resp.Status) log.Println("response Headers:", resp.Header) } return body, err }
package api_test

import (
	"context"
	"net/http"
	"reflect"
	"strings"
	"testing"

	"github.com/chanioxaris/go-datagovgr/datagovgrtest"
	"github.com/jarcoal/httpmock"
)

// Every CrimeJustice endpoint is covered by the same pair of tests:
// the *_Success variant stubs an HTTP 200 response carrying fixture data and
// expects it back unchanged, while the *_Error variant stubs an HTTP 500
// response and expects an error containing "unexpected status code".

// Verifies TrafficAccidents returns the fixture data on HTTP 200.
func TestCrimeJustice_TrafficAccidents_Success(t *testing.T) {
	ctx := context.Background()
	fixture := datagovgrtest.NewFixture(t)

	// httpmock intercepts the client's transport for the test's duration.
	httpmock.Activate()
	defer httpmock.DeactivateAndReset()
	httpmock.RegisterResponder(
		http.MethodGet,
		fixture.URLPaths.TrafficAccidents,
		httpmock.NewJsonResponderOrPanic(http.StatusOK, fixture.MockData.TrafficAccidents),
	)

	got, err := fixture.API.CrimeJustice.TrafficAccidents(ctx)
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}

	if !reflect.DeepEqual(got, fixture.MockData.TrafficAccidents) {
		t.Fatalf("Expected data %+v, but got %+v", fixture.MockData.TrafficAccidents, got)
	}
}

// Verifies TrafficAccidents surfaces a status-code error on HTTP 500.
func TestCrimeJustice_TrafficAccidents_Error(t *testing.T) {
	ctx := context.Background()
	fixture := datagovgrtest.NewFixture(t)
	expectedError := "unexpected status code"

	httpmock.Activate()
	defer httpmock.DeactivateAndReset()
	httpmock.RegisterResponder(
		http.MethodGet,
		fixture.URLPaths.TrafficAccidents,
		httpmock.NewJsonResponderOrPanic(http.StatusInternalServerError, nil),
	)

	_, err := fixture.API.CrimeJustice.TrafficAccidents(ctx)
	if err == nil {
		t.Fatal("Expected error, but got nil")
	}

	if !strings.Contains(err.Error(), expectedError) {
		t.Fatalf(`Expected error to contain "%v", but got "%v"`, expectedError, err)
	}
}

func TestCrimeJustice_RescueOperations_Success(t *testing.T) {
	ctx := context.Background()
	fixture := datagovgrtest.NewFixture(t)

	httpmock.Activate()
	defer httpmock.DeactivateAndReset()
	httpmock.RegisterResponder(
		http.MethodGet,
		fixture.URLPaths.RescueOperations,
		httpmock.NewJsonResponderOrPanic(http.StatusOK, fixture.MockData.RescueOperations),
	)

	got, err := fixture.API.CrimeJustice.RescueOperations(ctx)
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}

	if !reflect.DeepEqual(got, fixture.MockData.RescueOperations) {
		t.Fatalf("Expected data %+v, but got %+v", fixture.MockData.RescueOperations, got)
	}
}

func TestCrimeJustice_RescueOperations_Error(t *testing.T) {
	ctx := context.Background()
	fixture := datagovgrtest.NewFixture(t)
	expectedError := "unexpected status code"

	httpmock.Activate()
	defer httpmock.DeactivateAndReset()
	httpmock.RegisterResponder(
		http.MethodGet,
		fixture.URLPaths.RescueOperations,
		httpmock.NewJsonResponderOrPanic(http.StatusInternalServerError, nil),
	)

	_, err := fixture.API.CrimeJustice.RescueOperations(ctx)
	if err == nil {
		t.Fatal("Expected error, but got nil")
	}

	if !strings.Contains(err.Error(), expectedError) {
		t.Fatalf(`Expected error to contain "%v", but got "%v"`, expectedError, err)
	}
}

func TestCrimeJustice_TrafficViolations_Success(t *testing.T) {
	ctx := context.Background()
	fixture := datagovgrtest.NewFixture(t)

	httpmock.Activate()
	defer httpmock.DeactivateAndReset()
	httpmock.RegisterResponder(
		http.MethodGet,
		fixture.URLPaths.TrafficViolations,
		httpmock.NewJsonResponderOrPanic(http.StatusOK, fixture.MockData.TrafficViolations),
	)

	got, err := fixture.API.CrimeJustice.TrafficViolations(ctx)
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}

	if !reflect.DeepEqual(got, fixture.MockData.TrafficViolations) {
		t.Fatalf("Expected data %+v, but got %+v", fixture.MockData.TrafficViolations, got)
	}
}

func TestCrimeJustice_TrafficViolations_Error(t *testing.T) {
	ctx := context.Background()
	fixture := datagovgrtest.NewFixture(t)
	expectedError := "unexpected status code"

	httpmock.Activate()
	defer httpmock.DeactivateAndReset()
	httpmock.RegisterResponder(
		http.MethodGet,
		fixture.URLPaths.TrafficViolations,
		httpmock.NewJsonResponderOrPanic(http.StatusInternalServerError, nil),
	)

	_, err := fixture.API.CrimeJustice.TrafficViolations(ctx)
	if err == nil {
		t.Fatal("Expected error, but got nil")
	}

	if !strings.Contains(err.Error(), expectedError) {
		t.Fatalf(`Expected error to contain "%v", but got "%v"`, expectedError, err)
	}
}

func TestCrimeJustice_CrimeStatistics_Success(t *testing.T) {
	ctx := context.Background()
	fixture := datagovgrtest.NewFixture(t)

	httpmock.Activate()
	defer httpmock.DeactivateAndReset()
	httpmock.RegisterResponder(
		http.MethodGet,
		fixture.URLPaths.CrimeStatistics,
		httpmock.NewJsonResponderOrPanic(http.StatusOK, fixture.MockData.CrimeStatistics),
	)

	got, err := fixture.API.CrimeJustice.CrimeStatistics(ctx)
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}

	if !reflect.DeepEqual(got, fixture.MockData.CrimeStatistics) {
		t.Fatalf("Expected data %+v, but got %+v", fixture.MockData.CrimeStatistics, got)
	}
}

func TestCrimeJustice_CrimeStatistics_Error(t *testing.T) {
	ctx := context.Background()
	fixture := datagovgrtest.NewFixture(t)
	expectedError := "unexpected status code"

	httpmock.Activate()
	defer httpmock.DeactivateAndReset()
	httpmock.RegisterResponder(
		http.MethodGet,
		fixture.URLPaths.CrimeStatistics,
		httpmock.NewJsonResponderOrPanic(http.StatusInternalServerError, nil),
	)

	_, err := fixture.API.CrimeJustice.CrimeStatistics(ctx)
	if err == nil {
		t.Fatal("Expected error, but got nil")
	}

	if !strings.Contains(err.Error(), expectedError) {
		t.Fatalf(`Expected error to contain "%v", but got "%v"`, expectedError, err)
	}
}

func TestCrimeJustice_FinancialCrimes_Success(t *testing.T) {
	ctx := context.Background()
	fixture := datagovgrtest.NewFixture(t)

	httpmock.Activate()
	defer httpmock.DeactivateAndReset()
	httpmock.RegisterResponder(
		http.MethodGet,
		fixture.URLPaths.FinancialCrimes,
		httpmock.NewJsonResponderOrPanic(http.StatusOK, fixture.MockData.FinancialCrimes),
	)

	got, err := fixture.API.CrimeJustice.FinancialCrimes(ctx)
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}

	if !reflect.DeepEqual(got, fixture.MockData.FinancialCrimes) {
		t.Fatalf("Expected data %+v, but got %+v", fixture.MockData.FinancialCrimes, got)
	}
}

func TestCrimeJustice_FinancialCrimes_Error(t *testing.T) {
	ctx := context.Background()
	fixture := datagovgrtest.NewFixture(t)
	expectedError := "unexpected status code"

	httpmock.Activate()
	defer httpmock.DeactivateAndReset()
	httpmock.RegisterResponder(
		http.MethodGet,
		fixture.URLPaths.FinancialCrimes,
		httpmock.NewJsonResponderOrPanic(http.StatusInternalServerError, nil),
	)

	_, err := fixture.API.CrimeJustice.FinancialCrimes(ctx)
	if err == nil {
		t.Fatal("Expected error, but got nil")
	}

	if !strings.Contains(err.Error(), expectedError) {
		t.Fatalf(`Expected error to contain "%v", but got "%v"`, expectedError, err)
	}
}

func TestCrimeJustice_NumberOfLawyers_Success(t *testing.T) {
	ctx := context.Background()
	fixture := datagovgrtest.NewFixture(t)

	httpmock.Activate()
	defer httpmock.DeactivateAndReset()
	httpmock.RegisterResponder(
		http.MethodGet,
		fixture.URLPaths.NumberOfLawyers,
		httpmock.NewJsonResponderOrPanic(http.StatusOK, fixture.MockData.NumberOfLawyers),
	)

	got, err := fixture.API.CrimeJustice.NumberOfLawyers(ctx)
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}

	if !reflect.DeepEqual(got, fixture.MockData.NumberOfLawyers) {
		t.Fatalf("Expected data %+v, but got %+v", fixture.MockData.NumberOfLawyers, got)
	}
}

func TestCrimeJustice_NumberOfLawyers_Error(t *testing.T) {
	ctx := context.Background()
	fixture := datagovgrtest.NewFixture(t)
	expectedError := "unexpected status code"

	httpmock.Activate()
	defer httpmock.DeactivateAndReset()
	httpmock.RegisterResponder(
		http.MethodGet,
		fixture.URLPaths.NumberOfLawyers,
		httpmock.NewJsonResponderOrPanic(http.StatusInternalServerError, nil),
	)

	_, err := fixture.API.CrimeJustice.NumberOfLawyers(ctx)
	if err == nil {
		t.Fatal("Expected error, but got nil")
	}

	if !strings.Contains(err.Error(), expectedError) {
		t.Fatalf(`Expected error to contain "%v", but got "%v"`, expectedError, err)
	}
}

func TestCrimeJustice_NumberOfLawFirms_Success(t *testing.T) {
	ctx := context.Background()
	fixture := datagovgrtest.NewFixture(t)

	httpmock.Activate()
	defer httpmock.DeactivateAndReset()
	httpmock.RegisterResponder(
		http.MethodGet,
		fixture.URLPaths.NumberOfLawFirms,
		httpmock.NewJsonResponderOrPanic(http.StatusOK, fixture.MockData.NumberOfLawFirms),
	)

	got, err := fixture.API.CrimeJustice.NumberOfLawFirms(ctx)
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}

	if !reflect.DeepEqual(got, fixture.MockData.NumberOfLawFirms) {
		t.Fatalf("Expected data %+v, but got %+v", fixture.MockData.NumberOfLawFirms, got)
	}
}

func TestCrimeJustice_NumberOfLawFirms_Error(t *testing.T) {
	ctx := context.Background()
	fixture := datagovgrtest.NewFixture(t)
	expectedError := "unexpected status code"

	httpmock.Activate()
	defer httpmock.DeactivateAndReset()
	httpmock.RegisterResponder(
		http.MethodGet,
		fixture.URLPaths.NumberOfLawFirms,
		httpmock.NewJsonResponderOrPanic(http.StatusInternalServerError, nil),
	)

	_, err := fixture.API.CrimeJustice.NumberOfLawFirms(ctx)
	if err == nil {
		t.Fatal("Expected error, but got nil")
	}

	if !strings.Contains(err.Error(), expectedError) {
		t.Fatalf(`Expected error to contain "%v", but got "%v"`, expectedError, err)
	}
}
package web

import (
	"encoding/json"
	"sync"

	"github.com/pingcap/errors"
	"github.com/pingcap/tidb/br/pkg/lightning/checkpoints"
	"github.com/pingcap/tidb/br/pkg/lightning/common"
	"github.com/pingcap/tidb/br/pkg/lightning/mydump"
	"go.uber.org/atomic"
)

// checkpointsMap is a concurrent map (table name → checkpoints).
//
// Implementation note: Currently the checkpointsMap is only written from a
// single goroutine inside (*RestoreController).listenCheckpointUpdates(), so
// all writes are going to be single threaded. Writing to checkpoint is not
// considered performance critical. The map can be read from any HTTP connection
// goroutine. Therefore, we simply implement the concurrent map using a single
// RWMutex. We may switch to more complicated data structure if contention is
// shown to be a problem.
//
// Do not implement this using a sync.Map, its mutex can't protect the content
// of a pointer.
type checkpointsMap struct {
	mu          sync.RWMutex
	checkpoints map[string]*checkpoints.TableCheckpoint
}

// makeCheckpointsMap returns an empty, ready-to-use checkpointsMap.
func makeCheckpointsMap() (res checkpointsMap) {
	res.checkpoints = make(map[string]*checkpoints.TableCheckpoint)
	return
}

// clear drops every tracked checkpoint.
func (cpm *checkpointsMap) clear() {
	cpm.mu.Lock()
	cpm.checkpoints = make(map[string]*checkpoints.TableCheckpoint)
	cpm.mu.Unlock()
}

// insert registers (or replaces) the checkpoint for the given table key.
func (cpm *checkpointsMap) insert(key string, cp *checkpoints.TableCheckpoint) {
	cpm.mu.Lock()
	cpm.checkpoints[key] = cp
	cpm.mu.Unlock()
}

// totalWritten pairs a table key with its recomputed written-byte count.
type totalWritten struct {
	key          string
	totalWritten int64
}

// update applies each diff to its table's checkpoint and recomputes the total
// number of bytes written per table, which it returns for the caller to fold
// into the progress display.
func (cpm *checkpointsMap) update(diffs map[string]*checkpoints.TableCheckpointDiff) []totalWritten {
	totalWrittens := make([]totalWritten, 0, len(diffs))
	cpm.mu.Lock()
	defer cpm.mu.Unlock()
	for key, diff := range diffs {
		cp := cpm.checkpoints[key]
		cp.Apply(diff)
		tw := int64(0)
		for _, engine := range cp.Engines {
			for _, chunk := range engine.Chunks {
				// Fully-written engines count the chunk's whole size;
				// in-flight ones count only the bytes consumed so far.
				if engine.Status >= checkpoints.CheckpointStatusAllWritten {
					tw += chunk.TotalSize()
				} else {
					tw += chunk.Chunk.Offset - chunk.Key.Offset
				}
			}
		}
		totalWrittens = append(totalWrittens, totalWritten{key: key, totalWritten: tw})
	}
	return totalWrittens
}

// marshal renders the named table's checkpoint as JSON, or a NotFound error
// when the table is unknown.
func (cpm *checkpointsMap) marshal(key string) ([]byte, error) {
	cpm.mu.RLock()
	defer cpm.mu.RUnlock()
	if cp, ok := cpm.checkpoints[key]; ok {
		return json.Marshal(cp)
	}
	return nil, errors.NotFoundf("table %s", key)
}

// taskStatus enumerates the lifecycle states shown in the web UI.
type taskStatus uint8

const (
	taskStatusRunning   taskStatus = 1
	taskStatusCompleted taskStatus = 2
)

// tableInfo is the per-table progress snapshot serialized to the frontend.
type tableInfo struct {
	TotalWritten int64           `json:"w"`
	TotalSize    int64           `json:"z"`
	Status       taskStatus      `json:"s"`
	Message      string          `json:"m,omitempty"`
	Progresses   []tableProgress `json:"progresses,omitempty"`
}

// tableProgress is a named step with its completion ratio.
type tableProgress struct {
	Step     string  `json:"step"`
	Progress float64 `json:"progress"`
}

// taskProgress is the root progress object serialized to the frontend.
type taskProgress struct {
	mu      sync.RWMutex
	Tables  map[string]*tableInfo `json:"t"`
	Status  taskStatus            `json:"s"`
	Message string                `json:"m,omitempty"`

	// The contents have their own mutex for protection
	checkpoints checkpointsMap
}

var (
	currentProgress *taskProgress

	// whether progress is enabled
	progressEnabled = atomic.NewBool(false)
)

// EnableCurrentProgress init current progress struct on demand.
// NOTE: this call is not thread safe, so it should only be inited once at the very beginning of progress start.
func EnableCurrentProgress() {
	currentProgress = &taskProgress{
		checkpoints: makeCheckpointsMap(),
	}
	progressEnabled.Store(true)
}

// BroadcastStartTask sets the current task status to running.
func BroadcastStartTask() {
	if !progressEnabled.Load() {
		return
	}
	currentProgress.mu.Lock()
	currentProgress.Status = taskStatusRunning
	currentProgress.mu.Unlock()

	currentProgress.checkpoints.clear()
}

// BroadcastEndTask sets the current task status to completed.
func BroadcastEndTask(err error) {
	if !progressEnabled.Load() {
		return
	}
	// Capture the error stack before taking the lock.
	errString := errors.ErrorStack(err)

	currentProgress.mu.Lock()
	currentProgress.Status = taskStatusCompleted
	currentProgress.Message = errString
	currentProgress.mu.Unlock()
}

// BroadcastInitProgress sets the total size of each table.
func BroadcastInitProgress(databases []*mydump.MDDatabaseMeta) { if !progressEnabled.Load() { return } tables := make(map[string]*tableInfo, len(databases)) for _, db := range databases { for _, tbl := range db.Tables { name := common.UniqueTable(db.Name, tbl.Name) tables[name] = &tableInfo{TotalSize: tbl.TotalSize} } } currentProgress.mu.Lock() currentProgress.Tables = tables currentProgress.mu.Unlock() } // BroadcastTableCheckpoint updates the checkpoint of a table. func BroadcastTableCheckpoint(tableName string, cp *checkpoints.TableCheckpoint) { if !progressEnabled.Load() { return } currentProgress.mu.Lock() currentProgress.Tables[tableName].Status = taskStatusRunning currentProgress.mu.Unlock() // create a deep copy to avoid false sharing currentProgress.checkpoints.insert(tableName, cp.DeepCopy()) } // BroadcastTableProgress updates the progress of a table. func BroadcastTableProgress(tableName string, step string, progress float64) { if !progressEnabled.Load() { return } currentProgress.mu.Lock() progresses := currentProgress.Tables[tableName].Progresses var present bool for i, p := range progresses { if p.Step == step { progresses[i].Progress = progress present = true } } if !present { progresses = append(progresses, tableProgress{Step: step, Progress: progress}) } currentProgress.Tables[tableName].Progresses = progresses currentProgress.mu.Unlock() } // BroadcastCheckpointDiff updates the total written size of each table. func BroadcastCheckpointDiff(diffs map[string]*checkpoints.TableCheckpointDiff) { if !progressEnabled.Load() { return } totalWrittens := currentProgress.checkpoints.update(diffs) currentProgress.mu.Lock() for _, tw := range totalWrittens { currentProgress.Tables[tw.key].TotalWritten = tw.totalWritten } currentProgress.mu.Unlock() } // BroadcastError sets the error message of a table. 
func BroadcastError(tableName string, err error) { if !progressEnabled.Load() { return } errString := errors.ErrorStack(err) currentProgress.mu.Lock() if tbl := currentProgress.Tables[tableName]; tbl != nil { tbl.Status = taskStatusCompleted tbl.Message = errString } currentProgress.mu.Unlock() } // MarshalTaskProgress returns the current progress in JSON format. func MarshalTaskProgress() ([]byte, error) { if !progressEnabled.Load() { return nil, errors.New("progress is not enabled") } currentProgress.mu.RLock() defer currentProgress.mu.RUnlock() return json.Marshal(&currentProgress) } // MarshalTableCheckpoints returns the checkpoint of a table in JSON format. func MarshalTableCheckpoints(tableName string) ([]byte, error) { if !progressEnabled.Load() { return nil, errors.New("progress is not enabled") } return currentProgress.checkpoints.marshal(tableName) }
package api import ( "backend/models" ) type UseCase interface { GetObjects(firstNumber, count int) ([]models.Object, error) }
// DRUNKWATER TEMPLATE(add description and prototypes) // Question Title and Description on leetcode.com // Function Declaration and Function Prototypes on leetcode.com //17. Letter Combinations of a Phone Number //Given a string containing digits from 2-9 inclusive, return all possible letter combinations that the number could represent. //A mapping of digit to letters (just like on the telephone buttons) is given below. Note that 1 does not map to any letters. //Example: //Input: "23" //Output: ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"]. //Note: //Although the above answer is in lexicographical order, your answer could be in any order you want. //func letterCombinations(digits string) []string { //} // Time Is Money
package util import ( // "fmt" "sync" "time" ) //过期时间 10小时 const Time int64 = 1 //3600 * 10 //动态缓存数据库 var Caches *CacheManager type CacheManager struct { lock *sync.RWMutex caches map[string]*Cache } type Cache struct { Value interface{} Times int64 } func Init() { Caches = NewCacheManager(300) } func NewCacheManager(size int) *CacheManager { return &CacheManager{new(sync.RWMutex), make(map[string]*Cache, size)} } func (this *CacheManager) Set(key string, v interface{}) { this.lock.Lock() x := Cache{Value: v, Times: Time} this.caches[key] = &x this.lock.Unlock() } func (this *CacheManager) Get(key string) *Cache { this.lock.RLock() v := this.caches[key] this.lock.RUnlock() return v } func (this *CacheManager) Delete(key string) *Cache { this.lock.Lock() v := this.caches[key] delete(this.caches, key) this.lock.Unlock() return v } func (this *CacheManager) IsExist(key string) bool { if xy := this.Get(key); xy != nil { return true } else { return false } } func (this *CacheManager) IsExpired(key string, ttl int) bool { if xy := this.Get(key); xy != nil { return (time.Now().Unix() - xy.Times) >= int64(ttl) } else { return true } }
// Tests for the gherkin feature runner: step matching, keyword stripping,
// backgrounds, set-up/tear-down hooks, multi-line table steps, regex
// captures, and scenario outlines.
//
// NOTE(review): the multi-line feature strings below are line-oriented
// input to the parser; they have been re-wrapped onto one line per
// Gherkin step. Exact intra-line whitespace in the originals could not be
// recovered — verify the leading/trailing-space fixtures against a run.
package gherkin

import (
	"testing"

	. "github.com/tychofreeman/go-matchers"
)

// Context is the user-supplied state threaded through step definitions;
// each field records that a particular step/hook fired or captures data.
type Context struct {
	wasCalled             bool
	firstWasCalled        bool
	secondWasCalled       bool
	actionWasCalled       bool
	secondActionCalled    bool
	wasGivenRun           bool
	wasThenRun            bool
	givenData             []map[string]string
	thenData              []map[string]string
	whenData              []map[string]string
	captured              string
	timesRun              int
	wasRun                bool
	setUpWasCalled        bool
	tearDownWasCalled     bool
	setUpCalledBeforeStep bool
}

// featureText is the shared fixture: three scenarios exercising every
// step keyword (Given/When/Then/And/But/*) plus whitespace handling.
var featureText = `Feature: My Feature
Scenario: Scenario 1
Given the first setup
When the first action
Then the first result
But not the other first result
Scenario: Scenario 2
Given the second setup
When the second action
Then the second result
And the other second result
Scenario: Scenario 3
* the third setup
  When the third action has leading spaces
When the third action has trailing spaces  
This is ignored`

// assertMatchCalledOrNot registers pattern against a flag-setting step
// definition, executes step, and asserts whether the definition ran.
func assertMatchCalledOrNot(t *testing.T, step string, pattern string, isCalled bool) {
	f := func(w *World, ctx *Context) { ctx.wasCalled = true }
	g := createWriterlessRunner()
	g.RegisterStepDef(pattern, f)
	ctx := &Context{}
	g.Execute(step, ctx)
	AssertThat(t, ctx.wasCalled, Equals(isCalled))
}

// matchingFunctionIsCalled asserts that pattern matches some step in step.
func matchingFunctionIsCalled(t *testing.T, step string, pattern string) {
	assertMatchCalledOrNot(t, step, pattern, true)
}

// matchingFunctionIsNotCalled asserts that pattern matches no step in step.
func matchingFunctionIsNotCalled(t *testing.T, step string, pattern string) {
	assertMatchCalledOrNot(t, step, pattern, false)
}

func TestExecutesMatchingMethod(t *testing.T) {
	matchingFunctionIsCalled(t, featureText, ".")
}

func TestAvoidsNonMatchingMethod(t *testing.T) {
	matchingFunctionIsNotCalled(t, featureText, "^A")
}

// Only the first registered matching definition runs; later ones are skipped.
func TestCallsOnlyFirstMatchingMethod(t *testing.T) {
	first := func(w *World, ctx *Context) {}
	second := func(w *World, ctx *Context) { ctx.wasCalled = true }
	c := &Context{}
	g := createWriterlessRunner()
	g.RegisterStepDef(".", first)
	g.RegisterStepDef(".", second)
	g.Execute("Given only the first step is called", c)
	AssertThat(t, c.wasCalled, Equals(false))
}

// The keyword-stripping tests below verify that each Gherkin keyword
// (and surrounding whitespace) is removed before pattern matching.
func TestRemovesGivenFromMatchLine(t *testing.T) {
	matchingFunctionIsCalled(t, featureText, "^the first setup$")
}

func TestRemovesWhenFromMatchLine(t *testing.T) {
	matchingFunctionIsCalled(t, featureText, "^the first action$")
}

func TestRemovesThenFromMatchLine(t *testing.T) {
	matchingFunctionIsCalled(t, featureText, "^the first result$")
}

func TestRemovesAndFromMatchLine(t *testing.T) {
	matchingFunctionIsCalled(t, featureText, "^the other second result$")
}

func TestRemovesButFromMatchLine(t *testing.T) {
	matchingFunctionIsCalled(t, featureText, "^not the other first result$")
}

func TestRemovesStarFromMatchLine(t *testing.T) {
	matchingFunctionIsCalled(t, featureText, "^the third setup$")
}

func TestRemovesLeadingSpacesFromMatchLine(t *testing.T) {
	matchingFunctionIsCalled(t, featureText, "^the third action has leading spaces$")
}

func TestRemovesTrailingSpacesFromMatchLine(t *testing.T) {
	matchingFunctionIsCalled(t, featureText, "^the third action has trailing spaces$")
}

func TestMultipleStepsAreCalled(t *testing.T) {
	g := createWriterlessRunner()
	g.RegisterStepDef("^the first setup$", func(w *World, ctx *Context) { ctx.firstWasCalled = true })
	g.RegisterStepDef("^the first action$", func(w *World, ctx *Context) { ctx.secondWasCalled = true })
	c := &Context{}
	g.Execute(featureText, c)
	AssertThat(t, c.firstWasCalled, IsTrue)
	AssertThat(t, c.secondWasCalled, IsTrue)
}

// Pending() inside a step aborts the remainder of that scenario.
func TestPendingSkipsTests(t *testing.T) {
	g := createWriterlessRunner()
	g.RegisterStepDef("^the first setup$", func(w *World, ctx *Context) { Pending() })
	g.RegisterStepDef("^the first action$", func(w *World, ctx *Context) { ctx.actionWasCalled = true })
	c := &Context{}
	g.Execute(featureText, c)
	AssertThat(t, c.actionWasCalled, IsFalse)
}

// ...but Pending in scenario 1 must not suppress scenario 2.
func TestPendingDoesntSkipSecondScenario(t *testing.T) {
	g := createWriterlessRunner()
	g.RegisterStepDef("^the first setup$", func(w *World, ctx *Context) { Pending() })
	g.RegisterStepDef("^the second setup$", func(w *World, ctx *Context) {})
	g.RegisterStepDef("^the second action$", func(w *World, ctx *Context) { ctx.secondActionCalled = true })
	c := &Context{}
	g.Execute(featureText, c)
	AssertThat(t, c.secondActionCalled, Equals(true))
}

func TestBackgroundIsRunBeforeEachScenario(t *testing.T) {
	c := &Context{}
	g := createWriterlessRunner()
	g.RegisterStepDef("^background$", func(w *World, ctx *Context) { ctx.wasCalled = true })
	g.Execute(`Feature:
Background:
Given background
Scenario:
Then this
`, c)
	AssertThat(t, c.wasCalled, IsTrue)
}

// (sic: "SeUpt" — typo preserved from the original test name.)
func TestCallsSeUptBeforeScenario(t *testing.T) {
	c := &Context{}
	g := createWriterlessRunner()
	g.SetSetUpFn(func(ctx *Context) { ctx.setUpWasCalled = true })
	// The step records whether set-up had already fired when it ran.
	g.RegisterStepDef(".", func(w *World, ctx *Context) { ctx.setUpCalledBeforeStep = ctx.setUpWasCalled })
	g.Execute(`Feature:
Scenario:
Then this`, c)
	AssertThat(t, c.setUpCalledBeforeStep, IsTrue)
}

func TestCallsTearDownBeforeScenario(t *testing.T) {
	c := &Context{}
	g := createWriterlessRunner()
	g.SetTearDownFn(func(ctx *Context) { ctx.tearDownWasCalled = true })
	g.Execute(`Feature:
Scenario:
Then this`, c)
	AssertThat(t, c.tearDownWasCalled, IsTrue)
}

// A table following a step is exposed to the definition as w.MultiStep,
// one map per data row keyed by the header row.
func TestPassesTableListToMultiLineStep(t *testing.T) {
	c := &Context{}
	g := createWriterlessRunner()
	g.RegisterStepDef(".", func(w *World, ctx *Context) { ctx.thenData = w.MultiStep })
	g.Execute(`Feature:
Scenario:
Then you should see these people
|name|email|
|Bob |bob@bob.com|
`, c)
	expectedData := []map[string]string{
		map[string]string{"name": "Bob", "email": "bob@bob.com"},
	}
	AssertThat(t, c.thenData, Equals(expectedData))
}

func TestErrorsIfTooFewFieldsInMultiLineStep(t *testing.T) {
	c := &Context{}
	g := createWriterlessRunner()
	// Assertions before end of test...
	// Execute is expected to panic on the short row; recover and verify
	// that neither step definition ran.
	defer func() {
		recover()
		AssertThat(t, c.wasGivenRun, IsFalse)
		AssertThat(t, c.wasThenRun, IsFalse)
	}()
	g.RegisterStepDef("given", func(w *World, ctx *Context) { ctx.wasGivenRun = true })
	g.RegisterStepDef("then", func(w *World, ctx *Context) { ctx.wasThenRun = true })
	g.Execute(`Feature:
Scenario:
Given given
|name|addr|
|bob|
Then then`, c)
}

func TestSupportsMultipleMultiLineStepsPerScenario(t *testing.T) {
	c := &Context{}
	g := createWriterlessRunner()
	g.RegisterStepDef("given", func(w *World, ctx *Context) { ctx.givenData = w.MultiStep })
	g.RegisterStepDef("when", func(w *World, ctx *Context) { ctx.whenData = w.MultiStep })
	g.Execute(`Feature:
Scenario:
Given given
|name|email|
|Bob|bob@bob.com|
|Jim|jim@jim.com|
When when
|breed|height|
|wolf|2|
|shihtzu|.5|
`, c)
	expectedGivenData := []map[string]string{
		map[string]string{"name": "Bob", "email": "bob@bob.com"},
		map[string]string{"name": "Jim", "email": "jim@jim.com"},
	}
	expectedWhenData := []map[string]string{
		map[string]string{"breed": "wolf", "height": "2"},
		map[string]string{"breed": "shihtzu", "height": ".5"},
	}
	AssertThat(t, c.givenData, Equals(expectedGivenData))
	AssertThat(t, c.whenData, Equals(expectedWhenData))
}

// Regex capture groups are passed as extra string arguments to the step.
func TestAllowsAccessToFirstRegexCapture(t *testing.T) {
	c := &Context{}
	g := createWriterlessRunner()
	g.RegisterStepDef("(thing)", func(w *World, ctx *Context, thing string) { ctx.captured = thing })
	g.Execute(`Feature:
Scenario:
Given thing
`, c)
	AssertThat(t, c.captured, Equals("thing"))
}

// A step expecting more captures than the pattern provides must panic
// with a descriptive message rather than crash obscurely.
func TestFailsGracefullyWithOutOfBoundsRegexCaptures(t *testing.T) {
	panicked := false
	g := createWriterlessRunner()
	g.RegisterStepDef(".", func(w *World, ctx *Context, x string) {})
	func() {
		defer func() {
			r := recover()
			AssertThat(t, r, Equals("Function type mismatch"))
			panicked = true
		}()
		g.Execute(`Feature:
Scenario:
Given .
`, &Context{})
	}()
	AssertThat(t, panicked, IsTrue)
}

func TestFailsGracefullyWithInvalidFunctionType(t *testing.T) {
	panicked := false
	g := createWriterlessRunner()
	g.RegisterStepDef("(.)", func(w *World, ctx *Context, x interface{}) {})
	func() {
		defer func() {
			r := recover()
			AssertThat(t, r, Equals("Function type not supported"))
			panicked = true
		}()
		g.Execute(`Feature:
Scenario:
Given .
`, &Context{})
	}()
	AssertThat(t, panicked, IsTrue)
}

// A capture that cannot convert to the declared parameter type ("x" as
// int) must panic before the step body runs.
func TestFailsGracefullyWithInvalidArguments(t *testing.T) {
	panicked := false
	g := createWriterlessRunner()
	g.RegisterStepDef("(.)", func(w *World, ctx *Context, x int) { t.Fail() })
	func() {
		defer func() {
			recover()
			panicked = true
		}()
		g.Execute(`Feature:
Scenario:
Given x
`, &Context{})
	}()
	AssertThat(t, panicked, IsTrue)
}

// Captures convert to every supported scalar parameter type.
func TestSupportsArguments(t *testing.T) {
	g := createWriterlessRunner()
	g.RegisterStepDef("(.*),(.*),(.*),(.*),(.*),(.*),(.*),(.*)",
		func(w *World, ctx *Context, b1 bool, i8 int8, i16 int16, i32 int32, i64 int64, i int, f32 float32, f64 float64) {
		})
	g.Execute(`Feature:
Scenario:
Given true,127,255,255,255,255,0.3,0.4
`, &Context{})
}

// Disabled: steps appearing before any Scenario line should not run.
func DISABLED_TestOnlyExecutesStepsBelowScenarioLine(t *testing.T) {
	c := &Context{}
	g := createWriterlessRunner()
	g.RegisterStepDef(".", func(w *World, ctx *Context) { ctx.wasRun = true })
	g.Execute(`Feature:
Given .`, c)
	AssertThat(t, c.wasRun, IsFalse)
}

func TestScenarioOutlineWithoutExampleDoesNotExecute(t *testing.T) {
	c := &Context{}
	g := createWriterlessRunner()
	g.RegisterStepDef(".", func(w *World, ctx *Context) { ctx.wasRun = true })
	g.Execute(`Feature:
Scenario Outline:
Given .
`, c)
	AssertThat(t, c.wasRun, IsFalse)
}

// Outline placeholders like <count> are substituted from the example row.
func TestScenarioOutlineReplacesFieldWithValueInExample(t *testing.T) {
	so := ScenarioOutline()
	so.AddStep(StepFromString(`<count> pops`))
	scenario := so.CreateForExample(map[string]string{"count": "5"})
	AssertThat(t, scenario.steps[0].line, Equals(`5 pops`))
}

func TestScenarioOutlineReplacesManyFieldsWithValuesInExample(t *testing.T) {
	so := ScenarioOutline()
	so.AddStep(StepFromString(`<count> <name>`))
	scenario := so.CreateForExample(map[string]string{"count": "5", "name": "pops"})
	AssertThat(t, scenario.steps[0].line, Equals(`5 pops`))
}

func TestScenarioOutlineSupportsMultipleLines(t *testing.T) {
	so := ScenarioOutline()
	so.AddStep(StepFromString(`<count> <name>`))
	so.AddStep(StepFromString(`<name> <type>`))
	scenario := so.CreateForExample(map[string]string{"count": "5", "name": "pops", "type": "music"})
	AssertThat(t, scenario.steps[0].line, Equals(`5 pops`))
	AssertThat(t, scenario.steps[1].line, Equals(`pops music`))
}

// One execution per example row (two rows here).
func TestExecutesScenarioOncePerLineInExample(t *testing.T) {
	c := &Context{}
	g := createWriterlessRunner()
	g.RegisterStepDef(".", func(w *World, ctx *Context) { ctx.timesRun++ })
	g.Execute(`Feature:
Scenario Outline:
Given .
Examples:
|scenario num|
|first|
|second|
`, c)
	AssertThat(t, c.timesRun, Equals(2))
}

// A Background with no following Scenario must not execute on its own.
func TestBackgroundDoesntExecuteBackgroundWhenRun(t *testing.T) {
	c := &Context{}
	g := createWriterlessRunner()
	g.RegisterStepDef(".", func(w *World, ctx *Context) { ctx.wasRun = true })
	g.Execute(`Feature:
Background:
Given .
`, c)
	AssertThat(t, c.wasRun, IsFalse)
}

// Support PyStrings?
// Support tags?
// Support reporting.
package actions

import (
	"errors"
	"strings"

	"github.com/barrydev/api-3h-shop/src/common/connect"
	"github.com/barrydev/api-3h-shop/src/factories"
	"github.com/barrydev/api-3h-shop/src/model"
)

// InsertShipping validates body, builds a dynamic INSERT ... SET statement,
// persists the shipping row, and returns the freshly loaded record.
//
// Required fields: OrderId (must reference an existing order whose
// payment_status is 'paid'), Carrier, and Price. Note is optional.
func InsertShipping(body *model.BodyShipping) (*model.Shipping, error) {
	queryString := ""
	var args []interface{}
	var set []string
	if body.OrderId != nil {
		// The order must exist and already be paid before shipping
		// can be created.
		order, err := factories.FindOneOrder(&connect.QueryMySQL{
			QueryString: "WHERE _id=? AND payment_status='paid'",
			Args:        []interface{}{body.OrderId},
		})
		if err != nil {
			return nil, err
		}
		if order == nil {
			return nil, errors.New("order does not exists")
		}
		set = append(set, " order_id=?")
		args = append(args, body.OrderId)
	} else {
		return nil, errors.New("shipping's order_id is required")
	}
	if body.Carrier != nil {
		set = append(set, " carrier=?")
		args = append(args, body.Carrier)
	} else {
		return nil, errors.New("shipping's carrier is required")
	}
	if body.Price != nil {
		set = append(set, " price=?")
		args = append(args, body.Price)
	} else {
		return nil, errors.New("shipping's price is required")
	}
	if body.Note != nil {
		set = append(set, " note=?")
		args = append(args, body.Note)
	}
	if len(set) > 0 {
		// Each fragment begins with a space, yielding
		// "SET order_id=?, carrier=?, ..." plus the creation stamp.
		queryString += "SET" + strings.Join(set, ",") + ", created_at=NOW() \n"
	} else {
		// Unreachable in practice: the required-field checks above
		// guarantee at least three fragments.
		return nil, errors.New("invalid body")
	}
	id, err := factories.InsertShipping(&connect.QueryMySQL{
		QueryString: queryString,
		Args:        args,
	})
	if err != nil {
		return nil, err
	}
	if id == nil {
		return nil, errors.New("insert error")
	}
	// Re-read the row so the caller gets DB-generated fields
	// (id, created_at).
	return factories.FindShippingById(*id)
}
/*
Copyright paskal.maksim@gmail.com
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Demo program wiring the logrus-hook-opentracing hook to a Jaeger
// tracer: it starts a root span and emits log lines, the last of which
// is attached to the span via SpanKey.
package main

import (
	"errors"

	logrushookopentracing "github.com/maksim-paskal/logrus-hook-opentracing"
	log "github.com/sirupsen/logrus"
	"github.com/uber/jaeger-client-go"
	jaegercfg "github.com/uber/jaeger-client-go/config"
	"github.com/uber/jaeger-lib/metrics"
)

// ErrTest is a sentinel used only to demonstrate error logging on a span.
var ErrTest error = errors.New("test error")

func main() {
	// Install the opentracing hook so logrus entries carrying a span
	// are recorded on it.
	hook, err := logrushookopentracing.NewHook(logrushookopentracing.Options{})
	if err != nil {
		log.WithError(err).Fatal()
	}
	log.AddHook(hook)
	// Jaeger is configured from JAEGER_* environment variables, then
	// overridden to sample every trace and log emitted spans.
	cfg, err := jaegercfg.FromEnv()
	if err != nil {
		log.WithError(err).Panic("Could not parse Jaeger env vars")
	}
	cfg.ServiceName = "test-app"
	cfg.Sampler.Type = jaeger.SamplerTypeConst
	cfg.Sampler.Param = 1
	cfg.Reporter.LogSpans = true
	jMetricsFactory := metrics.NullFactory
	tracer, closer, err := cfg.NewTracer(
		jaegercfg.Metrics(jMetricsFactory),
	)
	if err != nil {
		log.WithError(err).Panic("Could not create tracer")
	}
	defer closer.Close()
	span := tracer.StartSpan("main")
	defer span.Finish()
	log.Info("test info")
	log.Warn("test warn")
	// Attaching the span via SpanKey lets the hook log this error onto
	// the trace.
	log.WithField(logrushookopentracing.SpanKey, span).WithError(ErrTest).Error("test error")
}
/* TESTO ESERCIZIO -------------------- Scrivete un programma che simuli l’ordinazione, la cottura e l’uscita dei piatti in un ristorante. 10 clienti ordinano contemporaneamente i loro piatti. In cucina vengono preparati in un massimo di 3 alla volta, essendoci solo 3 fornelli. Il tempo necessario per preparare ogni piatto è fra i 4 e i 6 secondi. Dopo che un piatto viene preparato, viene portato fuori da un cameriere, che impiega 3 secondi a portarlo fuori. Ci sono solamente 2 camerieri nel ristorante. ● Creare la strutture Piatto e Cameriere col relativo campo “nome”. ● Creare le funzioni ordina che aggiunge il piatto a un buffer di piatti da fare; creare la function cucina che cucina ogni piatto e lo mette in lista per essere consegnato; creare la function consegna che fa uscire un piatto dalla cucina. ● Ogni cameriere può portare solo un piatto alla volta. ● Usate buffered channels per svolgere il compito. ● Attenzione: se per cucinare un piatto lo mandate nel buffer fornello di capienza 3 e lo ritirate dopo 3 secondi, non è detto che ritiriate lo stesso piatto che avete messo sul fornello. Tenetelo in memoria. Ovviamente la vostra soluzione potrebbe differire dalla mia e questo hint potrebbe non servirvi. 
*/ package main // Pacchetti importati import "fmt" import "time" import "math/rand" // Struttura Piatto type Piatto struct { nome string } // Struttura Cameriere type Cameriere struct { nome string piatto Piatto } // Prende gli ordini dei clienti e li aggiunge al channel delle prenotazioni // da mandare alla cucina func ordine(prenotazioni chan Piatto, clienteNumero int) { fmt.Println(clienteNumero, "sta per ordinare") prenotazioni <- Piatto{fmt.Sprint("",clienteNumero)} } // Prepara gi ordini sui fornelli func cucina(prenotazioni chan Piatto, pronto chan Piatto) { for { inPreparazione := <- prenotazioni // Tempo impiengato per la preparazione del piatto time.Sleep((time.Duration(rand.Int31n(6 - 4) + 4)) * time.Second) pronto <- inPreparazione } } // Il cameriere prende il piatto e lo porta al cliente aggiungendolo al channel // dei piatti consegnati func consegna(cameriere Cameriere, pronto chan Piatto, consegnato chan Piatto) { for { cameriere.piatto = <- pronto // Tempo per portare il piatto dalla cucina al tavolo time.Sleep(time.Duration(3) * time.Second) consegnato <- cameriere.piatto } } func main() { // N° clienti nel locale nClienti := 10 rand.Seed(time.Now().UnixNano()) // Channel dei piatti prenotati dai clienti prenotazioni := make(chan Piatto, nClienti) // Channel dei piatti già cucinati e pronti per essere presi in carico dal cameriere pronto := make(chan Piatto, nClienti) // Channel dei piatti consegnati ai clienti consegnato := make(chan Piatto, nClienti) // Camerieri del ristorante cameriere1 := Cameriere{nome: fmt.Sprint("cam1")} cameriere2 := Cameriere{nome: fmt.Sprint("cam1")} // 3 fornelli a disposizione per cucinare di conseguenza 3 thread per la preparazione // dei piatti in cucina for i := 0; i < 3; i++ { go cucina(prenotazioni, pronto) } // Ordini effettuati (anche in simmultanea) dei clienti for i := 0; i < nClienti; i++ { go ordine(prenotazioni, i) } // 2 Thread (uno per cameriere) per la consegna dei piatti ai clienti go 
consegna(cameriere1, pronto, consegnato) go consegna(cameriere2, pronto, consegnato) // Stampa a schermo i piatti consegnati ai clienti for i := 0; i < nClienti; i++ { fmt.Println(<- consegnato, "consegnato al cliente") } }
package capi

import (
	"fmt"

	"github.com/giantswarm/aws-gs-to-capi/giantswarm"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	capiawsv1alpha3 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3"
	kubeadmapiv1alpha3 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha3"
	kubeadmtypev1beta1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/v1beta1"
	kubeadmv1alpha3 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3"
)

// kubeAdmControlPlaneName derives the KubeadmControlPlane CR name from
// the cluster ID.
func kubeAdmControlPlaneName(clusterID string) string {
	return fmt.Sprintf("%s-control-plane", clusterID)
}

// transformKubeAdmControlPlane maps Giant Swarm cluster CRs onto a CAPI
// KubeadmControlPlane: replica count and naming come from the GS CRs,
// while the kubeadm config pins AWS cloud-provider flags, an external
// etcd data dir/image, and a systemd unit (shipped via secret) that
// attaches etcd dependencies before kubeadm runs.
//
// NOTE(review): gsCRs.Cluster.Status.APIEndpoints[0] is indexed without
// a length check — confirm callers guarantee at least one API endpoint.
func transformKubeAdmControlPlane(gsCRs *giantswarm.GSClusterCrs, k8sVersion string) *kubeadmv1alpha3.KubeadmControlPlane {
	replicas := int32(gsCRs.G8sControlPlane.Spec.Replicas)
	clusterID := gsCRs.AWSCluster.Name
	cp := &kubeadmv1alpha3.KubeadmControlPlane{
		TypeMeta: metav1.TypeMeta{
			APIVersion: kubeadmv1alpha3.GroupVersion.String(),
			Kind:       "KubeadmControlPlane",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      kubeAdmControlPlaneName(gsCRs.AWSCluster.Name),
			Namespace: gsCRs.AWSCluster.Namespace,
		},
		Spec: kubeadmv1alpha3.KubeadmControlPlaneSpec{
			// Machines for the control plane come from a separate
			// AWSMachineTemplate created elsewhere in this package.
			InfrastructureTemplate: v1.ObjectReference{
				APIVersion: capiawsv1alpha3.GroupVersion.String(),
				Name:       awsMachineTemplateCPName(clusterID),
				Kind:       "AWSMachineTemplate",
			},
			KubeadmConfigSpec: kubeadmapiv1alpha3.KubeadmConfigSpec{
				ClusterConfiguration: &kubeadmtypev1beta1.ClusterConfiguration{
					APIServer: kubeadmtypev1beta1.APIServer{
						ControlPlaneComponent: kubeadmtypev1beta1.ControlPlaneComponent{
							ExtraArgs: map[string]string{
								"cloud-provider": "aws",
							},
						},
						// Keep the existing GS API endpoint valid as a cert SAN.
						CertSANs: []string{
							gsCRs.Cluster.Status.APIEndpoints[0].Host,
						},
					},
					ControllerManager: kubeadmtypev1beta1.ControlPlaneComponent{
						ExtraArgs: map[string]string{
							"cloud-provider": "aws",
						},
					},
					Etcd: kubeadmtypev1beta1.Etcd{
						Local: &kubeadmtypev1beta1.LocalEtcd{
							// Data lives on the volume attached by the
							// etcd3-attach-deps unit below.
							DataDir: "/var/lib/etcd/data",
							ExtraArgs: map[string]string{
								"listen-client-urls": "https://0.0.0.0:2379",
							},
							ImageMeta: kubeadmtypev1beta1.ImageMeta{
								ImageTag:        "v3.4.13",
								ImageRepository: "quay.io/giantswarm",
							},
						},
					},
				},
				InitConfiguration: &kubeadmtypev1beta1.InitConfiguration{
					NodeRegistration: kubeadmtypev1beta1.NodeRegistrationOptions{
						KubeletExtraArgs: map[string]string{
							"cloud-provider": "aws",
						},
						// cloud-init substitutes the EC2 hostname here.
						Name: "{{ ds.meta_data.local_hostname }}",
					},
				},
				JoinConfiguration: &kubeadmtypev1beta1.JoinConfiguration{
					NodeRegistration: kubeadmtypev1beta1.NodeRegistrationOptions{
						KubeletExtraArgs: map[string]string{
							"cloud-provider": "aws",
						},
						Name: "{{ ds.meta_data.local_hostname }}",
					},
				},
				// Systemd unit content is delivered via a secret so it
				// can carry cluster-specific data.
				Files: []kubeadmapiv1alpha3.File{
					{
						Path:  "/etc/systemd/system/etcd3-attach-deps.service",
						Owner: "root:root",
						ContentFrom: &kubeadmapiv1alpha3.FileSource{
							Secret: kubeadmapiv1alpha3.SecretFileSource{
								Name: unitSecretName(clusterID),
								Key:  unitSecretKey,
							},
						},
					},
				},
				// Attach etcd dependencies before kubeadm initializes.
				PreKubeadmCommands: []string{
					"systemctl enable etcd3-attach-deps.service",
					"systemctl start etcd3-attach-deps.service",
				},
			},
			Replicas: &replicas,
			Version:  k8sVersion,
		},
	}
	return cp
}
package rethinkdb // To test this rethinkdb integration, run rethinkdb on docker // docker run -d --name rethinkdb -p 28015:28015 -p 8080:8080 rethinkdb:latest // If on Mac, find the IP address of the docker host // $ boot2docker ip // 192.168.59.103 // For linux it's 127.0.0.1. // Now you can go to 192.168.59.103:8080 (or 127.0.0.1:8080) and create // table 'instructions'. Once created, create an index by going to // 'Data Explorer', and running this: // r.db('test').table('instructions').indexCreate('SubjectId') import ( r "github.com/dancannon/gorethink" "github.com/manishrjain/gocrud/store" "github.com/manishrjain/gocrud/x" ) var log = x.Log("rethinkdb") type RethinkDB struct { session *r.Session table string } func (rdb *RethinkDB) SetSession(session *r.Session) { rdb.session = session } func (rdb *RethinkDB) Init(args ...string) { if len(args) != 3 { log.WithField("args", args).Fatal("Invalid arguments") return } ipaddr := args[0] dbname := args[1] tablename := args[2] session, err := r.Connect(r.ConnectOpts{ // Address: "192.168.59.103:28015", Address: ipaddr, Database: dbname, }) if err != nil { x.LogErr(log, err).Fatal("While connecting") return } rdb.session = session rdb.table = tablename } func (rdb *RethinkDB) IsNew(subject string) bool { iter, err := r.Table(rdb.table).Get(subject).Run(rdb.session) if err != nil { x.LogErr(log, err).Error("While running query") return false } isnew := true if !iter.IsNil() { isnew = true } if err := iter.Close(); err != nil { x.LogErr(log, err).Error("While closing iterator") return false } return isnew } func (rdb *RethinkDB) Commit(its []*x.Instruction) error { res, err := r.Table(rdb.table).Insert(its).RunWrite(rdb.session) if err != nil { x.LogErr(log, err).Error("While executing batch") return nil } log.WithField("inserted", res.Inserted+res.Replaced).Debug("Stored instructions") return nil } func (rdb *RethinkDB) GetEntity(subject string) ( result []x.Instruction, rerr error, ) { iter, err := 
r.Table(rdb.table).GetAllByIndex("SubjectId", subject).Run(rdb.session) if err != nil { x.LogErr(log, err).Error("While running query") return result, err } err = iter.All(&result) if err != nil { x.LogErr(log, err).Error("While iterating") return result, err } if err := iter.Close(); err != nil { x.LogErr(log, err).Error("While closing iterator") return result, err } return result, nil } func (rdb *RethinkDB) Iterate(fromId string, num int, ch chan x.Entity) (found int, last x.Entity, err error) { log.Fatal("Not implemented") return } func init() { log.Info("Registering rethinkdb") store.Register("rethinkdb", new(RethinkDB)) }
// Unit tests for default configuration facade.
//
// @author TSS
package facade

import (
	"testing"

	corefacade "github.com/mashmb/1pass/1pass-core/core/facade"
	"github.com/mashmb/1pass/1pass-core/core/service"
	"github.com/mashmb/1pass/1pass-core/port/out"
	"github.com/mashmb/1pass/1pass-parse/repo/file"
)

// setupConfigFacade wires a file-backed config repo (from the shared
// test assets directory) through the default service into the facade
// under test.
func setupConfigFacade() corefacade.ConfigFacade {
	var configRepo out.ConfigRepo
	var configService service.ConfigService
	configRepo = file.NewFileConfigRepo("../../../../assets")
	configService = service.NewDfltConfigService(configRepo)
	return corefacade.NewDfltConfigFacade(configService)
}

func TestIsConfigAvailable(t *testing.T) {
	facade := setupConfigFacade()
	expected := true
	available := facade.IsConfigAvailable()
	if available != expected {
		t.Errorf("IsConfigAvailable() = %v; expected = %v", available, expected)
	}
}

func TestGetConfig(t *testing.T) {
	facade := setupConfigFacade()
	expected := "./assets/onepassword_data"
	config := facade.GetConfig()
	if config.Vault != expected {
		t.Errorf("GetConfig() = %v; expected = %v", config.Vault, expected)
	}
}

// TestSaveConfig round-trips a modified vault path and then restores the
// original value so the on-disk fixture is left unchanged for other tests.
func TestSaveConfig(t *testing.T) {
	facade := setupConfigFacade()
	expected := ""
	config := facade.GetConfig()
	config.Vault = expected
	facade.SaveConfig(config)
	config = facade.GetConfig()
	if config.Vault != expected {
		t.Errorf("SaveConfig() = %v; expected = %v", config.Vault, expected)
	}
	// Restore the original vault path.
	expected = "./assets/onepassword_data"
	config.Vault = expected
	facade.SaveConfig(config)
	config = facade.GetConfig()
	if config.Vault != expected {
		t.Errorf("SaveConfig() = %v; expected = %v", config.Vault, expected)
	}
}
package data

import (
	uuid "github.com/satori/go.uuid"
	"golang.org/x/net/websocket"
)

// Character is a connected player entity: its identity, position, the
// websocket connection it is attached to, and an outbound message queue.
type Character struct {
	ID   uuid.UUID       // unique character identifier
	Pos  Point           // current position (Point declared elsewhere in this package)
	Conn *websocket.Conn // live connection to the client
	Send chan []byte     // buffered outbound messages for this client
}
package main /* * @lc app=leetcode id=24 lang=golang * * [24] Swap Nodes in Pairs */ /** * Definition for singly-linked list. * type ListNode struct { * Val int * Next *ListNode * } */ func swapPairs(head *ListNode) *ListNode { dummy := new(ListNode) dummy.Next = head cur := dummy for cur.Next != nil && cur.Next.Next != nil { tmp := cur.Next.Next cur.Next.Next = tmp.Next tmp.Next = cur.Next cur.Next = tmp cur = tmp.Next } return dummy.Next }
// Tests for the mongo backup/restore service using testify mocks for the
// storage layer, the mongo dump/restore layer, and the status keeper.
package main

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)

// Happy path: dump succeeds and a successful result is saved.
func TestBackup_Ok(t *testing.T) {
	mockedStorageService := new(mockStorageServie)
	mockedWriter := new(mockWriteCloser)
	mockedWriter.On("Close").Return(nil)
	// The first argument is a generated date string; accept any value.
	mockedStorageService.On("Writer", mock.MatchedBy(func(date string) bool { return true }), "database1", "collection1").Return(mockedWriter, nil)
	mockedMongoService := new(mockMongoService)
	mockedMongoService.On("DumpCollectionTo", "database1", "collection1", mockedWriter).Return(nil)
	mockedStatusKeeper := new(mockStatusKeeper)
	mockedStatusKeeper.On("Save", mock.MatchedBy(func(result backupResult) bool {
		return result.Success && result.Collection.collection == "collection1" && result.Collection.database == "database1"
	})).Return(nil)
	backupService := newMongoBackupService(mockedMongoService, mockedStorageService, mockedStatusKeeper)
	err := backupService.Backup([]dbColl{{"database1", "collection1"}})
	assert.NoError(t, err, "Error wasn't expected during backup.")
}

// Storage writer creation fails: the error is returned unchanged.
func TestBackup_ErrorOnStorage(t *testing.T) {
	mockedStorageService := new(mockStorageServie)
	mockedWriter := new(mockWriteCloser)
	mockedWriter.On("Close").Return(nil)
	mockedStorageService.On("Writer", mock.MatchedBy(func(date string) bool { return true }), "database1", "collection1").Return(mockedWriter, fmt.Errorf("Couldn't create writer for storage"))
	backupService := newMongoBackupService(nil, mockedStorageService, nil)
	err := backupService.Backup([]dbColl{{"database1", "collection1"}})
	assert.Error(t, err, "Error was expected during backup.")
	assert.Equal(t, "Couldn't create writer for storage", err.Error())
}

// Dump fails: a failed result is saved and the error is wrapped with
// the database/collection that failed.
func TestBackup_ErrorOnDump(t *testing.T) {
	mockedStorageService := new(mockStorageServie)
	mockedWriter := new(mockWriteCloser)
	mockedWriter.On("Close").Return(nil)
	mockedStorageService.On("Writer", mock.MatchedBy(func(date string) bool { return true }), "database1", "collection1").Return(mockedWriter, nil)
	mockedMongoService := new(mockMongoService)
	mockedMongoService.On("DumpCollectionTo", "database1", "collection1", mockedWriter).Return(fmt.Errorf("Couldn't dump db"))
	mockedStatusKeeper := new(mockStatusKeeper)
	mockedStatusKeeper.On("Save", mock.MatchedBy(func(result backupResult) bool {
		return !result.Success && result.Collection.collection == "collection1" && result.Collection.database == "database1"
	})).Return(nil)
	backupService := newMongoBackupService(mockedMongoService, mockedStorageService, mockedStatusKeeper)
	err := backupService.Backup([]dbColl{{"database1", "collection1"}})
	assert.Error(t, err, "Error was expected during backup.")
	assert.Equal(t, "dumping failed for database1/collection1: Couldn't dump db", err.Error())
}

// Dump succeeds but persisting the status fails: that error surfaces.
func TestBackup_ErrorOnSavingStatus(t *testing.T) {
	mockedStorageService := new(mockStorageServie)
	mockedWriter := new(mockWriteCloser)
	mockedWriter.On("Close").Return(nil)
	mockedStorageService.On("Writer", mock.MatchedBy(func(date string) bool { return true }), "database1", "collection1").Return(mockedWriter, nil)
	mockedMongoService := new(mockMongoService)
	mockedMongoService.On("DumpCollectionTo", "database1", "collection1", mockedWriter).Return(nil)
	mockedStatusKeeper := new(mockStatusKeeper)
	mockedStatusKeeper.On("Save", mock.MatchedBy(func(result backupResult) bool {
		return result.Success && result.Collection.collection == "collection1" && result.Collection.database == "database1"
	})).Return(fmt.Errorf("Coulnd't save status of backup"))
	backupService := newMongoBackupService(mockedMongoService, mockedStorageService, mockedStatusKeeper)
	err := backupService.Backup([]dbColl{{"database1", "collection1"}})
	assert.Error(t, err, "Error was expected during backup.")
	assert.Equal(t, "Coulnd't save status of backup", err.Error())
}

// Restore happy path: reader is obtained for the dated backup and the
// collection is restored from it.
func TestRestore_OK(t *testing.T) {
	mockedStorageService := new(mockStorageServie)
	mockedReadCloser := new(mockReadCloser)
	mockedReadCloser.On("Close").Return(nil)
	mockedStorageService.On("Reader", "2017-09-04T12-40-36", "database1", "collection1").Return(mockedReadCloser, nil)
	mockedMongoService := new(mockMongoService)
	mockedMongoService.On("RestoreCollectionFrom", "database1", "collection1", mockedReadCloser).Return(nil)
	backupService := newMongoBackupService(mockedMongoService, mockedStorageService, nil)
	err := backupService.Restore("2017-09-04T12-40-36", []dbColl{{"database1", "collection1"}})
	assert.NoError(t, err, "Error wasn't expected during backup.")
}

func TestRestore_ErrorOnReadingFromStorage(t *testing.T) {
	mockedStorageService := new(mockStorageServie)
	mockedReadCloser := new(mockReadCloser)
	mockedStorageService.On("Reader", "2017-09-04T12-40-36", "database1", "collection1").Return(mockedReadCloser, fmt.Errorf("Error getting reader to access S3. Test"))
	mockedMongoService := new(mockMongoService)
	mockedMongoService.On("RestoreCollectionFrom", "database1", "collection1", mockedReadCloser).Return(nil)
	backupService := newMongoBackupService(mockedMongoService, mockedStorageService, nil)
	err := backupService.Restore("2017-09-04T12-40-36", []dbColl{{"database1", "collection1"}})
	assert.Error(t, err, "Error getting reader to access S3. Test")
}

func TestRestore_ErrorOnRestore(t *testing.T) {
	mockedStorageService := new(mockStorageServie)
	mockedReadCloser := new(mockReadCloser)
	mockedReadCloser.On("Close").Return(nil)
	mockedStorageService.On("Reader", "2017-09-04T12-40-36", "database1", "collection1").Return(mockedReadCloser, nil)
	mockedMongoService := new(mockMongoService)
	mockedMongoService.On("RestoreCollectionFrom", "database1", "collection1", mockedReadCloser).Return(fmt.Errorf("Error while restoring. Test"))
	backupService := newMongoBackupService(mockedMongoService, mockedStorageService, nil)
	err := backupService.Restore("2017-09-04T12-40-36", []dbColl{{"database1", "collection1"}})
	assert.Error(t, err, "Error while restoring. Test")
}
package eod

import (
	"encoding/json"
	"fmt"
	"strings"
	"time"

	"github.com/Nv7-Github/Nv7Haven/eod/base"
	"github.com/Nv7-Github/Nv7Haven/eod/basecmds"
	"github.com/Nv7-Github/Nv7Haven/eod/categories"
	"github.com/Nv7-Github/Nv7Haven/eod/elements"
	"github.com/Nv7-Github/Nv7Haven/eod/logs"
	"github.com/Nv7-Github/Nv7Haven/eod/polls"
	"github.com/Nv7-Github/Nv7Haven/eod/treecmds"
	"github.com/Nv7-Github/Nv7Haven/eod/types"
	"github.com/schollz/progressbar/v3"
)

// init loads the complete bot state from MySQL into the in-memory b.dat map
// (per-server config, elements, combos, inventories, categories), wires up
// the command subsystems, replays any pending polls, starts the handlers,
// and launches the periodic stats-saving goroutine. Most DB errors panic
// because the bot cannot operate on partially loaded state.
// NOTE(review): the commented-out lock.Lock()/RLock() calls suggest this runs
// before any concurrent access to b.dat begins — confirm before adding callers.
func (b *EoD) init() {
	// Per-server configuration: one row per (guild, kind, value) triple.
	res, err := b.db.Query("SELECT * FROM eod_serverdata WHERE 1")
	if err != nil {
		panic(err)
	}
	defer res.Close()
	var guild string
	var kind types.ServerDataType
	var value1 string
	var intval int
	for res.Next() {
		err = res.Scan(&guild, &kind, &value1, &intval)
		if err != nil {
			panic(err)
		}
		// Each kind updates a different field of the guild's ServerData;
		// string-valued kinds use value1, numeric kinds use intval.
		switch kind {
		case types.NewsChannel:
			//lock.RLock()
			dat, exists := b.dat[guild]
			//lock.RUnlock()
			if !exists {
				dat = types.NewServerData()
			}
			dat.NewsChannel = value1
			//lock.Lock()
			b.dat[guild] = dat
			//lock.Unlock()
		case types.PlayChannel:
			//lock.RLock()
			dat, exists := b.dat[guild]
			//lock.RUnlock()
			if !exists {
				dat = types.NewServerData()
			}
			if dat.PlayChannels == nil {
				dat.PlayChannels = make(map[string]types.Empty)
			}
			dat.PlayChannels[value1] = types.Empty{}
			//lock.Lock()
			b.dat[guild] = dat
			//lock.Unlock()
		case types.VotingChannel:
			//lock.RLock()
			dat, exists := b.dat[guild]
			//lock.RUnlock()
			if !exists {
				dat = types.NewServerData()
			}
			dat.VotingChannel = value1
			//lock.Lock()
			b.dat[guild] = dat
			//lock.Unlock()
		case types.VoteCount:
			//lock.RLock()
			dat, exists := b.dat[guild]
			//lock.RUnlock()
			if !exists {
				dat = types.NewServerData()
			}
			dat.VoteCount = intval
			//lock.Lock()
			b.dat[guild] = dat
			//lock.Unlock()
		case types.PollCount:
			//lock.RLock()
			dat, exists := b.dat[guild]
			//lock.RUnlock()
			if !exists {
				dat = types.NewServerData()
			}
			dat.PollCount = intval
			//lock.Lock()
			b.dat[guild] = dat
			//lock.Unlock()
		case types.ModRole:
			//lock.RLock()
			dat, exists := b.dat[guild]
			//lock.RUnlock()
			if !exists {
				dat = types.NewServerData()
			}
			dat.ModRole = value1
			//lock.Lock()
			b.dat[guild] = dat
			//lock.Unlock()
		case types.UserColor:
			//lock.RLock()
			dat, exists := b.dat[guild]
			//lock.RUnlock()
			if !exists {
				dat = types.NewServerData()
			}
			if dat.UserColors == nil {
				dat.UserColors = make(map[string]int)
			}
			dat.UserColors[value1] = intval
			//lock.Lock()
			b.dat[guild] = dat
			//lock.Unlock()
		}
	}

	// Elements: load every element ordered by creation time; the IF() in the
	// ORDER BY rewrites one specific createdon timestamp so element IDs
	// (assigned sequentially below) stay stable.
	//elems, err := b.db.Query("SELECT * FROM eod_elements ORDER BY createdon ASC") // Do after nov 21
	var cnt int
	err = b.db.QueryRow("SELECT COUNT(1) FROM eod_elements").Scan(&cnt)
	if err != nil {
		panic(err)
	}
	bar := progressbar.New(cnt)
	elems, err := b.db.Query("SELECT name, image, color, guild, comment, creator, createdon, parents, complexity, difficulty, usedin, treesize FROM `eod_elements` ORDER BY (IF(createdon=1637536881, 1605988759, createdon)) ")
	if err != nil {
		panic(err)
	}
	defer elems.Close()
	elem := types.Element{}
	var createdon int64
	var parentDat string
	for elems.Next() {
		err = elems.Scan(&elem.Name, &elem.Image, &elem.Color, &elem.Guild, &elem.Comment, &elem.Creator, &createdon, &parentDat, &elem.Complexity, &elem.Difficulty, &elem.UsedIn, &elem.TreeSize)
		if err != nil {
			// NOTE(review): a scan failure here silently aborts loading the
			// remaining elements; everywhere else this function panics.
			return
		}
		elem.CreatedOn = time.Unix(createdon, 0)
		// Parents are stored as a "+"-joined list; empty string means none.
		if len(parentDat) == 0 {
			elem.Parents = make([]string, 0)
		} else {
			elem.Parents = strings.Split(parentDat, "+")
		}
		//lock.RLock()
		dat, exists := b.dat[elem.Guild]
		//lock.RUnlock()
		if !exists {
			dat = types.NewServerData()
		}
		// IDs are 1-based insertion order per guild.
		elem.ID = len(dat.Elements) + 1
		dat.Elements[strings.ToLower(elem.Name)] = elem
		//lock.Lock()
		b.dat[elem.Guild] = dat
		//lock.Unlock()
		bar.Add(1)
	}
	bar.Finish()

	// Combos: (guild, elems, result) rows mapping a combination key to the
	// element it produces.
	err = b.db.QueryRow("SELECT COUNT(1) FROM eod_combos").Scan(&cnt)
	if err != nil {
		panic(err)
	}
	bar = progressbar.New(cnt)
	combs, err := b.db.Query("SELECT * FROM `eod_combos`")
	if err != nil {
		panic(err)
	}
	defer combs.Close()
	var elemsVal string
	var elem3 string
	for combs.Next() {
		err = combs.Scan(&guild, &elemsVal, &elem3)
		if err != nil {
			// NOTE(review): same silent-abort as the element loop above.
			return
		}
		//lock.RLock()
		dat, exists := b.dat[guild]
		//lock.RUnlock()
		if !exists {
			dat = types.NewServerData()
		}
		dat.Combos[elemsVal] = elem3
		//lock.Lock()
		b.dat[guild] = dat
		//lock.Unlock()
		bar.Add(1)
	}
	bar.Finish()

	// Inventories: the element set is stored as a JSON-encoded map.
	// NOTE(review): the progress bar total is taken from eod_elements, not
	// eod_inv — presumably a copy/paste slip; affects only the bar display.
	err = b.db.QueryRow("SELECT COUNT(1) FROM eod_elements").Scan(&cnt)
	if err != nil {
		panic(err)
	}
	bar = progressbar.New(cnt)
	invs, err := b.db.Query("SELECT guild, user, inv, made FROM eod_inv WHERE 1")
	if err != nil {
		panic(err)
	}
	defer invs.Close()
	var invDat string
	var user string
	var inv map[string]types.Empty
	var madecnt int
	for invs.Next() {
		inv = make(map[string]types.Empty)
		err = invs.Scan(&guild, &user, &invDat, &madecnt)
		if err != nil {
			panic(err)
		}
		err = json.Unmarshal([]byte(invDat), &inv)
		if err != nil {
			panic(err)
		}
		//lock.RLock()
		dat, exists := b.dat[guild]
		//lock.RUnlock()
		if !exists {
			dat = types.NewServerData()
		}
		dat.Inventories[user] = types.Inventory{Elements: inv, MadeCnt: madecnt, User: user}
		//lock.Lock()
		b.dat[guild] = dat
		//lock.Unlock()
		bar.Add(1)
	}
	bar.Finish()

	// Categories: element membership is stored as a JSON-encoded set.
	err = b.db.QueryRow("SELECT COUNT(1) FROM eod_categories").Scan(&cnt)
	if err != nil {
		panic(err)
	}
	bar = progressbar.New(cnt)
	cats, err := b.db.Query("SELECT * FROM eod_categories")
	if err != nil {
		panic(err)
	}
	defer cats.Close()
	var elemDat string
	cat := types.Category{}
	for cats.Next() {
		err = cats.Scan(&guild, &cat.Name, &elemDat, &cat.Image, &cat.Color)
		if err != nil {
			// NOTE(review): same silent-abort as the element loop above.
			return
		}
		cat.Guild = guild
		//lock.RLock()
		dat, exists := b.dat[guild]
		//lock.RUnlock()
		if !exists {
			dat = types.NewServerData()
		}
		cat.Elements = make(map[string]types.Empty)
		err := json.Unmarshal([]byte(elemDat), &cat.Elements)
		if err != nil {
			panic(err)
		}
		dat.Categories[strings.ToLower(cat.Name)] = cat
		//lock.Lock()
		b.dat[guild] = dat
		//lock.Unlock()
		bar.Add(1)
	}
	bar.Finish()

	err = b.db.QueryRow("SELECT COUNT(1) FROM eod_polls").Scan(&cnt)
	if err != nil {
		panic(err)
	}
	bar = progressbar.New(cnt)

	// Initialize subsystems; b.base must exist before the others since they
	// take it as a dependency.
	logs.InitEoDLogs()
	b.base = base.NewBase(b.db, b.dat, b.dg, lock)
	b.treecmds = treecmds.NewTreeCmds(b.dat, b.dg, b.base, lock)
	b.polls = polls.NewPolls(b.dat, b.dg, b.db, b.base, lock)
	b.basecmds = basecmds.NewBaseCmds(b.dat, b.base, b.dg, b.db, lock)
	b.categories = categories.NewCategories(b.dat, b.base, b.dg, b.polls, lock)
	b.elements = elements.NewElements(b.dat, lock, b.polls, b.db, b.base, b.dg)

	// Replay pending polls: each stored poll is deleted from the DB and its
	// Discord message removed, then recreated fresh through the polls
	// subsystem. A failed recreation is only logged, not fatal.
	polls, err := b.db.Query("SELECT * FROM eod_polls")
	if err != nil {
		panic(err)
	}
	defer polls.Close()
	var po types.Poll
	for polls.Next() {
		var jsondat string
		po.Data = nil
		err = polls.Scan(&po.Guild, &po.Channel, &po.Message, &po.Kind, &po.Value1, &po.Value2, &po.Value3, &po.Value4, &jsondat)
		if err != nil {
			panic(err)
		}
		err = json.Unmarshal([]byte(jsondat), &po.Data)
		if err != nil {
			panic(err)
		}
		_, err = b.db.Exec("DELETE FROM eod_polls WHERE guild=? AND channel=? AND message=?", po.Guild, po.Channel, po.Message)
		if err != nil {
			panic(err)
		}
		b.dg.ChannelMessageDelete(po.Channel, po.Message)
		err = b.polls.CreatePoll(po)
		if err != nil {
			fmt.Println(err)
		}
		bar.Add(1)
	}
	bar.Finish()

	b.initHandlers()
	b.start()

	// Start stats saving: save once immediately, then every 30 minutes.
	go func() {
		b.basecmds.SaveStats()
		for {
			time.Sleep(time.Minute * 30)
			b.basecmds.SaveStats()
		}
	}()

	// Recalc autocats?
	if types.RecalcAutocats {
		for id, gld := range b.dat {
			for elem := range gld.Elements {
				b.polls.Autocategorize(elem, id)
			}
		}
	}
}
package src

import (
	"encoding/json"
	"net/http"

	"github.com/getsentry/sentry-go"
	log "github.com/sirupsen/logrus"
)

// GasStationPrice is the normalized representation of one gas station and
// its current fuel prices, as exposed to consumers of this package.
type GasStationPrice struct {
	Id      string     `json:"id"`
	Name    string     `json:"name"`
	Address string     `json:"address"`
	X_wgs   float64    `json:"x_wgs"`
	Y_wgs   float64    `json:"y_wgs"`
	Prices  []GasPrice `json:"prices"`
}

// GasPrice is one fuel type / price pair.
type GasPrice struct {
	FuelType string  `json:"type"`
	Price    float64 `json:"price"`
}

// JsonGasStationPrice mirrors the upstream JSON structure returned by
// api.bencinmonitor.si; coordinates arrive as a [x, y] array.
type JsonGasStationPrice struct {
	Key      string `json:"key"`
	Name     string `json:"name"`
	Address  string `json:"address"`
	Location struct {
		Coordinates []float64 `json:"coordinates"`
	} `json:"loc"`
	Prices []GasPrice `json:"prices"`
}

// ParseFuelPrices fetches the current station list from the upstream API,
// converts it into []GasStationPrice, and sends the result on pricesChannel.
// It returns a non-nil error if the HTTP request or JSON decoding fails;
// failures are also logged and reported to Sentry.
func ParseFuelPrices(pricesChannel chan<- []GasStationPrice) error {
	log.Debug("Retrieving gas prices data...")

	// NOTE: uses the default http client, which has no timeout.
	url := "https://api.bencinmonitor.si/stations?forMobile=true"
	response, err := http.Get(url)
	if err != nil {
		// response is nil whenever err != nil, but keep the defensive check
		// so a status can be logged if a transport ever returns both.
		if response != nil {
			log.WithFields(log.Fields{"status": response.Status, "err": err}).Error("Failed to retrieve data from server.")
		} else {
			log.WithFields(log.Fields{"err": err}).Error("Failed to retrieve data from server.")
		}
		sentry.CaptureException(err)
		return err
	}
	// Fix: the body was never closed, leaking the connection.
	defer response.Body.Close()

	var data struct {
		Stations []JsonGasStationPrice `json:"stations"`
	}
	// Fix: the decode error was silently discarded; a malformed payload
	// previously produced an empty (but "successful") result.
	if err := json.NewDecoder(response.Body).Decode(&data); err != nil {
		log.WithFields(log.Fields{"status": response.Status, "err": err}).Error("Failed to decode data from server.")
		sentry.CaptureException(err)
		return err
	}

	items := data.Stations
	prices := make([]GasStationPrice, 0, len(items))
	for _, item := range items {
		// Fix: guard against malformed entries; indexing Coordinates[0]/[1]
		// unconditionally could panic on a short or missing array.
		if len(item.Location.Coordinates) < 2 {
			log.WithFields(log.Fields{"key": item.Key}).Warn("Skipping station with malformed coordinates.")
			continue
		}
		prices = append(prices, GasStationPrice{
			item.Key,
			item.Name,
			item.Address,
			item.Location.Coordinates[0],
			item.Location.Coordinates[1],
			item.Prices,
		})
	}
	log.WithFields(log.Fields{"status": response.Status, "num": len(items)}).Debug("Gas price retrieval ok.")
	pricesChannel <- prices
	return nil
}
package pilot

import (
	"bytes"
	"encoding/json"
	"errors"
	"io"
	"io/ioutil"
	"log"
	"net/http"

	uuid "github.com/satori/go.uuid"
)

var (
	// ErrorNoNodes is returned when a launch is requested before any node
	// has registered with the pilot.
	ErrorNoNodes = errors.New("No Nodes registered")
)

// Pilot tracks the registered worker nodes and schedules container
// launches across them.
type Pilot struct {
	UUID  string          `json:"uuid"`
	Nodes []*NodeRegistry `json:"nodes"`
}

// NewPilot returns a Pilot with a fresh random UUID and no nodes.
func NewPilot() *Pilot {
	return &Pilot{
		UUID:  uuid.Must(uuid.NewV4(), nil).String(),
		Nodes: []*NodeRegistry{},
	}
}

// RegisterNode adds a node to the pool used by LaunchContainer.
func (p *Pilot) RegisterNode(nr *NodeRegistry) {
	p.Nodes = append(p.Nodes, nr)
}

// Serialize returns the pilot state as JSON.
func (p *Pilot) Serialize() ([]byte, error) {
	return json.Marshal(p)
}

// LaunchContainer distributes c.Count container instances round-robin over
// the registered nodes and POSTs a launch request to each node.
// NOTE: c.Count is overwritten with the per-node share as a side effect.
func (p *Pilot) LaunchContainer(c *Container) error {
	if len(p.Nodes) < 1 {
		return ErrorNoNodes
	}

	// Split the requested count as evenly as possible across nodes.
	counts := make([]int, len(p.Nodes))
	for i := 0; i < c.Count; i++ {
		counts[i%len(p.Nodes)]++
	}

	for i, n := range p.Nodes {
		c.Count = counts[i]
		b, err := json.Marshal(c)
		if err != nil {
			return err
		}
		path := n.Path + "/" + n.APIVersion + "/launch"
		log.Println(path)
		// Fix: the response was discarded with "_", leaking the body and
		// preventing connection reuse. Drain and close it explicitly.
		resp, err := http.Post(path, "application/json", bytes.NewBuffer(b))
		if err != nil {
			return err
		}
		io.Copy(ioutil.Discard, resp.Body)
		resp.Body.Close()
	}
	return nil
}
package main

import (
	"flag"
	"fmt"
	"os"
)

// animal is anything that can cry (make its characteristic sound).
type animal interface {
	cry()
}

type dog struct {
}

type cat struct {
}

func (d dog) cry() {
	fmt.Println("わん!")
}

func (c cat) cry() {
	fmt.Println("にゃー")
}

// main parses the -animal flag and prints a per-animal message, falling
// back to usage output for unknown values.
func main() {
	var name string
	flag.StringVar(&name, "animal", "", "動物名")

	// Fix: flag.Usage must be installed BEFORE flag.Parse(); the original
	// assigned it afterwards, so -h and parse errors showed the default
	// usage instead of this custom one.
	flag.Usage = func() {
		p := func(format string, args ...interface{}) {
			fmt.Fprintf(os.Stderr, format, args...)
		}
		p("Usage:\n")
		p("Available Options:\n")
		flag.PrintDefaults()
	}
	flag.Parse()

	switch name {
	case "dog":
		// dog case
		fmt.Println("犬の場合の処理を記載します")
	case "cat":
		// cat case
		fmt.Println("ネコの場合の処理を記載します")
	default:
		fmt.Println("dogかcatで選択してください")
		flag.Usage()
	}

	// Renamed locals: the original "dog := dog{}" shadowed the type names.
	d := dog{}
	c := cat{}
	d.cry()
	c.cry()
}
package api

import (
	"testing"

	. "github.com/smartystreets/goconvey/convey"
)

// TestVolumesFromSize exercises VolumesFromSize over a table of
// (root volume size, total target size, per-volume cap) inputs and checks
// the space-separated list of additional volumes it returns.
func TestVolumesFromSize(t *testing.T) {
	cases := []struct {
		name         string
		rootSize     uint64
		targetSize   uint64
		perVolumeMax uint64
		want         string
	}{
		{"200G 200G 200G", 200 * Giga, 600 * Giga, 200 * Giga, "200G 200G"},
		{"200G 200G 100G", 200 * Giga, 500 * Giga, 200 * Giga, "200G 100G"},
		{"25G", 25 * Giga, 25 * Giga, 200 * Giga, ""},
		{"100G 150G", 100 * Giga, 250 * Giga, 200 * Giga, "150G"},
		{"200G 50G 50G 50G 50G", 200 * Giga, 400 * Giga, 50 * Giga, "50G 50G 50G 50G"},
	}

	for _, tc := range cases {
		Convey("Testing VolumesFromSize with expected "+tc.name, t, func(c C) {
			got := VolumesFromSize(tc.rootSize, tc.targetSize, tc.perVolumeMax)
			c.So(got, ShouldEqual, tc.want)
		})
	}
}
package sync

import (
	"os"

	"github.com/devspace-cloud/devspace/sync/remote"
	"github.com/devspace-cloud/devspace/sync/util"
)

// shouldRemoveRemote reports whether the file at relativePath should be
// deleted on the remote side: it must still be tracked in the file index,
// must not be a symlink, and must not match either ignore list.
// s.fileIndex needs to be locked before this function is called
func shouldRemoveRemote(relativePath string, s *Sync) bool {
	// File / Folder was already deleted from map so event was already processed or should not be processed
	if s.fileIndex.fileMap[relativePath] == nil {
		return false
	}

	// Exclude symbolic links
	if s.fileIndex.fileMap[relativePath].IsSymbolicLink {
		return false
	}

	// Exclude changes on the exclude list
	if s.ignoreMatcher != nil {
		if util.MatchesPath(s.ignoreMatcher, relativePath, s.fileIndex.fileMap[relativePath].IsDirectory) {
			return false
		}
	}

	// Exclude changes on the upload exclude list
	if s.uploadIgnoreMatcher != nil {
		if util.MatchesPath(s.uploadIgnoreMatcher, relativePath, s.fileIndex.fileMap[relativePath].IsDirectory) {
			return false
		}
	}

	return true
}

// shouldUpload reports whether the local change described by
// fileInformation needs to be sent upstream. Symlinks, ignored paths,
// already-known directories, and files unchanged in mtime+size are skipped.
// s.fileIndex needs to be locked before this function is called
func shouldUpload(s *Sync, fileInformation *FileInformation) bool {
	// Exclude if stat is nil
	if fileInformation == nil {
		return false
	}

	// Exclude changes on the upload exclude list
	// is not necessary here anymore because it was already
	// checked

	// stat.Mode()&os.ModeSymlink
	// Exclude local symlinks
	if fileInformation.IsSymbolicLink {
		return false
	}

	// Exclude changes on the exclude list
	if s.ignoreMatcher != nil {
		if util.MatchesPath(s.ignoreMatcher, fileInformation.Name, fileInformation.IsDirectory) {
			return false
		}
	}

	// Check if we already tracked the path
	if s.fileIndex.fileMap[fileInformation.Name] != nil {
		// Folder already exists, don't send change
		if fileInformation.IsDirectory {
			return false
		}

		// Exclude symlinks
		if s.fileIndex.fileMap[fileInformation.Name].IsSymbolicLink {
			return false
		}

		// File did not change or was changed by downstream
		if fileInformation.Mtime == s.fileIndex.fileMap[fileInformation.Name].Mtime && fileInformation.Size == s.fileIndex.fileMap[fileInformation.Name].Size {
			return false
		}
	}

	return true
}

// shouldDownload reports whether a remote change must be pulled locally:
// always for unknown paths, and for known files only when the remote copy
// is strictly newer, or same-mtime but different size.
// s.fileIndex needs to be locked before this function is called
func shouldDownload(change *remote.Change, s *Sync) bool {
	// Does file already exist in the filemap?
	if s.fileIndex.fileMap[change.Path] != nil {
		// Don't override folders that exist in the filemap
		if change.IsDir == false {
			// Redownload file if mtime is newer than saved one
			if change.MtimeUnix > s.fileIndex.fileMap[change.Path].Mtime {
				return true
			}

			// Redownload file if size changed && file is not older than the one in the fileMap
			// the mTime check is necessary, because otherwise we would override older local files that
			// are not overridden initially
			if change.MtimeUnix == s.fileIndex.fileMap[change.Path].Mtime && change.Size != s.fileIndex.fileMap[change.Path].Size {
				return true
			}
		}

		return false
	}

	return true
}

// s.fileIndex needs to be locked before this function is called
// A file is only deleted if the following conditions are met:
// - The file name is present in the d.config.fileMap map
// - The file did not change in terms of size and mtime in the d.config.fileMap since we started the collecting changes process
// - The file is present on the filesystem and did not change in terms of size and mtime on the filesystem
func shouldRemoveLocal(absFilepath string, fileInformation *FileInformation, s *Sync, force bool) bool {
	if fileInformation == nil {
		s.log.Infof("Skip %s because change is nil", absFilepath)
		return false
	}

	// We don't need to check s.ignoreMatcher, because if a path is ignored it will never be added to the fileMap, because shouldDownload
	// and shouldUpload are always false, and hence it never appears in the fileMap and is not copied to the remove fileMap clone
	// in the beginning of the downstream mainLoop

	// Only delete if mtime and size did not change
	stat, err := os.Stat(absFilepath)
	if err != nil {
		if os.IsNotExist(err) == false {
			s.log.Infof("Skip %s because stat returned %v", absFilepath, err)
		}

		return false
	}

	// Check if deletion is forced
	if force {
		return true
	}

	// We don't delete the file if we haven't tracked it
	if stat != nil && s.fileIndex.fileMap[fileInformation.Name] != nil {
		// Refuse to act when the dir/file distinction disagrees between
		// the filesystem, the file index, and the collected change.
		if stat.IsDir() != s.fileIndex.fileMap[fileInformation.Name].IsDirectory || stat.IsDir() != fileInformation.IsDirectory {
			s.log.Infof("Skip %s because stat returned unequal isdir with fileMap", absFilepath)
			return false
		}

		if fileInformation.IsDirectory == false {
			// We don't delete the file if it has changed in the map since we collected changes
			if fileInformation.Mtime == s.fileIndex.fileMap[fileInformation.Name].Mtime && fileInformation.Size == s.fileIndex.fileMap[fileInformation.Name].Size {
				// We don't delete the file if it has changed on the filesystem meanwhile
				if stat.ModTime().Unix() <= fileInformation.Mtime {
					return true
				}

				s.log.Infof("Skip %s because stat.ModTime() %d is greater than fileInformation.Mtime %d", absFilepath, stat.ModTime().Unix(), fileInformation.Mtime)
			} else {
				s.log.Infof("Skip %s because Mtime (%d and %d) or Size (%d and %d) is unequal between fileInformation and fileMap", absFilepath, fileInformation.Mtime, s.fileIndex.fileMap[fileInformation.Name].Mtime, fileInformation.Size, s.fileIndex.fileMap[fileInformation.Name].Size)
			}
		} else {
			return true
		}
	}

	return false
}
package main

import "github.com/helm/helm/cli"

// main is a thin entry point: it builds the helm CLI root command and
// runs it; RunAndExitOnError presumably exits the process on failure,
// so no error handling is needed here.
func main() {
	cli.Cli().RunAndExitOnError()
}
package main

import "fmt"

// Demonstrates the classic loop-variable capture pitfall with goroutines:
// the closures below deliberately capture the shared loop variable v.
// NOTE(review): since Go 1.22 the loop variable is per-iteration, so on a
// modern toolchain this program prints a, b, c (in some order) and the
// commentary below describes pre-1.22 behavior only — confirm the module's
// go directive before relying on the "c, c, c" output.
func main() {
	done := make(chan bool)

	values := []string{"a", "b", "c"}
	for _, v := range values {
		go func() {
			fmt.Println(v)
			done <- true
		}()
	}

	// wait for all goroutines to complete before exiting
	/*
		for _ = range values {
			<-done
		}
	*/
	for range values {
		<-done
	}
}

/*
Some confusion may arise when using closures with concurrency.

One might mistakenly expect to see a, b, c as the output. What you'll probably see instead is c, c, c.

This is because each iteration of the loop uses the same instance of the variable v, so each closure shares that single variable.

When the closure runs, it prints the value of v at the time fmt.Println is executed, but v may have been modified since the goroutine was launched.

To help detect this and other problems before they happen, run go vet.

SOURCE: https://golang.org/doc/faq#closures_and_goroutines
*/

// go vet
// # github.com/AJONCODE/Golang-Fundamentals/22_go-routines/09_channels/08_closures/01_no-closure-binding
// ./main.go:11:16: loop variable v captured by func literal

// go run -race main.go
// ==================
// WARNING: DATA RACE
// Read at 0x00c00011a1e0 by goroutine 8:
//   main.main.func1()
//       /home/ajoncode/goworkspace/src/github.com/AJONCODE/Golang-Fundamentals/22_go-routines/09_channels/08_closures/01_no-closure-binding/main.go:11 +0x3c

// Previous write at 0x00c00011a1e0 by main goroutine:
//   main.main()
//       /home/ajoncode/goworkspace/src/github.com/AJONCODE/Golang-Fundamentals/22_go-routines/09_channels/08_closures/01_no-closure-binding/main.go:9 +0x119

// Goroutine 8 (running) created at:
//   main.main()
//       /home/ajoncode/goworkspace/src/github.com/AJONCODE/Golang-Fundamentals/22_go-routines/09_channels/08_closures/01_no-closure-binding/main.go:10 +0x163
// ==================
// ==================
// WARNING: DATA RACE
// Read at 0x00c00011a1e0 by goroutine 7:
//   main.main.func1()
//       /home/ajoncode/goworkspace/src/github.com/AJONCODE/Golang-Fundamentals/22_go-routines/09_channels/08_closures/01_no-closure-binding/main.go:11 +0x3c

// Previous write at 0x00c00011a1e0 by main goroutine:
//   main.main()
//       /home/ajoncode/goworkspace/src/github.com/AJONCODE/Golang-Fundamentals/22_go-routines/09_channels/08_closures/01_no-closure-binding/main.go:9 +0x119

// Goroutine 7 (running) created at:
//   main.main()
//       /home/ajoncode/goworkspace/src/github.com/AJONCODE/Golang-Fundamentals/22_go-routines/09_channels/08_closures/01_no-closure-binding/main.go:10 +0x163
// ==================
// c
// c
// c
// Found 2 data race(s)
// exit status 66
package reg

import "testing"

// TestAsMethods checks that each AsN/AsX/AsY/AsZ conversion method maps a
// register onto the expected sibling register of the requested width.
func TestAsMethods(t *testing.T) {
	cases := [][2]Register{
		{RAX.As8(), AL},
		{ECX.As8L(), CL},
		{EBX.As8H(), BH},
		{R9B.As16(), R9W},
		{DH.As32(), EDX},
		{R14L.As64(), R14},
		{X2.AsX(), X2},
		{X4.AsY(), Y4},
		{X9.AsZ(), Z9},
		{Y2.AsX(), X2},
		{Y4.AsY(), Y4},
		{Y9.AsZ(), Z9},
		{Z2.AsX(), X2},
		{Z4.AsY(), Y4},
		{Z9.AsZ(), Z9},
	}
	for i, c := range cases {
		// Fix: t.FailNow() gave no indication of WHICH case failed;
		// report the index and values and keep checking the rest.
		if c[0] != c[1] {
			t.Errorf("case %d: got %v, want %v", i, c[0], c[1])
		}
	}
}
package controllers

import (
	"net/url"
	"strconv"

	"github.com/cloudreve/Cloudreve/v3/pkg/serializer"
	"github.com/cloudreve/Cloudreve/v3/pkg/util"
	"github.com/cloudreve/Cloudreve/v3/service/callback"
	"github.com/gin-gonic/gin"
)

// RemoteCallback handles the upload callback from a remote storage node.
func RemoteCallback(c *gin.Context) {
	var callbackBody callback.RemoteUploadCallbackService
	if err := c.ShouldBindJSON(&callbackBody); err == nil {
		res := callback.ProcessCallback(callbackBody, c)
		c.JSON(200, res)
	} else {
		c.JSON(200, ErrorResponse(err))
	}
}

// QiniuCallback handles the upload callback from Qiniu. Unlike the other
// handlers, failures are reported with HTTP 401 in the provider-specific
// error format so Qiniu surfaces the message to the uploader.
func QiniuCallback(c *gin.Context) {
	var callbackBody callback.UploadCallbackService
	if err := c.ShouldBindJSON(&callbackBody); err == nil {
		res := callback.ProcessCallback(callbackBody, c)
		if res.Code != 0 {
			c.JSON(401, serializer.GeneralUploadCallbackFailed{Error: res.Msg})
		} else {
			c.JSON(200, res)
		}
	} else {
		c.JSON(401, ErrorResponse(err))
	}
}

// OSSCallback handles the upload callback from Aliyun OSS.
func OSSCallback(c *gin.Context) {
	var callbackBody callback.UploadCallbackService
	if err := c.ShouldBindJSON(&callbackBody); err == nil {
		// OSS sends "," when no picture info is available; normalize to "".
		if callbackBody.PicInfo == "," {
			callbackBody.PicInfo = ""
		}
		res := callback.ProcessCallback(callbackBody, c)
		c.JSON(200, res)
	} else {
		c.JSON(200, ErrorResponse(err))
	}
}

// UpyunCallback handles the upload callback from Upyun.
func UpyunCallback(c *gin.Context) {
	var callbackBody callback.UpyunCallbackService
	if err := c.ShouldBind(&callbackBody); err == nil {
		if callbackBody.Code != 200 {
			// NOTE(review): this path logs and returns without writing any
			// response body — confirm that is the intended contract for Upyun.
			util.Log().Debug(
				"又拍云回调返回错误代码%d,信息:%s",
				callbackBody.Code,
				callbackBody.Message,
			)
			return
		}
		res := callback.ProcessCallback(callbackBody, c)
		c.JSON(200, res)
	} else {
		c.JSON(200, ErrorResponse(err))
	}
}

// OneDriveCallback handles the client-side callback after a OneDrive
// upload completes.
func OneDriveCallback(c *gin.Context) {
	var callbackBody callback.OneDriveCallback
	if err := c.ShouldBindJSON(&callbackBody); err == nil {
		res := callbackBody.PreProcess(c)
		c.JSON(200, res)
	} else {
		c.JSON(200, ErrorResponse(err))
	}
}

// OneDriveOAuth handles the OneDrive OAuth authorization callback and
// redirects back to the admin policy page with the outcome encoded in the
// URL fragment's query string.
func OneDriveOAuth(c *gin.Context) {
	var callbackBody callback.OneDriveOauthService
	if err := c.ShouldBindQuery(&callbackBody); err == nil {
		res := callbackBody.Auth(c)
		redirect, _ := url.Parse("/admin/policy")
		queries := redirect.Query()
		queries.Add("code", strconv.Itoa(res.Code))
		queries.Add("msg", res.Msg)
		queries.Add("err", res.Error)
		redirect.RawQuery = queries.Encode()
		// NOTE(review): 301 is a permanently-cacheable redirect; a 302/307
		// may be more appropriate for a per-request OAuth result — confirm.
		c.Redirect(301, "/#"+redirect.String())
	} else {
		c.JSON(200, ErrorResponse(err))
	}
}

// COSCallback handles the client-side callback after a Tencent COS
// upload completes.
func COSCallback(c *gin.Context) {
	var callbackBody callback.COSCallback
	if err := c.ShouldBindQuery(&callbackBody); err == nil {
		res := callbackBody.PreProcess(c)
		c.JSON(200, res)
	} else {
		c.JSON(200, ErrorResponse(err))
	}
}

// S3Callback handles the client-side callback after an S3 upload
// completes; CORS is opened because the browser calls this directly.
func S3Callback(c *gin.Context) {
	c.Header("Access-Control-Allow-Origin", "*")
	var callbackBody callback.S3Callback
	if err := c.ShouldBindQuery(&callbackBody); err == nil {
		res := callbackBody.PreProcess(c)
		c.JSON(200, res)
	} else {
		c.JSON(200, ErrorResponse(err))
	}
}

// UfileCallback handles the upload callback from UCloud UFile.
func UfileCallback(c *gin.Context) {
	var callbackBody callback.UfileCallback
	if err := c.ShouldBind(&callbackBody); err == nil {
		// UFile sends "," when no picture info is available; normalize to "".
		if callbackBody.PicInfo == "," {
			callbackBody.PicInfo = ""
		}
		res := callback.ProcessCallback(callbackBody, c)
		c.JSON(200, res)
	} else {
		c.JSON(200, ErrorResponse(err))
	}
}
package msgpackdiff

import (
	"encoding/base64"
	"errors"
	"io/ioutil"

	"github.com/algorand/msgp/msgp"
)

// GetBinary gathers the binary content of a string that represents a MessagePack object. The string
// may be a base64 encoded binary object, or the path to a binary file that contains the object as
// its only content.
func GetBinary(object string) ([]byte, error) {
	// First interpretation: the argument itself is base64.
	decoded, err := base64.StdEncoding.DecodeString(object)
	if err == nil {
		return decoded, nil
	}

	// Second interpretation: the argument is a file path.
	content, err := ioutil.ReadFile(object)
	if err != nil {
		return []byte{}, err
	}

	// attempt to decode file content from base64; if that fails, the file is
	// assumed to hold the raw binary object.
	maxLen := base64.StdEncoding.DecodedLen(len(content))
	decoded = make([]byte, maxLen)
	n, err := base64.StdEncoding.Decode(decoded, content)
	if err == nil {
		return decoded[:n], nil
	}

	return content, nil
}

// Parse parses a MessagePack encoded binary object into an in-memory data structure.
// It returns the parsed object, the unconsumed remainder of the input, and any decode error.
func Parse(bytes []byte) (parsed MsgpObject, remaining []byte, err error) {
	parsed.Type = msgp.NextType(bytes)
	switch parsed.Type {
	case msgp.StrType:
		parsed.Value, bytes, err = msgp.ReadStringBytes(bytes)
	case msgp.BinType:
		parsed.Value, bytes, err = msgp.ReadBytesBytes(bytes, nil)
	case msgp.MapType:
		var size int
		size, _, bytes, err = msgp.ReadMapHeaderBytes(bytes)
		// Fix: the header-read error was previously ignored, so the loop
		// below could run against a meaningless size.
		if err != nil {
			break
		}
		// Order preserves key order so a later diff can report it.
		valueMap := MsgpMap{
			Order:  make([]string, size),
			Values: make(map[string]MsgpObject, size),
		}
		for i := 0; i < size; i++ {
			var key string
			key, bytes, err = msgp.ReadStringBytes(bytes)
			if err != nil {
				break
			}
			if _, ok := valueMap.Values[key]; ok {
				err = errors.New("Object has duplicate key")
				break
			}
			valueMap.Order[i] = key
			valueMap.Values[key], bytes, err = Parse(bytes)
			if err != nil {
				break
			}
		}
		parsed.Value = valueMap
	case msgp.ArrayType:
		var size int
		size, _, bytes, err = msgp.ReadArrayHeaderBytes(bytes)
		// Fix: same unchecked header-read error as the map case.
		if err != nil {
			break
		}
		valueArray := make([]MsgpObject, size)
		for i := 0; i < size; i++ {
			valueArray[i], bytes, err = Parse(bytes)
			if err != nil {
				break
			}
		}
		parsed.Value = valueArray
	case msgp.Float32Type:
		parsed.Value, bytes, err = msgp.ReadFloat32Bytes(bytes)
	case msgp.Float64Type:
		parsed.Value, bytes, err = msgp.ReadFloat64Bytes(bytes)
	case msgp.BoolType:
		parsed.Value, bytes, err = msgp.ReadBoolBytes(bytes)
	case msgp.IntType:
		parsed.Value, bytes, err = msgp.ReadInt64Bytes(bytes)
	case msgp.UintType:
		parsed.Value, bytes, err = msgp.ReadUint64Bytes(bytes)
	case msgp.NilType:
		parsed.Value = nil
		bytes, err = msgp.ReadNilBytes(bytes)
	case msgp.Complex64Type:
		parsed.Value, bytes, err = msgp.ReadComplex64Bytes(bytes)
	case msgp.Complex128Type:
		parsed.Value, bytes, err = msgp.ReadComplex128Bytes(bytes)
	case msgp.TimeType:
		parsed.Value, bytes, err = msgp.ReadTimeBytes(bytes)
	default:
		err = errors.New("Invalid MessagePack type")
	}
	remaining = bytes
	return
}
// Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package chunk

import (
	"testing"
	"time"

	"github.com/pingcap/tidb/parser/mysql"
	"github.com/pingcap/tidb/sessionctx/stmtctx"
	"github.com/pingcap/tidb/types"
	"github.com/pingcap/tidb/util/collate"
	"github.com/stretchr/testify/require"
)

// TestMutRow exercises the MutRow constructors (from types, values, datums)
// and its mutators (SetValues, SetDatums, SetRow, SetValue, SetDatum),
// checking that ToRow reflects every mutation.
func TestMutRow(t *testing.T) {
	allTypes := newAllTypes()
	mutRow := MutRowFromTypes(allTypes)
	row := mutRow.ToRow()
	sc := new(stmtctx.StatementContext)
	// A MutRow built from types starts with each column's zero value.
	for i := 0; i < row.Len(); i++ {
		val := zeroValForType(allTypes[i])
		d := row.GetDatum(i, allTypes[i])
		d2 := types.NewDatum(val)
		cmp, err := d.Compare(sc, &d2, collate.GetCollator(allTypes[i].GetCollate()))
		require.NoError(t, err)
		require.Equal(t, 0, cmp)
	}

	mutRow = MutRowFromValues("abc", 123)
	// NOTE(review): these IsNull checks read the OLD `row` (from the
	// previous MutRow), not mutRow.ToRow() — presumably unintended; confirm.
	require.False(t, row.IsNull(0))
	require.Equal(t, "abc", mutRow.ToRow().GetString(0))
	require.False(t, row.IsNull(1))
	require.Equal(t, int64(123), mutRow.ToRow().GetInt64(1))

	mutRow.SetValues("abcd", 456)
	row = mutRow.ToRow()
	require.Equal(t, "abcd", row.GetString(0))
	require.False(t, row.IsNull(0))
	require.Equal(t, int64(456), row.GetInt64(1))
	require.False(t, row.IsNull(1))
	mutRow.SetDatums(types.NewStringDatum("defgh"), types.NewIntDatum(33))
	require.False(t, row.IsNull(0))
	require.Equal(t, "defgh", row.GetString(0))
	require.False(t, row.IsNull(1))
	require.Equal(t, int64(33), row.GetInt64(1))

	// SetRow copies another row's values, including nulls, in both directions.
	mutRow.SetRow(MutRowFromValues("foobar", nil).ToRow())
	row = mutRow.ToRow()
	require.False(t, row.IsNull(0))
	require.True(t, row.IsNull(1))

	nRow := MutRowFromValues(nil, 111).ToRow()
	require.True(t, nRow.IsNull(0))
	require.False(t, nRow.IsNull(1))
	mutRow.SetRow(nRow)
	row = mutRow.ToRow()
	require.True(t, row.IsNull(0))
	require.False(t, row.IsNull(1))

	// JSON and time values round-trip through a MutRow.
	j, err := types.ParseBinaryJSONFromString("true")
	time := types.NewTime(types.FromDate(2000, 1, 1, 1, 0, 0, 0), mysql.TypeDatetime, types.MaxFsp)
	require.NoError(t, err)
	mutRow = MutRowFromValues(j, time)
	row = mutRow.ToRow()
	require.Equal(t, j, row.GetJSON(0))
	require.Equal(t, time, row.GetTime(1))

	// A duration written via SetValue/SetDatum must produce the same column
	// bytes as appending it to a fresh chunk.
	retTypes := []*types.FieldType{types.NewFieldType(mysql.TypeDuration)}
	chk := New(retTypes, 1, 1)
	dur, _, err := types.ParseDuration(sc, "01:23:45", 0)
	require.NoError(t, err)
	chk.AppendDuration(0, dur)
	mutRow = MutRowFromTypes(retTypes)
	mutRow.SetValue(0, dur)
	require.Equal(t, mutRow.c.columns[0].data, chk.columns[0].data)
	mutRow.SetDatum(0, types.NewDurationDatum(dur))
	require.Equal(t, mutRow.c.columns[0].data, chk.columns[0].data)
}

// TestIssue29947 checks that setting a nil datum marks the column null
// without disturbing its offsets or reallocating its buffers.
func TestIssue29947(t *testing.T) {
	allTypes := newAllTypes()
	mutRow := MutRowFromTypes(allTypes)
	nilDatum := types.NewDatum(nil)

	dataBefore := make([][]byte, 0, len(mutRow.c.columns))
	elemBufBefore := make([][]byte, 0, len(mutRow.c.columns))
	for _, col := range mutRow.c.columns {
		dataBefore = append(dataBefore, col.data)
		elemBufBefore = append(elemBufBefore, col.elemBuf)
	}
	for i, col := range mutRow.c.columns {
		mutRow.SetDatum(i, nilDatum)
		require.Equal(t, col.IsNull(0), true)
		for _, off := range col.offsets {
			require.Equal(t, off, int64(0))
		}
		require.Equal(t, col.data, dataBefore[i])
		require.Equal(t, col.elemBuf, elemBufBefore[i])
	}
}

// BenchmarkMutRowSetRow measures copying a whole row into a MutRow.
func BenchmarkMutRowSetRow(b *testing.B) {
	b.ReportAllocs()
	rowChk := newChunk(8, 0)
	rowChk.AppendInt64(0, 1)
	rowChk.AppendString(1, "abcd")
	row := rowChk.GetRow(0)
	mutRow := MutRowFromValues(1, "abcd")
	for i := 0; i < b.N; i++ {
		mutRow.SetRow(row)
	}
}

// BenchmarkMutRowSetDatums measures per-datum assignment.
func BenchmarkMutRowSetDatums(b *testing.B) {
	b.ReportAllocs()
	mutRow := MutRowFromValues(1, "abcd")
	datums := []types.Datum{types.NewDatum(1), types.NewDatum("abcd")}
	for i := 0; i < b.N; i++ {
		mutRow.SetDatums(datums...)
	}
}

// BenchmarkMutRowSetValues measures assignment from raw Go values.
func BenchmarkMutRowSetValues(b *testing.B) {
	b.ReportAllocs()
	mutRow := MutRowFromValues(1, "abcd")
	for i := 0; i < b.N; i++ {
		mutRow.SetValues(1, "abcd")
	}
}

// BenchmarkMutRowFromTypes measures construction from field types alone.
func BenchmarkMutRowFromTypes(b *testing.B) {
	b.ReportAllocs()
	tps := []*types.FieldType{
		types.NewFieldType(mysql.TypeLonglong),
		types.NewFieldType(mysql.TypeVarchar),
	}
	for i := 0; i < b.N; i++ {
		MutRowFromTypes(tps)
	}
}

// BenchmarkMutRowFromDatums measures construction from a datum slice.
func BenchmarkMutRowFromDatums(b *testing.B) {
	b.ReportAllocs()
	datums := []types.Datum{types.NewDatum(1), types.NewDatum("abc")}
	for i := 0; i < b.N; i++ {
		MutRowFromDatums(datums)
	}
}

// BenchmarkMutRowFromValues measures construction from raw Go values.
func BenchmarkMutRowFromValues(b *testing.B) {
	b.ReportAllocs()
	values := []interface{}{1, "abc"}
	for i := 0; i < b.N; i++ {
		MutRowFromValues(values)
	}
}

// TestMutRowShallowCopyPartialRow verifies that ShallowCopyPartialRow
// aliases the source row's storage: mutating the source chunk afterwards
// is visible through the MutRow.
func TestMutRowShallowCopyPartialRow(t *testing.T) {
	colTypes := make([]*types.FieldType, 0, 3)
	colTypes = append(colTypes, types.NewFieldType(mysql.TypeVarString))
	colTypes = append(colTypes, types.NewFieldType(mysql.TypeLonglong))
	colTypes = append(colTypes, types.NewFieldType(mysql.TypeTimestamp))

	mutRow := MutRowFromTypes(colTypes)
	row := MutRowFromValues("abc", 123, types.ZeroTimestamp).ToRow()
	mutRow.ShallowCopyPartialRow(0, row)
	require.Equal(t, mutRow.ToRow().GetString(0), row.GetString(0))
	require.Equal(t, mutRow.ToRow().GetInt64(1), row.GetInt64(1))
	require.Equal(t, mutRow.ToRow().GetTime(2), row.GetTime(2))

	// Rewrite the shared chunk in place; the shallow copy must observe it.
	row.c.Reset()
	d := types.NewStringDatum("dfg")
	row.c.AppendDatum(0, &d)
	d = types.NewIntDatum(567)
	row.c.AppendDatum(1, &d)
	d = types.NewTimeDatum(types.NewTime(types.FromGoTime(time.Now()), mysql.TypeTimestamp, 6))
	row.c.AppendDatum(2, &d)

	require.Equal(t, mutRow.ToRow().GetTime(2), d.GetMysqlTime())
	require.Equal(t, mutRow.ToRow().GetString(0), row.GetString(0))
	require.Equal(t, mutRow.ToRow().GetInt64(1), row.GetInt64(1))
	require.Equal(t, mutRow.ToRow().GetTime(2), row.GetTime(2))
}

// rowsNum is the inner-loop repetition count shared by the copy benchmarks.
var rowsNum = 1024

// BenchmarkMutRowShallowCopyPartialRow measures the aliasing copy path.
func BenchmarkMutRowShallowCopyPartialRow(b *testing.B) {
	b.ReportAllocs()
	colTypes := make([]*types.FieldType, 0, 8)
	colTypes = append(colTypes, types.NewFieldType(mysql.TypeVarString))
	colTypes = append(colTypes, types.NewFieldType(mysql.TypeVarString))
	colTypes = append(colTypes, types.NewFieldType(mysql.TypeLonglong))
	colTypes = append(colTypes, types.NewFieldType(mysql.TypeLonglong))
	colTypes = append(colTypes, types.NewFieldType(mysql.TypeDatetime))

	mutRow := MutRowFromTypes(colTypes)
	row := MutRowFromValues("abc", "abcdefg", 123, 456, types.ZeroDatetime).ToRow()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		for j := 0; j < rowsNum; j++ {
			mutRow.ShallowCopyPartialRow(0, row)
		}
	}
}

// BenchmarkChunkAppendPartialRow measures the deep-copy counterpart for
// comparison with the shallow-copy benchmark above.
func BenchmarkChunkAppendPartialRow(b *testing.B) {
	b.ReportAllocs()
	chk := newChunkWithInitCap(rowsNum, 0, 0, 8, 8, sizeTime)
	row := MutRowFromValues("abc", "abcdefg", 123, 456, types.ZeroDatetime).ToRow()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		chk.Reset()
		for j := 0; j < rowsNum; j++ {
			chk.AppendPartialRow(0, row)
		}
	}
}
package rest import ( "github.com/jinmukeji/jiujiantang-services/pkg/rest" proto "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/user/v1" "github.com/kataras/iris/v12" ) // 注销登录 func (h *webHandler) SignOut(ctx iris.Context) { req := new(proto.UserSignOutRequest) req.Ip = ctx.RemoteAddr() _, err := h.rpcSvc.UserSignOut( newRPCContext(ctx), req, ) if err != nil { writeRpcInternalError(ctx, err, false) return } rest.WriteOkJSON(ctx, nil) }
package semt

import (
	"encoding/xml"

	"github.com/thought-machine/finance-messaging/iso20022"
)

// Document01500101 is the XML document wrapper for an
// IntraPositionMovementConfirmation (semt.015.001.01) message.
type Document01500101 struct {
	XMLName xml.Name                               `xml:"urn:iso:std:iso:20022:tech:xsd:semt.015.001.01 Document"`
	Message *IntraPositionMovementConfirmationV01  `xml:"IntraPosMvmntConf"`
}

// AddMessage allocates the wrapped confirmation message and returns it so the
// caller can populate its fields.
func (d *Document01500101) AddMessage() *IntraPositionMovementConfirmationV01 {
	d.Message = new(IntraPositionMovementConfirmationV01)
	return d.Message
}

// Scope
// An account servicer sends a IntraPositionMovementConfirmation to an account owner to confirm the movement of securities within its holding from one sub-balance to another, for example, blocking of securities.
// The account servicer/owner relationship may be:
// - a central securities depository or another settlement market infrastructure acting on behalf of their participants
// - an agent (sub-custodian) acting on behalf of their global custodian customer, or
// - a custodian acting on behalf of an investment management institution or a broker/dealer.
// Usage
// The message may also be used to:
// - re-send a message previously sent (the sub-function of the message is Duplicate),
// - provide a third party with a copy of a message for information (the sub-function of the message is Copy),
// - re-send to a third party a copy of a message for information (the sub-function of the message is Copy Duplicate).
// ISO 15022 - 20022 Coexistence
// This ISO 20022 message is reversed engineered from ISO 15022. Both standards will coexist for a certain number of years. Until this coexistence period ends, the usage of certain data types is restricted to ensure interoperability between ISO 15022 and 20022 users. Compliance to these rules is mandatory in a coexistence environment. The coexistence restrictions are described in a Textual Rule linked to the Message Items they concern. These coexistence textual rules are clearly identified as follows: "CoexistenceXxxxRule".
// IntraPositionMovementConfirmationV01 is the message body confirming the
// movement of securities within a holding from one sub-balance to another.
// Generated-style accessors below allocate each optional/mandatory field on
// first use and return it for population.
type IntraPositionMovementConfirmationV01 struct {

	// Information that unambiguously identifies an IntraPositionMovementConfirmation message as known by the account servicer.
	Identification *iso20022.DocumentIdentification11 `xml:"Id"`

	// Additional parameters to the transaction.
	AdditionalParameters *iso20022.AdditionalParameters3 `xml:"AddtlParams,omitempty"`

	// Party that legally owns the account.
	AccountOwner *iso20022.PartyIdentification13Choice `xml:"AcctOwnr,omitempty"`

	// Account to or from which a securities entry is made.
	SafekeepingAccount *iso20022.SecuritiesAccount13 `xml:"SfkpgAcct"`

	// Place where the securities are safe-kept, physically or notionally. This place can be, for example, a local custodian, a Central Securities Depository (CSD) or an International Central Securities Depository (ICSD).
	SafekeepingPlace *iso20022.SafekeepingPlaceFormat3Choice `xml:"SfkpgPlc,omitempty"`

	// Financial instrument representing a sum of rights of the investor vis-a-vis the issuer.
	FinancialInstrumentIdentification *iso20022.SecurityIdentification11 `xml:"FinInstrmId"`

	// Elements characterising a financial instrument.
	FinancialInstrumentAttributes *iso20022.FinancialInstrumentAttributes4 `xml:"FinInstrmAttrbts,omitempty"`

	// Intra-position movement transaction details.
	IntraPositionDetails *iso20022.IntraPositionDetails2 `xml:"IntraPosDtls"`

	// Party that originated the message, if other than the sender.
	MessageOriginator *iso20022.PartyIdentification10Choice `xml:"MsgOrgtr,omitempty"`

	// Party that is the final destination of the message, if other than the receiver.
	MessageRecipient *iso20022.PartyIdentification10Choice `xml:"MsgRcpt,omitempty"`

	// Additional information that cannot be captured in the structured elements and/or any other specific block.
	Extension []*iso20022.Extension2 `xml:"Xtnsn,omitempty"`
}

// AddIdentification allocates and returns the Identification field.
func (i *IntraPositionMovementConfirmationV01) AddIdentification() *iso20022.DocumentIdentification11 {
	i.Identification = new(iso20022.DocumentIdentification11)
	return i.Identification
}

// AddAdditionalParameters allocates and returns the AdditionalParameters field.
func (i *IntraPositionMovementConfirmationV01) AddAdditionalParameters() *iso20022.AdditionalParameters3 {
	i.AdditionalParameters = new(iso20022.AdditionalParameters3)
	return i.AdditionalParameters
}

// AddAccountOwner allocates and returns the AccountOwner field.
func (i *IntraPositionMovementConfirmationV01) AddAccountOwner() *iso20022.PartyIdentification13Choice {
	i.AccountOwner = new(iso20022.PartyIdentification13Choice)
	return i.AccountOwner
}

// AddSafekeepingAccount allocates and returns the SafekeepingAccount field.
func (i *IntraPositionMovementConfirmationV01) AddSafekeepingAccount() *iso20022.SecuritiesAccount13 {
	i.SafekeepingAccount = new(iso20022.SecuritiesAccount13)
	return i.SafekeepingAccount
}

// AddSafekeepingPlace allocates and returns the SafekeepingPlace field.
func (i *IntraPositionMovementConfirmationV01) AddSafekeepingPlace() *iso20022.SafekeepingPlaceFormat3Choice {
	i.SafekeepingPlace = new(iso20022.SafekeepingPlaceFormat3Choice)
	return i.SafekeepingPlace
}

// AddFinancialInstrumentIdentification allocates and returns the FinancialInstrumentIdentification field.
func (i *IntraPositionMovementConfirmationV01) AddFinancialInstrumentIdentification() *iso20022.SecurityIdentification11 {
	i.FinancialInstrumentIdentification = new(iso20022.SecurityIdentification11)
	return i.FinancialInstrumentIdentification
}

// AddFinancialInstrumentAttributes allocates and returns the FinancialInstrumentAttributes field.
func (i *IntraPositionMovementConfirmationV01) AddFinancialInstrumentAttributes() *iso20022.FinancialInstrumentAttributes4 {
	i.FinancialInstrumentAttributes = new(iso20022.FinancialInstrumentAttributes4)
	return i.FinancialInstrumentAttributes
}

// AddIntraPositionDetails allocates and returns the IntraPositionDetails field.
func (i *IntraPositionMovementConfirmationV01) AddIntraPositionDetails() *iso20022.IntraPositionDetails2 {
	i.IntraPositionDetails = new(iso20022.IntraPositionDetails2)
	return i.IntraPositionDetails
}

// AddMessageOriginator allocates and returns the MessageOriginator field.
func (i *IntraPositionMovementConfirmationV01) AddMessageOriginator() *iso20022.PartyIdentification10Choice {
	i.MessageOriginator = new(iso20022.PartyIdentification10Choice)
	return i.MessageOriginator
}

// AddMessageRecipient allocates and returns the MessageRecipient field.
func (i *IntraPositionMovementConfirmationV01) AddMessageRecipient() *iso20022.PartyIdentification10Choice {
	i.MessageRecipient = new(iso20022.PartyIdentification10Choice)
	return i.MessageRecipient
}

// AddExtension appends a new Extension2 to the Extension slice and returns it.
func (i *IntraPositionMovementConfirmationV01) AddExtension() *iso20022.Extension2 {
	newValue := new(iso20022.Extension2)
	i.Extension = append(i.Extension, newValue)
	return newValue
}
package teststore_test import ( "github.com/igogorek/http-rest-api-go/internal/app/model" "github.com/igogorek/http-rest-api-go/internal/app/store" "github.com/igogorek/http-rest-api-go/internal/app/store/teststore" "github.com/stretchr/testify/assert" "testing" ) func TestUserRepository_Create(t *testing.T) { st := teststore.New() user := model.TestUser() err := st.User().Create(user) assert.NoError(t, err) assert.NotEmpty(t, user.ID) assert.NotEmpty(t, user.EncryptedPassword) } func TestUserRepository_Find(t *testing.T) { st := teststore.New() user, err := st.User().Find(1) assert.EqualError(t, err, store.ErrRecordNotFound.Error()) assert.Nil(t, user) testUser := model.TestUser() if err = st.User().Create(testUser); err != nil { t.Fatal(err) } user, err = st.User().Find(testUser.ID) assert.NoError(t, err) assert.Equal(t, model.User{ ID: testUser.ID, Email: testUser.Email, Password: testUser.Password, EncryptedPassword: testUser.EncryptedPassword, }, *user, ) } func TestUserRepository_FindByEmail(t *testing.T) { st := teststore.New() testUser := model.TestUser() user, err := st.User().FindByEmail(testUser.Email) assert.EqualError(t, err, store.ErrRecordNotFound.Error()) assert.Nil(t, user) if err = st.User().Create(testUser); err != nil { t.Fatal(err) } user, err = st.User().FindByEmail(testUser.Email) assert.NoError(t, err) assert.Equal(t, model.User{ ID: testUser.ID, Email: testUser.Email, Password: testUser.Password, EncryptedPassword: testUser.EncryptedPassword, }, *user, ) }
package commands

import (
	// HOFSTADTER_START import
	// HOFSTADTER_END import

	// custom imports
	"fmt"
	"os"

	"github.com/hofstadter-io/examples/blog/server/databases/postgres"

	// infered imports
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

// HOFSTADTER_START const
// HOFSTADTER_END const

// HOFSTADTER_START var
// HOFSTADTER_END var

// HOFSTADTER_START init
// HOFSTADTER_END init

// ServerToolDBLong is the long help text for the root command.
var ServerToolDBLong = `A tool for working with the server databases`

var (
	// RootConfigPFlag holds the path to the API server config file, as set by
	// the persistent --config/-C flag (default "server/config.yaml").
	RootConfigPFlag string
)

func init() {
	RootCmd.PersistentFlags().StringVarP(&RootConfigPFlag, "config", "C", "server/config.yaml", "the config file for the API server")
	viper.BindPFlag("config", RootCmd.PersistentFlags().Lookup("config"))
}

var (
	// RootCmd is the serverToolDB entry command. Its PersistentPreRun loads
	// the YAML config and opens the postgres connection; PersistentPostRun
	// closes it.
	RootCmd = &cobra.Command{

		Use:   "serverToolDB",
		Short: "A tool for working with the server databases",
		Long:  ServerToolDBLong,

		PersistentPreRun: func(cmd *cobra.Command, args []string) {
			// Argument Parsing
			// Fix: honor the --config flag instead of a hard-coded path
			// (the flag was previously bound but silently ignored).
			filename := RootConfigPFlag
			f, err := os.Open(filename)
			if err != nil {
				fmt.Println("Error", err)
				os.Exit(1)
			}
			// Fix: the config file handle was previously never closed.
			defer f.Close()

			viper.SetConfigType("yaml")
			verr := viper.MergeConfig(f)
			if verr != nil {
				fmt.Println("Error", verr)
				os.Exit(1)
			}

			postgresHost := viper.GetString("databases.postgres.host")
			postgresPort := viper.GetString("databases.postgres.port")
			postgresUser := viper.GetString("databases.postgres.user")
			postgresPass := viper.GetString("databases.postgres.pass")
			postgresDb := viper.GetString("databases.postgres.dbname")
			postgresSslmode := viper.GetString("databases.postgres.sslmode")

			connStr := fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=%s",
				postgresHost,
				postgresPort,
				postgresUser,
				postgresPass,
				postgresDb,
				postgresSslmode,
			)

			postgres.ConnectToPsql(connStr)
		},

		PersistentPostRun: func(cmd *cobra.Command, args []string) {
			logger.Debug("In PersistentPostRun serverToolDBCmd", "args", args)
			// Argument Parsing
			postgres.DisconnectFromPsql()
		},
	}
)

// HOFSTADTER_BELOW
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"path/filepath"

	"github.com/gin-gonic/gin"

	"ipfs_api/pkg/our_infura"
	//"github.com/wabarc/ipfs-pinner/pkg/infura"
)

// main wires up a small gin HTTP service exposing POST /upload and
// POST /retrieve on :9090.
func main() {
	router := gin.Default()
	root := router.Group("/")
	{
		root.POST("upload", upload)
		root.POST("retrieve", retrieve)
	}
	router.Run(":9090")
}

// upload receives a multipart file ("uploadFile"), saves it under
// uploadedFiles/, pins it to IPFS, and reports the resulting content hash.
func upload(c *gin.Context) {
	file, err := c.FormFile("uploadFile")
	if err != nil {
		// Fix: previously execution continued after this error.
		c.String(http.StatusBadRequest, fmt.Sprintf("get form err: %s", err.Error()))
		return
	}

	filename := filepath.Base(file.Filename)
	if err := c.SaveUploadedFile(file, "uploadedFiles/"+filename); err != nil {
		// Fix: previously a "uploaded successfully" message followed this error.
		c.String(http.StatusBadRequest, fmt.Sprintf("upload file err: %s \n", err.Error()))
		return
	}
	c.String(http.StatusOK, fmt.Sprintf("File %s uploaded successfully \n", file.Filename))

	//------------------------------------------------------------------------------------------
	// NOTE(review): machine-specific absolute path — consider making this configurable.
	filePath := filepath.Join("/home/oem/go/src/ipfs_api/uploadedFiles/", filename)
	cid, err := ipfs_protocol.PinFile(filePath)
	//cid, err := infura.PinFile(filePath)
	if err != nil {
		// Fix: pin failures were previously reported with StatusOK.
		c.String(http.StatusInternalServerError, fmt.Sprintf("ipfs-pinner: %s \n", err.Error()))
		return
	}
	c.String(http.StatusOK, fmt.Sprintf("Pinned file hash: \n %s \n", cid))
}

// retrieve fetches the file for the posted "retrieveFile" hash from IPFS and
// writes it under retrievedFile/, reporting the local path.
func retrieve(c *gin.Context) {
	hash := c.PostForm("retrieveFile")
	if hash == "" {
		// Fix: an empty hash previously produced no response at all.
		c.String(http.StatusBadRequest, "missing retrieveFile hash \n")
		return
	}

	data, err := ipfs_protocol.RetrieveFile(hash)
	if err != nil {
		c.String(http.StatusInternalServerError, fmt.Sprintf("File retrieval error: %s \n", err.Error()))
		return
	}

	// NOTE(review): machine-specific absolute path — consider making this configurable.
	filePath := filepath.Join("/home/oem/go/src/ipfs_api/retrievedFile/", hash)
	// Fix: ioutil.WriteFile creates the file itself; the former os.Create was
	// redundant and its error was silently swallowed with no response.
	if err := ioutil.WriteFile(filePath, []byte(data), 0644); err != nil {
		c.String(http.StatusInternalServerError, fmt.Sprintf("Writing file error: %s \n", err.Error()))
		return
	}
	c.String(http.StatusOK, fmt.Sprintf("File retrieved at:\n%s \n", filePath))
}
/* Copyright 2019 Adobe. All rights reserved. This file is licensed to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package models import ( "time" "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/aggregates" ) // Aggregate represents the OpenStack Aggregate // // swagger:model type Aggregate struct { // the id for the aggregate // // required: true ID int `storm:"id"` // the name for the aggregate // // required: true Name string `storm:"index"` // the AvailabilityZone for the aggregate // // required: true AvailabilityZone string // the Hosts for the aggregate // // required: true Hosts []string // the metadata for the aggregate // // required: true Metadata map[string]string // the time of the aggregate creation // // required: true Created time.Time // the time of the aggregate modification // // required: true Updated time.Time // OSSIA update time // // required: true PollTime time.Time } // Exists method checking if flavor is in API Response Slice func (a *Aggregate) Exists(aggregates []aggregates.Aggregate) bool { for _, v := range aggregates { if v.ID == a.ID { return true } } return false }
package main

import (
	"encoding/csv"
	"fmt"
	"io"
	"os"
	"strconv"

	mgo "gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
)

// main loads the 2013 SIMCE score CSVs for every course/region combination
// and merges the scores into the "school" MongoDB collection, matching
// documents by RBD and year.
func main() {
	session, err := mgo.Dial("127.0.0.1")
	if err != nil {
		panic(err)
	}
	defer session.Close()

	session.SetMode(mgo.Monotonic, true)
	schools := session.DB("bigdata_school_chile").C("school")

	regions := [15]string{"I", "II", "III", "IV", "V", "VI", "VII", "VIII", "IX", "X", "XI", "XII", "XIII", "XIV", "XV"}
	courses := [4]string{"2b", "4b", "6b", "8b"}
	year := 2013

	for i := 0; i < len(courses); i++ {
		for j := 0; j < len(regions); j++ {
			// A CSV parse error aborts the whole import (as before); an
			// unopenable file is reported and skipped.
			if err := loadRegionFile(schools, courses[i], regions[j], i, year); err != nil {
				return
			}
		}
	}
}

// loadRegionFile opens one course/region CSV and applies every data record to
// the collection. It returns nil when the file cannot be opened (reported and
// skipped) and a non-nil error on a CSV parse failure so the caller can abort.
// Fix: the original deferred every file.Close() inside the loop (all handles
// held until main returned) and registered the defer before the error check.
func loadRegionFile(c *mgo.Collection, course, region string, courseIdx, year int) error {
	file, err := os.Open("../../../data/2013/simce/" + course + "/simce" + course + "2013_region" + region + ".csv")
	if err != nil {
		fmt.Println("Error:", err)
		return nil
	}
	defer file.Close()

	fmt.Println("Leyendo archivo simce del 2013 del curso ", course, "region: ", region)

	reader := csv.NewReader(file)
	reader.Comma = ';'

	lineCount := 0
	for {
		record, err := reader.Read()
		if err == io.EOF {
			break
		} else if err != nil {
			fmt.Println("Error:", err)
			return err
		}
		if lineCount > 0 { // skip the header row
			applyRecord(c, record, courseIdx, year)
		}
		lineCount++
	}
	return nil
}

// applyRecord parses the numeric columns of one CSV record and updates the
// matching school document (rbd + year) with the SIMCE figures for the course
// selected by courseIdx (0=2b, 1=4b, 2=6b, 3=8b). Unmatched documents are
// reported, mirroring the original behavior.
func applyRecord(c *mgo.Collection, record []string, courseIdx, year int) {
	rbd, _ := strconv.ParseInt(record[6], 10, 64)

	var set bson.M
	switch courseIdx {
	case 0:
		students, _ := strconv.ParseInt(record[13], 10, 64)
		score, _ := strconv.ParseInt(record[14], 10, 64)
		set = bson.M{"simce.2b.l.students": students, "simce.2b.l.score": score}
	case 1:
		lStud, _ := strconv.ParseInt(record[13], 10, 64)
		mStud, _ := strconv.ParseInt(record[14], 10, 64)
		nStud, _ := strconv.ParseInt(record[15], 10, 64)
		lScore, _ := strconv.ParseInt(record[16], 10, 64)
		mScore, _ := strconv.ParseInt(record[17], 10, 64)
		nScore, _ := strconv.ParseInt(record[18], 10, 64)
		set = bson.M{"simce.4b.l.students": lStud, "simce.4b.l.score": lScore, "simce.4b.m.students": mStud, "simce.4b.m.score": mScore, "simce.4b.n.students": nStud, "simce.4b.n.score": nScore}
	case 2:
		lStud, _ := strconv.ParseInt(record[13], 10, 64)
		mStud, _ := strconv.ParseInt(record[14], 10, 64)
		lScore, _ := strconv.ParseInt(record[15], 10, 64)
		mScore, _ := strconv.ParseInt(record[16], 10, 64)
		set = bson.M{"simce.6b.l.students": lStud, "simce.6b.l.score": lScore, "simce.6b.m.students": mStud, "simce.6b.m.score": mScore}
	default:
		lStud, _ := strconv.ParseInt(record[13], 10, 64)
		mStud, _ := strconv.ParseInt(record[14], 10, 64)
		nStud, _ := strconv.ParseInt(record[15], 10, 64)
		lScore, _ := strconv.ParseInt(record[16], 10, 64)
		mScore, _ := strconv.ParseInt(record[17], 10, 64)
		nScore, _ := strconv.ParseInt(record[18], 10, 64)
		set = bson.M{"simce.8b.l.students": lStud, "simce.8b.l.score": lScore, "simce.8b.m.students": mStud, "simce.8b.m.score": mScore, "simce.8b.n.students": nStud, "simce.8b.n.score": nScore}
	}

	if err := c.Update(bson.M{"rbd": rbd, "year": year}, bson.M{"$set": set}); err != nil {
		fmt.Println("RBD NO ENCONTRADO: ", rbd, " NOMBRE:", record[8])
	}
}
package strucct // type PlayerInfo struct { // PlayerId int64 `xorm:"not null pk autoincr BIGINT(20)"` // Name string `xorm:"not null default '' unique(uk_name) VARCHAR(128)"` // NickName string `xorm:"not null default '' unique(uk_name) VARCHAR(128)"` // Position int `xorm:"not null unique(uk_type_position) INT(11)"` // SecondPosition int `xorm:"not null default 0 INT(11)"` // Type int `xorm:"not null default 0 unique(uk_type_position) INT(11)"` // Score int `xorm:"not null default 0 INT(11)"` // Rebound int `xorm:"not null default 0 INT(11)"` // Assist int `xorm:"not null default 0 INT(11)"` // Steal int `xorm:"not null default 0 INT(11)"` // Cap int `xorm:"not null default 0 INT(11)"` // AppearNum int `xorm:"not null default 0 INT(11)"` // } // 基础能力:<input type="text" name="base_power"> // 内攻:<input type="text" name="inside_attack"> // 外攻:<input type="text" name="outside_attack"> // 前板:<input type="text" name="offensive_rebound"> // 后板:<input type="text" name="defensive_rebound"> // 传球:<input type="text" name="pass"> // 内防:<input type="text" name="inside_defense"> // 外防:<input type="text" name="outside_defense">
package jobs

import (
	"fmt"

	"github.com/gocql/gocql"
	"github.com/gorhill/cronexpr"
	"github.com/prometheus/common/log"
)

// TODO * Add metrics for how often we try CAS because we did not see current
// TODO   correct job meta data.
// TODO * Load job info using EACH_QUORUM to make sure we pick up state correctly.
// TODO   OTOH, maybe a version info on the state will help with that without using
// TODO   EACH_QUORUM.

// CassandraRepo is a job repository implementation backed by a Cassandra
// keyspace (the "jobs" table, plus a "logs" table for per-job messages).
type CassandraRepo struct {
	session *gocql.Session
}

// GetJobinfo retrieves the meta data for the job with the given name.
// Returns nil,nil for jobinfo if there is no job with the given name
// and nil,err if an error occurred (including a cron schedule that fails
// to parse).
func (m *CassandraRepo) GetJobinfo(name string) (*jobinfo, error) {
	li := jobinfo{name: name}
	var schedule string
	// Using CL LocalOne because the read is informational only - any data
	// we miss here will only cause an unnecessary locking attempt.
	err := m.session.
		Query("select enabled,lockttlsec,checksec,schedule,last,owner,state from jobs where name = ?", name).
		Consistency(gocql.LocalOne).
		Scan(&(li.enabled), &(li.lockttlsec), &(li.checksec), &schedule, &(li.last), &(li.owner), &(li.state))
	if err == gocql.ErrNotFound {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	// The schedule is stored as a cron expression string; parse it eagerly so
	// callers always get a usable jobinfo.
	li.schedule, err = cronexpr.Parse(schedule)
	if err != nil {
		return nil, err
	}
	return &li, nil
}

// SaveState saves the given state for the given job and stamps "last" with
// the server-side time. This does not check lock ownership.
func (m *CassandraRepo) SaveState(name string, state []byte) error {
	// Using CL EachQuorum to make sure that subsequent runs from any DC
	// pick up the correct state.
	if err := m.session.
		Query("update jobs set state = ?, last = unixTimestampOf(now()) where name = ?", state, name).
		Consistency(gocql.EachQuorum).
		Exec(); err != nil {
		// TODO map error
		return err
	}
	return nil
}

// TouchLock updates the lock's TTL to ttlsec.
// This does not check ownership of the lock.
func (m *CassandraRepo) TouchLock(name string, owner string, ttlsec int) error {
	// Using CL LocalQuorum here because we want the TTL increase to be stored
	// safely in more than one node. The use of SERIAL in the lock CAS query will
	// make sure we see the TTL refresh when trying to get the lock.
	return m.session.
		Query("update jobs using ttl ? set owner = ? where name = ?", ttlsec, owner, name).
		Consistency(gocql.LocalQuorum).
		Exec()
}

// TryGetLock attempts to acquire the given job's lock using a Cassandra CAS
// (lightweight transaction) query.
// true will be returned if the lock has been acquired, false if it was already taken.
func (m *CassandraRepo) TryGetLock(name string, owner string, ttlsec int) (bool, error) {
	// CL of LocalOne is sufficient, because coordination happens based on CAS query.
	// Reads of older data in GetJobinfo will only trigger unnecessary work.
	applied, err := m.session.
		Query("update jobs using ttl ? set owner = ? where name = ? if owner = null", ttlsec, owner, name).
		SerialConsistency(gocql.Serial).
		Consistency(gocql.LocalOne).
		ScanCAS()
	if err != nil {
		return false, err
	}
	return applied, nil
}

// Commit the given state to the job with the provided name.
// Commit is an optimization, so a relaxed CL is enough - if data is lost
// in the worst case, it will only lead to re-doing some work.
// NOTE(review): the original comment claimed LocalQuorum but the query runs at
// LocalOne, and it targets the "locks" table while every other query here uses
// "jobs" — confirm both are intended.
func (m *CassandraRepo) Commit(name string, state []byte) error {
	return m.session.
		Query("update locks set state = ? where name = ?", state, name).
		Consistency(gocql.LocalOne).
		Exec()
}

// Log records a message in a per-job log messages table, keyed by job name and
// a server-side timeuuid, with a 10-day (864000s) TTL. Failures are logged
// locally rather than returned.
func (m *CassandraRepo) Log(name string, id string, event string, format string, a ...interface{}) {
	msg := fmt.Sprintf(format, a...)
	if err := m.session.
		Query("update logs using ttl 864000 set id = ?,event = ?, msg = ? where name = ? and ts = now()", id, event, msg, name).
		Consistency(gocql.LocalOne).
		Exec(); err != nil {
		log.Errorf("Unable to save log (%s) '%s' to DB for %s,%s: %v)", event, msg, name, id, err)
	}
}

// CreateJob creates an entry in the jobs table for the provided JobCfg.
// This uses a CAS query ("if not exists") to make sure it is only inserted once.
func (m *CassandraRepo) CreateJob(name string, jobcfg *JobCfg) error {
	_, err := m.session.
		Query("insert into jobs (name,enabled,lockttlsec,checksec,schedule) values (?,?,?,?,?) if not exists",
			name, jobcfg.Enabled, jobcfg.Lockttlsec, jobcfg.Checksec, jobcfg.Schedule).
		SerialConsistency(gocql.Serial).
		Consistency(gocql.EachQuorum).
		ScanCAS()
	return err
}
package reply type ProtocolErrReply struct { Msg string } func (r *ProtocolErrReply)ToBytes()[]byte{ return []byte("-ERR Protocol error: '" + r.Msg + "'\r\n") }