text stringlengths 11 4.05M |
|---|
package fimap
import (
"testing"
"github.com/valyala/fastrand"
)
var smallSet, mediumSet, largeSet []keyType
// initializeSet returns a slice of size pseudo-random keys, each drawn
// uniformly from [1, 2000000000] so that no key is ever the zero value.
func initializeSet(size int) []keyType {
	keys := make([]keyType, size)
	for i := 0; i < len(keys); i++ {
		keys[i] = keyType(fastrand.Uint32n(2000000000)) + 1
	}
	return keys
}
// init pre-populates the three shared key sets once, so each benchmark
// measures map operations only and not random key generation.
func init() {
	smallSet = initializeSet(1024)
	mediumSet = initializeSet(2000000) // 2 million
	largeSet = initializeSet(10000000) // 10 million
}
// BenchmarkFIMapSmall runs the fimap insert-if-absent workload on the 1K key set.
func BenchmarkFIMapSmall(b *testing.B) {
	benchmarkFIMap(b, smallSet)
}
// BenchmarkFIMapMedium runs the fimap insert-if-absent workload on the 2M key set.
func BenchmarkFIMapMedium(b *testing.B) {
	benchmarkFIMap(b, mediumSet)
}
// BenchmarkFIMapLarge runs the fimap insert-if-absent workload on the 10M key set.
func BenchmarkFIMapLarge(b *testing.B) {
	benchmarkFIMap(b, largeSet)
}
// BenchmarkMapSmall runs the built-in map baseline on the 1K key set.
func BenchmarkMapSmall(b *testing.B) {
	benchmarkMap(b, smallSet)
}
// BenchmarkMapMedium runs the built-in map baseline on the 2M key set.
func BenchmarkMapMedium(b *testing.B) {
	benchmarkMap(b, mediumSet)
}
// BenchmarkMapLarge runs the built-in map baseline on the 10M key set.
func BenchmarkMapLarge(b *testing.B) {
	benchmarkMap(b, largeSet)
}
// benchmarkFIMap measures an insert-if-absent workload against a fresh fimap
// per iteration: every key in set is probed with Get and stored only when
// missing, so duplicate keys are written at most once.
func benchmarkFIMap(b *testing.B, set []keyType) {
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s, err := New(0, 0.65)
		if err != nil {
			// The original discarded this error; a failing constructor would
			// silently benchmark a broken map. Abort instead.
			b.Fatal(err)
		}
		for _, v := range set {
			if _, exist := s.Get(v); !exist {
				s.Set(v, struct{}{})
			}
		}
	}
}
// benchmarkMap is the baseline for benchmarkFIMap: the identical
// insert-if-absent workload, but backed by Go's built-in map type.
func benchmarkMap(b *testing.B, set []keyType) {
	b.ReportAllocs()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		m := make(map[keyType]struct{})
		for _, k := range set {
			if _, ok := m[k]; !ok {
				m[k] = struct{}{}
			}
		}
	}
}
|
//************************************************************************//
// rsc - RightScale API command line tool
//
// Generated with:
// $ praxisgen -metadata=ss/ssd/restful_doc -output=ss/ssd -pkg=ssd -target=1.0 -client=API
//
// The content of this file is auto-generated, DO NOT MODIFY
//************************************************************************//
package ssd
import (
"regexp"
"github.com/rightscale/rsc/metadata"
)
// GenMetadata consists of a map of resource name to resource metadata.
var GenMetadata = map[string]*metadata.Resource{
"Schedule": &metadata.Resource{
Name: "Schedule",
Description: `A Schedule represents a recurring period during which a CloudApp should be running. It must have a unique name and an optional description. The recurrence rules follow the [Recurrence Rule format](https://tools.ietf.org/html/rfc5545#section-3.8.5.3).
Multiple Schedules can be associated with a Template when published to the Catalog. Users will be able to launch the resulting CloudApp with one of the associated schedule. Updating or deleting a Schedule will not affect CloudApps that were published with that Schedule.`,
Identifier: "application/vnd.rightscale.self_service.schedule",
Attributes: []*metadata.Attribute{
&metadata.Attribute{
Name: "created_by",
FieldName: "CreatedBy",
FieldType: "*User",
},
&metadata.Attribute{
Name: "description",
FieldName: "Description",
FieldType: "string",
},
&metadata.Attribute{
Name: "href",
FieldName: "Href",
FieldType: "string",
},
&metadata.Attribute{
Name: "id",
FieldName: "Id",
FieldType: "string",
},
&metadata.Attribute{
Name: "kind",
FieldName: "Kind",
FieldType: "string",
},
&metadata.Attribute{
Name: "name",
FieldName: "Name",
FieldType: "string",
},
&metadata.Attribute{
Name: "start_recurrence",
FieldName: "StartRecurrence",
FieldType: "*Recurrence",
},
&metadata.Attribute{
Name: "stop_recurrence",
FieldName: "StopRecurrence",
FieldType: "*Recurrence",
},
&metadata.Attribute{
Name: "timestamps",
FieldName: "Timestamps",
FieldType: "*TimestampsStruct",
},
},
Actions: []*metadata.Action{
&metadata.Action{
Name: "index",
Description: `List the schedules available in Designer.`,
PathPatterns: []*metadata.PathPattern{
&metadata.PathPattern{
HTTPMethod: "GET",
Pattern: "/api/designer/collections/%s/schedules",
Variables: []string{"collection_id"},
Regexp: regexp.MustCompile(`/api/designer/collections/([^/]+)/schedules`),
},
},
CommandFlags: []*metadata.ActionParam{},
APIParams: []*metadata.ActionParam{},
},
&metadata.Action{
Name: "show",
Description: `Show detailed information about a given Schedule.`,
PathPatterns: []*metadata.PathPattern{
&metadata.PathPattern{
HTTPMethod: "GET",
Pattern: "/api/designer/collections/%s/schedules/%s",
Variables: []string{"collection_id", "id"},
Regexp: regexp.MustCompile(`/api/designer/collections/([^/]+)/schedules/([^/]+)`),
},
},
CommandFlags: []*metadata.ActionParam{},
APIParams: []*metadata.ActionParam{},
},
&metadata.Action{
Name: "create",
Description: `Create a new Schedule.`,
PathPatterns: []*metadata.PathPattern{
&metadata.PathPattern{
HTTPMethod: "POST",
Pattern: "/api/designer/collections/%s/schedules",
Variables: []string{"collection_id"},
Regexp: regexp.MustCompile(`/api/designer/collections/([^/]+)/schedules`),
},
},
CommandFlags: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "description",
Description: `An optional description that will help users understand the purpose of the Schedule`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
&metadata.ActionParam{
Name: "name",
Description: `The unique name of the Schedule`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: true,
NonBlank: false,
},
&metadata.ActionParam{
Name: "start_recurrence[hour]",
Description: `The hour of day from 0 to 23.`,
Type: "int",
Location: metadata.PayloadParam,
Mandatory: true,
NonBlank: false,
},
&metadata.ActionParam{
Name: "start_recurrence[minute]",
Description: `The minute from 0 to 59.`,
Type: "int",
Location: metadata.PayloadParam,
Mandatory: true,
NonBlank: false,
},
&metadata.ActionParam{
Name: "start_recurrence[rule]",
Description: `A RRULE string describing the recurrence rule.`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: true,
NonBlank: false,
},
&metadata.ActionParam{
Name: "stop_recurrence[hour]",
Description: `The hour of day from 0 to 23.`,
Type: "int",
Location: metadata.PayloadParam,
Mandatory: true,
NonBlank: false,
},
&metadata.ActionParam{
Name: "stop_recurrence[minute]",
Description: `The minute from 0 to 59.`,
Type: "int",
Location: metadata.PayloadParam,
Mandatory: true,
NonBlank: false,
},
&metadata.ActionParam{
Name: "stop_recurrence[rule]",
Description: `A RRULE string describing the recurrence rule.`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: true,
NonBlank: false,
},
},
APIParams: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "description",
Description: `An optional description that will help users understand the purpose of the Schedule`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
&metadata.ActionParam{
Name: "name",
Description: `The unique name of the Schedule`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: true,
NonBlank: false,
},
&metadata.ActionParam{
Name: "start_recurrence",
Description: `When to start a CloudApp`,
Type: "*Recurrence",
Location: metadata.PayloadParam,
Mandatory: true,
NonBlank: false,
},
&metadata.ActionParam{
Name: "stop_recurrence",
Description: `When to stop a CloudApp`,
Type: "*Recurrence",
Location: metadata.PayloadParam,
Mandatory: true,
NonBlank: false,
},
},
},
&metadata.Action{
Name: "update",
Description: `Update one or more attributes of an existing Schedule.
Note: updating a Schedule in Designer doesn't update it in the applications that were published with it to the Catalog or affect running CloudApps with that Schedule.`,
PathPatterns: []*metadata.PathPattern{
&metadata.PathPattern{
HTTPMethod: "PATCH",
Pattern: "/api/designer/collections/%s/schedules/%s",
Variables: []string{"collection_id", "id"},
Regexp: regexp.MustCompile(`/api/designer/collections/([^/]+)/schedules/([^/]+)`),
},
},
CommandFlags: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "description",
Description: `An optional description that will help users understand the purpose of the Schedule`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
&metadata.ActionParam{
Name: "name",
Description: `The unique name of the Schedule`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
&metadata.ActionParam{
Name: "start_recurrence[hour]",
Description: `The hour of day from 0 to 23.`,
Type: "int",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
&metadata.ActionParam{
Name: "start_recurrence[minute]",
Description: `The minute from 0 to 59.`,
Type: "int",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
&metadata.ActionParam{
Name: "start_recurrence[rule]",
Description: `A RRULE string describing the recurrence rule.`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
&metadata.ActionParam{
Name: "stop_recurrence[hour]",
Description: `The hour of day from 0 to 23.`,
Type: "int",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
&metadata.ActionParam{
Name: "stop_recurrence[minute]",
Description: `The minute from 0 to 59.`,
Type: "int",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
&metadata.ActionParam{
Name: "stop_recurrence[rule]",
Description: `A RRULE string describing the recurrence rule.`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
},
APIParams: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "description",
Description: `An optional description that will help users understand the purpose of the Schedule`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
&metadata.ActionParam{
Name: "name",
Description: `The unique name of the Schedule`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
&metadata.ActionParam{
Name: "start_recurrence",
Description: `When to start a CloudApp`,
Type: "*Recurrence",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
&metadata.ActionParam{
Name: "stop_recurrence",
Description: `When to stop a CloudApp`,
Type: "*Recurrence",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
},
},
&metadata.Action{
Name: "delete",
Description: `Delete a Schedule from the system.
Note: deleting a Schedule from Designer doesn't remove it from the applications that were published with it to the Catalog or affect running CloudApps with that Schedule.`,
PathPatterns: []*metadata.PathPattern{
&metadata.PathPattern{
HTTPMethod: "DELETE",
Pattern: "/api/designer/collections/%s/schedules/%s",
Variables: []string{"collection_id", "id"},
Regexp: regexp.MustCompile(`/api/designer/collections/([^/]+)/schedules/([^/]+)`),
},
},
CommandFlags: []*metadata.ActionParam{},
APIParams: []*metadata.ActionParam{},
},
&metadata.Action{
Name: "multi_delete",
Description: `Delete multiple Schedules from the system in bulk.
Note: deleting a Schedule from Designer doesn't remove it from the applications that were published with it to the Catalog or affect running CloudApps with that Schedule.`,
PathPatterns: []*metadata.PathPattern{
&metadata.PathPattern{
HTTPMethod: "DELETE",
Pattern: "/api/designer/collections/%s/schedules",
Variables: []string{"collection_id"},
Regexp: regexp.MustCompile(`/api/designer/collections/([^/]+)/schedules`),
},
},
CommandFlags: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "ids[]",
Description: `The IDs of the Schedules to delete`,
Type: "[]string",
Location: metadata.QueryParam,
Mandatory: true,
NonBlank: false,
},
},
APIParams: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "ids[]",
Description: `The IDs of the Schedules to delete`,
Type: "[]string",
Location: metadata.QueryParam,
Mandatory: true,
NonBlank: false,
},
},
},
},
},
"Template": &metadata.Resource{
Name: "Template",
Description: `A Template represent a CloudApplication Template (CAT) that has been uploaded to this design collection.
For information on the syntax of a CAT file, please see the [CAT File Language Reference](http://docs.rightscale.com/ss/reference/ss_CAT_file_language.html) on the RightScale Docs
site.
A CAT file is compiled by Self-Service to make it ready for publication and subsequent launch by users. To
test your CAT file syntax, you can call the compile action with the source content. In order to
Publish your CAT to the Catalog where users can launch it, it must be uploaded to Designer first, and then
published to the Catalog.
CAT files are uniquely identified by the name of the CloudApplication, which is specified as the "name"
attribute inside of a CAT file.`,
Identifier: "application/vnd.rightscale.self_service.template",
Attributes: []*metadata.Attribute{
&metadata.Attribute{
Name: "application_info",
FieldName: "ApplicationInfo",
FieldType: "*ApplicationInfo",
},
&metadata.Attribute{
Name: "compilation_href",
FieldName: "CompilationHref",
FieldType: "string",
},
&metadata.Attribute{
Name: "compiled_cat",
FieldName: "CompiledCat",
FieldType: "string",
},
&metadata.Attribute{
Name: "created_by",
FieldName: "CreatedBy",
FieldType: "*User",
},
&metadata.Attribute{
Name: "dependencies",
FieldName: "Dependencies",
FieldType: "[]*CatDependency",
},
&metadata.Attribute{
Name: "dependents",
FieldName: "Dependents",
FieldType: "[]*CatDependency",
},
&metadata.Attribute{
Name: "filename",
FieldName: "Filename",
FieldType: "string",
},
&metadata.Attribute{
Name: "href",
FieldName: "Href",
FieldType: "string",
},
&metadata.Attribute{
Name: "id",
FieldName: "Id",
FieldType: "string",
},
&metadata.Attribute{
Name: "imports",
FieldName: "Imports",
FieldType: "[]string",
},
&metadata.Attribute{
Name: "kind",
FieldName: "Kind",
FieldType: "string",
},
&metadata.Attribute{
Name: "long_description",
FieldName: "LongDescription",
FieldType: "string",
},
&metadata.Attribute{
Name: "name",
FieldName: "Name",
FieldType: "string",
},
&metadata.Attribute{
Name: "package",
FieldName: "Package",
FieldType: "string",
},
&metadata.Attribute{
Name: "parameters",
FieldName: "Parameters",
FieldType: "[]*Parameter",
},
&metadata.Attribute{
Name: "published_by",
FieldName: "PublishedBy",
FieldType: "*User",
},
&metadata.Attribute{
Name: "required_parameters",
FieldName: "RequiredParameters",
FieldType: "[]string",
},
&metadata.Attribute{
Name: "rs_ca_ver",
FieldName: "RsCaVer",
FieldType: "int",
},
&metadata.Attribute{
Name: "short_description",
FieldName: "ShortDescription",
FieldType: "string",
},
&metadata.Attribute{
Name: "source",
FieldName: "Source",
FieldType: "string",
},
&metadata.Attribute{
Name: "source_href",
FieldName: "SourceHref",
FieldType: "string",
},
&metadata.Attribute{
Name: "stale",
FieldName: "Stale",
FieldType: "bool",
},
&metadata.Attribute{
Name: "timestamps",
FieldName: "Timestamps",
FieldType: "*TimestampsStruct",
},
},
Actions: []*metadata.Action{
&metadata.Action{
Name: "index",
Description: `List the templates available in Designer along with some general details.`,
PathPatterns: []*metadata.PathPattern{
&metadata.PathPattern{
HTTPMethod: "GET",
Pattern: "/api/designer/collections/%s/templates",
Variables: []string{"collection_id"},
Regexp: regexp.MustCompile(`/api/designer/collections/([^/]+)/templates`),
},
},
CommandFlags: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "filter[]",
Description: `Filter by name, syntax is ["name==foo"]`,
Type: "[]string",
Location: metadata.QueryParam,
Mandatory: false,
NonBlank: false,
},
&metadata.ActionParam{
Name: "ids[]",
Description: `An optional list of template IDs to retrieve`,
Type: "[]string",
Location: metadata.QueryParam,
Mandatory: false,
NonBlank: false,
},
&metadata.ActionParam{
Name: "view",
Description: `Optional view to return`,
Type: "string",
Location: metadata.QueryParam,
Mandatory: false,
NonBlank: false,
ValidValues: []string{"default", "expanded"},
},
},
APIParams: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "filter[]",
Description: `Filter by name, syntax is ["name==foo"]`,
Type: "[]string",
Location: metadata.QueryParam,
Mandatory: false,
NonBlank: false,
},
&metadata.ActionParam{
Name: "ids[]",
Description: `An optional list of template IDs to retrieve`,
Type: "[]string",
Location: metadata.QueryParam,
Mandatory: false,
NonBlank: false,
},
&metadata.ActionParam{
Name: "view",
Description: `Optional view to return`,
Type: "string",
Location: metadata.QueryParam,
Mandatory: false,
NonBlank: false,
ValidValues: []string{"default", "expanded"},
},
},
},
&metadata.Action{
Name: "show",
Description: `Show detailed information about a given Template. Use the views specified below for more information.`,
PathPatterns: []*metadata.PathPattern{
&metadata.PathPattern{
HTTPMethod: "GET",
Pattern: "/api/designer/collections/%s/templates/%s",
Variables: []string{"collection_id", "id"},
Regexp: regexp.MustCompile(`/api/designer/collections/([^/]+)/templates/([^/]+)`),
},
},
CommandFlags: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "view",
Description: `Optional view to return`,
Type: "string",
Location: metadata.QueryParam,
Mandatory: false,
NonBlank: false,
ValidValues: []string{"default", "expanded"},
},
},
APIParams: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "view",
Description: `Optional view to return`,
Type: "string",
Location: metadata.QueryParam,
Mandatory: false,
NonBlank: false,
ValidValues: []string{"default", "expanded"},
},
},
},
&metadata.Action{
Name: "create",
Description: `Create a new Template by uploading its content to Designer.`,
PathPatterns: []*metadata.PathPattern{
&metadata.PathPattern{
HTTPMethod: "POST",
Pattern: "/api/designer/collections/%s/templates",
Variables: []string{"collection_id"},
Regexp: regexp.MustCompile(`/api/designer/collections/([^/]+)/templates`),
},
},
CommandFlags: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "source",
Description: `Multipart File Upload`,
Type: "file",
Location: metadata.PayloadParam,
Mandatory: true,
NonBlank: false,
},
},
APIParams: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "source",
Description: `Multipart File Upload`,
Type: "*rsapi.FileUpload",
Location: metadata.PayloadParam,
Mandatory: true,
NonBlank: false,
},
},
},
&metadata.Action{
Name: "create_from_compilation",
Description: `Create a new Template from a previously compiled CAT.`,
PathPatterns: []*metadata.PathPattern{
&metadata.PathPattern{
HTTPMethod: "POST",
Pattern: "/api/designer/collections/%s/templates/actions/create_from_compilation",
Variables: []string{"collection_id"},
Regexp: regexp.MustCompile(`/api/designer/collections/([^/]+)/templates/actions/create_from_compilation`),
},
},
CommandFlags: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "compilation_href",
Description: `The href of the compilation`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: true,
NonBlank: false,
},
&metadata.ActionParam{
Name: "filename",
Description: `The filename of the template`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
},
APIParams: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "compilation_href",
Description: `The href of the compilation`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: true,
NonBlank: false,
},
&metadata.ActionParam{
Name: "filename",
Description: `The filename of the template`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
},
},
&metadata.Action{
Name: "update",
Description: `Update the content of an existing Template (a Template with the same "name" value in the CAT).`,
PathPatterns: []*metadata.PathPattern{
&metadata.PathPattern{
HTTPMethod: "PUT",
Pattern: "/api/designer/collections/%s/templates/%s",
Variables: []string{"collection_id", "id"},
Regexp: regexp.MustCompile(`/api/designer/collections/([^/]+)/templates/([^/]+)`),
},
},
CommandFlags: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "source",
Description: `Multipart File Upload`,
Type: "file",
Location: metadata.PayloadParam,
Mandatory: true,
NonBlank: false,
},
},
APIParams: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "source",
Description: `Multipart File Upload`,
Type: "*rsapi.FileUpload",
Location: metadata.PayloadParam,
Mandatory: true,
NonBlank: false,
},
},
},
&metadata.Action{
Name: "update_from_compilation",
Description: `Update a Template from a previously compiled CAT.`,
PathPatterns: []*metadata.PathPattern{
&metadata.PathPattern{
HTTPMethod: "POST",
Pattern: "/api/designer/collections/%s/templates/%s/actions/update_from_compilation",
Variables: []string{"collection_id", "id"},
Regexp: regexp.MustCompile(`/api/designer/collections/([^/]+)/templates/([^/]+)/actions/update_from_compilation`),
},
},
CommandFlags: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "compilation_href",
Description: `The href of the compilation`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: true,
NonBlank: false,
},
&metadata.ActionParam{
Name: "filename",
Description: `The filename of the template`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
},
APIParams: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "compilation_href",
Description: `The href of the compilation`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: true,
NonBlank: false,
},
&metadata.ActionParam{
Name: "filename",
Description: `The filename of the template`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
},
},
&metadata.Action{
Name: "delete",
Description: `Delete a Template from the system. Note: deleting a Template from Designer doesn't remove it from the Catalog if it has already been published -- see the "unpublish" action.`,
PathPatterns: []*metadata.PathPattern{
&metadata.PathPattern{
HTTPMethod: "DELETE",
Pattern: "/api/designer/collections/%s/templates/%s",
Variables: []string{"collection_id", "id"},
Regexp: regexp.MustCompile(`/api/designer/collections/([^/]+)/templates/([^/]+)`),
},
},
CommandFlags: []*metadata.ActionParam{},
APIParams: []*metadata.ActionParam{},
},
&metadata.Action{
Name: "multi_delete",
Description: `Delete multiple Templates from the system in bulk. Note: deleting a Template from Designer doesn't remove it from the Catalog if it has already been published -- see the "unpublish" action.`,
PathPatterns: []*metadata.PathPattern{
&metadata.PathPattern{
HTTPMethod: "DELETE",
Pattern: "/api/designer/collections/%s/templates",
Variables: []string{"collection_id"},
Regexp: regexp.MustCompile(`/api/designer/collections/([^/]+)/templates`),
},
},
CommandFlags: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "ids[]",
Description: `The IDs of the Template to delete`,
Type: "[]string",
Location: metadata.QueryParam,
Mandatory: true,
NonBlank: false,
},
},
APIParams: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "ids[]",
Description: `The IDs of the Template to delete`,
Type: "[]string",
Location: metadata.QueryParam,
Mandatory: true,
NonBlank: false,
},
},
},
&metadata.Action{
Name: "download",
Description: `Download the source of a Template.`,
PathPatterns: []*metadata.PathPattern{
&metadata.PathPattern{
HTTPMethod: "GET",
Pattern: "/api/designer/collections/%s/templates/%s/download",
Variables: []string{"collection_id", "id"},
Regexp: regexp.MustCompile(`/api/designer/collections/([^/]+)/templates/([^/]+)/download`),
},
},
CommandFlags: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "api_version",
Description: `The API version (only valid value is currently "1.0")`,
Type: "string",
Location: metadata.QueryParam,
Mandatory: true,
NonBlank: false,
},
},
APIParams: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "api_version",
Description: `The API version (only valid value is currently "1.0")`,
Type: "string",
Location: metadata.QueryParam,
Mandatory: true,
NonBlank: false,
},
},
},
&metadata.Action{
Name: "compile",
Description: `Compile the Template, but don't save it to Designer. Useful for debugging a CAT file while you are still authoring it.`,
PathPatterns: []*metadata.PathPattern{
&metadata.PathPattern{
HTTPMethod: "POST",
Pattern: "/api/designer/collections/%s/templates/actions/compile",
Variables: []string{"collection_id"},
Regexp: regexp.MustCompile(`/api/designer/collections/([^/]+)/templates/actions/compile`),
},
},
CommandFlags: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "source",
Description: `The source of the CAT as a string`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: true,
NonBlank: false,
},
},
APIParams: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "source",
Description: `The source of the CAT as a string`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: true,
NonBlank: false,
},
},
},
&metadata.Action{
Name: "dependencies",
Description: `Lists the Templates which the provided CAT source or Template directly or indirectly depend upon`,
PathPatterns: []*metadata.PathPattern{
&metadata.PathPattern{
HTTPMethod: "POST",
Pattern: "/api/designer/collections/%s/templates/actions/dependencies",
Variables: []string{"collection_id"},
Regexp: regexp.MustCompile(`/api/designer/collections/([^/]+)/templates/actions/dependencies`),
},
},
CommandFlags: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "source",
Description: `The source of the CAT as a string, mutually exclusive with template_id`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
&metadata.ActionParam{
Name: "template_id",
Description: `The id of the template, mutually exclusive with source, have predecedence over "source" if both parameters are given`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
},
APIParams: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "source",
Description: `The source of the CAT as a string, mutually exclusive with template_id`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
&metadata.ActionParam{
Name: "template_id",
Description: `The id of the template, mutually exclusive with source, have predecedence over "source" if both parameters are given`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
},
},
&metadata.Action{
Name: "dependents",
Description: `List the Dependents templates available in Designer for the given package, even if no template actually define the package.`,
PathPatterns: []*metadata.PathPattern{
&metadata.PathPattern{
HTTPMethod: "GET",
Pattern: "/api/designer/collections/%s/templates/actions/dependents",
Variables: []string{"collection_id"},
Regexp: regexp.MustCompile(`/api/designer/collections/([^/]+)/templates/actions/dependents`),
},
},
CommandFlags: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "package",
Description: `The path of the Package to which lists the dependents`,
Type: "string",
Location: metadata.QueryParam,
Mandatory: true,
NonBlank: false,
},
&metadata.ActionParam{
Name: "view",
Description: `Optional view to return`,
Type: "string",
Location: metadata.QueryParam,
Mandatory: false,
NonBlank: false,
ValidValues: []string{"default", "expanded"},
},
},
APIParams: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "package",
Description: `The path of the Package to which lists the dependents`,
Type: "string",
Location: metadata.QueryParam,
Mandatory: true,
NonBlank: false,
},
&metadata.ActionParam{
Name: "view",
Description: `Optional view to return`,
Type: "string",
Location: metadata.QueryParam,
Mandatory: false,
NonBlank: false,
ValidValues: []string{"default", "expanded"},
},
},
},
&metadata.Action{
Name: "publish",
Description: `Publish the given Template to the Catalog so that users can launch it.`,
PathPatterns: []*metadata.PathPattern{
&metadata.PathPattern{
HTTPMethod: "POST",
Pattern: "/api/designer/collections/%s/templates/actions/publish",
Variables: []string{"collection_id"},
Regexp: regexp.MustCompile(`/api/designer/collections/([^/]+)/templates/actions/publish`),
},
},
CommandFlags: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "id",
Description: `The ID of a Template to publish`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: true,
NonBlank: false,
},
&metadata.ActionParam{
Name: "long_description",
Description: `Optionally override the Template long description used mostly for designers.`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
&metadata.ActionParam{
Name: "name",
Description: `Optionally override the Template name for display in the Catalog`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
&metadata.ActionParam{
Name: "overridden_application_href",
Description: `If re-publishing, you must specify the href of the Application in the Catalog that is being overridden`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
&metadata.ActionParam{
Name: "schedule_required",
Description: `Whether the CloudApp requires a schedule to be provided at launch time. If set to false, allows user to pick from '24/7' schedule when launching in the UI`,
Type: "bool",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
&metadata.ActionParam{
Name: "schedules[]",
Description: ``,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
&metadata.ActionParam{
Name: "short_description",
Description: `Optionally override the Template short description for display in the Catalog`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
},
APIParams: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "id",
Description: `The ID of a Template to publish`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: true,
NonBlank: false,
},
&metadata.ActionParam{
Name: "long_description",
Description: `Optionally override the Template long description used mostly for designers.`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
&metadata.ActionParam{
Name: "name",
Description: `Optionally override the Template name for display in the Catalog`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
&metadata.ActionParam{
Name: "overridden_application_href",
Description: `If re-publishing, you must specify the href of the Application in the Catalog that is being overridden`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
&metadata.ActionParam{
Name: "schedule_required",
Description: `Whether the CloudApp requires a schedule to be provided at launch time. If set to false, allows user to pick from '24/7' schedule when launching in the UI`,
Type: "bool",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
&metadata.ActionParam{
Name: "schedules",
Description: `Schedules available to users when launching the application`,
Type: "[]string",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
&metadata.ActionParam{
Name: "short_description",
Description: `Optionally override the Template short description for display in the Catalog`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: false,
NonBlank: false,
},
},
},
&metadata.Action{
Name: "unpublish",
Description: `Remove a publication from the Catalog by specifying its associated Template.`,
PathPatterns: []*metadata.PathPattern{
&metadata.PathPattern{
HTTPMethod: "POST",
Pattern: "/api/designer/collections/%s/templates/actions/unpublish",
Variables: []string{"collection_id"},
Regexp: regexp.MustCompile(`/api/designer/collections/([^/]+)/templates/actions/unpublish`),
},
},
CommandFlags: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "id",
Description: `The ID of the Template to unpublish`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: true,
NonBlank: false,
},
},
APIParams: []*metadata.ActionParam{
&metadata.ActionParam{
Name: "id",
Description: `The ID of the Template to unpublish`,
Type: "string",
Location: metadata.PayloadParam,
Mandatory: true,
NonBlank: false,
},
},
},
},
},
}
|
package main
import "testing"
// TestCountTheWays_String checks the textual rendering of the flag value.
func TestCountTheWays_String(t *testing.T) {
	c := CountTheWays([]int{1, 2})
	cases := []struct {
		name string
		c    *CountTheWays
		want string
	}{
		{"base-case", &c, "1 ... 2"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := tc.c.String()
			if got != tc.want {
				t.Errorf("CountTheWays.String() = %v, want %v", got, tc.want)
			}
		})
	}
}
// TestCountTheWays_Set verifies parsing of comma-separated int lists,
// including the error path for non-numeric input.
func TestCountTheWays_Set(t *testing.T) {
	ways := CountTheWays([]int{1, 2})
	cases := []struct {
		name    string
		c       *CountTheWays
		input   string
		wantErr bool
	}{
		{"base-case", &ways, "1,2", false},
		{"bad args", &ways, "1,a", true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			err := tc.c.Set(tc.input)
			if gotErr := err != nil; gotErr != tc.wantErr {
				t.Errorf("CountTheWays.Set() error = %v, wantErr %v", err, tc.wantErr)
			}
		})
	}
}
// TestConfig_Setup is a smoke test: Setup must run without panicking
// on a zero-value Config.
func TestConfig_Setup(t *testing.T) {
	type fields struct {
		subject      string
		isAwesome    bool
		howAwesome   int
		countTheWays CountTheWays
	}
	cases := []struct {
		name   string
		fields fields
	}{
		{"base-case", fields{}},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			cfg := &Config{
				subject:      tc.fields.subject,
				isAwesome:    tc.fields.isAwesome,
				howAwesome:   tc.fields.howAwesome,
				countTheWays: tc.fields.countTheWays,
			}
			cfg.Setup()
		})
	}
}
// TestConfig_GetMessage checks the rendered message for both values of
// isAwesome. NOTE(review): both cases share the subtest name "base-case"
// (kept to preserve -run behavior); `go test` will suffix the second #01.
func TestConfig_GetMessage(t *testing.T) {
	type fields struct {
		subject      string
		isAwesome    bool
		howAwesome   int
		countTheWays CountTheWays
	}
	cases := []struct {
		name   string
		fields fields
		want   string
	}{
		{"base-case", fields{subject: "something"}, "something is NOT awesome with a certainty of 0 out of 10. Let me count the ways "},
		{"base-case", fields{subject: "something", isAwesome: true}, "something is awesome with a certainty of 0 out of 10. Let me count the ways "},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			cfg := &Config{
				subject:      tc.fields.subject,
				isAwesome:    tc.fields.isAwesome,
				howAwesome:   tc.fields.howAwesome,
				countTheWays: tc.fields.countTheWays,
			}
			if got := cfg.GetMessage(); got != tc.want {
				t.Errorf("Config.GetMessage() = %v, want %v", got, tc.want)
			}
		})
	}
}
|
package piscine
import "github.com/01-edu/z01"
//import "fmt"
// IsNegative prints 'T' when nb is negative, 'F' otherwise, followed by a
// newline (rune literals replace the original magic numbers 84/70/10).
func IsNegative(nb int) {
	verdict := 'F'
	if nb < 0 {
		verdict = 'T'
	}
	z01.PrintRune(verdict)
	z01.PrintRune('\n')
}
// PrintComb prints every strictly increasing three-digit combination
// (012 through 789) separated by ", "; combinations starting with '7'
// (only 789 exists) get no trailing separator. Ends with a newline.
func PrintComb() {
	for a := '0'; a <= '7'; a++ {
		for b := a + 1; b <= '8'; b++ {
			for c := b + 1; c <= '9'; c++ {
				z01.PrintRune(a)
				z01.PrintRune(b)
				z01.PrintRune(c)
				if a != '7' {
					z01.PrintRune(',')
					z01.PrintRune(' ')
				}
			}
		}
	}
	z01.PrintRune('\n')
}
// PrintComb2 prints every ordered pair of two-digit numbers "ab cd" with
// ab < cd ("00 01, 00 02, …, 98 99"), separated by ", "; the final pair
// "98 99" gets no separator. Ends with a newline. Same output as the
// original four-nested digit loops, expressed with integer pairs.
func PrintComb2() {
	for a := 0; a <= 98; a++ {
		for b := a + 1; b <= 99; b++ {
			z01.PrintRune(rune('0' + a/10))
			z01.PrintRune(rune('0' + a%10))
			z01.PrintRune(' ')
			z01.PrintRune(rune('0' + b/10))
			z01.PrintRune(rune('0' + b%10))
			if a != 98 || b != 99 {
				z01.PrintRune(',')
				z01.PrintRune(' ')
			}
		}
	}
	z01.PrintRune('\n')
}
func PrintNbr(n int){
if n < 0 {
z01.PrintRune('-')
n = n * -1
}
var dig1 int
dig1 = n % 10
var x int = 0
for x < dig1 {
x++
}
z01.PrintRune(rune(x))
} |
package main
import (
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/json"
"flag"
"fmt"
"html/template"
"image/png"
"io/ioutil"
"mime"
"net/http"
"os"
"path"
"strings"
"time"
"bitbucket.org/cicadaDev/utils"
log "github.com/Sirupsen/logrus"
"github.com/dgrijalva/jwt-go"
"github.com/lidashuang/goji_gzip"
"github.com/markbates/goth"
"github.com/markbates/goth/providers/gplus"
"github.com/markbates/goth/providers/linkedin"
"github.com/nullboundary/govalidator"
"github.com/pressly/cji"
"github.com/unrolled/secure"
"github.com/zenazn/goji/graceful"
"github.com/zenazn/goji/web"
"github.com/zenazn/goji/web/middleware"
)
// Package-wide configuration, keys, and pre-parsed templates.
// SECURITY NOTE(review): the signing keys below are hard-coded in source,
// and passTokenKey and csrfKey currently share the same value — rotate
// them and move them to a secret store.
var (
	passTokenKey = []byte(`@1nw_5_sg@WRQtjRYry{IJ1O[]t,#)w`) //TODO: lets make a new key and put this somewhere safer!
	csrfKey      = []byte(`@1nw_5_sg@WRQtjRYry{IJ1O[]t,#)w`) //TODO: lets make a new key and put this somewhere safer!
	jWTokenKey   = []byte(`yY8\,VQ\'MZM(n:0;]-XzUMcYU9rQz,`) //TODO: lets make a new key and put this somewhere safer!

	// downloadServer is the public base URL passes are downloaded from.
	downloadServer = "https://pass.ninja/pass/1/passes/" //(https://pass.ninja/pass/1/passes) https://local.pass.ninja:8001/pass/1/passes/

	// Templates parsed once at startup by preLoadTemplates.
	loginTemplate    *template.Template
	notFoundTemplate *template.Template
	indexTemplate    *template.Template
	accountTemplate  *template.Template
	docsTemplate     *template.Template

	emailFeedBack = NewEmailer() // feedback mailer; Init() is called from init()

	secretKeyRing = "/etc/ninja/tls/.secring.gpg" //crypt set -keyring .pubring.gpg -endpoint http://10.1.42.1:4001 /email/feedback emailvar.json
	bindUrl       string                          //flag var for binding to a specific port
)
// init wires up process-wide state before main runs: the -bindurl flag,
// OAuth providers, an SVG MIME fix, custom govalidator validators, the log
// level, the etcd endpoint, the feedback mailer, and pre-parsed templates.
func init() {
	flag.StringVar(&bindUrl, "bindurl", "https://localhost:10443", "The public ip address and port number for this server to bind to")
	setAuthProviders()
	//svg mime issue fix: https://github.com/golang/go/issues/6378
	mime.AddExtensionType(".svg", "image/svg+xml")
	//add custom validator functions
	addValidators()
	//set the logging level
	log.SetLevel(log.DebugLevel)
	//load etcd service url from env variables
	etcdAddr := utils.SetEtcdURL()
	log.WithField("etcdAddr", etcdAddr).Debugln("set etcd from env variable")
	emailFeedBack.Init()
	//load html files as templates into memory for speed increase
	preLoadTemplates()
	log.Infoln("Pass Ninja Webserver Initialized")
}
//////////////////////////////////////////////////////////////////////////
//
//
//
//
//////////////////////////////////////////////////////////////////////////
// main configures security middleware, mounts all routes (public pages,
// authenticated /accounts/*, and the /api/* REST endpoints), announces the
// server on etcd for vulcand, and serves HTTPS until shutdown.
func main() {
	cspSrc := buildCSPolicy()
	secureMiddleware := secure.New(secure.Options{
		AllowedHosts:          []string{"pass.ninja"},
		SSLProxyHeaders:       map[string]string{"X-Forwarded-Proto": "https"},
		STSSeconds:            315360000,
		STSIncludeSubdomains:  true,
		//STSPreload:           true,
		FrameDeny:             true,
		ContentTypeNosniff:    true,
		BrowserXssFilter:      true,
		ContentSecurityPolicy: cspSrc,
		IsDevelopment:         true, // NOTE(review): disables enforcement; confirm before production.
	})

	flag.Parse() //parse flags

	root := web.New()
	root.Use(middleware.Logger)
	root.Use(middleware.Recoverer)
	root.Use(secureMiddleware.Handler)
	root.Use(gzip.GzipHandler) //gzip everything

	//Login
	root.Post("/auth/:provider", handleLogin)
	root.Get("/auth/:provider/unlink", handleUnlink)
	root.Get("/auth/success", handleLoginSuccess) //loads a login success page into oauth popup.(probably never seen)

	//home page
	root.Get("/index.html", handleIndex)
	root.Get("/", handleIndex)
	root.Get("/apidocs.html", handleDocs)
	root.Get("/assets/*", handleStatic)

	//web app login pages (everything under /accounts requires a session)
	accounts := web.New()
	root.Handle("/accounts/*", accounts)                           //handle all things that require login
	accounts.Use(requireLogin)                                     //login check middleware
	accounts.Get("/accounts/home", handleAccountPage)              //seperate assets for accounts - TODO add back to non accounts
	accounts.Get("/accounts/editor", handleAccountPage)            //seperate assets for accounts - TODO add back to non accounts
	accounts.Post("/accounts/feedback", handleFeedback)
	accounts.Get("/accounts/template/:passType", handlePassSample) //return a sample json object of the pass type

	//API (token-authenticated)
	api := web.New()
	root.Handle("/api/*", api) //handle all things that require login
	api.Use(requireAPILogin)   //login check middleware
	api.Get("/api/v1/passes", handleGetAllPass)                                                  //get a list of all the users passes
	api.Get("/api/v1/passes/:id", cji.Use(passIDVerify).On(handleGetPass))                       //get a specific pass data object
	api.Get("/api/v1/passes/:id/link", cji.Use(passIDVerify).On(handleGetPassLink))              //get a public link to a pass - or update pass variables.
	api.Post("/api/v1/passes", cji.Use(passReadVerify).On(handleCreatePass))                     //creates a new pass
	api.Delete("/api/v1/passes/:id", cji.Use(passIDVerify).On(handleDeletePass))                 //remove a pass from the DB
	api.Get("/api/v1/passes/:id/mutate", cji.Use(passIDVerify).On(handleGetMutateList))          //update pass variables.
	api.Patch("/api/v1/passes/:id/mutate", cji.Use(passIDVerify).On(handleMutatePass))           //update pass variables.
	api.Patch("/api/v1/passes/:id", cji.Use(passIDVerify, passReadVerify).On(handleUpdatePass))  //partial update of pass data

	root.NotFound(handleNotFound)
	root.Use(AddDb) //comment out to remove db function for testing

	//annouce the server on etcd for vulcand
	announceEtcd()

	//customCA Server is only used for testing
	customCAServer := &graceful.Server{Addr: ":443", Handler: root}
	customCAServer.TLSConfig = addRootCA("/etc/ninja/tls/myCA.cer")
	// The serve error was previously discarded, so a failed bind or TLS
	// setup exited silently with status 0. Surface it instead.
	if err := customCAServer.ListenAndServeTLS("/etc/ninja/tls/mycert1.cer", "/etc/ninja/tls/mycert1.key"); err != nil {
		log.Fatalf("server error: %v", err)
	}
}
//////////////////////////////////////////////////////////////////////////
//
//
//
//
//////////////////////////////////////////////////////////////////////////
// addRootCA loads a PEM-encoded CA certificate from filepath and returns a
// *tls.Config whose RootCAs pool contains it. Startup aborts (log.Fatal)
// if the file cannot be read or contains no parsable certificates.
func addRootCA(filepath string) *tls.Config {
	serverCert, err := ioutil.ReadFile(filepath)
	if err != nil {
		log.WithField("filepath", filepath).Fatalf("error loading Root CA file %v", err)
	}
	cAPool := x509.NewCertPool()
	// AppendCertsFromPEM reports whether any certificate was parsed; the
	// previous code ignored it, so a corrupt PEM silently yielded an
	// empty pool.
	if !cAPool.AppendCertsFromPEM(serverCert) {
		log.WithField("filepath", filepath).Fatalln("no certificates parsed from Root CA file")
	}
	return &tls.Config{
		RootCAs:    cAPool,
		MinVersion: tls.VersionTLS10, // NOTE(review): TLS 1.0 is weak; consider VersionTLS12 once clients allow.
	}
}
//////////////////////////////////////////////////////////////////////////
//
//
//
//
//////////////////////////////////////////////////////////////////////////
// announceEtcd registers this instance with etcd so the vulcand load
// balancer can discover it. The backend key is suffixed with the LAST TWO
// characters of bindUrl — presumably the trailing digits of the port
// (":10443" -> "43") — TODO(review): confirm this is the intended server
// numbering scheme.
func announceEtcd() {
	sz := len(bindUrl)
	servernum := "01"
	if sz > 2 {
		servernum = bindUrl[sz-2:]
	}
	log.WithFields(log.Fields{"addr": bindUrl, "server": servernum}).Debugln("announce service on etcd")
	///vulcand/backends/b1/servers/srv2 '{"URL": "http://localhost:5001"}'
	// Final argument 5 is presumably the heartbeat TTL in seconds — verify
	// against utils.HeartBeatEtcd.
	utils.HeartBeatEtcd("vulcand/backends/passninja/servers/svr"+servernum, `{"URL": "`+bindUrl+`"}`, 5)
}
//////////////////////////////////////////////////////////////////////////
//
// generateFileName makes a unique Id for the pass file name
//
//
//////////////////////////////////////////////////////////////////////////
// generateFileName builds a unique, filesystem-safe file name for a pass by
// combining the (dash-separated) pass name with an FNV hash of the name and
// the current time, so repeated generations yield distinct names.
func generateFileName(passName string) string {
	// Hash over name + wall-clock time for uniqueness.
	idHash := utils.GenerateFnvHashID(passName, time.Now().String())
	// File names should not contain spaces; collapse them to dashes.
	dashed := strings.Replace(passName, " ", "-", -1)
	name := fmt.Sprintf("%s-%d", dashed, idHash)
	log.WithField("name", name).Debugln("generating file name")
	return govalidator.SafeFileName(name)
}
//////////////////////////////////////////////////////////////////////////
//
//
//
//
//////////////////////////////////////////////////////////////////////////
// addValidators registers the custom govalidator tag validators used by the
// pass JSON schema: fixed-choice lists for Apple PassKit enum fields, an
// ISO-4217 currency check, two whitelist-style text checks, and a PNG
// data-URI check that actually decodes the image header.
func addValidators() {
	//check barcode format is 1 of 3 types
	barcodeFormats := []string{"PKBarcodeFormatQR", "PKBarcodeFormatPDF417", "PKBarcodeFormatAztec"}
	addListValidator("barcode", barcodeFormats)
	//check transit type
	transitTypes := []string{"PKTransitTypeAir", "PKTransitTypeBoat", "PKTransitTypeBus", "PKTransitTypeGeneric", "PKTransitTypeTrain"}
	addListValidator("transit", transitTypes)
	//check datestyle type (timestyle and date style are the same list)
	timeTypes := []string{"PKDateStyleNone", "PKDateStyleShort", "PKDateStyleMedium", "PKDateStyleLong", "PKDateStyleFull"}
	addListValidator("datestyle", timeTypes)
	//check numstyle type
	numTypes := []string{"PKNumberStyleDecimal", "PKNumberStylePercent", "PKNumberStyleScientific", "PKNumberStyleSpellOut"}
	addListValidator("numstyle", numTypes)
	//check text align style types
	textAlignTypes := []string{"PKTextAlignmentLeft", "PKTextAlignmentCenter", "PKTextAlignmentRight", "PKTextAlignmentNatural"}
	addListValidator("align", textAlignTypes)
	//check if passtype is one of 5 types
	passTypes := []string{"boardingPass", "coupon", "eventTicket", "generic", "storeCard"}
	addListValidator("passtypes", passTypes)
	//check to make sure its a valid currency code: USD,GBP etc
	// (three characters, all uppercase, all alphabetic)
	govalidator.TagMap["iso4217"] = govalidator.Validator(func(str string) bool {
		if len(str) != 3 {
			return false
		}
		if !govalidator.IsUpperCase(str) {
			return false
		}
		return govalidator.IsAlpha(str)
	})
	//general text plus a few special characters
	govalidator.TagMap["encode"] = govalidator.Validator(func(str string) bool {
		//actually using as whitelist. Strip these chars example: ISO_8859-10:1992!
		str = govalidator.BlackList(str, `-_.+:`)
		//then test UTFLetterNum becomes: ISO8859101992! which returns false
		if !govalidator.IsAlphanumeric(str) {
			return false
		}
		//return true if all passed above.
		return true
	})
	//general text plus a few special characters
	govalidator.TagMap["msg"] = govalidator.Validator(func(str string) bool {
		log.WithField("string", str).Debugln("msg validator input")
		//use as whitelist. Strip these chars example: 달기&Co.;
		pattern := "[" + `&. ~()':/?"!--+_%@#,\s` + "]+"
		str = govalidator.ReplacePattern(str, pattern, "")
		log.WithField("string", str).Debugln("msg validator after character strip")
		//then test UTFLetterNum becomes: 달기Co;, which returns false
		if !govalidator.IsUTFLetterNumeric(str) {
			return false
		}
		//return true if all passed above.
		return true
	})
	//check to make sure its a valid png image datauri
	govalidator.TagMap["imagepng"] = govalidator.Validator(func(str string) bool {
		dataStr := strings.SplitN(str, ",", 2) //seperate data:image/png;base64, from the DataURI
		if !strings.Contains(dataStr[0], "image") {
			return false
		}
		if !strings.Contains(dataStr[0], "png") {
			return false
		}
		if !govalidator.IsDataURI(str) {
			return false
		}
		//decode the data and see if its truely a png, by getting the color space and width/height
		data, err := base64.StdEncoding.DecodeString(dataStr[1]) // [] byte
		if err != nil {
			log.Errorf("png base64 decode error: %s", err.Error())
			return false
		}
		r := bytes.NewReader(data)
		_, err = png.DecodeConfig(r)
		if err != nil {
			log.Errorf("png decode config error: %s", err.Error())
			return false
		}
		return true
	})
}
//////////////////////////////////////////////////////////////////////////
//
//
//
//
//////////////////////////////////////////////////////////////////////////
// addListValidator registers a govalidator tag that accepts a string only
// when it exactly matches one of the allowed values in typeList.
func addListValidator(key string, typeList []string) {
	govalidator.TagMap[key] = govalidator.Validator(func(str string) bool {
		found := false
		for _, allowed := range typeList {
			if allowed == str {
				found = true
				break
			}
		}
		return found
	})
}
//////////////////////////////////////////////////////////////////////////
//
//
//
//
//////////////////////////////////////////////////////////////////////////
// createSessionID mints a signed session-id cookie: a JWT (signed with
// csrfKey) whose subject is a fresh 16-character random string.
func createSessionID() (*http.Cookie, error) {
	claims, err := createJWToken("sid", csrfKey, utils.RandomStr(16))
	if err != nil {
		return nil, err
	}
	token := claims["sid"]
	log.WithField("sid", token).Debugln("create session id")
	return &http.Cookie{Name: "sid", Value: token}, nil
}
// parseFromRequest tries to find a JWT in an http.Request.
// It checks, in order: the Authorization header (Bearer scheme), a "token"
// cookie, a "token" URL query parameter, and finally an "access_token" form
// field — this last step calls ParseMultipartForm, so the request body may
// be consumed.
func parseFromRequest(req *http.Request, keyFunc jwt.Keyfunc) (token *jwt.Token, err error) {
	// Authorization: Bearer <token>. Match the scheme case-insensitively
	// and require the separating space explicitly; the previous
	// length-based slice (ah[7:] after only checking ah[0:6]) mangled
	// headers that lacked a space after "BEARER".
	const bearer = "bearer "
	if ah := req.Header.Get("Authorization"); len(ah) > len(bearer) && strings.EqualFold(ah[:len(bearer)], bearer) {
		return jwt.Parse(ah[len(bearer):], keyFunc)
	}
	// "token" cookie.
	if c, cErr := req.Cookie("token"); cErr == nil {
		return jwt.Parse(c.Value, keyFunc)
	}
	// "token" URL query parameter.
	if tokStr := req.URL.Query().Get("token"); tokStr != "" {
		return jwt.Parse(tokStr, keyFunc)
	}
	// "access_token" form parameter. The parse error is deliberately
	// ignored: a missing or malformed form simply means no token here.
	req.ParseMultipartForm(10e6)
	if tokStr := req.Form.Get("access_token"); tokStr != "" {
		return jwt.Parse(tokStr, keyFunc)
	}
	return nil, jwt.ErrNoTokenInRequest
}
//////////////////////////////////////////////////////////////////////////
//
//
//
//
//////////////////////////////////////////////////////////////////////////
// verifyState implements the OAuth CSRF check: the "state" query parameter
// must be a valid JWT signed with csrfKey AND must equal the (also valid)
// "sid" session cookie issued earlier. Returns true only when both hold.
func verifyState(req *http.Request) bool {
	stateKeyFunc := func(token *jwt.Token) (interface{}, error) {
		return csrfKey, nil
	}
	stateStr := req.URL.Query().Get("state")
	log.WithField("state", stateStr).Debugln("verify request state")
	if stateStr == "" {
		return false
	}
	state, err := jwt.Parse(stateStr, stateKeyFunc)
	if err != nil || !state.Valid {
		return false
	}
	sidCookie, err := req.Cookie("sid")
	// Check the error BEFORE touching sidCookie: req.Cookie returns a nil
	// cookie on http.ErrNoCookie, and the previous code dereferenced it
	// first (panic when the cookie was absent).
	if err != nil || sidCookie.Value == "" {
		return false
	}
	log.Println(sidCookie.Value)
	sid, err := jwt.Parse(sidCookie.Value, stateKeyFunc)
	if err != nil || !sid.Valid {
		return false
	}
	return sidCookie.Value == stateStr
}
//////////////////////////////////////////////////////////////////////////
//
//
//
//
//////////////////////////////////////////////////////////////////////////
// updatePassVariables applies the caller-supplied custom variable values in
// customVars to the matching fields of newPass. Every key listed in
// newPass.MutateList must be present in customVars; the matching field is
// searched across the auxiliary, secondary, back, header and primary field
// slices of the pass structure for the pass's type.
// Returns a generic error (safe to show callers) on an unknown pass type or
// a missing mutate key.
func updatePassVariables(newPass *pass, customVars map[string]value) error {
	var passDoc *passStructure
	log.Println(newPass.PassType)
	//get the correct keydoc passtructure
	switch newPass.PassType {
	case "boardingPass":
		passDoc = newPass.KeyDoc.BoardingPass
	case "coupon":
		passDoc = newPass.KeyDoc.Coupon
	case "eventTicket":
		passDoc = newPass.KeyDoc.EventTicket
	case "generic":
		passDoc = newPass.KeyDoc.Generic
	case "storeCard":
		passDoc = newPass.KeyDoc.StoreCard
	default:
		log.WithField("type", newPass.PassType).Warnln("Pass type not found")
		return fmt.Errorf("the submitted data is malformed")
	}
	//loop over every mutate variable key in pass
mutateLoop:
	for _, key := range newPass.MutateList {
		// val is redeclared per iteration, so taking &val below is safe:
		// each field gets a pointer to its own copy.
		val, ok := customVars[key]
		if !ok {
			log.WithField("key", key).Warnln("mutate key not found")
			return fmt.Errorf("mutate field key not found:%s", key)
		}
		//fmt.Printf("%s -> %s\n", key, val)
		//sKey := []rune(key) //aux4: starts as
		//fieldType := sKey[:len(sKey)] //aux: first part
		//index := sKey[len(sKey):] //4: second part
		//match against each field slice : TODO, is this ideal?
		// First match wins; continue mutateLoop skips the remaining slices.
		for i, field := range passDoc.AuxiliaryFields {
			if field.Key == key {
				passDoc.AuxiliaryFields[i].Value = &val //TODO: can't always be strings!
				continue mutateLoop
			}
		}
		for i, field := range passDoc.SecondaryFields {
			if field.Key == key {
				passDoc.SecondaryFields[i].Value = &val
				continue mutateLoop
			}
		}
		for i, field := range passDoc.BackFields {
			if field.Key == key {
				passDoc.BackFields[i].Value = &val
				continue mutateLoop
			}
		}
		for i, field := range passDoc.HeaderFields {
			if field.Key == key {
				passDoc.HeaderFields[i].Value = &val
				continue mutateLoop
			}
		}
		for i, field := range passDoc.PrimaryFields {
			if field.Key == key {
				passDoc.PrimaryFields[i].Value = &val
				continue mutateLoop
			}
		}
	}
	return nil
}
//////////////////////////////////////////////////////////////////////////
//
//
//
//
//////////////////////////////////////////////////////////////////////////
// verifyPassIDToken checks a pass-id token against passTokenKey and the
// given seeds. Both failure modes (decode error and a failed verification)
// intentionally return the same opaque "pass not found" error so callers
// leak nothing about why the lookup failed.
func verifyPassIDToken(token string, seeds ...string) error {
	ok, err := utils.VerifyToken(passTokenKey, token, seeds...)
	switch {
	case err != nil:
		// decoding (e.g. base64) failed while unpacking the token
		log.WithField("token", token).Errorf("verify pass id token failed: %s", err.Error())
		return fmt.Errorf("pass not found")
	case !ok:
		log.WithField("token", token).Warnln("token failed to verify")
		return fmt.Errorf("pass not found")
	}
	return nil
}
//////////////////////////////////////////////////////////////////////////
//
//
//
//
//////////////////////////////////////////////////////////////////////////
// createJWToken signs an HS256 JWT with signKey, carrying subClaim as the
// subject and a 72-hour expiry, and returns it as {tokenName: signedToken}.
func createJWToken(tokenName string, signKey []byte, subClaim string) (map[string]string, error) {
	tok := jwt.New(jwt.GetSigningMethod("HS256"))
	log.WithField("sub", subClaim).Debugln("create JWT")
	now := time.Now()
	tok.Claims["sub"] = subClaim
	tok.Claims["iat"] = now.Unix()
	tok.Claims["exp"] = now.Add(72 * time.Hour).Unix()
	signed, err := tok.SignedString(signKey)
	if err != nil {
		return nil, err
	}
	return map[string]string{tokenName: signed}, nil
}
//////////////////////////////////////////////////////////////////////////
//
//
//
//
//////////////////////////////////////////////////////////////////////////
// noDirListing wraps h so that requests for paths that do not exist, or
// that resolve to a directory, are answered by handleNotFound instead of a
// directory index.
func noDirListing(prefix string, h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		safePath := govalidator.SafeFileName(r.URL.Path)
		log.WithField("path", path.Join(prefix, safePath)).Debugln("join preix and safepath")
		// NOTE(review): safePath above is only logged — the Stat below
		// uses the RAW r.URL.Path. Confirm whether the sanitized path was
		// meant to be used here.
		fileInfo, err := os.Stat(path.Join(prefix, r.URL.Path))
		if err != nil {
			log.Errorf("os.Stat error: %s", err)
			handleNotFound(w, r)
			return
		}
		if fileInfo.IsDir() {
			handleNotFound(w, r)
			return
		}
		h.ServeHTTP(w, r)
	})
}
//////////////////////////////////////////////////////////////////////////
//
//
//
//
//////////////////////////////////////////////////////////////////////////
func maxAgeHandler(maxAge int, h http.Handler) http.Handler {
serverMaxAge := maxAge * 3 //cdn shared cache 3 times longer then user agent
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Add("Cache-Control", fmt.Sprintf("no-transform,public,max-age=%d,s-maxage=%d", maxAge, serverMaxAge))
h.ServeHTTP(w, r)
})
}
//////////////////////////////////////////////////////////////////////////
//
//
//
//
//////////////////////////////////////////////////////////////////////////
// setAuthProviders loads the OAuth client credentials from the encrypted
// etcd key /oauth/providers and registers the Google+ and LinkedIn
// providers with goth. On any load/parse failure it logs and returns
// without registering providers; the previous version fell through and
// nil-dereferenced authP.Google, crashing the process at init.
func setAuthProviders() {
	type provider struct {
		AuthURI          string `json:"auth_uri"`
		ClientSecret     string `json:"client_secret"`
		TokenURI         string `json:"token_uri"`
		RedirectURI      string `json:"redirect_uri"`
		ClientID         string `json:"client_id"`
		JavascriptOrigin string `json:"javascript_origin"`
	}
	type providers struct {
		Google   *provider `json:"google"`
		Linkedin *provider `json:"linkedin"`
	}
	authP := &providers{}

	providerVar, err := utils.GetCryptKey(secretKeyRing, "/oauth/providers")
	if err != nil {
		log.Errorf("error getting oauth provider etcd info: %s", err)
		return
	}
	if err = json.Unmarshal(providerVar, &authP); err != nil {
		log.Errorf("error unmarshalling oauth provider info: %s", err)
		return
	}
	if authP.Google == nil || authP.Linkedin == nil {
		log.Errorln("oauth provider config incomplete; skipping provider registration")
		return
	}
	goth.UseProviders(
		gplus.New(authP.Google.ClientID, authP.Google.ClientSecret, authP.Google.RedirectURI),
		linkedin.New(authP.Linkedin.ClientID, authP.Linkedin.ClientSecret, authP.Linkedin.RedirectURI),
	)
}
//////////////////////////////////////////////////////////////////////////
//
//
//
//
//////////////////////////////////////////////////////////////////////////
func buildCSPolicy() string {
//TODO: 'unsafe-eval' should be removed when a new datetime picker is selected
styleHash1 := "'sha256-OhN3Otr1Zz7s_rehKYkBeODCQWNYqNJvWI-Yp0YJJhI='" //eventticket.svg
styleHash2 := "'sha256-rJJyMDPmHMZS0mPmL877gjjApxGMVa4522UDb4ctw7I='" //webcomponents.js 7059
styleHash3 := "'sha256-3m4uh7Ti2CB_4MwwXKXBqcyUVLLr7fYp_-3JMbEr7Xc='" //back.svg
styleHash4 := "'sha256-M--wiR7hOXXX_WqIoRuQQFFzOfS922jlrxq62uZEkLA='" //boardingpass.svg
styleHash5 := "'sha256-hREc081rgBIohKe7SykdwzKlLSyEG5uX0H_HitOG6Rw='" //coupon.svg
styleHash6 := "'sha256-SIbroM9WWbSNLD633pSS4_Y_i6tCwP5_MQIEF-mBN_w='" //storeCard.svg
//TODO: sha256 hashs problematic when minified?
//scriptHash1 := "'sha256-jyt8dE8Ni1-ffuFSRkU0oJb7KniYkUefxOF3XKxjg4g='" //google anaylytics inline script
defaultSrc := "default-src 'self' https://cdnjs.cloudflare.com https://fonts.gstatic.com;"
styleSrc := "style-src 'self' 'unsafe-inline' " + styleHash1 + " " + styleHash3 + " " + styleHash4 + " " + styleHash5 + " " + styleHash6 + " " + styleHash2 + " https://cdnjs.cloudflare.com https://fonts.googleapis.com ;"
//scriptSrc := "script-src 'self' 'unsafe-eval' " + scriptHash1 + " https://cdnjs.cloudflare.com https://ajax.googleapis.com https://maps.googleapis.com https://maps.gstatic.com https://www.google-analytics.com;"
scriptSrc := "script-src 'self' 'unsafe-eval' https://cdnjs.cloudflare.com https://ajax.googleapis.com https://maps.googleapis.com https://maps.gstatic.com https://www.google-analytics.com;"
childSrc := "child-src 'none';frame-src 'none';" //frame-src is depreacted. Remove by 2017
objectSrc := "object-src 'none';"
imageSrc := "img-src 'self' *.global.ssl.fastly.net https://cdnjs.cloudflare.com data: blob:;"
return defaultSrc + scriptSrc + styleSrc + imageSrc + childSrc + objectSrc
}
//////////////////////////////////////////////////////////////////////////
//
//
//
//
//////////////////////////////////////////////////////////////////////////
func preLoadTemplates() {
//load login page as template.
var err error
loginTemplate, err = template.ParseFiles("/usr/share/ninja/www/static/public/login.html")
if err != nil {
log.WithField("template", "login").Fatalln(err)
}
//load notfound.html as template
notFoundTemplate, err = template.ParseFiles("/usr/share/ninja/www/static/public/notfound.html")
if err != nil {
log.WithField("template", "notfound").Fatalln(err)
}
//load index.html as template
indexTemplate, err = template.ParseFiles("/usr/share/ninja/www/static/public/index.html")
if err != nil {
log.WithField("template", "index").Fatalln(err)
}
//load account.html as template
accountTemplate, err = template.ParseFiles("/usr/share/ninja/www/static/auth/accounts.html")
if err != nil {
log.WithField("template", "accounts").Fatalln(err)
}
//load docs.html as template
docsTemplate, err = template.ParseFiles("/usr/share/ninja/www/static/public/apidocs.html")
if err != nil {
log.WithField("template", "docs").Fatalln(err)
}
}
|
package myeth
import (
"bytes"
"encoding/json"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/golang/glog"
"golang.org/x/net/context"
"google.golang.org/grpc/naming"
"math/big"
"sync"
"time"
)
// REFLASHTIME is the interval between nonce-map sweep passes
// ("reflash" appears to mean "refresh" throughout this package).
const REFLASHTIME = 10 * time.Second

// HTTPTIMEOUT bounds each RPC call to the ethereum node.
const HTTPTIMEOUT = 10 * time.Second

// NonceMap is the process-wide nonce cache; it is nil until NewNonceMap runs.
var NonceMap *Nonce
// n is one cached account nonce plus its creation time (used for expiry).
type n struct {
	num        uint64    // next nonce to hand out for this address
	createTime time.Time // when this entry was cached
}
// Nonce is a lock-guarded map from user address to cached nonce entry.
type Nonce struct {
	m    map[string]*n // keyed by parsed user address
	Lock sync.RWMutex  // guards m
}
// NewNonceMap initializes the package-level NonceMap with an empty cache.
// Must be called before any other Nonce method is used.
func NewNonceMap() {
	NonceMap = &Nonce{
		m: make(map[string]*n),
	}
}
// Reflash runs forever, sweeping the nonce cache every REFLASHTIME and
// evicting entries older than REFLASHTIME so stale nonces get re-fetched
// from the node. Intended to run in its own goroutine.
//
// Fix: the original compared time.Now().Sub(v.createTime) against the bare
// literal 10 — i.e. 10 *nanoseconds* — so effectively every entry was
// evicted on every sweep, defeating the cache. The comments said
// "10 seconds"; compare against REFLASHTIME instead.
func (nc *Nonce) Reflash() {
	// NOTE(review): the resolver result is discarded; this looks like dead
	// code, but removing it would leave the grpc naming import unused —
	// confirm before deleting.
	naming.NewDNSResolver()
	for {
		time.Sleep(REFLASHTIME)
		nc.Lock.Lock()
		for k, v := range nc.m {
			if time.Since(v.createTime) >= REFLASHTIME {
				glog.Infoln("delete", k)
				delete(nc.m, k)
			}
		}
		nc.Lock.Unlock()
	}
}
// calc hands out the next nonce for addr, setting it on auth.Nonce so
// concurrent writers to the chain each get a distinct nonce.
// On a cache miss it fetches the pending nonce from the node (bounded by
// HTTPTIMEOUT); on a hit it simply increments the cached value.
// Returns the nonce used, or -1 if the address is empty/unparsable or the
// RPC call fails. The whole operation holds the write lock.
func (this *Nonce) calc(addr string, auth *bind.TransactOpts, client *ethclient.Client) int64 {
	this.Lock.Lock()
	defer this.Lock.Unlock()
	addr = ParseUserName(addr)
	if addr == "" {
		glog.Errorln("user_address == nil")
		return -1
	}
	glog.Infoln("Nonce Calc", addr)
	// If nonce of corresponding user_address is not exist
	// get it from the block_chain by rpc func. And reflash
	// the MyNonce map.
	if _, ok := this.m[addr]; !ok {
		ctx, cancel := context.WithTimeout(context.Background(), HTTPTIMEOUT)
		nonce, err := client.PendingNonceAt(ctx, auth.From)
		defer cancel()
		if err != nil {
			glog.Errorln("Nonce Calc error :", err)
			return -1
		}
		// Cache the fetched nonce and pre-increment for the next caller.
		this.m[addr] = &n{
			num:        nonce,
			createTime: time.Now(),
		}
		auth.Nonce = big.NewInt(int64(nonce))
		this.m[addr].num++
		return int64(nonce)
	} else {
		// Else just add is just fine: use the cached value and bump it.
		nonce := this.m[addr].num
		this.m[addr].num++
		auth.Nonce = big.NewInt(int64(nonce))
		return int64(nonce)
	}
}
// when the contract func returns error ,reset the nonce in case of the
// error is call by the wrong nonce
func (this *Nonce) reset22222(key_string string, client *ethclient.Client) int64 {
this.Lock.Lock()
defer this.Lock.Unlock()
mp := make(map[string]interface{})
json.Unmarshal(bytes.NewBufferString(key_string).Bytes(), &mp)
glog.Infoln(mp)
if user_address, exist := mp["address"]; exist {
u_addr := user_address.(string)
u_addr = ParseUserName(u_addr)
ctx, cancel := context.WithTimeout(context.Background(), HTTPTIMEOUT)
nonce, err := client.PendingNonceAt(ctx, common.HexToAddress(u_addr))
if err != nil {
glog.Errorln("Nonce Calc error :", err)
return -1
}
glog.Infoln("Reseting Nonce", nonce, u_addr)
this.m[u_addr] = &n{
num: nonce,
createTime: time.Now(),
}
cancel()
return 0
}
return -1
}
// if the connection is down reset all nonce of the users
func (this *Nonce) resetAll11111(client *ethclient.Client) int64 {
for addr, _ := range this.m {
ctx, cancel := context.WithTimeout(context.Background(), HTTPTIMEOUT)
nonce, err := client.PendingNonceAt(ctx, common.HexToAddress(addr))
if err != nil {
glog.Errorln("Nonce Calc error :", err)
return -1
}
glog.Infoln("Reseting Nonce", nonce, addr)
this.Lock.Lock()
this.m[addr] = &n{
num: nonce,
createTime: time.Now(),
}
cancel()
this.Lock.Unlock()
return 0
}
return -1
}
|
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"bytes"
"context"
"fmt"
"runtime/trace"
"strconv"
"sync/atomic"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/executor/internal/applycache"
"github.com/pingcap/tidb/executor/internal/exec"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/parser/terror"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/bitmap"
"github.com/pingcap/tidb/util/channel"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/dbterror/exeerrors"
"github.com/pingcap/tidb/util/disk"
"github.com/pingcap/tidb/util/execdetails"
"github.com/pingcap/tidb/util/memory"
)
// Compile-time checks that both join executors satisfy exec.Executor.
var (
	_ exec.Executor = &HashJoinExec{}
	_ exec.Executor = &NestedLoopApplyExec{}
)
// hashJoinCtx holds state shared by the fetch, build and join workers of a
// single hash join execution.
type hashJoinCtx struct {
	sessCtx   sessionctx.Context
	allocPool chunk.Allocator
	// concurrency is the number of partition, build and join workers.
	concurrency  uint
	joinResultCh chan *hashjoinWorkerResult
	// closeCh add a lock for closing executor.
	closeCh            chan struct{}
	finished           atomic.Bool // set when the join is done or aborted
	useOuterToBuild    bool
	isOuterJoin        bool
	isNullEQ           []bool
	buildFinished      chan error // signals build-side completion (or error)
	rowContainer       *hashRowContainer
	joinType           plannercore.JoinType
	outerMatchedStatus []*bitmap.ConcurrentBitmap
	stats              *hashJoinRuntimeStats
	probeTypes         []*types.FieldType
	buildTypes         []*types.FieldType
	outerFilter        expression.CNFExprs
	isNullAware        bool
	memTracker         *memory.Tracker // track memory usage.
	diskTracker        *disk.Tracker   // track disk usage.
}
// probeSideTupleFetcher reads tuples from probeSideExec and send them to probeWorkers.
type probeSideTupleFetcher struct {
	*hashJoinCtx

	probeSideExec      exec.Executor
	probeChkResourceCh chan *probeChkResource // recycled chunks for refilling
	probeResultChs     []chan *chunk.Chunk    // one result channel per probe worker
	requiredRows       int64
}
// probeWorker is one join worker: it consumes probe-side chunks, looks up
// matches in the shared row container, and emits joined result chunks.
type probeWorker struct {
	hashJoinCtx      *hashJoinCtx
	workerID         uint
	probeKeyColIdx   []int
	probeNAKeyColIdx []int
	// We pre-alloc and reuse the Rows and RowPtrs for each probe goroutine, to avoid allocation frequently
	buildSideRows    []chunk.Row
	buildSideRowPtrs []chunk.RowPtr

	// We build individual joiner for each join worker when use chunk-based
	// execution, to avoid the concurrency of joiner.chk and joiner.selected.
	joiner               joiner
	rowIters             *chunk.Iterator4Slice
	rowContainerForProbe *hashRowContainer
	// for every naaj probe worker, pre-allocate the int slice for store the join column index to check.
	needCheckBuildColPos []int
	needCheckProbeColPos []int
	needCheckBuildTypes  []*types.FieldType
	needCheckProbeTypes  []*types.FieldType
	probeChkResourceCh   chan *probeChkResource
	joinChkResourceCh    chan *chunk.Chunk
	probeResultCh        chan *chunk.Chunk
}
// buildWorker reads the build side and feeds rows into the hash table.
type buildWorker struct {
	hashJoinCtx      *hashJoinCtx
	buildSideExec    exec.Executor
	buildKeyColIdx   []int
	buildNAKeyColIdx []int // null-aware anti-join key columns
}
// HashJoinExec implements the hash join algorithm.
type HashJoinExec struct {
	exec.BaseExecutor
	*hashJoinCtx

	probeSideTupleFetcher *probeSideTupleFetcher
	probeWorkers          []*probeWorker
	buildWorker           *buildWorker

	workerWg util.WaitGroupWrapper // build/probe worker goroutines
	waiterWg util.WaitGroupWrapper // waiter goroutines; drained in Close

	prepared bool // true once the worker pipeline has been started
}
// probeChkResource stores the result of the join probe side fetch worker,
// `dest` is for Chunk reuse: after join workers process the probe side chunk which is read from `dest`,
// they'll store the used chunk as `chk`, and then the probe side fetch worker will put new data into `chk` and write `chk` into dest.
type probeChkResource struct {
	chk  *chunk.Chunk
	dest chan<- *chunk.Chunk
}
// hashjoinWorkerResult stores the result of join workers,
// `src` is for Chunk reuse: the main goroutine will get the join result chunk `chk`,
// and push `chk` into `src` after processing, join worker goroutines get the empty chunk from `src`
// and push new data into this chunk.
type hashjoinWorkerResult struct {
	chk *chunk.Chunk
	err error
	src chan<- *chunk.Chunk
}
// Close implements the Executor Close interface.
// It signals shutdown, drains every channel the background goroutines may be
// blocked sending on, waits for the waiter goroutine, releases the per-worker
// scratch buffers, and finally closes the base executor.
func (e *HashJoinExec) Close() error {
	if e.closeCh != nil {
		close(e.closeCh)
	}
	e.finished.Store(true)
	if e.prepared {
		// Drain all channels workers may still be sending to; a blocked
		// sender would otherwise leak its goroutine.
		if e.buildFinished != nil {
			channel.Clear(e.buildFinished)
		}
		if e.joinResultCh != nil {
			channel.Clear(e.joinResultCh)
		}
		if e.probeSideTupleFetcher.probeChkResourceCh != nil {
			close(e.probeSideTupleFetcher.probeChkResourceCh)
			channel.Clear(e.probeSideTupleFetcher.probeChkResourceCh)
		}
		for i := range e.probeSideTupleFetcher.probeResultChs {
			channel.Clear(e.probeSideTupleFetcher.probeResultChs[i])
		}
		for i := range e.probeWorkers {
			close(e.probeWorkers[i].joinChkResourceCh)
			channel.Clear(e.probeWorkers[i].joinChkResourceCh)
		}
		e.probeSideTupleFetcher.probeChkResourceCh = nil
		terror.Call(e.rowContainer.Close)
		// Wait for the waiter goroutine only after the drains above, so it
		// cannot stay blocked on any of the cleared channels.
		e.waiterWg.Wait()
	}
	e.outerMatchedStatus = e.outerMatchedStatus[:0]
	// Drop per-worker scratch slices so their memory can be reclaimed.
	for _, w := range e.probeWorkers {
		w.buildSideRows = nil
		w.buildSideRowPtrs = nil
		w.needCheckBuildColPos = nil
		w.needCheckProbeColPos = nil
		w.needCheckBuildTypes = nil
		w.needCheckProbeTypes = nil
		w.joinChkResourceCh = nil
	}
	if e.stats != nil && e.rowContainer != nil {
		e.stats.hashStat = *e.rowContainer.stat
	}
	if e.stats != nil {
		// Register runtime stats after BaseExecutor.Close via defer.
		defer e.Ctx().GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.ID(), e.stats)
	}
	err := e.BaseExecutor.Close()
	return err
}
// Open implements the Executor Open interface.
// It resets per-execution state (memory/disk trackers, wait groups, close
// channel, runtime stats) so the executor can be re-executed cleanly.
func (e *HashJoinExec) Open(ctx context.Context) error {
	if err := e.BaseExecutor.Open(ctx); err != nil {
		// Leave the executor un-prepared so a subsequent Close stays safe.
		e.closeCh = nil
		e.prepared = false
		return err
	}
	e.prepared = false
	if e.hashJoinCtx.memTracker != nil {
		// Reuse the tracker from a previous execution instead of allocating.
		e.hashJoinCtx.memTracker.Reset()
	} else {
		e.hashJoinCtx.memTracker = memory.NewTracker(e.ID(), -1)
	}
	e.hashJoinCtx.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker)

	e.diskTracker = disk.NewTracker(e.ID(), -1)
	e.diskTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.DiskTracker)

	e.workerWg = util.WaitGroupWrapper{}
	e.waiterWg = util.WaitGroupWrapper{}
	e.closeCh = make(chan struct{})
	e.finished.Store(false)

	if e.RuntimeStats() != nil {
		e.stats = &hashJoinRuntimeStats{
			concurrent: int(e.concurrency),
		}
	}
	return nil
}
// fetchProbeSideChunks fetches chunks from the probe side (the big table) in a
// background goroutine and sends the chunks to multiple channels which will be
// read by multiple join workers.
func (fetcher *probeSideTupleFetcher) fetchProbeSideChunks(ctx context.Context, maxChunkSize int) {
	hasWaitedForBuild := false
	for {
		if fetcher.finished.Load() {
			return
		}

		// Obtain a recycled chunk to read into, or stop on shutdown.
		var probeSideResource *probeChkResource
		var ok bool
		select {
		case <-fetcher.closeCh:
			return
		case probeSideResource, ok = <-fetcher.probeChkResourceCh:
			if !ok {
				return
			}
		}
		probeSideResult := probeSideResource.chk
		if fetcher.isOuterJoin {
			// Propagate the parent's required-row count so the child does
			// not over-produce for an outer join.
			required := int(atomic.LoadInt64(&fetcher.requiredRows))
			probeSideResult.SetRequiredRows(required, maxChunkSize)
		}
		err := Next(ctx, fetcher.probeSideExec, probeSideResult)
		failpoint.Inject("ConsumeRandomPanic", nil)
		if err != nil {
			fetcher.joinResultCh <- &hashjoinWorkerResult{
				err: err,
			}
			return
		}
		if !hasWaitedForBuild {
			failpoint.Inject("issue30289", func(val failpoint.Value) {
				if val.(bool) {
					probeSideResult.Reset()
				}
			})
			if probeSideResult.NumRows() == 0 && !fetcher.useOuterToBuild {
				// Empty probe side: nothing to join unless the outer side
				// is the build side.
				fetcher.finished.Store(true)
			}
			// Block until the build side is ready before dispatching the
			// first chunk; probing an unfinished hash table would be wrong.
			emptyBuild, buildErr := fetcher.wait4BuildSide()
			if buildErr != nil {
				fetcher.joinResultCh <- &hashjoinWorkerResult{
					err: buildErr,
				}
				return
			} else if emptyBuild {
				return
			}
			hasWaitedForBuild = true
		}

		if probeSideResult.NumRows() == 0 {
			return
		}

		probeSideResource.dest <- probeSideResult
	}
}
// wait4BuildSide blocks until the build side has finished or the executor is
// closing. It reports emptyBuild=true when probing can be skipped entirely:
// either the executor is shutting down, or the hash table is empty and the
// join type yields no rows from an empty build side.
func (fetcher *probeSideTupleFetcher) wait4BuildSide() (emptyBuild bool, err error) {
	select {
	case <-fetcher.closeCh:
		// Executor is closing; treat as "nothing to probe".
		return true, nil
	case buildErr := <-fetcher.buildFinished:
		if buildErr != nil {
			return false, buildErr
		}
	}
	// An empty hash table short-circuits only inner and semi joins; other
	// join types must still emit probe-side rows.
	skippable := fetcher.rowContainer.Len() == uint64(0) &&
		(fetcher.joinType == plannercore.InnerJoin || fetcher.joinType == plannercore.SemiJoin)
	return skippable, nil
}
// fetchBuildSideRows fetches all rows from build side executor, and append them
// to e.buildSideResult.
//
// Fetched chunks are sent to chkCh (which is always closed on return); errors
// are reported through errCh. The loop stops early when doneCh or the
// executor's close channel fires, or when the join is marked finished.
func (w *buildWorker) fetchBuildSideRows(ctx context.Context, chkCh chan<- *chunk.Chunk, errCh chan<- error, doneCh <-chan struct{}) {
	defer close(chkCh)
	var err error
	failpoint.Inject("issue30289", func(val failpoint.Value) {
		if val.(bool) {
			err = errors.Errorf("issue30289 build return error")
			errCh <- errors.Trace(err)
			return
		}
	})
	failpoint.Inject("issue42662_1", func(val failpoint.Value) {
		if val.(bool) {
			if w.hashJoinCtx.sessCtx.GetSessionVars().ConnectionID != 0 {
				// consume 170MB memory, this sql should be tracked into MemoryTop1Tracker
				w.hashJoinCtx.memTracker.Consume(170 * 1024 * 1024)
			}
			return
		}
	})
	sessVars := w.hashJoinCtx.sessCtx.GetSessionVars()
	for {
		if w.hashJoinCtx.finished.Load() {
			return
		}
		chk := sessVars.GetNewChunkWithCapacity(w.buildSideExec.Base().RetFieldTypes(), sessVars.MaxChunkSize, sessVars.MaxChunkSize, w.hashJoinCtx.allocPool)
		err = Next(ctx, w.buildSideExec, chk)
		if err != nil {
			errCh <- errors.Trace(err)
			return
		}
		failpoint.Inject("errorFetchBuildSideRowsMockOOMPanic", nil)
		failpoint.Inject("ConsumeRandomPanic", nil)
		if chk.NumRows() == 0 {
			// Build side exhausted.
			return
		}
		// Hand the chunk over unless the consumer or executor is stopping.
		select {
		case <-doneCh:
			return
		case <-w.hashJoinCtx.closeCh:
			return
		case chkCh <- chk:
		}
	}
}
// initializeForProbe wires up all channels used by the probe pipeline: the
// join result channel, the per-worker probe result channels, the shared probe
// chunk resource channel (pre-filled with one chunk per worker), and each
// worker's join chunk resource channel.
func (e *HashJoinExec) initializeForProbe() {
	// e.joinResultCh is for transmitting the join result chunks to the main
	// thread.
	e.joinResultCh = make(chan *hashjoinWorkerResult, e.concurrency+1)

	e.probeSideTupleFetcher.hashJoinCtx = e.hashJoinCtx
	// e.probeSideTupleFetcher.probeResultChs is for transmitting the chunks which store the data of
	// probeSideExec, it'll be written by probe side worker goroutine, and read by join
	// workers.
	e.probeSideTupleFetcher.probeResultChs = make([]chan *chunk.Chunk, e.concurrency)
	for i := uint(0); i < e.concurrency; i++ {
		e.probeSideTupleFetcher.probeResultChs[i] = make(chan *chunk.Chunk, 1)
		e.probeWorkers[i].probeResultCh = e.probeSideTupleFetcher.probeResultChs[i]
	}

	// e.probeChkResourceCh is for transmitting the used probeSideExec chunks from
	// join workers to probeSideExec worker.
	e.probeSideTupleFetcher.probeChkResourceCh = make(chan *probeChkResource, e.concurrency)
	for i := uint(0); i < e.concurrency; i++ {
		// Seed one recyclable chunk per worker, each bound to that worker's
		// probe result channel as its destination.
		e.probeSideTupleFetcher.probeChkResourceCh <- &probeChkResource{
			chk:  newFirstChunk(e.probeSideTupleFetcher.probeSideExec),
			dest: e.probeSideTupleFetcher.probeResultChs[i],
		}
	}

	// e.probeWorker.joinChkResourceCh is for transmitting the reused join result chunks
	// from the main thread to probe worker goroutines.
	for i := uint(0); i < e.concurrency; i++ {
		e.probeWorkers[i].joinChkResourceCh = make(chan *chunk.Chunk, 1)
		e.probeWorkers[i].joinChkResourceCh <- newFirstChunk(e)
		e.probeWorkers[i].probeChkResourceCh = e.probeSideTupleFetcher.probeChkResourceCh
	}
}
// fetchAndProbeHashTable starts the probe pipeline: one goroutine fetching
// probe-side chunks, e.concurrency probe workers joining them against the
// hash table, and a waiter goroutine that closes the result channel once all
// workers are done.
func (e *HashJoinExec) fetchAndProbeHashTable(ctx context.Context) {
	e.initializeForProbe()
	e.workerWg.RunWithRecover(func() {
		defer trace.StartRegion(ctx, "HashJoinProbeSideFetcher").End()
		e.probeSideTupleFetcher.fetchProbeSideChunks(ctx, e.MaxChunkSize())
	}, e.probeSideTupleFetcher.handleProbeSideFetcherPanic)

	for i := uint(0); i < e.concurrency; i++ {
		workerID := i // per-iteration copy captured by the closure below
		e.workerWg.RunWithRecover(func() {
			defer trace.StartRegion(ctx, "HashJoinWorker").End()
			e.probeWorkers[workerID].runJoinWorker()
		}, e.probeWorkers[workerID].handleProbeWorkerPanic)
	}
	e.waiterWg.RunWithRecover(e.waitJoinWorkersAndCloseResultChan, nil)
}
// handleProbeSideFetcherPanic is the recover hook for the probe-side fetcher
// goroutine. It always closes every probe result channel so the join workers
// can drain and exit, and forwards a recovered panic as an error result.
func (fetcher *probeSideTupleFetcher) handleProbeSideFetcherPanic(r interface{}) {
	// Unblock all join workers whether or not a panic occurred.
	for _, resultCh := range fetcher.probeResultChs {
		close(resultCh)
	}
	if r == nil {
		return
	}
	fetcher.joinResultCh <- &hashjoinWorkerResult{err: errors.Errorf("%v", r)}
}
// handleProbeWorkerPanic is the recover hook for a probe worker goroutine; a
// recovered panic is forwarded to the result channel tagged with the worker ID.
func (w *probeWorker) handleProbeWorkerPanic(r interface{}) {
	if r == nil {
		return
	}
	w.hashJoinCtx.joinResultCh <- &hashjoinWorkerResult{
		err: errors.Errorf("probeWorker[%d] meets error: %v", w.workerID, r),
	}
}
// handleJoinWorkerPanic forwards a recovered panic from a join worker to the
// join result channel as an error; a nil recover value is ignored.
func (e *HashJoinExec) handleJoinWorkerPanic(r interface{}) {
	if r == nil {
		return
	}
	e.joinResultCh <- &hashjoinWorkerResult{err: errors.Errorf("%v", r)}
}
// Concurrently handling unmatched rows from the hash table
//
// Each worker scans the row-container chunks it owns (workerID-strided) and
// emits every build row whose match bit was never set, flushing full result
// chunks to the join result channel along the way.
func (w *probeWorker) handleUnmatchedRowsFromHashTable() {
	ok, joinResult := w.getNewJoinResult()
	if !ok {
		return
	}
	numChks := w.rowContainerForProbe.NumChunks()
	// Stride by concurrency so each chunk is visited by exactly one worker.
	for i := int(w.workerID); i < numChks; i += int(w.hashJoinCtx.concurrency) {
		chk, err := w.rowContainerForProbe.GetChunk(i)
		if err != nil {
			// Catching the error and send it
			joinResult.err = err
			w.hashJoinCtx.joinResultCh <- joinResult
			return
		}
		for j := 0; j < chk.NumRows(); j++ {
			if !w.hashJoinCtx.outerMatchedStatus[i].UnsafeIsSet(j) { // process unmatched outer rows
				w.joiner.onMissMatch(false, chk.GetRow(j), joinResult.chk)
			}
			if joinResult.chk.IsFull() {
				w.hashJoinCtx.joinResultCh <- joinResult
				ok, joinResult = w.getNewJoinResult()
				if !ok {
					return
				}
			}
		}
	}

	// Flush any trailing partial result (or error) before returning.
	if joinResult == nil {
		return
	} else if joinResult.err != nil || (joinResult.chk != nil && joinResult.chk.NumRows() > 0) {
		w.hashJoinCtx.joinResultCh <- joinResult
	}
}
// waitJoinWorkersAndCloseResultChan waits for all probe workers to finish;
// for outer-to-build joins it then runs a second wave of workers that emit
// the still-unmatched build rows, and finally closes the join result channel
// so the main goroutine stops reading.
func (e *HashJoinExec) waitJoinWorkersAndCloseResultChan() {
	e.workerWg.Wait()
	if e.useOuterToBuild {
		// Handle the unmatched rows left in the hash table concurrently.
		for idx := uint(0); idx < e.concurrency; idx++ {
			worker := e.probeWorkers[idx]
			e.workerWg.RunWithRecover(func() { worker.handleUnmatchedRowsFromHashTable() }, e.handleJoinWorkerPanic)
		}
		e.workerWg.Wait()
	}
	close(e.joinResultCh)
}
// runJoinWorker is a probe worker's main loop: it receives probe-side chunks,
// joins them against the hash table (via the outer-to-build or inner-to-build
// path), recycles used probe chunks back to the fetcher, and flushes any
// trailing partial result on exit.
func (w *probeWorker) runJoinWorker() {
	probeTime := int64(0)
	if w.hashJoinCtx.stats != nil {
		start := time.Now()
		defer func() {
			t := time.Since(start)
			atomic.AddInt64(&w.hashJoinCtx.stats.probe, probeTime)
			atomic.AddInt64(&w.hashJoinCtx.stats.fetchAndProbe, int64(t))
			w.hashJoinCtx.stats.setMaxFetchAndProbeTime(int64(t))
		}()
	}

	var (
		probeSideResult *chunk.Chunk
		selected        = make([]bool, 0, chunk.InitialCapacity)
	)
	ok, joinResult := w.getNewJoinResult()
	if !ok {
		return
	}

	// Read and filter probeSideResult, and join the probeSideResult with the build side rows.
	emptyProbeSideResult := &probeChkResource{
		dest: w.probeResultCh,
	}
	hCtx := &hashContext{
		allTypes:    w.hashJoinCtx.probeTypes,
		keyColIdx:   w.probeKeyColIdx,
		naKeyColIdx: w.probeNAKeyColIdx,
	}
	for ok := true; ok; {
		if w.hashJoinCtx.finished.Load() {
			break
		}
		select {
		case <-w.hashJoinCtx.closeCh:
			return
		case probeSideResult, ok = <-w.probeResultCh:
		}
		failpoint.Inject("ConsumeRandomPanic", nil)
		if !ok {
			break
		}
		start := time.Now()
		// Outer-to-build joins track per-build-row match bits; inner-to-build
		// joins filter/hash the probe rows directly.
		if w.hashJoinCtx.useOuterToBuild {
			ok, joinResult = w.join2ChunkForOuterHashJoin(probeSideResult, hCtx, joinResult)
		} else {
			ok, joinResult = w.join2Chunk(probeSideResult, hCtx, joinResult, selected)
		}
		probeTime += int64(time.Since(start))
		if !ok {
			break
		}
		// Return the used probe chunk to the fetcher for reuse.
		probeSideResult.Reset()
		emptyProbeSideResult.chk = probeSideResult
		w.probeChkResourceCh <- emptyProbeSideResult
	}
	// note joinResult.chk may be nil when getNewJoinResult fails in loops
	if joinResult == nil {
		return
	} else if joinResult.err != nil || (joinResult.chk != nil && joinResult.chk.NumRows() > 0) {
		w.hashJoinCtx.joinResultCh <- joinResult
	} else if joinResult.chk != nil && joinResult.chk.NumRows() == 0 {
		// Empty chunk: return it to the resource pool instead of emitting it.
		w.joinChkResourceCh <- joinResult.chk
	}
}
// joinMatchedProbeSideRow2ChunkForOuterHashJoin joins one probe row against
// its matching build rows when the outer side built the hash table, marking
// every matched build row's bit in outerMatchedStatus so unmatched build rows
// can be emitted later. It returns false when the worker must stop.
func (w *probeWorker) joinMatchedProbeSideRow2ChunkForOuterHashJoin(probeKey uint64, probeSideRow chunk.Row, hCtx *hashContext, joinResult *hashjoinWorkerResult) (bool, *hashjoinWorkerResult) {
	var err error
	w.buildSideRows, w.buildSideRowPtrs, err = w.rowContainerForProbe.GetMatchedRowsAndPtrs(probeKey, probeSideRow, hCtx, w.buildSideRows, w.buildSideRowPtrs, true)
	buildSideRows, rowsPtrs := w.buildSideRows, w.buildSideRowPtrs
	if err != nil {
		joinResult.err = err
		return false, joinResult
	}
	if len(buildSideRows) == 0 {
		return true, joinResult
	}

	iter := w.rowIters
	iter.Reset(buildSideRows)
	var outerMatchStatus []outerRowStatusFlag
	rowIdx, ok := 0, false
	for iter.Begin(); iter.Current() != iter.End(); {
		outerMatchStatus, err = w.joiner.tryToMatchOuters(iter, probeSideRow, joinResult.chk, outerMatchStatus)
		if err != nil {
			joinResult.err = err
			return false, joinResult
		}
		// Record which build rows matched; rowIdx tracks the offset of this
		// batch within rowsPtrs.
		for i := range outerMatchStatus {
			if outerMatchStatus[i] == outerRowMatched {
				w.hashJoinCtx.outerMatchedStatus[rowsPtrs[rowIdx+i].ChkIdx].Set(int(rowsPtrs[rowIdx+i].RowIdx))
			}
		}
		rowIdx += len(outerMatchStatus)
		if joinResult.chk.IsFull() {
			w.hashJoinCtx.joinResultCh <- joinResult
			ok, joinResult = w.getNewJoinResult()
			if !ok {
				return false, joinResult
			}
		}
	}
	return true, joinResult
}
// joinNAALOSJMatchProbeSideRow2Chunk implement the matching logic for NA-AntiLeftOuterSemiJoin
//
// For a probe row without nulls (probeKeyNullBits == nil) the same-key bucket
// is matched before the null bucket, because a same-key match yields the
// scalar result <rhs, 0> which takes precedence over <rhs, null> from the
// null bucket. For a probe row containing nulls, the null bucket is tried
// first and then all hash-table rows. Returns false when the worker must stop
// (error or result channel shut down).
func (w *probeWorker) joinNAALOSJMatchProbeSideRow2Chunk(probeKey uint64, probeKeyNullBits *bitmap.ConcurrentBitmap, probeSideRow chunk.Row, hCtx *hashContext, joinResult *hashjoinWorkerResult) (bool, *hashjoinWorkerResult) {
	var (
		err error
		ok  bool
	)
	if probeKeyNullBits == nil {
		// step1: match the same key bucket first.
		// because AntiLeftOuterSemiJoin cares about the scalar value. If we both have a match from null
		// bucket and same key bucket, we should return the result as <rhs-row, 0> from same-key bucket
		// rather than <rhs-row, null> from null bucket.
		w.buildSideRows, err = w.rowContainerForProbe.GetMatchedRows(probeKey, probeSideRow, hCtx, w.buildSideRows)
		buildSideRows := w.buildSideRows
		if err != nil {
			joinResult.err = err
			return false, joinResult
		}
		if len(buildSideRows) != 0 {
			iter1 := w.rowIters
			iter1.Reset(buildSideRows)
			for iter1.Begin(); iter1.Current() != iter1.End(); {
				matched, _, err := w.joiner.tryToMatchInners(probeSideRow, iter1, joinResult.chk, LeftNotNullRightNotNull)
				if err != nil {
					joinResult.err = err
					return false, joinResult
				}
				// here matched means: there is a valid same-key bucket row from right side.
				// as said in the comment, once we meet a same key (NOT IN semantic) in CNF, we can determine the result as <rhs, 0>.
				if matched {
					return true, joinResult
				}
				if joinResult.chk.IsFull() {
					w.hashJoinCtx.joinResultCh <- joinResult
					ok, joinResult = w.getNewJoinResult()
					if !ok {
						return false, joinResult
					}
				}
			}
		}
		// step2: match the null bucket secondly.
		w.buildSideRows, err = w.rowContainerForProbe.GetNullBucketRows(hCtx, probeSideRow, probeKeyNullBits, w.buildSideRows, w.needCheckBuildColPos, w.needCheckProbeColPos, w.needCheckBuildTypes, w.needCheckProbeTypes)
		buildSideRows = w.buildSideRows
		if err != nil {
			joinResult.err = err
			return false, joinResult
		}
		if len(buildSideRows) == 0 {
			// when reach here, it means we couldn't find a valid same key match from same-key bucket yet
			// and the null bucket is empty. so the result should be <rhs, 1>.
			w.joiner.onMissMatch(false, probeSideRow, joinResult.chk)
			return true, joinResult
		}
		iter2 := w.rowIters
		iter2.Reset(buildSideRows)
		for iter2.Begin(); iter2.Current() != iter2.End(); {
			matched, _, err := w.joiner.tryToMatchInners(probeSideRow, iter2, joinResult.chk, LeftNotNullRightHasNull)
			if err != nil {
				joinResult.err = err
				return false, joinResult
			}
			// here matched means: there is a valid null bucket row from right side.
			// as said in the comment, once we meet a null in CNF, we can determine the result as <rhs, null>.
			if matched {
				return true, joinResult
			}
			if joinResult.chk.IsFull() {
				w.hashJoinCtx.joinResultCh <- joinResult
				ok, joinResult = w.getNewJoinResult()
				if !ok {
					return false, joinResult
				}
			}
		}
		// step3: if we couldn't return it quickly in null bucket and same key bucket, here means two cases:
		// case1: x NOT IN (empty set): if other key bucket don't have the valid rows yet.
		// case2: x NOT IN (l,m,n...): if other key bucket do have the valid rows.
		// both cases mean the result should be <rhs, 1>
		w.joiner.onMissMatch(false, probeSideRow, joinResult.chk)
		return true, joinResult
	}
	// when left side has null values, all we want is to find a valid build side rows (past other condition)
	// so we can return it as soon as possible. here means two cases:
	// case1: <?, null> NOT IN (empty set): ----------------------> result is <rhs, 1>.
	// case2: <?, null> NOT IN (at least a valid inner row) ------------------> result is <rhs, null>.
	// Step1: match null bucket (assumption that null bucket is quite smaller than all hash table bucket rows)
	w.buildSideRows, err = w.rowContainerForProbe.GetNullBucketRows(hCtx, probeSideRow, probeKeyNullBits, w.buildSideRows, w.needCheckBuildColPos, w.needCheckProbeColPos, w.needCheckBuildTypes, w.needCheckProbeTypes)
	buildSideRows := w.buildSideRows
	if err != nil {
		joinResult.err = err
		return false, joinResult
	}
	if len(buildSideRows) != 0 {
		iter1 := w.rowIters
		iter1.Reset(buildSideRows)
		for iter1.Begin(); iter1.Current() != iter1.End(); {
			matched, _, err := w.joiner.tryToMatchInners(probeSideRow, iter1, joinResult.chk, LeftHasNullRightHasNull)
			if err != nil {
				joinResult.err = err
				return false, joinResult
			}
			// here matched means: there is a valid null bucket row from right side. (not empty)
			// as said in the comment, once we found at least a valid row, we can determine the result as <rhs, null>.
			if matched {
				return true, joinResult
			}
			if joinResult.chk.IsFull() {
				w.hashJoinCtx.joinResultCh <- joinResult
				ok, joinResult = w.getNewJoinResult()
				if !ok {
					return false, joinResult
				}
			}
		}
	}
	// Step2: match all hash table bucket build rows (use probeKeyNullBits to filter if any).
	w.buildSideRows, err = w.rowContainerForProbe.GetAllMatchedRows(hCtx, probeSideRow, probeKeyNullBits, w.buildSideRows, w.needCheckBuildColPos, w.needCheckProbeColPos, w.needCheckBuildTypes, w.needCheckProbeTypes)
	buildSideRows = w.buildSideRows
	if err != nil {
		joinResult.err = err
		return false, joinResult
	}
	if len(buildSideRows) == 0 {
		// when reach here, it means we couldn't return it quickly in null bucket, and same-bucket is empty,
		// which means x NOT IN (empty set) or x NOT IN (l,m,n), the result should be <rhs, 1>
		w.joiner.onMissMatch(false, probeSideRow, joinResult.chk)
		return true, joinResult
	}
	iter2 := w.rowIters
	iter2.Reset(buildSideRows)
	for iter2.Begin(); iter2.Current() != iter2.End(); {
		matched, _, err := w.joiner.tryToMatchInners(probeSideRow, iter2, joinResult.chk, LeftHasNullRightNotNull)
		if err != nil {
			joinResult.err = err
			return false, joinResult
		}
		// here matched means: there is a valid same key bucket row from right side. (not empty)
		// as said in the comment, once we found at least a valid row, we can determine the result as <rhs, null>.
		if matched {
			return true, joinResult
		}
		if joinResult.chk.IsFull() {
			w.hashJoinCtx.joinResultCh <- joinResult
			ok, joinResult = w.getNewJoinResult()
			if !ok {
				return false, joinResult
			}
		}
	}
	// step3: if we couldn't return it quickly in null bucket and all hash bucket, here means only one cases:
	// case1: <?, null> NOT IN (empty set):
	// empty set comes from no rows from all bucket can pass other condition. the result should be <rhs, 1>
	w.joiner.onMissMatch(false, probeSideRow, joinResult.chk)
	return true, joinResult
}
// joinNAASJMatchProbeSideRow2Chunk implement the matching logic for NA-AntiSemiJoin
//
// Since NA-AntiSemiJoin only needs to accept or reject the probe row (no
// scalar value), a probe row without nulls (probeKeyNullBits == nil) checks
// the null bucket first for the fastest possible rejection, then the same-key
// bucket. A probe row containing nulls checks the null bucket and then all
// hash-table rows. Returns false when the worker must stop.
func (w *probeWorker) joinNAASJMatchProbeSideRow2Chunk(probeKey uint64, probeKeyNullBits *bitmap.ConcurrentBitmap, probeSideRow chunk.Row, hCtx *hashContext, joinResult *hashjoinWorkerResult) (bool, *hashjoinWorkerResult) {
	var (
		err error
		ok  bool
	)
	if probeKeyNullBits == nil {
		// step1: match null bucket first.
		// need fetch the "valid" rows every time. (nullBits map check is necessary)
		w.buildSideRows, err = w.rowContainerForProbe.GetNullBucketRows(hCtx, probeSideRow, probeKeyNullBits, w.buildSideRows, w.needCheckBuildColPos, w.needCheckProbeColPos, w.needCheckBuildTypes, w.needCheckProbeTypes)
		buildSideRows := w.buildSideRows
		if err != nil {
			joinResult.err = err
			return false, joinResult
		}
		if len(buildSideRows) != 0 {
			iter1 := w.rowIters
			iter1.Reset(buildSideRows)
			for iter1.Begin(); iter1.Current() != iter1.End(); {
				matched, _, err := w.joiner.tryToMatchInners(probeSideRow, iter1, joinResult.chk)
				if err != nil {
					joinResult.err = err
					return false, joinResult
				}
				// here matched means: there is a valid null bucket row from right side.
				// as said in the comment, once we meet a rhs null in CNF, we can determine the reject of lhs row.
				if matched {
					return true, joinResult
				}
				if joinResult.chk.IsFull() {
					w.hashJoinCtx.joinResultCh <- joinResult
					ok, joinResult = w.getNewJoinResult()
					if !ok {
						return false, joinResult
					}
				}
			}
		}
		// step2: then same key bucket.
		w.buildSideRows, err = w.rowContainerForProbe.GetMatchedRows(probeKey, probeSideRow, hCtx, w.buildSideRows)
		buildSideRows = w.buildSideRows
		if err != nil {
			joinResult.err = err
			return false, joinResult
		}
		if len(buildSideRows) == 0 {
			// when reach here, it means we couldn't return it quickly in null bucket, and same-bucket is empty,
			// which means x NOT IN (empty set), accept the rhs row.
			w.joiner.onMissMatch(false, probeSideRow, joinResult.chk)
			return true, joinResult
		}
		iter2 := w.rowIters
		iter2.Reset(buildSideRows)
		for iter2.Begin(); iter2.Current() != iter2.End(); {
			matched, _, err := w.joiner.tryToMatchInners(probeSideRow, iter2, joinResult.chk)
			if err != nil {
				joinResult.err = err
				return false, joinResult
			}
			// here matched means: there is a valid same key bucket row from right side.
			// as said in the comment, once we meet a false in CNF, we can determine the reject of lhs row.
			if matched {
				return true, joinResult
			}
			if joinResult.chk.IsFull() {
				w.hashJoinCtx.joinResultCh <- joinResult
				ok, joinResult = w.getNewJoinResult()
				if !ok {
					return false, joinResult
				}
			}
		}
		// step3: if we couldn't return it quickly in null bucket and same key bucket, here means two cases:
		// case1: x NOT IN (empty set): if other key bucket don't have the valid rows yet.
		// case2: x NOT IN (l,m,n...): if other key bucket do have the valid rows.
		// both cases should accept the rhs row.
		w.joiner.onMissMatch(false, probeSideRow, joinResult.chk)
		return true, joinResult
	}
	// when left side has null values, all we want is to find a valid build side rows (passed from other condition)
	// so we can return it as soon as possible. here means two cases:
	// case1: <?, null> NOT IN (empty set): ----------------------> accept rhs row.
	// case2: <?, null> NOT IN (at least a valid inner row) ------------------> unknown result, refuse rhs row.
	// Step1: match null bucket (assumption that null bucket is quite smaller than all hash table bucket rows)
	w.buildSideRows, err = w.rowContainerForProbe.GetNullBucketRows(hCtx, probeSideRow, probeKeyNullBits, w.buildSideRows, w.needCheckBuildColPos, w.needCheckProbeColPos, w.needCheckBuildTypes, w.needCheckProbeTypes)
	buildSideRows := w.buildSideRows
	if err != nil {
		joinResult.err = err
		return false, joinResult
	}
	if len(buildSideRows) != 0 {
		iter1 := w.rowIters
		iter1.Reset(buildSideRows)
		for iter1.Begin(); iter1.Current() != iter1.End(); {
			matched, _, err := w.joiner.tryToMatchInners(probeSideRow, iter1, joinResult.chk)
			if err != nil {
				joinResult.err = err
				return false, joinResult
			}
			// here matched means: there is a valid null bucket row from right side. (not empty)
			// as said in the comment, once we found at least a valid row, we can determine the reject of lhs row.
			if matched {
				return true, joinResult
			}
			if joinResult.chk.IsFull() {
				w.hashJoinCtx.joinResultCh <- joinResult
				ok, joinResult = w.getNewJoinResult()
				if !ok {
					return false, joinResult
				}
			}
		}
	}
	// Step2: match all hash table bucket build rows.
	w.buildSideRows, err = w.rowContainerForProbe.GetAllMatchedRows(hCtx, probeSideRow, probeKeyNullBits, w.buildSideRows, w.needCheckBuildColPos, w.needCheckProbeColPos, w.needCheckBuildTypes, w.needCheckProbeTypes)
	buildSideRows = w.buildSideRows
	if err != nil {
		joinResult.err = err
		return false, joinResult
	}
	if len(buildSideRows) == 0 {
		// when reach here, it means we couldn't return it quickly in null bucket, and same-bucket is empty,
		// which means <?,null> NOT IN (empty set) or <?,null> NOT IN (no valid rows) accept the rhs row.
		w.joiner.onMissMatch(false, probeSideRow, joinResult.chk)
		return true, joinResult
	}
	iter2 := w.rowIters
	iter2.Reset(buildSideRows)
	for iter2.Begin(); iter2.Current() != iter2.End(); {
		matched, _, err := w.joiner.tryToMatchInners(probeSideRow, iter2, joinResult.chk)
		if err != nil {
			joinResult.err = err
			return false, joinResult
		}
		// here matched means: there is a valid key row from right side. (not empty)
		// as said in the comment, once we found at least a valid row, we can determine the reject of lhs row.
		if matched {
			return true, joinResult
		}
		if joinResult.chk.IsFull() {
			w.hashJoinCtx.joinResultCh <- joinResult
			ok, joinResult = w.getNewJoinResult()
			if !ok {
				return false, joinResult
			}
		}
	}
	// step3: if we couldn't return it quickly in null bucket and all hash bucket, here means only one cases:
	// case1: <?, null> NOT IN (empty set):
	// empty set comes from no rows from all bucket can pass other condition. we should accept the rhs row.
	w.joiner.onMissMatch(false, probeSideRow, joinResult.chk)
	return true, joinResult
}
// joinNAAJMatchProbeSideRow2Chunk implement the matching priority logic for NA-AntiSemiJoin and NA-AntiLeftOuterSemiJoin
// there are some bucket-matching priority difference between them.
//
// Since NA-AntiSemiJoin doesn't need to append the scalar value to the left side row, there is a quick matching path.
// 1: lhs row has null:
// A lhs row with null can't determine its result in advance; we should judge whether the right valid set is empty
// or not. For a semantic like x NOT IN (y set), if y set is empty, the scalar result is 1; otherwise, the result
// is 0. Since NA-AntiSemiJoin doesn't care about the scalar value, we just try to find a valid row from the right side,
// and once we find one we return the left side row instantly. (same as NA-AntiLeftOuterSemiJoin)
//
// 2: lhs row without null:
// Which of the same-key bucket and the null-bucket should be matched first? For a semantic like x NOT IN (y set), once y
// set has a same key x, the scalar value is 0; else if y set has a null key, then the scalar value is null. Both
// of them lead to rejecting the lhs row without any difference. Since NA-AntiSemiJoin doesn't care about the scalar
// value, we can just match the null bucket first and reject the lhs row as quickly as possible, because a null of
// yi in the CNF (x NA-EQ yi) can always determine a negative value (reject the lhs row) in advance here.
//
// For NA-AntiLeftOuterSemiJoin, we couldn't match null-bucket first, because once y set has a same key x and null
// key, we should return the result as left side row appended with a scalar value 0 which is from same key matching failure.
// joinNAAJMatchProbeSideRow2Chunk dispatches a null-aware anti-join probe row
// to the matcher for the concrete join type (NA-AntiSemiJoin or
// NA-AntiLeftOuterSemiJoin). Any other combination is not a valid NAAJ and
// returns (false, joinResult) unchanged.
func (w *probeWorker) joinNAAJMatchProbeSideRow2Chunk(probeKey uint64, probeKeyNullBits *bitmap.ConcurrentBitmap, probeSideRow chunk.Row, hCtx *hashContext, joinResult *hashjoinWorkerResult) (bool, *hashjoinWorkerResult) {
	if !w.hashJoinCtx.isNullAware {
		// shouldn't be here, not a valid NAAJ.
		return false, joinResult
	}
	switch w.hashJoinCtx.joinType {
	case plannercore.AntiSemiJoin:
		return w.joinNAASJMatchProbeSideRow2Chunk(probeKey, probeKeyNullBits, probeSideRow, hCtx, joinResult)
	case plannercore.AntiLeftOuterSemiJoin:
		return w.joinNAALOSJMatchProbeSideRow2Chunk(probeKey, probeKeyNullBits, probeSideRow, hCtx, joinResult)
	default:
		// shouldn't be here, not a valid NAAJ.
		return false, joinResult
	}
}
// joinMatchedProbeSideRow2Chunk joins one probe-side row against its matching
// build rows from the hash table (inner-to-build path), falling back to
// onMissMatch when nothing matches. It returns false when the worker must
// stop (error or result channel shut down).
func (w *probeWorker) joinMatchedProbeSideRow2Chunk(probeKey uint64, probeSideRow chunk.Row, hCtx *hashContext,
	joinResult *hashjoinWorkerResult) (bool, *hashjoinWorkerResult) {
	var err error
	w.buildSideRows, err = w.rowContainerForProbe.GetMatchedRows(probeKey, probeSideRow, hCtx, w.buildSideRows)
	buildSideRows := w.buildSideRows
	if err != nil {
		joinResult.err = err
		return false, joinResult
	}
	if len(buildSideRows) == 0 {
		// No candidate build rows: emit the miss-match result directly.
		w.joiner.onMissMatch(false, probeSideRow, joinResult.chk)
		return true, joinResult
	}
	iter := w.rowIters
	iter.Reset(buildSideRows)
	hasMatch, hasNull, ok := false, false, false
	for iter.Begin(); iter.Current() != iter.End(); {
		matched, isNull, err := w.joiner.tryToMatchInners(probeSideRow, iter, joinResult.chk)
		if err != nil {
			joinResult.err = err
			return false, joinResult
		}
		hasMatch = hasMatch || matched
		hasNull = hasNull || isNull

		if joinResult.chk.IsFull() {
			w.hashJoinCtx.joinResultCh <- joinResult
			ok, joinResult = w.getNewJoinResult()
			if !ok {
				return false, joinResult
			}
		}
	}
	if !hasMatch {
		// All candidates failed to match; hasNull distinguishes NULL-driven misses.
		w.joiner.onMissMatch(hasNull, probeSideRow, joinResult.chk)
	}
	return true, joinResult
}
// getNewJoinResult acquires an empty result chunk from the worker's resource
// channel and wraps it in a hashjoinWorkerResult. It returns ok=false when
// the executor is closing or the resource channel is closed; the caller must
// then stop producing results (the returned result has no chunk attached).
func (w *probeWorker) getNewJoinResult() (bool, *hashjoinWorkerResult) {
	result := &hashjoinWorkerResult{src: w.joinChkResourceCh}
	var ok bool
	select {
	case <-w.hashJoinCtx.closeCh:
		// Shutting down: leave result.chk nil.
		ok = false
	case result.chk, ok = <-w.joinChkResourceCh:
	}
	return ok, result
}
// join2Chunk joins one probe-side chunk against the hash table for the
// inner-to-build path. It filters the probe rows, hashes the (possibly
// null-aware) join keys, then probes row by row, periodically flushing full
// result chunks and checking for query kill. Returns false when the worker
// must stop.
func (w *probeWorker) join2Chunk(probeSideChk *chunk.Chunk, hCtx *hashContext, joinResult *hashjoinWorkerResult,
	selected []bool) (ok bool, _ *hashjoinWorkerResult) {
	var err error
	selected, err = expression.VectorizedFilter(w.hashJoinCtx.sessCtx, w.hashJoinCtx.outerFilter, chunk.NewIterator4Chunk(probeSideChk), selected)
	if err != nil {
		joinResult.err = err
		return false, joinResult
	}

	numRows := probeSideChk.NumRows()
	hCtx.initHash(numRows)
	// By now, path 1 and 2 won't be conducted at the same time.
	// 1: write the row data of join key to hashVals. (normal EQ key should ignore the null values.) null-EQ for Except statement is an exception.
	for keyIdx, i := range hCtx.keyColIdx {
		ignoreNull := len(w.hashJoinCtx.isNullEQ) > keyIdx && w.hashJoinCtx.isNullEQ[keyIdx]
		err = codec.HashChunkSelected(w.rowContainerForProbe.sc, hCtx.hashVals, probeSideChk, hCtx.allTypes[keyIdx], i, hCtx.buf, hCtx.hasNull, selected, ignoreNull)
		if err != nil {
			joinResult.err = err
			return false, joinResult
		}
	}
	// 2: write the row data of NA join key to hashVals. (NA EQ key should collect all row including null value, store null value in a special position)
	isNAAJ := len(hCtx.naKeyColIdx) > 0
	for keyIdx, i := range hCtx.naKeyColIdx {
		// NAAJ won't ignore any null values, but collect them up to probe.
		err = codec.HashChunkSelected(w.rowContainerForProbe.sc, hCtx.hashVals, probeSideChk, hCtx.allTypes[keyIdx], i, hCtx.buf, hCtx.hasNull, selected, false)
		if err != nil {
			joinResult.err = err
			return false, joinResult
		}
		// after fetch one NA column, collect the null value to null bitmap for every row. (use hasNull flag to accelerate)
		// eg: if a NA Join cols is (a, b, c), for every build row here we maintained a 3-bit map to mark which column is null for them.
		for rowIdx := 0; rowIdx < numRows; rowIdx++ {
			if hCtx.hasNull[rowIdx] {
				hCtx.naColNullBitMap[rowIdx].UnsafeSet(keyIdx)
				// clean and try fetch next NA join col.
				hCtx.hasNull[rowIdx] = false
				hCtx.naHasNull[rowIdx] = true
			}
		}
	}

	for i := range selected {
		// Honor query kill requests between rows.
		killed := atomic.LoadUint32(&w.hashJoinCtx.sessCtx.GetSessionVars().Killed) == 1
		failpoint.Inject("killedInJoin2Chunk", func(val failpoint.Value) {
			if val.(bool) {
				killed = true
			}
		})
		if killed {
			joinResult.err = exeerrors.ErrQueryInterrupted
			return false, joinResult
		}
		if isNAAJ {
			if !selected[i] {
				// since this is the case of using inner to build, so for an outer row unselected, we should fill the result when it's outer join.
				w.joiner.onMissMatch(false, probeSideChk.GetRow(i), joinResult.chk)
			}
			if hCtx.naHasNull[i] {
				// here means the probe join connecting column has null value in it and this is special for matching all the hash buckets
				// for it. (probeKey is not necessary here)
				probeRow := probeSideChk.GetRow(i)
				ok, joinResult = w.joinNAAJMatchProbeSideRow2Chunk(0, hCtx.naColNullBitMap[i].Clone(), probeRow, hCtx, joinResult)
				if !ok {
					return false, joinResult
				}
			} else {
				// here means the probe join connecting column without null values, where we should match same key bucket and null bucket for it at its order.
				// step1: process same key matched probe side rows
				probeKey, probeRow := hCtx.hashVals[i].Sum64(), probeSideChk.GetRow(i)
				ok, joinResult = w.joinNAAJMatchProbeSideRow2Chunk(probeKey, nil, probeRow, hCtx, joinResult)
				if !ok {
					return false, joinResult
				}
			}
		} else {
			// since this is the case of using inner to build, so for an outer row unselected, we should fill the result when it's outer join.
			if !selected[i] || hCtx.hasNull[i] { // process unmatched probe side rows
				w.joiner.onMissMatch(false, probeSideChk.GetRow(i), joinResult.chk)
			} else { // process matched probe side rows
				probeKey, probeRow := hCtx.hashVals[i].Sum64(), probeSideChk.GetRow(i)
				ok, joinResult = w.joinMatchedProbeSideRow2Chunk(probeKey, probeRow, hCtx, joinResult)
				if !ok {
					return false, joinResult
				}
			}
		}
		if joinResult.chk.IsFull() {
			w.hashJoinCtx.joinResultCh <- joinResult
			ok, joinResult = w.getNewJoinResult()
			if !ok {
				return false, joinResult
			}
		}
	}
	return true, joinResult
}
// join2ChunkForOuterHashJoin joins chunks when using the outer to build a hash table (refer to outer hash join)
//
// It hashes every probe-side row, probes the hash table row by row, and
// pushes full result chunks to joinResultCh. Returns (false, result) as soon
// as an error occurs or a new result chunk cannot be obtained.
func (w *probeWorker) join2ChunkForOuterHashJoin(probeSideChk *chunk.Chunk, hCtx *hashContext, joinResult *hashjoinWorkerResult) (ok bool, _ *hashjoinWorkerResult) {
	hCtx.initHash(probeSideChk.NumRows())
	// Hash all probe-side rows on every join key column.
	for keyIdx, i := range hCtx.keyColIdx {
		err := codec.HashChunkColumns(w.rowContainerForProbe.sc, hCtx.hashVals, probeSideChk, hCtx.allTypes[keyIdx], i, hCtx.buf, hCtx.hasNull)
		if err != nil {
			joinResult.err = err
			return false, joinResult
		}
	}
	for i := 0; i < probeSideChk.NumRows(); i++ {
		// Check for query cancellation once per probe row.
		killed := atomic.LoadUint32(&w.hashJoinCtx.sessCtx.GetSessionVars().Killed) == 1
		failpoint.Inject("killedInJoin2ChunkForOuterHashJoin", func(val failpoint.Value) {
			if val.(bool) {
				killed = true
			}
		})
		if killed {
			joinResult.err = exeerrors.ErrQueryInterrupted
			return false, joinResult
		}
		probeKey, probeRow := hCtx.hashVals[i].Sum64(), probeSideChk.GetRow(i)
		ok, joinResult = w.joinMatchedProbeSideRow2ChunkForOuterHashJoin(probeKey, probeRow, hCtx, joinResult)
		if !ok {
			return false, joinResult
		}
		// Hand off a full chunk to the result channel and fetch a fresh one.
		if joinResult.chk.IsFull() {
			w.hashJoinCtx.joinResultCh <- joinResult
			ok, joinResult = w.getNewJoinResult()
			if !ok {
				return false, joinResult
			}
		}
	}
	return true, joinResult
}
// Next implements the Executor Next interface.
// hash join constructs the result following these steps:
// step 1. fetch data from build side child and build a hash table;
// step 2. fetch data from probe child in a background goroutine and probe the hash table in multiple join workers.
func (e *HashJoinExec) Next(ctx context.Context, req *chunk.Chunk) (err error) {
	if !e.prepared {
		// First call: set up the row container, start the build goroutine and
		// the probe workers. Subsequent calls only drain joinResultCh.
		e.buildFinished = make(chan error, 1)
		hCtx := &hashContext{
			allTypes:    e.buildTypes,
			keyColIdx:   e.buildWorker.buildKeyColIdx,
			naKeyColIdx: e.buildWorker.buildNAKeyColIdx,
		}
		e.rowContainer = newHashRowContainer(e.Ctx(), hCtx, retTypes(e.buildWorker.buildSideExec))
		// we shallow copies rowContainer for each probe worker to avoid lock contention
		for i := uint(0); i < e.concurrency; i++ {
			if i == 0 {
				e.probeWorkers[i].rowContainerForProbe = e.rowContainer
			} else {
				e.probeWorkers[i].rowContainerForProbe = e.rowContainer.ShallowCopy()
			}
		}
		for i := uint(0); i < e.concurrency; i++ {
			e.probeWorkers[i].rowIters = chunk.NewIterator4Slice([]chunk.Row{}).(*chunk.Iterator4Slice)
		}
		e.workerWg.RunWithRecover(func() {
			defer trace.StartRegion(ctx, "HashJoinHashTableBuilder").End()
			e.fetchAndBuildHashTable(ctx)
		}, e.handleFetchAndBuildHashTablePanic)
		e.fetchAndProbeHashTable(ctx)
		e.prepared = true
	}
	if e.isOuterJoin {
		// Propagate the caller's required row count to the probe-side fetcher.
		atomic.StoreInt64(&e.probeSideTupleFetcher.requiredRows, int64(req.RequiredRows()))
	}
	req.Reset()

	// A closed channel means all workers finished; nil error signals EOF.
	result, ok := <-e.joinResultCh
	if !ok {
		return nil
	}
	if result.err != nil {
		e.finished.Store(true)
		return result.err
	}
	req.SwapColumns(result.chk)
	// Return the (now empty) chunk to the worker for reuse.
	result.src <- result.chk
	return nil
}
// handleFetchAndBuildHashTablePanic is the recover handler for the build
// goroutine: a panic value (if any) is forwarded to buildFinished as an
// error, and the channel is always closed so readers never block on it.
func (e *HashJoinExec) handleFetchAndBuildHashTablePanic(r interface{}) {
	if r != nil {
		e.buildFinished <- errors.Errorf("%v", r)
	}
	close(e.buildFinished)
}
// fetchAndBuildHashTable starts a goroutine that fetches build-side chunks
// and, on the current goroutine, builds the hash table from them. Errors from
// either side are reported through e.buildFinished (at most one is sent).
func (e *HashJoinExec) fetchAndBuildHashTable(ctx context.Context) {
	if e.stats != nil {
		start := time.Now()
		defer func() {
			e.stats.fetchAndBuildHashTable = time.Since(start)
		}()
	}
	// buildSideResultCh transfers build side chunk from build side fetch to build hash table.
	buildSideResultCh := make(chan *chunk.Chunk, 1)
	doneCh := make(chan struct{})
	fetchBuildSideRowsOk := make(chan error, 1)
	e.workerWg.RunWithRecover(
		func() {
			defer trace.StartRegion(ctx, "HashJoinBuildSideFetcher").End()
			e.buildWorker.fetchBuildSideRows(ctx, buildSideResultCh, fetchBuildSideRowsOk, doneCh)
		},
		func(r interface{}) {
			// Recover handler: turn a fetcher panic into an error and always
			// close the channel so the wait below cannot deadlock.
			if r != nil {
				fetchBuildSideRowsOk <- errors.Errorf("%v", r)
			}
			close(fetchBuildSideRowsOk)
		},
	)

	// TODO: Parallel build hash table. Currently not support because `unsafeHashTable` is not thread-safe.
	err := e.buildWorker.buildHashTableForList(buildSideResultCh)
	if err != nil {
		e.buildFinished <- errors.Trace(err)
		// Closing doneCh asks the fetcher goroutine to stop early.
		close(doneCh)
	}
	// Wait fetchBuildSideRows be finished.
	// 1. if buildHashTableForList fails
	// 2. if probeSideResult.NumRows() == 0, fetchProbeSideChunks will not wait for the build side.
	channel.Clear(buildSideResultCh)
	// Check whether err is nil to avoid sending redundant error into buildFinished.
	if err == nil {
		if err = <-fetchBuildSideRowsOk; err != nil {
			e.buildFinished <- err
		}
	}
}
// buildHashTableForList builds hash table from `list`.
//
// It consumes chunks from buildSideResultCh until the channel is closed,
// attaching memory/disk trackers and (optionally) a spill-to-disk action
// first. For outer-build joins it also allocates a per-chunk matched-status
// bitmap and applies the outer filter before inserting rows.
func (w *buildWorker) buildHashTableForList(buildSideResultCh <-chan *chunk.Chunk) error {
	var err error
	var selected []bool
	rowContainer := w.hashJoinCtx.rowContainer
	rowContainer.GetMemTracker().AttachTo(w.hashJoinCtx.memTracker)
	rowContainer.GetMemTracker().SetLabel(memory.LabelForBuildSideResult)
	rowContainer.GetDiskTracker().AttachTo(w.hashJoinCtx.diskTracker)
	rowContainer.GetDiskTracker().SetLabel(memory.LabelForBuildSideResult)
	if variable.EnableTmpStorageOnOOM.Load() {
		// Register the spill action so the build side can fall back to disk
		// when the session memory quota is exceeded.
		actionSpill := rowContainer.ActionSpill()
		failpoint.Inject("testRowContainerSpill", func(val failpoint.Value) {
			if val.(bool) {
				actionSpill = rowContainer.rowContainer.ActionSpillForTest()
				defer actionSpill.(*chunk.SpillDiskAction).WaitForTest()
			}
		})
		w.hashJoinCtx.sessCtx.GetSessionVars().MemTracker.FallbackOldAndSetNewAction(actionSpill)
	}
	for chk := range buildSideResultCh {
		if w.hashJoinCtx.finished.Load() {
			return nil
		}
		if !w.hashJoinCtx.useOuterToBuild {
			err = rowContainer.PutChunk(chk, w.hashJoinCtx.isNullEQ)
		} else {
			// Outer-build: track which outer rows get matched during probe.
			var bitMap = bitmap.NewConcurrentBitmap(chk.NumRows())
			w.hashJoinCtx.outerMatchedStatus = append(w.hashJoinCtx.outerMatchedStatus, bitMap)
			w.hashJoinCtx.memTracker.Consume(bitMap.BytesConsumed())
			if len(w.hashJoinCtx.outerFilter) == 0 {
				err = w.hashJoinCtx.rowContainer.PutChunk(chk, w.hashJoinCtx.isNullEQ)
			} else {
				// Only rows passing the outer filter are inserted into the table.
				selected, err = expression.VectorizedFilter(w.hashJoinCtx.sessCtx, w.hashJoinCtx.outerFilter, chunk.NewIterator4Chunk(chk), selected)
				if err != nil {
					return err
				}
				err = rowContainer.PutChunkSelected(chk, selected, w.hashJoinCtx.isNullEQ)
			}
		}
		failpoint.Inject("ConsumeRandomPanic", nil)
		if err != nil {
			return err
		}
	}
	return nil
}
// NestedLoopApplyExec is the executor for apply.
//
// For every selected outer row it re-evaluates the inner executor (optionally
// memoized via an apply cache keyed by the correlated column values) and
// joins the outer row against the fetched inner rows.
type NestedLoopApplyExec struct {
	exec.BaseExecutor

	ctx         sessionctx.Context
	innerRows   []chunk.Row // buffered inner rows for the current outer row
	cursor      int
	innerExec   exec.Executor
	outerExec   exec.Executor
	innerFilter expression.CNFExprs
	outerFilter expression.CNFExprs
	joiner      joiner

	cache              *applycache.ApplyCache
	canUseCache        bool
	cacheHitCounter    int
	cacheAccessCounter int

	// outerSchema holds the correlated columns that must be bound to the
	// current outer row before running the inner executor.
	outerSchema []*expression.CorrelatedColumn

	outerChunk       *chunk.Chunk
	outerChunkCursor int
	outerSelected    []bool
	innerList        *chunk.List
	innerChunk       *chunk.Chunk
	innerSelected    []bool
	innerIter        chunk.Iterator
	outerRow         *chunk.Row
	hasMatch         bool // whether the current outer row matched any inner row
	hasNull          bool
	outer            bool // whether this is an outer (left) apply

	memTracker *memory.Tracker // track memory usage.
}
// Close implements the Executor interface.
//
// It records cache-hit statistics into the runtime stats collector and closes
// the outer child. The inner child is opened/closed per outer row in
// fetchAllInners, so it is not closed here.
func (e *NestedLoopApplyExec) Close() error {
	e.innerRows = nil
	e.memTracker = nil
	if e.RuntimeStats() != nil {
		runtimeStats := newJoinRuntimeStats()
		if e.canUseCache {
			var hitRatio float64
			if e.cacheAccessCounter > 0 {
				hitRatio = float64(e.cacheHitCounter) / float64(e.cacheAccessCounter)
			}
			runtimeStats.setCacheInfo(true, hitRatio)
		} else {
			runtimeStats.setCacheInfo(false, 0)
		}
		runtimeStats.SetConcurrencyInfo(execdetails.NewConcurrencyInfo("Concurrency", 0))
		defer e.Ctx().GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.ID(), runtimeStats)
	}
	return e.outerExec.Close()
}
// Open implements the Executor interface.
//
// It opens the outer child, resets cursors and buffers, wires up memory
// trackers, and initializes the apply cache when enabled. The inner child is
// opened lazily per outer row.
func (e *NestedLoopApplyExec) Open(ctx context.Context) error {
	err := e.outerExec.Open(ctx)
	if err != nil {
		return err
	}
	e.cursor = 0
	e.innerRows = e.innerRows[:0]
	e.outerChunk = tryNewCacheChunk(e.outerExec)
	e.innerChunk = tryNewCacheChunk(e.innerExec)
	e.innerList = chunk.NewList(retTypes(e.innerExec), e.InitCap(), e.MaxChunkSize())

	e.memTracker = memory.NewTracker(e.ID(), -1)
	e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker)

	e.innerList.GetMemTracker().SetLabel(memory.LabelForInnerList)
	e.innerList.GetMemTracker().AttachTo(e.memTracker)

	if e.canUseCache {
		e.cache, err = applycache.NewApplyCache(e.ctx)
		if err != nil {
			return err
		}
		e.cacheHitCounter = 0
		e.cacheAccessCounter = 0
		e.cache.GetMemTracker().AttachTo(e.memTracker)
	}
	return nil
}
// aggExecutorTreeInputEmpty checks whether the executor tree returns empty if without aggregate operators.
// Note that, the prerequisite is that this executor tree has been executed already and it returns one row.
//
// It recurses down the tree: a multi-child node must be a Union whose children
// are all empty; a single-child chain is empty if its child is; and at an
// aggregate node the answer is the recorded isChildReturnEmpty flag.
func aggExecutorTreeInputEmpty(e exec.Executor) bool {
	children := e.Base().AllChildren()
	if len(children) == 0 {
		// A leaf executor produced its one row from real input, so not empty.
		return false
	}
	if len(children) > 1 {
		_, ok := e.(*UnionExec)
		if !ok {
			// It is a Join executor.
			return false
		}
		for _, child := range children {
			if !aggExecutorTreeInputEmpty(child) {
				return false
			}
		}
		return true
	}
	// Single child executors.
	if aggExecutorTreeInputEmpty(children[0]) {
		return true
	}
	if hashAgg, ok := e.(*HashAggExec); ok {
		return hashAgg.isChildReturnEmpty
	}
	if streamAgg, ok := e.(*StreamAggExec); ok {
		return streamAgg.isChildReturnEmpty
	}
	return false
}
// fetchSelectedOuterRow returns the next outer row that passes the outer
// filter, refilling e.outerChunk from the outer child as needed. For an outer
// apply, unselected rows are emitted as miss-matches into chk; a nil row with
// nil error means either outer-side EOF or chk became full.
func (e *NestedLoopApplyExec) fetchSelectedOuterRow(ctx context.Context, chk *chunk.Chunk) (*chunk.Row, error) {
	outerIter := chunk.NewIterator4Chunk(e.outerChunk)
	for {
		if e.outerChunkCursor >= e.outerChunk.NumRows() {
			// Current chunk exhausted: fetch the next one and re-filter it.
			err := Next(ctx, e.outerExec, e.outerChunk)
			if err != nil {
				return nil, err
			}
			if e.outerChunk.NumRows() == 0 {
				return nil, nil
			}
			e.outerSelected, err = expression.VectorizedFilter(e.ctx, e.outerFilter, outerIter, e.outerSelected)
			if err != nil {
				return nil, err
			}
			// For cases like `select count(1), (select count(1) from s where s.a > t.a) as sub from t where t.a = 1`,
			// if outer child has no row satisfying `t.a = 1`, `sub` should be `null` instead of `0` theoretically; however, the
			// outer `count(1)` produces one row <0, null> over the empty input, we should specially mark this outer row
			// as not selected, to trigger the mismatch join procedure.
			if e.outerChunkCursor == 0 && e.outerChunk.NumRows() == 1 && e.outerSelected[0] && aggExecutorTreeInputEmpty(e.outerExec) {
				e.outerSelected[0] = false
			}
			e.outerChunkCursor = 0
		}
		outerRow := e.outerChunk.GetRow(e.outerChunkCursor)
		selected := e.outerSelected[e.outerChunkCursor]
		e.outerChunkCursor++
		if selected {
			return &outerRow, nil
		} else if e.outer {
			// Outer apply: an unselected outer row still produces output.
			e.joiner.onMissMatch(false, outerRow, chk)
			if chk.IsFull() {
				return nil, nil
			}
		}
	}
}
// fetchAllInners reads all data from the inner table and stores them in a List.
//
// The inner executor is (re)opened for the current outer row and closed on
// return. Rows failing the inner filter are discarded. When the apply cache
// is enabled a fresh list is allocated, because the previous list may be
// owned by the cache.
func (e *NestedLoopApplyExec) fetchAllInners(ctx context.Context) error {
	err := e.innerExec.Open(ctx)
	defer terror.Call(e.innerExec.Close)
	if err != nil {
		return err
	}

	if e.canUseCache {
		// create a new one since it may be in the cache
		e.innerList = chunk.NewList(retTypes(e.innerExec), e.InitCap(), e.MaxChunkSize())
	} else {
		e.innerList.Reset()
	}
	innerIter := chunk.NewIterator4Chunk(e.innerChunk)
	for {
		err := Next(ctx, e.innerExec, e.innerChunk)
		if err != nil {
			return err
		}
		if e.innerChunk.NumRows() == 0 {
			return nil
		}

		e.innerSelected, err = expression.VectorizedFilter(e.ctx, e.innerFilter, innerIter, e.innerSelected)
		if err != nil {
			return err
		}
		for row := innerIter.Begin(); row != innerIter.End(); row = innerIter.Next() {
			if e.innerSelected[row.Idx()] {
				e.innerList.AppendRow(row)
			}
		}
	}
}
// Next implements the Executor interface.
//
// It loops: when the inner iterator for the current outer row is exhausted,
// emit a miss-match if needed, fetch the next selected outer row, bind its
// correlated columns, and (re)build the inner row list — from the apply cache
// when possible. It returns when req is full or the outer side is exhausted.
func (e *NestedLoopApplyExec) Next(ctx context.Context, req *chunk.Chunk) (err error) {
	req.Reset()
	for {
		if e.innerIter == nil || e.innerIter.Current() == e.innerIter.End() {
			// The previous outer row matched nothing: emit the miss-match row.
			if e.outerRow != nil && !e.hasMatch {
				e.joiner.onMissMatch(e.hasNull, *e.outerRow, req)
			}
			e.outerRow, err = e.fetchSelectedOuterRow(ctx, req)
			if e.outerRow == nil || err != nil {
				return err
			}
			e.hasMatch = false
			e.hasNull = false

			if e.canUseCache {
				// Key the cache by the current values of the correlated columns.
				var key []byte
				for _, col := range e.outerSchema {
					*col.Data = e.outerRow.GetDatum(col.Index, col.RetType)
					key, err = codec.EncodeKey(e.Ctx().GetSessionVars().StmtCtx, key, *col.Data)
					if err != nil {
						return err
					}
				}
				e.cacheAccessCounter++
				value, err := e.cache.Get(key)
				if err != nil {
					return err
				}
				if value != nil {
					e.innerList = value
					e.cacheHitCounter++
				} else {
					err = e.fetchAllInners(ctx)
					if err != nil {
						return err
					}
					if _, err := e.cache.Set(key, e.innerList); err != nil {
						return err
					}
				}
			} else {
				// No cache: bind the correlated columns and re-run the inner side.
				for _, col := range e.outerSchema {
					*col.Data = e.outerRow.GetDatum(col.Index, col.RetType)
				}
				err = e.fetchAllInners(ctx)
				if err != nil {
					return err
				}
			}
			e.innerIter = chunk.NewIterator4List(e.innerList)
			e.innerIter.Begin()
		}

		matched, isNull, err := e.joiner.tryToMatchInners(*e.outerRow, e.innerIter, req)
		e.hasMatch = e.hasMatch || matched
		e.hasNull = e.hasNull || isNull

		if err != nil || req.IsFull() {
			return err
		}
	}
}
// cacheInfo records whether the apply cache was enabled and its hit ratio.
type cacheInfo struct {
	hitRatio float64
	useCache bool
}

// joinRuntimeStats is the runtime statistics collected for join/apply
// executors: concurrency info plus optional apply-cache and hash statistics.
type joinRuntimeStats struct {
	*execdetails.RuntimeStatsWithConcurrencyInfo

	applyCache  bool // whether cache info has been recorded
	cache       cacheInfo
	hasHashStat bool
	hashStat    hashStatistic
}
// newJoinRuntimeStats returns an empty joinRuntimeStats with the embedded
// concurrency-info stats already allocated.
func newJoinRuntimeStats() *joinRuntimeStats {
	return &joinRuntimeStats{
		RuntimeStatsWithConcurrencyInfo: &execdetails.RuntimeStatsWithConcurrencyInfo{},
	}
}
// setCacheInfo sets the cache information. Only used for apply executor.
// The embedded stats mutex guards concurrent writers.
func (e *joinRuntimeStats) setCacheInfo(useCache bool, hitRatio float64) {
	e.Lock()
	e.applyCache = true
	e.cache.useCache = useCache
	e.cache.hitRatio = hitRatio
	e.Unlock()
}
// String renders the stats for EXPLAIN ANALYZE output: the concurrency info,
// followed by cache status (when recorded) and hash statistics (when present).
func (e *joinRuntimeStats) String() string {
	buf := bytes.NewBuffer(make([]byte, 0, 16))
	buf.WriteString(e.RuntimeStatsWithConcurrencyInfo.String())
	if e.applyCache {
		if e.cache.useCache {
			fmt.Fprintf(buf, ", cache:ON, cacheHitRatio:%.3f%%", e.cache.hitRatio*100)
		} else {
			buf.WriteString(", cache:OFF")
		}
	}
	if e.hasHashStat {
		buf.WriteString(", " + e.hashStat.String())
	}
	return buf.String()
}
// Tp implements the RuntimeStats interface.
func (*joinRuntimeStats) Tp() int {
	return execdetails.TpJoinRuntimeStats
}

// Clone implements the RuntimeStats interface.
// Note: the embedded RuntimeStatsWithConcurrencyInfo pointer is shared with
// the original, so this is a shallow copy.
func (e *joinRuntimeStats) Clone() execdetails.RuntimeStats {
	newJRS := &joinRuntimeStats{
		RuntimeStatsWithConcurrencyInfo: e.RuntimeStatsWithConcurrencyInfo,
		applyCache:                      e.applyCache,
		cache:                           e.cache,
		hasHashStat:                     e.hasHashStat,
		hashStat:                        e.hashStat,
	}
	return newJRS
}
// hashJoinRuntimeStats aggregates timing statistics for hash join:
// build-phase duration plus per-worker probe timings (nanosecond counters).
type hashJoinRuntimeStats struct {
	fetchAndBuildHashTable time.Duration
	hashStat               hashStatistic
	fetchAndProbe          int64 // total fetch+probe time across workers, in ns
	probe                  int64 // probe-only time across workers, in ns
	concurrent             int
	maxFetchAndProbe       int64 // slowest single worker, updated atomically
}
// setMaxFetchAndProbeTime raises maxFetchAndProbe to t when t is larger,
// using a CAS loop so concurrent probe workers can report safely.
func (e *hashJoinRuntimeStats) setMaxFetchAndProbeTime(t int64) {
	for {
		cur := atomic.LoadInt64(&e.maxFetchAndProbe)
		if t <= cur || atomic.CompareAndSwapInt64(&e.maxFetchAndProbe, cur, t) {
			return
		}
	}
}
// Tp implements the RuntimeStats interface.
func (*hashJoinRuntimeStats) Tp() int {
	return execdetails.TpHashJoinRuntimeStats
}
// String renders the build and probe phase timings for EXPLAIN ANALYZE.
// Sections are omitted when the corresponding phase recorded no time.
func (e *hashJoinRuntimeStats) String() string {
	buf := bytes.NewBuffer(make([]byte, 0, 128))
	if e.fetchAndBuildHashTable > 0 {
		buf.WriteString("build_hash_table:{total:")
		buf.WriteString(execdetails.FormatDuration(e.fetchAndBuildHashTable))
		buf.WriteString(", fetch:")
		// fetch time = total build phase minus pure table-building time.
		buf.WriteString(execdetails.FormatDuration(e.fetchAndBuildHashTable - e.hashStat.buildTableElapse))
		buf.WriteString(", build:")
		buf.WriteString(execdetails.FormatDuration(e.hashStat.buildTableElapse))
		buf.WriteString("}")
	}
	if e.probe > 0 {
		buf.WriteString(", probe:{concurrency:")
		buf.WriteString(strconv.Itoa(e.concurrent))
		buf.WriteString(", total:")
		buf.WriteString(execdetails.FormatDuration(time.Duration(e.fetchAndProbe)))
		buf.WriteString(", max:")
		buf.WriteString(execdetails.FormatDuration(time.Duration(atomic.LoadInt64(&e.maxFetchAndProbe))))
		buf.WriteString(", probe:")
		buf.WriteString(execdetails.FormatDuration(time.Duration(e.probe)))
		buf.WriteString(", fetch:")
		buf.WriteString(execdetails.FormatDuration(time.Duration(e.fetchAndProbe - e.probe)))
		if e.hashStat.probeCollision > 0 {
			buf.WriteString(", probe_collision:")
			buf.WriteString(strconv.FormatInt(e.hashStat.probeCollision, 10))
		}
		buf.WriteString("}")
	}
	return buf.String()
}
// Clone implements the RuntimeStats interface by returning a copy of the
// stats; all fields are plain values, so a struct copy is sufficient.
func (e *hashJoinRuntimeStats) Clone() execdetails.RuntimeStats {
	cloned := *e
	return &cloned
}
// Merge implements the RuntimeStats interface: it accumulates another
// hashJoinRuntimeStats into e, summing counters and keeping the larger of
// the two per-worker maxima. Stats of a different concrete type are ignored.
func (e *hashJoinRuntimeStats) Merge(rs execdetails.RuntimeStats) {
	tmp, ok := rs.(*hashJoinRuntimeStats)
	if !ok {
		return
	}
	e.fetchAndBuildHashTable += tmp.fetchAndBuildHashTable
	e.hashStat.buildTableElapse += tmp.hashStat.buildTableElapse
	e.hashStat.probeCollision += tmp.hashStat.probeCollision
	e.fetchAndProbe += tmp.fetchAndProbe
	e.probe += tmp.probe
	if e.maxFetchAndProbe < tmp.maxFetchAndProbe {
		e.maxFetchAndProbe = tmp.maxFetchAndProbe
	}
}
|
package core
// NewBoolean wraps a boolean value in a new Type node.
func NewBoolean(value bool) *Type {
	v := value
	return &Type{Boolean: &v}
}
// IsBoolean reports whether the node holds a boolean value.
func (node *Type) IsBoolean() bool {
	return node.Boolean != nil
}
// AsBoolean returns the stored boolean, or false when the node does not
// hold a boolean value.
func (node *Type) AsBoolean() bool {
	if node.Boolean == nil {
		return false
	}
	return *node.Boolean
}
// CompareBoolean reports whether the node holds a boolean equal to value;
// it is false for non-boolean nodes.
func (node *Type) CompareBoolean(value bool) bool {
	if node.Boolean == nil {
		return false
	}
	return *node.Boolean == value
}
|
package main
import (
"net"
"time"
)
// Notifier stores and notifies client connections about stuff
type Notifier interface {
	// AddClient registers a client connection for notifications.
	AddClient(UserClient)
	// Notify delivers a message to every connection registered under an id.
	Notify(string, Message)
}

// TCPConnectionNotifier handles TCP connections
type TCPConnectionNotifier struct {
	// clients maps a client id to one notification channel per active
	// connection. NOTE(review): the map is not synchronized — confirm all
	// AddClient/Notify calls run on a single goroutine.
	clients map[string][]chan string
}
// NewTCPNotifier returns a new initialized notifier
// with an empty (non-nil) client map ready for use.
func NewTCPNotifier() *TCPConnectionNotifier {
	return &TCPConnectionNotifier{clients: map[string][]chan string{}}
}
// Notify actually sends notifications: msg is delivered to every channel
// registered under id. A channel whose buffer is full is considered dead:
// it is closed and dropped from the client list.
func (notifier *TCPConnectionNotifier) Notify(id string, msg Message) {
	channels, ok := notifier.clients[id]
	if !ok {
		return
	}
	// Filter in place. The previous code removed elements while ranging over
	// the same slice, which shifted the remaining elements under the loop:
	// survivors could be skipped or double-closed, and the removal expression
	// `clients[id][index+1:]` could slice out of range and panic once an
	// earlier element had been removed.
	alive := channels[:0]
	for _, ch := range channels {
		select {
		case ch <- msg.String():
			// sent the notification; keep this client
			alive = append(alive, ch)
		default:
			// It's filled up its buffer. Crush, kill, destroy. Remove it from the list
			close(ch)
		}
	}
	notifier.clients[id] = alive
}
// AddClient creates a new client and channel pair
// and starts a goroutine that forwards notifications to the connection.
// The goroutine exits when the channel is closed (see Notify) or a write fails.
func (notifier *TCPConnectionNotifier) AddClient(uc UserClient) {
	// The buffer is 30. This should be plenty.
	ch := make(chan string, 30)
	go processNotifications(uc.Conn, ch)
	notifier.clients[uc.ID] = append(notifier.clients[uc.ID], ch)
}
// Pull notifications from its channel until there's a problem with the connection or the channel closes
func processNotifications(conn net.Conn, notificationChannel <-chan string) {
defer conn.Close()
for notification := range notificationChannel {
conn.SetWriteDeadline(time.Now().Add(time.Second * 30))
_, err := conn.Write([]byte(notification + "\n"))
if err != nil {
return
}
}
}
|
package main
import (
"fmt"
"webapp/persistence/dao"
"webapp/persistence/bolt"
"webapp/entities"
)
// main exercises the StudentDAO against both backends: first the in-memory
// implementation, then the BoltDB-backed one (after opening "boltdb.data").
func main() {
	fmt.Println("DAO testing ")

	fmt.Println("In memory DAO testing ")
	dao.SetDAOImplementation(dao.MEMORY)
	testDao(dao.GetStudentDAO())

	fmt.Println("Bolt DAO testing ")
	bolt.BoltStart("boltdb.data")
	dao.SetDAOImplementation(dao.BOLTDB)
	testDao(dao.GetStudentDAO())
}
// testDao runs a smoke test against the given DAO: create student 999 if it
// does not exist, then look it up and print the result.
func testDao(dao dao.StudentDAO) {
	if !dao.Exists(999) {
		fmt.Println("Student 999 not found => creation")
		student := entities.NewStudent()
		student.Id = 999
		student.FirstName = "Aaa"
		student.LastName = "Bbb"
		dao.Create(student)
	}
	student := dao.Find(999)
	if student == nil {
		fmt.Println("Student 999 not found")
	} else {
		fmt.Println("OK, Student found")
		fmt.Println(student.String())
	}
}
|
package main
import (
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"time"
"github.com/gin-gonic/gin"
"github.com/polarismesh/polaris-go"
"github.com/polarismesh/polaris-go/pkg/model"
)
// Command-line configuration: the Polaris namespace/service to discover and
// the local HTTP port to listen on.
var (
	namespace string
	service   string
	port      int64
)

// initArgs registers the command-line flags; flag.Parse is called in main.
func initArgs() {
	flag.StringVar(&namespace, "namespace", "default", "namespace")
	flag.StringVar(&service, "service", "DiscoverEchoServer", "service")
	flag.Int64Var(&port, "port", 18080, "port")
}
// PolarisConsumer is a consumer of polaris
// that discovers instances of the configured service and proxies /echo
// requests to them.
type PolarisConsumer struct {
	consumer  polaris.ConsumerAPI
	namespace string
	service   string
}
// Run starts the consumer
// by launching the HTTP server; it blocks until the server exits.
func (svr *PolarisConsumer) Run() {
	svr.runWebServer()
}
// runWebServer serves GET /echo: it asks Polaris for one healthy instance of
// the target service, forwards the request to that instance's /echo endpoint,
// reports the call result (latency + status) back to Polaris, and relays the
// response body. It blocks in engine.Run and fatals if the listener fails.
func (svr *PolarisConsumer) runWebServer() {
	engine := gin.Default()

	engine.GET("/echo", func(c *gin.Context) {
		rw := c.Writer
		log.Printf("start to invoke getOneInstance operation")
		// DiscoverEchoServer
		getOneRequest := &polaris.GetOneInstanceRequest{}
		getOneRequest.Namespace = namespace
		getOneRequest.Service = service
		oneInstResp, err := svr.consumer.GetOneInstance(getOneRequest)
		if err != nil {
			log.Printf("[error] fail to getOneInstance, err is %v", err)
			rw.WriteHeader(http.StatusInternalServerError)
			_, _ = rw.Write([]byte(fmt.Sprintf("[error] fail to getOneInstance, err is %v", err)))
			return
		}
		instance := oneInstResp.GetInstance()
		// NOTE(review): instance is used unconditionally below; this assumes
		// GetOneInstance never succeeds with a nil instance — confirm.
		if nil != instance {
			log.Printf("instance getOneInstance is %s:%d", instance.GetHost(), instance.GetPort())
		}

		start := time.Now()
		resp, err := http.Get(fmt.Sprintf("http://%s:%d/echo", instance.GetHost(), instance.GetPort()))
		if err != nil {
			log.Printf("[errot] send request to %s:%d fail : %s", instance.GetHost(), instance.GetPort(), err)
			rw.WriteHeader(http.StatusInternalServerError)
			_, _ = rw.Write([]byte(fmt.Sprintf("[errot] send request to %s:%d fail : %s", instance.GetHost(), instance.GetPort(), err)))
			return
		}
		delay := time.Now().Sub(start)

		// Report the call outcome (latency and HTTP status) to Polaris so
		// load balancing / circuit breaking can use it.
		ret := &polaris.ServiceCallResult{
			ServiceCallResult: model.ServiceCallResult{
				EmptyInstanceGauge: model.EmptyInstanceGauge{},
				CalledInstance:     instance,
				Method:             "/echo",
				RetStatus:          model.RetSuccess,
			},
		}
		ret.SetDelay(delay)
		ret.SetRetCode(int32(resp.StatusCode))
		if err := svr.consumer.UpdateServiceCallResult(ret); err != nil {
			log.Printf("do report service call result : %+v", err)
		}

		defer resp.Body.Close()
		data, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			log.Printf("[error] read resp from %s:%d fail : %s", instance.GetHost(), instance.GetPort(), err)
			rw.WriteHeader(http.StatusInternalServerError)
			_, _ = rw.Write([]byte(fmt.Sprintf("[error] read resp from %s:%d fail : %s", instance.GetHost(), instance.GetPort(), err)))
			return
		}
		rw.WriteHeader(http.StatusOK)
		_, _ = rw.Write(data)
	})

	log.Printf("start run web server, port : %d", port)
	if err := engine.Run(fmt.Sprintf("0.0.0.0:%d", port)); err != nil {
		log.Fatalf("[ERROR]fail to run webServer, err is %v", err)
	}
}
// main parses flags, creates the Polaris consumer API from the default
// configuration file, and runs the proxy web server until it exits.
func main() {
	initArgs()
	flag.Parse()
	if len(namespace) == 0 || len(service) == 0 {
		log.Print("namespace and service are required")
		return
	}
	consumer, err := polaris.NewConsumerAPI()
	// 或者使用以下方法,则不需要创建配置文件
	// consumer, err = api.NewConsumerAPIByAddress("127.0.0.1:8091")
	if err != nil {
		log.Fatalf("fail to create consumerAPI, err is %v", err)
	}
	defer consumer.Destroy()

	svr := &PolarisConsumer{
		consumer:  consumer,
		namespace: namespace,
		service:   service,
	}

	svr.Run()
}
|
package ascii
import (
"bufio"
"fmt"
"io"
"net/http"
"strings"
)
func check(e error) {
if e != nil {
panic(e)
}
}
// Art renders input as ASCII art using the named banner template fetched
// over HTTP, writing the result line by line to w. Panics (via check) if the
// template cannot be fetched.
//
// NOTE(review): the index `int(runeR-32)*9+1+i` assumes the template file
// stores 8-line glyphs (plus separator) for printable ASCII starting at
// space (32); non-ASCII input would index out of range — confirm inputs.
func Art(input string, template string, w http.ResponseWriter) {
	lines, err := UrlToLines("https://git.01.kood.tech/root/public/raw/branch/master/subjects/ascii-art/" + template + ".txt")
	check(err)
	arr := strings.Split(input, "\n")
	for _, e := range arr {
		// Each glyph is 8 rows tall; emit row i of every rune, then repeat.
		for i := 0; i < 8; i++ {
			printS := ""
			r := []rune(e)
			for _, runeR := range r {
				printS += lines[int(runeR-32)*9+1+i]
			}
			if printS != "" {
				fmt.Fprintln(w, printS)
			}
		}
		fmt.Fprintln(w)
	}
}
// UrlToLines performs an HTTP GET on url and returns the response body split
// into lines. NOTE(review): the HTTP status code is not checked, so a 404
// page would be parsed as template content — confirm this is acceptable.
func UrlToLines(url string) ([]string, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return LinesFromReader(resp.Body)
}
func LinesFromReader(r io.Reader) ([]string, error) {
var lines []string
scanner := bufio.NewScanner(r)
for scanner.Scan() {
lines = append(lines, scanner.Text())
}
if err := scanner.Err(); err != nil {
return nil, err
}
return lines, nil
}
|
package encode
import (
"strconv"
"strings"
"unicode"
)
// RunLengthEncode compresses a string using run-length encoding.
func RunLengthEncode(s string) string {
b := &strings.Builder{}
var prev rune
var count int
for _, r := range s {
if r == prev {
count++
} else {
writeRune(b, count, prev)
prev = r
count = 1
}
}
writeRune(b, count, prev)
return b.String()
}
func writeRune(b *strings.Builder, count int, r rune) {
if count > 1 {
b.WriteString(strconv.Itoa(count))
}
if count > 0 {
b.WriteRune(r)
}
}
// RunLengthDecode decompresses a run-length encoded string.
//
// A run is an optional decimal count followed by one byte; a missing count
// means 1. The function works byte-wise, so multi-byte (non-ASCII) runes are
// not supported — TODO confirm inputs are ASCII. Panics on a count that
// strconv.Atoi cannot parse (e.g. one exceeding int range).
func RunLengthDecode(s string) string {
	var b strings.Builder
	for i := 0; i < len(s); {
		// Scan the optional run length. The j < len(s) guard fixes an index
		// out of range panic on malformed input ending in digits.
		j := i
		for j < len(s) && unicode.IsNumber(rune(s[j])) {
			j++
		}
		if j == len(s) {
			// Trailing digits with no payload byte: nothing to repeat.
			break
		}
		l := 1
		if j > i {
			var err error
			l, err = strconv.Atoi(s[i:j])
			if err != nil {
				panic(err)
			}
		}
		for ; l > 0; l-- {
			b.WriteByte(s[j])
		}
		i = j + 1
	}
	return b.String()
}
|
package dcp
// At a popular bar, each customer has a set of favorite drinks, and will happily accept any drink among this set. For example, in the following situation, customer 0 will be satisfied with drinks 0, 1, 3, or 6.
// preferences = {
// 0: [0, 1, 3, 6],
// 1: [1, 4, 7],
// 2: [2, 4, 7, 5],
// 3: [3, 2, 5],
// 4: [5, 8]
// }
// A lazy bartender working at this bar is trying to reduce his effort by limiting the drink recipes he must memorize. Given a dictionary input such as the one above, return the fewest number of drinks he must learn in order to satisfy all customers.
// For the input above, the answer would be 2, as drinks 1 and 5 will satisfy everyone.
// lazyBartender returns the smallest number of drinks that together satisfy
// every customer, where a customer is satisfied by any drink in their
// preference list. It exhaustively checks every subset of the distinct
// drinks (via powerSet), so runtime is exponential in the drink count.
func lazyBartender(preferences map[int][]int) int {
	has := func(items []int, want int) bool {
		for _, it := range items {
			if it == want {
				return true
			}
		}
		return false
	}

	// Collect the distinct drinks across all preference lists.
	seen := make(map[int]bool)
	var drinks []int
	for _, favorites := range preferences {
		for _, d := range favorites {
			if !seen[d] {
				seen[d] = true
				drinks = append(drinks, d)
			}
		}
	}

	// The full drink set always works, so start from its size and try to
	// improve with strictly smaller non-empty subsets.
	best := len(drinks)
	for _, subset := range powerSet(drinks) {
		if len(subset) == 0 || len(subset) >= best {
			continue
		}
		satisfied := make(map[int]bool)
		for _, d := range subset {
			for customer, favorites := range preferences {
				satisfied[customer] = satisfied[customer] || has(favorites, d)
			}
		}
		ok := true
		for _, s := range satisfied {
			ok = ok && s
		}
		if ok {
			best = len(subset)
		}
	}
	return best
}
|
package cmd
import (
"github.com/oberd/ecsy/ecs"
"github.com/spf13/cobra"
)
//var taskArn string

// deployNewestTaskCmd represents the updateServiceTask command.
// It fast-forwards a service to the newest task definition in its family,
// doing nothing when the service already runs the newest revision.
var deployNewestTaskCmd = &cobra.Command{
	Use:   "deploy-newest-task [cluster] [service]",
	Short: "deploy newest task definition to a service",
	Long:  `by default, fast forwards to newest task definition`,
	Run: func(cmd *cobra.Command, args []string) {
		cluster, service := ServiceChooser(args)
		currentTask, err := ecs.GetCurrentTaskDefinition(cluster, service)
		failOnError(err, "finding current definition")
		newestTask, err := ecs.FindNewestDefinition(*currentTask.Family)
		failOnError(err, "getting newest definition")
		// Compare ARN values, not the *string pointers: the previous pointer
		// comparison was virtually never true, so the service was redeployed
		// even when it already ran the newest definition.
		if newestTask.TaskDefinitionArn != nil && currentTask.TaskDefinitionArn != nil &&
			*newestTask.TaskDefinitionArn == *currentTask.TaskDefinitionArn {
			return
		}
		_, err = ecs.DeployTaskToService(cluster, service, newestTask)
		failOnError(err, "deploying to service")
	},
}
// init registers the deploy-newest-task command on the root command.
func init() {
	RootCmd.AddCommand(deployNewestTaskCmd)
	//deployNewestTaskCmd.Flags().StringVarP(&taskArn, "taskArn", "t", "", "Help message for toggle")
}
|
package dict
import "context"
// Dictionary looks up a word and returns its full entry.
type Dictionary interface {
	Search(ctx context.Context, word string) (*Word, error)
}

// Pronunciation holds US and UK phonetic spellings plus audio URLs.
type Pronunciation struct {
	US        string `json:"us"`
	US_MP3URL string `json:"us_mp3url"`
	UK        string `json:"uk"`
	UK_MP3URL string `json:"uk_mp3url"`
}

// Def is a single definition tagged with its part of speech.
type Def struct {
	PartOfSpeech string `json:"pos"`
	Def          string `json:"def"`
}

// SampleSentence is an example usage with its Chinese translation and audio.
type SampleSentence struct {
	English string `json:"en"`
	Chinese string `json:"ch"`
	MP3URL  string `json:"mp3url"`
}

// Word is a complete dictionary entry for one word.
type Word struct {
	Word            string           `json:"word"`
	Pronunciation   Pronunciation    `json:"pronunciation"`
	Defs            []Def            `json:"defs"`
	SampleSentences []SampleSentence `json:"sample_sentences"`
}
|
package main
import (
"fmt"
)
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// ListNode is a singly-linked list node.
type ListNode struct {
	Val  int
	Next *ListNode
}

// isPalindrome reports whether the linked list reads the same forwards and
// backwards. A nil list is reported as not a palindrome (matching the
// original behavior).
func isPalindrome(head *ListNode) bool {
	if head == nil {
		return false
	}
	// Copy the values into a slice, then compare from both ends.
	var vals []int
	for node := head; node != nil; node = node.Next {
		vals = append(vals, node.Val)
	}
	for i, j := 0, len(vals)-1; i < j; i, j = i+1, j-1 {
		if vals[i] != vals[j] {
			return false
		}
	}
	return true
}
// main builds the palindrome list 5->4->3->3->4->5 and prints the check
// result (true).
func main() {
	vals := []int{5, 4, 3, 3, 4, 5}
	var head *ListNode
	for i := len(vals) - 1; i >= 0; i-- {
		head = &ListNode{Val: vals[i], Next: head}
	}
	fmt.Println(isPalindrome(head))
}
|
package RateLimiter
import (
"encoding/json"
"fmt"
"strings"
"testing"
"time"
)
//Lol more like testing then actual test
// init is an ad-hoc demo rather than a real test: it builds a nested limiter
// group, consumes tallies, round-trips the limiter state through the
// Storable interface via JSON, restores it into a fresh group, and dumps the
// debug representation. Output goes to stdout; nothing is asserted.
// NOTE(review): side-effectful init in a test file runs before every test —
// consider converting this to a regular test or example.
func init() {
	gl := NewLimiterGroupAnd(NewLimiter(time.Second, 3), NewLimiterGroupOr(NewLimiter(time.Second, 2), NewLimiter(time.Second*5, 4)))
	for i := 0; i < 5; i++ {
		fmt.Printf("rem:%d,%t\n", gl.RemainingTally(), gl.TryAddTally())
	}
	// Serialize the limiter state...
	sgl, _ := gl.(Storable)
	gsd := sgl.GetStorableData()
	fmt.Printf("raw data: %v\n", gsd)
	jsonDat, _ := json.MarshalIndent(gsd, "", " ")
	fmt.Printf("json: %s\n", string(jsonDat))
	// ...and restore it into a structurally similar, fresh limiter group.
	newI := &[]interface{}{}
	_ = json.Unmarshal(jsonDat, newI)
	fmt.Printf("NewI: %s\n", strings.Join(strings.Split(fmt.Sprintf("%#v", newI), "},"), "},\n"))
	ncl := NewLimiterGroupAnd(NewLimiter(time.Second, 5), NewLimiterGroupOr(NewLimiter(time.Second, 2), NewLimiter(time.Second, 3)))
	rs := ncl.(Storable)
	res := rs.SetStorableData(*newI)
	fmt.Printf("result: %t\n", res)
	for i := 0; i < 4; i++ {
		fmt.Printf("rem:%d,%t\n", gl.RemainingTally(), gl.TryAddTally())
	}
	db := ncl.(Debuggable)
	fmt.Printf("\n%#v\n", db.GetDebug())
	jsonDat, err := json.MarshalIndent(db.GetDebug(), "", " ")
	if err != nil {
		fmt.Printf("\n%s\n", err)
	}
	fmt.Printf("\n%s\n", jsonDat)
}
// TestAdd checks that a limiter of 5 units per 2 seconds does not keep
// accepting tallies past its capacity within the window.
// NOTE(review): c counts loop iterations (attempts), not accepted tallies,
// and is incremented after the check — confirm the guard `c > unit` can
// actually trip; as written it looks like it never fails for this limiter.
func TestAdd(t *testing.T) {
	t2 := 2 * time.Second
	unit := 5
	l := NewLimiter(t2, unit)
	c := 0
	for i := 0; i < 10; i++ {
		if l.TryAddTally() && c > unit {
			t.Fail()
		}
		c++
	}
}
// TestGet is a placeholder. TODO: assert RemainingTally/GetStorableData
// behavior here.
func TestGet(t *testing.T) {

}
|
package factories
import (
"errors"
"fmt"
"github.com/barrydev/api-3h-shop/src/constants"
"github.com/barrydev/api-3h-shop/src/helpers"
"github.com/barrydev/api-3h-shop/src/model"
"github.com/dgrijalva/jwt-go"
"github.com/gin-gonic/gin"
)
// AccessTokenClaims is the JWT payload for an access token: the user id and
// role plus the standard registered claims (exp, iat, ...).
type AccessTokenClaims struct {
	Id   int64 `json:"_id"`
	Role int64 `json:"role"`
	jwt.StandardClaims
}

// ResponseAccessToken is the login response body: the authenticated user and
// the signed access token string.
type ResponseAccessToken struct {
	User        *model.User `json:"user"`
	AccessToken *string     `json:"access_token"`
}
// RetriveAccessTokenPayload extracts the JWT access token from the request,
// verifies its HMAC signature against the application secret, and returns
// the decoded claims.
//
// It returns an error when the token is missing, malformed, signed with an
// unexpected method, invalid/expired, or when the claims have an unexpected
// type.
func RetriveAccessTokenPayload(c *gin.Context) (*AccessTokenClaims, error) {
	tokenString := helpers.GetAccessToken(c)
	if tokenString == "" {
		return nil, errors.New("invalid token")
	}
	token, err := jwt.ParseWithClaims(tokenString, &AccessTokenClaims{}, func(token *jwt.Token) (interface{}, error) {
		// Make sure that the token method conform to "SigningMethodHMAC"
		if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
		}
		return []byte(constants.SECRET_KEY), nil
	})
	if err != nil {
		return nil, err
	}
	claims, ok := token.Claims.(*AccessTokenClaims)
	if !ok || !token.Valid {
		// Previously this branch returned (nil, err) with err provably nil,
		// so callers received a nil claims pointer without an error and
		// could dereference it. Return an explicit error instead.
		return nil, errors.New("invalid token claims")
	}
	return claims, nil
}
|
package main
import (
"fmt"
)
// https://leetcode-cn.com/problems/word-ladder/
func ladderLength(beginWord string, endWord string, wordList []string) int {
n := len(wordList)
if n == 0 {
return 0
}
isOK := func(a, b string) bool {
diff := 0
for i := 0; i < len(a) && diff < 2; i++ {
if a[i] != b[i] {
diff++
}
}
return diff == 1
}
endIdx, mark := -1, make([]bool, n)
for i := 0; i < n; i++ {
if wordList[i] == endWord {
endIdx = i
}
}
if endIdx < 0 {
return 0
}
// dist[i]: 与 endWord 距离为i的单词下标
dist, pos := make([][]int, n), 0
dist[0], mark[endIdx] = []int{endIdx}, true
for ; pos < n; pos++ {
for i := 0; i < len(dist[pos]); i++ {
if isOK(beginWord, wordList[dist[pos][i]]) {
return pos + 2
}
for j := 0; j < n; j++ {
if !mark[j] && isOK(wordList[dist[pos][i]], wordList[j]) {
mark[j] = true
dist[pos+1] = append(dist[pos+1], j)
}
}
}
}
return 0
}
// main runs ladderLength over hand-written fixtures (only the second one,
// matching the original's cases[1:] slice) and prints the results.
func main() {
	cases := [][][]string{
		{
			{"lost", "miss"},
			{"most", "mist", "miss", "lost", "fist", "fish"},
		},
		{
			{"leet", "code"},
			{"lest", "leet", "lose", "code", "lode", "robe", "lost"},
		},
	}
	for i, c := range cases[1:] {
		fmt.Println("## case", i)
		// solve
		fmt.Println(ladderLength(c[0][0], c[0][1], c[1]))
	}
}
|
package model
import (
"time"
)
// File describes one filesystem entry as exposed over the JSON API.
type File struct {
	Name string `json:"name"` // base name of the entry
	FullPath string `json:"fullPath"` // absolute (or root-relative) path
	IsDir bool `json:"isDir"` // true for directories
	Size int64 `json:"size"` // size in bytes
	FileType string `json:"fileType"` // classification label for the UI
	Created time.Time `json:"created"`
	Modified time.Time `json:"modified"`
	Accessed time.Time `json:"accessed"`
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2020-07-27 09:53
# @File : lt_142_Linked_List_Cycle_II.go
# @Description : 如果有环,返回入环节点
# @Attention :
如果碰撞了,则fast 到head 处步调一致移动
错误点:
当碰撞之后,slow 要移动到slow的下一个节点 既slow=slow.next
*/
package v0
// detectCycle returns the node where a cycle begins in the list rooted at
// head, or nil when the list is acyclic (LeetCode 142).
//
// Floyd's tortoise/hare, with fast starting one node ahead of slow. After a
// collision, fast restarts from head while slow advances one extra step
// (compensating for the head start); they then walk in lockstep and meet at
// the cycle's entry node.
func detectCycle(head *ListNode) *ListNode {
	if head == nil || head.Next == nil {
		return nil
	}
	slow, fast := head, head.Next
	for fast != nil && fast.Next != nil {
		fast = fast.Next.Next
		slow = slow.Next
		if fast != slow {
			continue
		}
		// Collision detected: locate the entrance.
		fast = head
		slow = slow.Next
		for fast != slow {
			fast = fast.Next
			slow = slow.Next
		}
		return fast
	}
	return nil
}
|
package repository
import (
"arep/config"
"arep/model"
"context"
"encoding/json"
"github.com/olivere/elastic"
"log"
"strconv"
)
// ElasticRepository wraps an Elasticsearch client for store documents.
type ElasticRepository struct {
	client *elastic.Client
}
// ElasticStoresIndex is the Elasticsearch index holding store documents.
var ElasticStoresIndex = config.ElasticStoresIndex
// NewElasticRepository connects to Elasticsearch using the supplied
// configuration and returns a repository bound to that client.
func NewElasticRepository(config *config.ElasticConfiguration) (*ElasticRepository, error) {
	opts := []elastic.ClientOptionFunc{
		elastic.SetURL(config.DbUrl),
		elastic.SetBasicAuth(config.UserName, config.Password),
	}
	client, err := elastic.NewClient(opts...)
	if err != nil {
		return nil, err
	}
	return &ElasticRepository{client: client}, nil
}
// UpdateStore flips the "enabled" flag on the store document with the
// given id via a partial document update.
func (r *ElasticRepository) UpdateStore(ctx context.Context, storeID string, enabled bool) error {
	partial := struct {
		Enabled bool `json:"enabled"`
	}{Enabled: enabled}
	req := r.client.Update().
		Index(ElasticStoresIndex).
		Type("_doc").
		Id(storeID).
		Doc(partial)
	_, err := req.Do(ctx)
	return err
}
// GetStores multi-gets the store documents for the given ids and returns
// those that exist and unmarshal cleanly. Documents that fail to parse are
// logged and skipped.
func (r *ElasticRepository) GetStores(ctx context.Context, storeIDs []int64) (*[]model.Store, error) {
	// BUG FIX: make([]*elastic.MultiGetItem, len(storeIDs)) followed by
	// append produced len(storeIDs) leading nil items in the Mget request.
	// Allocate with zero length and the right capacity instead.
	items := make([]*elastic.MultiGetItem, 0, len(storeIDs))
	for _, storeID := range storeIDs {
		item := elastic.NewMultiGetItem()
		item.Index(ElasticStoresIndex)
		item.Id(strconv.FormatInt(storeID, 10))
		item.Type("_doc")
		items = append(items, item)
	}
	resp, err := r.client.Mget().Add(items...).Do(ctx)
	if err != nil {
		return nil, err
	}
	// BUG FIX (same pattern): the old code appended after a non-zero-length
	// make, so callers received len(storeIDs) zero-value stores up front.
	stores := make([]model.Store, 0, len(storeIDs))
	for _, doc := range resp.Docs {
		var store model.Store
		if err := json.Unmarshal(*doc.Source, &store); err != nil {
			log.Print("error parsing store", err.Error())
			continue
		}
		stores = append(stores, store)
	}
	return &stores, nil
}
|
//
// riak-statsd
// Sends Riak stats to statsd every 60s.
//
// Usage:
// -nodename="riak": Riak node name
// -riak_host="127.0.0.1": Riak host
// -riak_http_port=8098: Riak HTTP port
// -statsd_host="127.0.0.1": Statsd host
// -statsd_port=8125: Statsd host
package main
import (
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"net"
"net/http"
"os"
"strings"
"time"
)
// MetricTypes maps each Riak stat key we forward to its statsd metric type
// ("g" = gauge, "ms" = timing). Only keys listed here are sent to statsd.
var MetricTypes = map[string]string{
	"node_gets": "g",
	"node_puts": "g",
	"vnode_gets": "g",
	"vnode_puts": "g",
	"read_repairs": "g",
	"read_repairs_total": "g",
	"node_get_fsm_objsize_mean": "g",
	"node_get_fsm_objsize_median": "g",
	"node_get_fsm_objsize_95": "g",
	"node_get_fsm_objsize_100": "g",
	"node_get_fsm_time_mean": "ms",
	"node_get_fsm_time_median": "ms",
	"node_get_fsm_time_95": "ms",
	"node_get_fsm_time_100": "ms",
	"node_put_fsm_time_mean": "ms",
	"node_put_fsm_time_median": "ms",
	"node_put_fsm_time_95": "ms",
	"node_put_fsm_time_100": "ms",
	"node_get_fsm_siblings_mean": "g",
	"node_get_fsm_siblings_median": "g",
	"node_get_fsm_siblings_95": "g",
	"node_get_fsm_siblings_100": "g",
	"memory_processes_used": "g",
	"node_get_fsm_active": "g",
	"node_get_fsm_active_60s": "g",
	"node_get_fsm_in_rate": "g",
	"node_get_fsm_out_rate": "g",
	"node_get_fsm_rejected": "g",
	"node_get_fsm_rejected_60s": "g",
	"node_get_fsm_rejected_total": "g",
	"node_put_fsm_active": "g",
	"node_put_fsm_active_60s": "g",
	"node_put_fsm_in_rate": "g",
	"node_put_fsm_out_rate": "g",
	"node_put_fsm_rejected": "g",
	"node_put_fsm_rejected_60s": "g",
	"node_put_fsm_rejected_total": "g",
	"index_fsm_create": "g",
	"index_fsm_create_error": "g",
	"index_fsm_active": "g",
	"list_fsm_create": "g",
	"list_fsm_create_error": "g",
	"list_fsm_active": "g",
	"sys_process_count": "g",
	"coord_redirs_total": "g",
	"pbc_connects": "g",
	"pbc_active": "g",
}
// getRiakStats fetches and decodes the JSON stats document from Riak's
// HTTP /stats endpoint.
func getRiakStats(host string, port int) (*map[string]interface{}, error) {
	resp, err := http.Get(fmt.Sprintf("http://%s:%d/stats", host, port))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	stats := make(map[string]interface{})
	if err := json.Unmarshal(body, &stats); err != nil {
		return nil, err
	}
	return &stats, nil
}
// pingRiak checks Riak liveness via the HTTP /ping endpoint, returning nil
// only when the node answers with the body "OK".
func pingRiak(host string, port int) error {
	url := fmt.Sprintf("http://%s:%d/ping", host, port)
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return errors.New("Error reading response")
	}
	// IDIOM FIX (staticcheck S1028): fmt.Errorf instead of
	// errors.New(fmt.Sprintf(...)); message text unchanged.
	if msg := string(body); msg != "OK" {
		return fmt.Errorf("Not OK. Response was: %s", msg)
	}
	return nil
}
// prepareMetrics renders one statsd line ("node.key:value|type") per entry
// in MetricTypes, pulling each current value from the given Riak stats map.
func prepareMetrics(nodename string, riakstats map[string]interface{}) *[]string {
	lines := make([]string, 0, len(MetricTypes))
	for key, st := range MetricTypes {
		line := fmt.Sprintf("%s.%s:%v|%s", nodename, key, riakstats[key], st)
		lines = append(lines, line)
	}
	return &lines
}
// sendRiakMetrics writes the metric lines as a single newline-joined UDP
// datagram to statsd and returns any write error.
func sendRiakMetrics(conn *net.UDPConn, metrics *[]string) error {
	data := []byte(strings.Join(*metrics, "\n"))
	_, err := conn.Write(data)
	if err != nil {
		// BUG FIX: log.Println does not interpret format verbs ("%v" was
		// printed literally); use log.Printf.
		log.Printf("Error sending metrics: %v", err)
	}
	// BUG FIX: previously always returned nil; surface the write error so
	// callers can react if they choose to.
	return err
}
// getAndSendRiakMetrics polls Riak once and, if stats were retrieved,
// forwards them to statsd. Fetch errors are deliberately dropped — the
// next tick retries.
func getAndSendRiakMetrics(conn *net.UDPConn, nodename string, host string, port int) {
	stats, _ := getRiakStats(host, port)
	if stats == nil {
		return
	}
	sendRiakMetrics(conn, prepareMetrics(nodename, *stats))
}
// main parses flags, verifies the Riak node is reachable, opens a UDP
// connection to statsd, then polls Riak stats every 60s and forwards them.
func main() {
	var statsdHost = flag.String("statsd_host", "127.0.0.1", "Statsd host")
	var statsdPort = flag.Int("statsd_port", 8125, "Statsd host")
	var nodename = flag.String("nodename", "riak", "Riak node name")
	var riakHost = flag.String("riak_host", "127.0.0.1", "Riak host")
	var riakHttpPort = flag.Int("riak_http_port", 8098, "Riak HTTP port")
	flag.Parse()
	// First ping the node to make sure it works.
	// BUG FIX: the original called os.Exit(1) AFTER log.Fatalf, which never
	// runs (Fatalf already exits). Split into Printf + Exit so the exit is
	// explicit and reachable.
	if err := pingRiak(*riakHost, *riakHttpPort); err != nil {
		log.Printf("Error: %v", err)
		os.Exit(1)
	}
	statsd := fmt.Sprintf("%s:%d", *statsdHost, *statsdPort)
	addr, err := net.ResolveUDPAddr("udp", statsd)
	if err != nil {
		log.Printf("Couldn't resolve UDP addr: %v", err)
		os.Exit(1)
	}
	conn, err := net.DialUDP("udp", nil, addr)
	if err != nil {
		log.Printf("Couldn't connect to statsd at %s", statsd)
		os.Exit(1)
	}
	// Every 60s hit the stats endpoint and relay the result to statsd.
	interval := time.NewTicker(time.Second * 60)
	for range interval.C { // `for _ = range` simplified to `for range`
		go getAndSendRiakMetrics(conn, *nodename, *riakHost, *riakHttpPort)
	}
}
|
package main
import (
"../"
"fmt"
"io"
"log"
"net/http"
"time"
"github.com/gin-gonic/gin"
_ "github.com/go-sql-driver/mysql"
"github.com/itsjamie/gin-cors"
"github.com/jinzhu/gorm"
)
// initDatabase opens the MySQL connection, runs migrations, and enables
// query logging. It overwrites *db in place so the caller's handle is
// wired to the live connection.
func initDatabase(db *gorm.DB) {
	var err error
	*db, err = gorm.Open("mysql", "brave:brave@/brave?charset=utf8&parseTime=True&loc=Local")
	if err != nil {
		// BUG FIX: the original panicked and THEN called log.Fatal, which
		// was unreachable. A single fatal exit path is enough.
		log.Fatal(err)
	}
	brave.MigrateDatabase(db)
	db.LogMode(true)
}
// main wires the database, CORS middleware, an image proxy, the public JSON
// API and the admin scraping commands, then serves HTTP on :3643.
func main() {
	db := &gorm.DB{}
	initDatabase(db)
	router := gin.New()
	// Allow cross-origin requests from any origin for the endpoints below.
	router.Use(cors.Middleware(cors.Config{
		Origins: "*",
		Methods: "GET, PUT, POST, DELETE",
		RequestHeaders: "Origin, Authorization, Content-Type",
		ExposedHeaders: "",
		MaxAge: 50 * time.Second,
		Credentials: true,
		ValidateHeaders: false,
	}))
	// Proxy remote images through this server, streaming the upstream
	// response (status code and body) straight to the client.
	router.GET("/image-proxy", func(c *gin.Context) {
		src := c.Query("src")
		res, err := brave.Proxy(src)
		if err != nil {
			// FIXME: X-box image
			c.String(404, "NotFind")
			return
		}
		w := c.Writer
		w.WriteHeader(res.StatusCode)
		io.Copy(w, res.Body)
	})
	api := router.Group("api")
	{
		// Search mangas by (partial) name, capped at 25 results.
		api.GET("/MangaList", func(c *gin.Context) {
			q := c.Query("q")
			mangaList := []brave.MangaInfo{}
			db.Where("Name LIKE ?", "%"+q+"%").Order("Name").Limit("25").Find(&mangaList)
			result := make([]interface{}, len(mangaList))
			for i, manga := range mangaList {
				result[i] = map[string]interface{}{
					"ID": manga.ID,
					"Name": manga.Name,
				}
			}
			c.JSON(http.StatusOK, result)
		})
		// Fetch a single manga's id/name by primary key.
		api.GET("/MangaInfo/:id", func(c *gin.Context) {
			mangaID := c.Param("id")
			mangaInfo := brave.MangaInfo{}
			db.First(&mangaInfo, mangaID)
			result := map[string]interface{}{
				"ID": mangaInfo.ID,
				"Name": mangaInfo.Name,
			}
			c.JSON(http.StatusOK, result)
		})
		// List a manga's chapters ordered by chapter number.
		api.GET("/ChapterList/:id", func(c *gin.Context) {
			mangaID := c.Param("id")
			chapterList := []brave.ChapterInfo{}
			db.Where("manga_id=?", mangaID).Order("number").Find(&chapterList)
			result := make([]interface{}, len(chapterList))
			for i, chapter := range chapterList {
				result[i] = map[string]interface{}{
					"ID": chapter.ID,
					"Name": chapter.Name,
				}
			}
			c.JSON(http.StatusOK, result)
		})
		// Fetch one chapter of a manga by manga id + chapter number.
		api.GET("/ChapterInfo/:mangaID/:chapter", func(c *gin.Context) {
			mangaID := c.Param("mangaID")
			chapter := c.Param("chapter")
			chapterInfo := brave.ChapterInfo{}
			db.Where("manga_id=? AND number=?", mangaID, chapter).First(&chapterInfo)
			result := map[string]interface{}{
				"ID": chapterInfo.ID,
				"Name": chapterInfo.Name,
			}
			c.JSON(http.StatusOK, result)
		})
		// List a chapter's page image URLs ordered by page number.
		api.GET("/PageList/:mangaID/:chapter", func(c *gin.Context) {
			mangaID := c.Param("mangaID")
			chapter := c.Param("chapter")
			pageList := []brave.PageInfo{}
			db.Joins("JOIN chapter_infos ON page_infos.chapter_id=chapter_infos.id").Where("chapter_infos.manga_id=? AND chapter_infos.number=?", mangaID, chapter).Order("page_infos.number").Find(&pageList)
			result := make([]interface{}, len(pageList))
			for i, page := range pageList {
				result[i] = map[string]interface{}{
					"Src": page.Origin,
				}
			}
			c.JSON(http.StatusOK, result)
		})
	}
	// Admin-triggered scraping jobs; each replies with its duration.
	// NOTE(review): these endpoints are unauthenticated — confirm they are
	// protected upstream (reverse proxy / network) before exposing.
	cmd := router.Group("/admin/cmd")
	{
		cmd.GET("/ScrapMangaList", func(c *gin.Context) {
			t := time.Now()
			msg := brave.ScrapMangaList(db)
			latency := time.Since(t)
			c.String(http.StatusOK, fmt.Sprintf("%s in %s", msg, latency))
		})
		cmd.GET("/ScrapMangas", func(c *gin.Context) {
			t := time.Now()
			mangaList := brave.GetAllMangaList(db)
			msg := brave.ScrapMangas(db, mangaList)
			latency := time.Since(t)
			c.String(http.StatusOK, fmt.Sprintf("%s in %s", msg, latency))
		})
		cmd.GET("/ScrapChapters", func(c *gin.Context) {
			t := time.Now()
			var chapterList []brave.ChapterInfo
			// force=true rescrapes every chapter; otherwise only the ones
			// not yet scraped.
			if c.Query("force") == "true" {
				chapterList = brave.GetAllChapterList(db)
			} else {
				chapterList = brave.GetUnscrapedChapterList(db)
			}
			msg := brave.ScrapChapters(db, chapterList)
			latency := time.Since(t)
			c.String(http.StatusOK, fmt.Sprintf("%s in %s", msg, latency))
		})
	}
	router.Run(":3643")
}
|
package main
import (
"context"
"net/http"
"os"
"os/signal"
"time"
"go.uber.org/zap"
"prometheus-alertmanager-dingtalk/config"
"prometheus-alertmanager-dingtalk/dingtalk"
"prometheus-alertmanager-dingtalk/zaplog"
)
// init wires configuration, logging and the dingtalk client, then registers
// the HTTP routes on the default mux before main starts the server.
func init() {
	config.SetupInit()
	zaplog.SetupInit()
	dingtalk.SetupInit()
	http.HandleFunc("/ready", dingtalk.HandlerReady)
	http.HandleFunc("/healthy", dingtalk.HandlerHealthy)
	http.HandleFunc("/dingtalk/alertmanager", dingtalk.HandlerAlertManager)
}
// main starts the webhook HTTP server (routes were registered in init) and
// shuts it down gracefully on interrupt with a 15s drain timeout.
func main() {
	srv := &http.Server{
		Addr: config.GetListenUri(),
		ReadTimeout: 5 * time.Minute,
		ReadHeaderTimeout: 5 * time.Second,
		WriteTimeout: 5 * time.Minute,
	}
	zaplog.Logger.Debug("ConfigSetting",
		zap.String("uri", config.GetDingTalkUri()),
		zap.String("securitySettingsType", config.GetSecuritySettingsType()),
		zap.String("secretKey", config.GetSecretKey()),
		zap.String("templatePath", config.GetTemplatePath()),
		zap.Strings("allowLabels", config.GetAllowLables()),
	)
	go func() {
		zaplog.Logger.Info("Web Starting Completed !", zap.String("ListenUri", config.GetListenUri()))
		if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			zaplog.Logger.Fatal("web Server start Failed", zap.Error(err))
		}
	}()
	// Wait for an interrupt, then shut the server down gracefully.
	// BUG FIX: signal.Notify requires a buffered channel (go vet warns): a
	// signal arriving while no receiver is ready would be dropped on an
	// unbuffered channel.
	osSignal := make(chan os.Signal, 1)
	signal.Notify(osSignal, os.Interrupt)
	<-osSignal
	// Begin graceful shutdown with a 15-second deadline.
	zaplog.Logger.Info("shutdown server ...")
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		zaplog.Logger.Fatal("Server Shutdown:", zap.Error(err))
	}
	zaplog.Logger.Info("server shutdown completed !")
}
|
package socket
import (
"jmcs/core/utils/net/port"
"jmcs/core/utils"
"strings"
"github.com/goinggo/mapstructure"
"net"
"fmt"
"errors"
"bytes"
"sync"
)
// socket holds the net.socket section of the application configuration.
type socket struct {
	Enable bool // whether the socket server should run
	Port port.Port // TCP port to listen on
	HeartEnable bool // heartbeat toggle (not read in this file)
}
// conf is the parsed net.socket configuration for this process.
var conf socket
const (
	CONF_NAME = "net.socket" // configuration section name; used to locate this app's settings
)
// Run starts the socket application: it loads the socket section of the
// configuration and, when enabled, runs the blocking TCP accept loop.
//
// NOTE(review): waitGroup is passed by VALUE, so Done() decrements a copy
// and the caller's WaitGroup counter never reaches zero — likely a shutdown
// deadlock. The fix (taking *sync.WaitGroup) changes the signature, so it
// must be coordinated with callers.
func Run(waitGroup sync.WaitGroup) {
	/*初始化socket应用配置*/
	initconf()
	/*启动socket服务端*/
	start()
	waitGroup.Done()
}
// initconf parses the net.socket configuration section into the package-level
// conf value and, when the server is enabled, verifies the port is free
// (CheckErr aborts on failure).
func initconf() {
	// Split "net.socket" into its section/key pair.
	confs := strings.Split(CONF_NAME, ".")
	baseconfig, ok := utils.Configs[confs[0]][confs[1]]
	if !ok {
		// BUG FIX: the disabled default was immediately overwritten by the
		// decode below; return here so a missing config section really
		// leaves the server disabled.
		conf = socket{Enable: false}
		return
	}
	conf = socket{}
	err := mapstructure.Decode(baseconfig, &conf) // decode the socket section
	utils.CheckErr(err)
	if !conf.Enable {
		return
	}
	// Abort when the configured port is already taken.
	if ok := conf.Port.CheckEnabled(nil); ok {
		err := errors.New("端口" + conf.Port.String() + "已被占用,请更换端口")
		utils.CheckErr(err)
	}
}
// start resolves the configured TCP address, listens on it, and serves each
// accepted connection on its own goroutine, forever.
func start() {
	server := ":" + conf.Port.String()
	tcpAddr, err := net.ResolveTCPAddr("tcp", server)
	utils.CheckErr(err)
	fmt.Println("启动监听tcp:", conf.Port)
	listen := listenAddr(tcpAddr)
	fmt.Println("启动完成")
	for {
		conn, err := listen.Accept()
		// BUG FIX: the error must be checked BEFORE touching conn — on a
		// failed Accept conn is nil and conn.RemoteAddr() would panic.
		if err != nil { // one bad accept shouldn't kill the server
			fmt.Println(err) // todo: proper logging / error handling
			continue
		}
		fmt.Println("连接客户端:", conn.RemoteAddr().String())
		go handleTcp(conn)
	}
}
// listenAddr listens on the given TCP address, aborting via CheckErr on failure.
func listenAddr(tcpAddr *net.TCPAddr) *net.TCPListener {
	l, err := net.ListenTCP("tcp", tcpAddr)
	utils.CheckErr(err)
	return l
}
// handleTcp is the per-connection loop: read a frame, parse its head and
// dispatch it. (Original todo: is routing from here reasonable?)
func handleTcp(conn net.Conn) {
	for {
		buf := make([]byte, 1024*1024)
		// BUG FIX: the Read result was ignored; once the peer closed the
		// connection Read returned (0, io.EOF) forever and this loop spun
		// at 100% CPU re-dispatching an empty frame. Stop on error and use
		// only the bytes actually read.
		n, err := conn.Read(buf) // todo: framing should follow the agreed protocol
		if err != nil {
			conn.Close()
			return
		}
		// Strip trailing NUL padding (see original note: a proper framed
		// header would make this unnecessary).
		data := bytes.TrimRight(buf[:n], "\x00")
		head := Head{}
		head.parse(data)
		Handle(conn, head)
	}
}
|
package blockchain
import (
"log"
"time"
"bytes"
"encoding/gob"
"crypto/sha256"
"encoding/binary"
)
// Block is a single block in the chain.
type Block struct {
	Version uint64 // version number
	PrevHash []byte // hash of the previous block
	MerkleRoot []byte // merkle root committing to the transactions
	TimeStamp uint64 // unix timestamp at creation
	Difficulty uint64 // mining difficulty target
	Nonce uint64 // nonce found by proof-of-work
	Hash []byte // hash of this block
	Transactions []*Transaction // transaction data
}
// NewBlock mines a new block that commits to the given transactions and
// links to the previous block's hash.
func NewBlock(txs []*Transaction, prevBlockHash []byte) *Block {
	// 1. Assemble the header fields.
	b := &Block{
		Version: 00,
		PrevHash: prevBlockHash,
		TimeStamp: uint64(time.Now().Unix()),
		Difficulty: 0,
		Transactions: txs,
	}
	// 2. Commit to the transaction set.
	b.MerkleRoot = b.MakeMerkleRoot()
	// 3. Run proof-of-work to fill in Hash and Nonce (the miner's job).
	worker := NewProofOfWork(b)
	b.Hash, b.Nonce = worker.Run()
	return b
}
// MakeMerkleRoot folds the block's transaction IDs into a single merkle-style
// root hash: levels are reduced pairwise, the odd leftover of each level is
// set aside, and any leftovers are hashed into the final result.
// Returns nil for a block with no transactions.
func (b *Block) MakeMerkleRoot() []byte {
	// 1. Collect the hash (TXID) of every transaction.
	var txHashs [][]byte
	for _, tx := range b.Transactions {
		txHashs = append(txHashs, tx.TXID)
	}
	// BUG FIX: an empty transaction list previously spun forever in the
	// reduction loop (len 0 never hits the len==1 exit condition) and would
	// have panicked on txHashs[0] anyway.
	if len(txHashs) == 0 {
		return nil
	}
	// 2. Reduce pairwise until one hash remains; stash the odd element of
	//    each level in alones.
	var alones [][]byte // single nodes that could not be paired
	for len(txHashs) > 1 {
		// Pull out the unpairable trailing node, if any.
		if len(txHashs)%2 != 0 {
			alones = append(alones, txHashs[len(txHashs)-1])
			txHashs = txHashs[:len(txHashs)-1]
		}
		// Concatenate each adjacent pair and hash it.
		var next [][]byte
		for i := 0; i < len(txHashs); i += 2 {
			var merge []byte
			merge = append(merge, txHashs[i]...)
			merge = append(merge, txHashs[i+1]...)
			h := sha256.Sum256(merge)
			next = append(next, h[:])
		}
		txHashs = next
	}
	// 3. Fold any stashed single nodes into the final hash (if present).
	if len(alones) > 0 {
		result := append([]byte{}, txHashs[0]...)
		for _, elem := range alones {
			result = append(result, elem...)
		}
		resHash := sha256.Sum256(result)
		return resHash[:]
	}
	return txHashs[0]
}
// Serialize gob-encodes a block into a byte slice, panicking on encoder errors.
func Serialize(block *Block) []byte {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(block); err != nil {
		log.Panic(err)
	}
	return buf.Bytes()
}
// DeSerialize gob-decodes data into the given block, panicking on decoder errors.
func DeSerialize(data []byte, block *Block) {
	dec := gob.NewDecoder(bytes.NewReader(data))
	if err := dec.Decode(block); err != nil {
		log.Panic(err)
	}
}
// Uint64ToByte encodes num as its fixed 8-byte big-endian representation.
func Uint64ToByte(num uint64) []byte {
	// binary.Write emits the fixed-size big-endian encoding of num into the
	// buffer (see encoding/binary for the conversion rules; struct fields
	// named with '_' would be zeroed, irrelevant for a plain uint64).
	var out bytes.Buffer
	if err := binary.Write(&out, binary.BigEndian, num); err != nil {
		log.Panic(err)
	}
	return out.Bytes()
}
|
// Jamar Flowers
// golang simple server 1
// Project: create simple go server to send response as hello world to browser
// 5/27/18
package main
import (
"io"
"net/http"
)
// hello is the response function registered on "/" by main; it writes a
// fixed greeting to every request (the request itself is unused).
func hello(w http.ResponseWriter, r *http.Request) {
	io.WriteString(w, "Hello Celestial Body!")
}
// main registers the hello handler on its own mux and serves on
// 0.0.0.0:8080 (bound to all interfaces so the server is reachable when
// hosted on C9).
func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", hello)
	// BUG FIX: ListenAndServe's error (e.g. port already in use) was
	// silently discarded; surface it instead of exiting cleanly.
	if err := http.ListenAndServe("0.0.0.0:8080", mux); err != nil {
		panic(err)
	}
}
|
package routes
import (
"fmt"
"io"
"github.com/complyue/ddgo/pkg/dbc"
"github.com/complyue/ddgo/pkg/livecoll"
"github.com/complyue/hbigo/pkg/errors"
"github.com/globalsign/mgo"
"github.com/globalsign/mgo/bson"
"github.com/golang/glog"
)
// coll returns the mongodb collection backing waypoint documents.
func coll() *mgo.Collection {
	db := dbc.DB()
	return db.C("waypoint")
}
// WaypointCollection is the in-memory storage of all waypoints of a particular
// tenant. This data set should have a small footprint, small enough to be
// fully kept in memory.
type WaypointCollection struct {
	livecoll.HouseKeeper
	Tid string
	// this is the primary index to locate a waypoint by tid+seq
	bySeq map[int]*Waypoint
}
// Waypoint is a single waypoint; the in-memory form carries no tid (see
// wpForDb for the persisted shape).
type Waypoint struct {
	Id bson.ObjectId `json:"_id" bson:"_id"`
	// increase only seq within scope of a tenant
	Seq int `json:"seq"`
	Label string `json:"label"`
	X float64 `json:"x"`
	Y float64 `json:"y"`
}
// GetID returns the waypoint's mongodb object id (livecoll.Member contract).
func (wp *Waypoint) GetID() interface{} {
	id := wp.Id
	return id
}
// String renders the waypoint with coordinates, delegating to Format via %+v.
func (wp *Waypoint) String() string {
	s := fmt.Sprintf("%+v", wp)
	return s
}
// Format implements fmt.Formatter: %v prints the label and %+v additionally
// appends the coordinates; other verbs print nothing.
func (wp *Waypoint) Format(s fmt.State, verb rune) {
	if verb != 'v' {
		return
	}
	io.WriteString(s, fmt.Sprintf("%s", wp.Label))
	if s.Flag('+') {
		io.WriteString(s, fmt.Sprintf("@(%0.1f,%0.1f)", wp.X, wp.Y))
	}
}
var (
	// wpCollection is the single tenant's live collection; nil until the
	// first ensureLoadedFor call succeeds.
	wpCollection *WaypointCollection
)
// ensureLoadedFor makes sure the in-memory waypoint collection for tenant
// tid is loaded, loading the full list from mongodb on first use. The
// process serves exactly one tenant: once stuck to a tid, requests for any
// other tenant are logged and rejected.
func ensureLoadedFor(tid string) error {
	// sync is not strictly necessary for load, as worst scenario is to load
	// more than once, while correctness not violated.
	if wpCollection != nil {
		// already have a full list loaded
		if tid != wpCollection.Tid {
			// should be coz of malfunctioning of service router, log and deny service
			err := errors.New(fmt.Sprintf(
				"Waypoint service already stuck to [%s], not serving [%s]!",
				wpCollection.Tid, tid,
			))
			glog.Error(err)
			return err
		}
		return nil
	}
	// the first time serving a tenant, load full list and stick to this tid
	var loadingList []Waypoint
	err := coll().Find(bson.M{"tid": tid}).All(&loadingList)
	if err != nil {
		glog.Error(err)
		return err
	}
	optimalSize := 2 * len(loadingList)
	if optimalSize < 200 {
		optimalSize = 200
	}
	// DEAD-CODE FIX: the early return above guarantees wpCollection == nil
	// here, so the old "inherit subscribers from an existing housekeeper"
	// branch could never run and has been removed.
	hk := livecoll.NewHouseKeeper()
	loadingColl := &WaypointCollection{
		HouseKeeper: hk,
		Tid: tid,
		bySeq: make(map[int]*Waypoint, optimalSize),
	}
	memberList := make([]livecoll.Member, len(loadingList))
	for i, wpo := range loadingList {
		// the loop var is a fixed value variable of Waypoint struct,
		// make a local copy and take pointer for collection storage.
		wpCopy := wpo
		memberList[i] = &wpCopy
		loadingColl.bySeq[wpo.Seq] = &wpCopy
	}
	hk.Load(memberList)
	wpCollection = loadingColl // only set globally after successfully loaded at all
	return nil
}
// WaypointsSnapshot is the snapshot of all waypoints of a specific tenant,
// tagged with the collection's concurrent change number at capture time.
type WaypointsSnapshot struct {
	Tid string
	CCN int
	Waypoints []Waypoint
}
// FetchWaypoints returns a value-copied snapshot (with its concurrent change
// number) of all waypoints for the tenant, panicking on load failure.
func FetchWaypoints(tid string) *WaypointsSnapshot {
	if err := ensureLoadedFor(tid); err != nil {
		panic(err) // err has been logged
	}
	// todo make FetchAll return values instead of pointers, to avoid dirty value reads
	ccn, wps := wpCollection.FetchAll()
	copied := make([]Waypoint, len(wps))
	for i, wp := range wps {
		copied[i] = *(wp.(*Waypoint))
	}
	return &WaypointsSnapshot{
		Tid: tid,
		CCN: ccn,
		Waypoints: copied,
	}
}
// FetchWaypoints exposes the snapshot fetch to service peers.
// this service method has rpc style, with err-out converted to panic,
// which will induce forceful disconnection
func (ctx *serviceContext) FetchWaypoints(tid string) *WaypointsSnapshot {
	return FetchWaypoints(tid)
}
// wpDelegate relays waypoint collection events to one subscribed service
// connection.
type wpDelegate struct {
	ctx *serviceContext
}
// SubscribeWaypoints registers this connection to receive waypoint change
// events for the tenant, loading the collection first if needed.
func (ctx *serviceContext) SubscribeWaypoints(tid string) {
	if err := ensureLoadedFor(tid); err != nil {
		panic(err)
	}
	wpCollection.Subscribe(wpDelegate{ctx})
}
// Subscribed implements the livecoll delegate; the event is not relayed
// over the hbi wire, and the subscription continues (stop == false).
func (dele wpDelegate) Subscribed() (stop bool) {
	return false
}
// Epoch relays a collection epoch (full-refresh) event to the peer, and
// stops the subscription once the connection has been cancelled.
func (dele wpDelegate) Epoch(ccn int) (stop bool) {
	ctx := dele.ctx
	if ctx.Cancelled() {
		return true
	}
	ctx.MustPoToPeer().Notif(fmt.Sprintf(`
WpEpoch(%#v)
`, ccn))
	return false
}
// MemberCreated relays a waypoint-created event (with the new waypoint as
// BSON payload) to the peer; stops once the connection is cancelled.
func (dele wpDelegate) MemberCreated(ccn int, eo livecoll.Member) (stop bool) {
	ctx := dele.ctx
	if ctx.Cancelled() {
		return true
	}
	wp := eo.(*Waypoint)
	ctx.MustPoToPeer().NotifBSON(fmt.Sprintf(`
WpCreated(%#v)
`, ccn), wp, "&Waypoint{}")
	return false
}
// MemberUpdated relays a waypoint-updated event (with the changed waypoint
// as BSON payload) to the peer; stops once the connection is cancelled.
func (dele wpDelegate) MemberUpdated(ccn int, eo livecoll.Member) (stop bool) {
	ctx := dele.ctx
	if ctx.Cancelled() {
		return true
	}
	wp := eo.(*Waypoint)
	ctx.MustPoToPeer().NotifBSON(fmt.Sprintf(`
WpUpdated(%#v)
`, ccn), wp, "&Waypoint{}")
	return false
}
// MemberDeleted relays a waypoint-deleted event (the id as BSON payload) to
// the peer; stops once the connection is cancelled.
func (dele wpDelegate) MemberDeleted(ccn int, id interface{}) (stop bool) {
	ctx := dele.ctx
	if ctx.Cancelled() {
		return true
	}
	ctx.MustPoToPeer().NotifBSON(fmt.Sprintf(`
WpDeleted(%#v,%#v)
`, ccn), id, "&Waypoint{}")
	return false
}
// individual in-memory waypoint objects do not store the tid, tid only
// meaningful for a waypoint collection. however when stored as mongodb
// documents, the tid field needs to present. so here's the struct, with an
// in-memory wp object embedded, to be inlined when marshaled to bson (for mongo).
type wpForDb struct {
	Tid string `bson:"tid"`
	Waypoint `bson:",inline"`
}
// AddWaypoint creates a new waypoint at (x, y) for the tenant: it assigns a
// tenant-wide seq and a rule-derived label, persists to mongodb first, then
// updates the in-memory index and publishes a Created event.
func AddWaypoint(tid string, x, y float64) error {
	if err := ensureLoadedFor(tid); err != nil {
		return err
	}
	seq := 1 + len(wpCollection.bySeq) // assign tenant wide unique seq
	label := fmt.Sprintf("#%d#", seq)  // label with some rules
	record := wpForDb{tid, Waypoint{
		Id: bson.NewObjectId(),
		Seq: seq, Label: label,
		X: x, Y: y,
	}}
	// write into backing storage, the db
	if err := coll().Insert(&record); err != nil {
		return err
	}
	// add to in-memory collection and index, after successful db insert
	wp := &record.Waypoint
	wpCollection.bySeq[record.Seq] = wp
	wpCollection.Created(wp)
	return nil
}
// AddWaypoint exposes waypoint creation to service peers.
// this service method has async style, successful result will be published
// as an event asynchronously
func (ctx *serviceContext) AddWaypoint(tid string, x, y float64) error {
	return AddWaypoint(tid, x, y)
}
// MoveWaypoint repositions an existing waypoint (identified by id, with seq
// as a consistency check) to (x, y): db is updated first, then the
// in-memory value, and an Updated event is published.
func MoveWaypoint(tid string, seq int, id string, x, y float64) error {
	if err := ensureLoadedFor(tid); err != nil {
		return err
	}
	mwp, ok := wpCollection.Read(bson.ObjectIdHex(id))
	if !ok || mwp == nil {
		// IDIOM FIX (staticcheck S1028): fmt.Errorf replaces
		// errors.New(fmt.Sprintf(...)); message text unchanged.
		return fmt.Errorf("Waypoint seq=[%v], id=[%s] not exists for tid=%s", seq, id, tid)
	}
	wp := mwp.(*Waypoint)
	if wp.Seq != seq {
		return fmt.Errorf("Waypoint id=[%s], seq mismatch [%v] vs [%v]", id, seq, wp.Seq)
	}
	// update backing storage, the db
	if err := coll().Update(bson.M{
		"tid": tid, "_id": wp.Id,
	}, bson.M{
		"$set": bson.M{"x": x, "y": y},
	}); err != nil {
		return err
	}
	// update in-memory value, after successful db update
	wp.X, wp.Y = x, y
	wpCollection.Updated(wp)
	return nil
}
// MoveWaypoint exposes waypoint repositioning to service peers.
// this service method has async style, successful result will be published
// as an event asynchronously
func (ctx *serviceContext) MoveWaypoint(
	tid string, seq int, id string, x, y float64,
) error {
	return MoveWaypoint(tid, seq, id, x, y)
}
|
package leetcode
import "testing"
// TestPivotIndex checks one input with a valid pivot and one without.
func TestPivotIndex(t *testing.T) {
	cases := []struct {
		nums []int
		want int
	}{
		{[]int{1, 7, 3, 6, 5, 6}, 3},
		{[]int{1, 2, 3}, -1},
	}
	for _, c := range cases {
		if pivotIndex(c.nums) != c.want {
			t.Fatal()
		}
	}
}
|
package feed
// Feeds is a single feed item as exposed over the JSON API.
type Feeds struct {
	Id int `json:"id"` // feed identifier
	Txt string `json:"txt"` // feed text content
}
|
package spider
import (
"context"
"errors"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"go.uber.org/zap"
)
// httpResponseError reports a non-200 HTTP status returned by a fetch.
type httpResponseError struct {
	statusCode int
}

// Error renders the status code, satisfying the error interface.
func (e httpResponseError) Error() string {
	msg := "http response error: " + strconv.Itoa(e.statusCode)
	return msg
}
// Requester is something that can make a request.
type Requester interface {
	// Request fetches the body at uri, honoring ctx for cancellation.
	Request(ctx context.Context, uri *url.URL) ([]byte, error)
	// SetUserAgent sets the User-Agent header used for subsequent requests.
	SetUserAgent(agent string)
}
//go:generate mockery -name Requester -case underscore
// client is a Requester backed by a net/http client.
type client struct {
	client *http.Client // underlying HTTP client used for all requests
	logger *zap.Logger // request logging
	userAgent string // value sent as the User-Agent header
}
// SetUserAgent records the User-Agent header value used by Request.
//
// BUG FIX: the receiver was a value (func (c client)), so the assignment
// mutated a throwaway copy and the configured agent was silently lost.
// A pointer receiver makes the mutation stick; note that only *client now
// satisfies the Requester interface.
func (c *client) SetUserAgent(agent string) {
	c.userAgent = agent
}
// Request performs a GET of uri with the configured User-Agent and returns
// the response body, failing on transport errors or non-200 statuses.
func (c client) Request(ctx context.Context, uri *url.URL) ([]byte, error) {
	if uri == nil {
		return nil, errors.New("must provide uri to request")
	}
	c.logger.Info("Fetching URL", zap.String("url", uri.String()))
	// Ignore this error as it's not possible to trigger with a valid URL and a constant method.
	req, _ := http.NewRequest(http.MethodGet, uri.String(), nil)
	req = req.WithContext(ctx)
	req.Header.Set("User-Agent", c.userAgent)
	res, err := c.client.Do(req)
	if err != nil {
		return nil, err
	}
	// BUG FIX: the response body was never closed, leaking the underlying
	// connection on every request.
	defer res.Body.Close()
	if res.StatusCode != 200 {
		return nil, httpResponseError{
			statusCode: res.StatusCode,
		}
	}
	data, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, err
	}
	return data, nil
}
|
package aoc2016
import (
"testing"
aoc "github.com/janreggie/aoc/internal"
"github.com/stretchr/testify/assert"
)
// Test_newIpv7Address checks that parsing splits an address into its
// bracketed and unbracketed segments, in order.
func Test_newIpv7Address(t *testing.T) {
	assert := assert.New(t)
	input := "abba[mnop]qrst"
	want := ipv7Address{
		{"abba", false},
		{"mnop", true},
		{"qrst", false},
	}
	assert.Equal(want, newIpv7Address(input), input)
}
// Test_ipv7Address_supportTLS verifies ABBA detection outside brackets and
// rejection when the ABBA sits inside brackets, is a palindrome of equal
// letters, or is absent.
func Test_ipv7Address_supportTLS(t *testing.T) {
	assert := assert.New(t)
	expectations := map[string]bool{
		"abba[mnop]qrst": true,
		"abcd[bddb]xyyx": false,
		"aaaa[qwer]tyui": false,
		"ioxxoj[asdfgh]zxcvbn": true,
	}
	for input, want := range expectations {
		assert.Equal(want, newIpv7Address(input).supportTLS(), input)
	}
}
// Test_ipv7Address_supportSSL verifies ABA/BAB correspondence between the
// supernet and hypernet segments.
func Test_ipv7Address_supportSSL(t *testing.T) {
	assert := assert.New(t)
	expectations := map[string]bool{
		"aba[bab]xyz": true,
		"xyx[xyx]xyx": false,
		"aaa[kek]eke": true,
		"zazbz[bzb]cdb": true,
	}
	for input, want := range expectations {
		assert.Equal(want, newIpv7Address(input).supportSSL(), input)
	}
}
func TestDay07(t *testing.T) {
assert := assert.New(t)
testCase := aoc.TestCase{
Details: "Y2016D07 my input",
Input: day07myInput,
Result1: `110`,
Result2: `242`,
}
testCase.Test(Day07, assert)
}
// BenchmarkDay07 measures solving day 7 against my puzzle input.
func BenchmarkDay07(b *testing.B) {
	aoc.Benchmark(Day07, b, day07myInput)
}
|
package application
import (
"net/http"
md "github.com/ebikode/eLearning-core/model"
tr "github.com/ebikode/eLearning-core/translation"
ut "github.com/ebikode/eLearning-core/utils"
validation "github.com/go-ozzo/ozzo-validation"
)
// ApplicationService provides application operations: reads by id, user,
// course and course owner, paged listing, plus create/update which also
// return a translation parameter describing any failure.
type ApplicationService interface {
	GetApplication(uint) *md.Application
	GetUserApplications(string) []*md.Application
	GetApplicationsByCourse(int) []*md.Application
	GetApplicationsByCourseOwner(string) []*md.Application
	GetApplications(int, int) []*md.Application
	CreateApplication(md.Application) (*md.Application, tr.TParam, error)
	UpdateApplication(*md.Application) (*md.Application, tr.TParam, error)
}
// service implements ApplicationService on top of a repository.
type service struct {
	aRepo ApplicationRepository
}
// NewService creates an application service backed by the given repository.
func NewService(
	aRepo ApplicationRepository,
) ApplicationService {
	svc := service{aRepo: aRepo}
	return &svc
}
// GetApplication fetches a single application by its id.
func (s *service) GetApplication(id uint) *md.Application {
	return s.aRepo.Get(id)
}
// GetApplications returns one page of applications from the repository.
//
// @userType == admin | customer
func (s *service) GetApplications(page, limit int) []*md.Application {
	return s.aRepo.GetAll(page, limit)
}
// GetUserApplications lists the applications submitted by the given user.
func (s *service) GetUserApplications(userID string) []*md.Application {
	return s.aRepo.GetByUser(userID)
}
// GetApplicationsByCourse lists the applications made to a given course.
func (s *service) GetApplicationsByCourse(courseID int) []*md.Application {
	return s.aRepo.GetByCourse(courseID)
}
// GetApplicationsByCourseOwner lists applications to courses owned by the
// given user.
func (s *service) GetApplicationsByCourseOwner(userID string) []*md.Application {
	return s.aRepo.GetByCourseOwner(userID)
}
// CreateApplication stores a new application after stamping it with a
// generated reference number; on failure it also returns a translation
// parameter describing the error for the API layer.
func (s *service) CreateApplication(c md.Application) (*md.Application, tr.TParam, error) {
	// Generate and attach a reference number before persisting.
	c.ReferenceNo = ut.RandomBase64String(8, "elref")
	application, err := s.aRepo.Store(c)
	if err == nil {
		return application, tr.TParam{}, nil
	}
	tParam := tr.TParam{
		Key: "error.resource_creation_error",
		TemplateData: nil,
		PluralCount: nil,
	}
	return application, tParam, err
}
// UpdateApplication persists changes to an existing application; on failure
// it also returns a translation parameter describing the error.
func (s *service) UpdateApplication(c *md.Application) (*md.Application, tr.TParam, error) {
	application, err := s.aRepo.Update(c)
	if err == nil {
		return application, tr.TParam{}, nil
	}
	tParam := tr.TParam{
		Key: "error.resource_update_error",
		TemplateData: nil,
		PluralCount: nil,
	}
	return application, tParam, err
}
// Validate checks application creation input; currently only CourseID is
// validated, using the shared request-aware ID rule.
func Validate(application Payload, r *http.Request) error {
	return validation.ValidateStruct(&application,
		validation.Field(&application.CourseID, ut.IDRule(r)...),
	)
}
// ValidateUpdates checks application update input. No field rules are
// registered here yet, so it currently always succeeds.
func ValidateUpdates(application Payload, r *http.Request) error {
	return validation.ValidateStruct(&application)
}
|
package converters
import (
"encoding/json"
"errors"
"github.com/fintechstudios/ververica-platform-k8s-operator/api/v1beta1"
vpAPI "github.com/fintechstudios/ververica-platform-k8s-operator/appmanager-api-client"
)
// DeploymentMetadataToNative converts a Ververica Platform deployment's
// metadata into its native K8s representation by round-tripping through JSON.
func DeploymentMetadataToNative(deploymentMetadata vpAPI.DeploymentMetadata) (v1beta1.VpMetadata, error) {
	var vpMetadata v1beta1.VpMetadata
	metadataJSON, err := json.Marshal(deploymentMetadata)
	if err != nil {
		return vpMetadata, errors.New("cannot encode Deployment Metadata: " + err.Error())
	}
	// now unmarshal it into the platform model
	if err = json.Unmarshal(metadataJSON, &vpMetadata); err != nil {
		// BUG FIX: this branch decodes, but the message said "encode".
		return vpMetadata, errors.New("cannot decode VpDeployment Metadata: " + err.Error())
	}
	return vpMetadata, nil
}
// DeploymentMetadataFromNative converts a native K8s VpDeployment's metadata
// to the Ververica Platform's representation by round-tripping through JSON.
func DeploymentMetadataFromNative(vpMetadata v1beta1.VpMetadata) (vpAPI.DeploymentMetadata, error) {
	var deploymentMetadata vpAPI.DeploymentMetadata
	vpMetadataJSON, err := json.Marshal(vpMetadata)
	if err != nil {
		return deploymentMetadata, errors.New("cannot encode VpDeployment Metadata: " + err.Error())
	}
	// now unmarshal it into the platform model
	if err = json.Unmarshal(vpMetadataJSON, &deploymentMetadata); err != nil {
		// BUG FIX: this branch decodes, but the message said "encode".
		return deploymentMetadata, errors.New("cannot decode Deployment Metadata: " + err.Error())
	}
	return deploymentMetadata, nil
}
|
package config
import (
"io/ioutil"
)
// Config holds every setting read from a Cargofile, grouped by INI section.
type Config struct {
	// Docker_Host mirrors the [docker-host] section.
	Docker_Host struct {
		Ssh_Config string
		Host       string
	}
	// Docker_Container mirrors the [docker-container] section.
	Docker_Container struct {
		Image      string
		Mount      string
		Before_All string
		Command    string
		Filter     string
	}
	// Cargo mirrors the [cargo] section.
	Cargo struct {
		Debug       bool
		GroupBy     string
		Concurrency int
		User        string
		WorkDir     string
	}
}

// DefaultConfig returns a Config pre-populated with the built-in defaults.
// Fields not assigned here keep their zero values.
func DefaultConfig() *Config {
	cfg := new(Config)
	cfg.Docker_Host.Ssh_Config = "~/.ssh/config"
	cfg.Docker_Host.Host = "default"
	cfg.Cargo.GroupBy = "file-size"
	cfg.Cargo.Concurrency = 1
	cfg.Cargo.User = "cargo"
	cfg.Cargo.WorkDir = "/tmp/cargo"
	return cfg
}
// CopyFromTemplate writes a starter Cargofile into the current working
// directory with mode 0644, overwriting any existing file of that name.
func CopyFromTemplate() error {
	contents := []byte(template())
	return ioutil.WriteFile("Cargofile", contents, 0644)
}
// template returns the contents of the starter Cargofile written by
// CopyFromTemplate. Lines beginning with ';' are INI comments documenting
// the defaults; the blank settings are meant to be filled in by the user.
func template() string {
	return `; Cargofile
[docker-host]
; ssh-config = ~/.ssh/config
; host = default
[docker-container]
image = ; container image
mount = ; mount volume
command = ; docker run command
[cargo]
; groupby = file-size
; user = cargo
; workdir = /tmp/cargo
concurrency = 2 ; number of concurrency
`
}
|
package instrumented
import (
"context"
"github.com/prometheus/client_golang/prometheus"
pubsub "github.com/utilitywarehouse/go-pubsub"
)
// ConcurrentMessageSource is an instrumented pubsub MessageSource.
// The counter vector will have the labels "status" and "topic".
type ConcurrentMessageSource struct {
	impl    pubsub.ConcurrentMessageSource // wrapped source that does the real consuming
	counter *prometheus.CounterVec         // consumed-message counter; labels: status, topic
	topic   string                         // topic label value applied on every increment
}
// NewDefaultConcurrentMessageSource wraps source with the default
// instrumentation: a counter named "messages_consumed_total".
func NewDefaultConcurrentMessageSource(source pubsub.ConcurrentMessageSource, topic string) pubsub.ConcurrentMessageSource {
	defaultOpts := prometheus.CounterOpts{
		Name: "messages_consumed_total",
		Help: "The total count of messages consumed",
	}
	return NewConcurrentMessageSource(source, defaultOpts, topic)
}
// NewConcurrentMessageSource wraps source in instrumentation using a counter
// built from counterOpts with "status" and "topic" labels. If an identical
// counter is already registered it is reused; any other registration error
// panics. Both status series are pre-initialised to zero so they exist
// before the first message arrives.
func NewConcurrentMessageSource(
	source pubsub.ConcurrentMessageSource,
	counterOpts prometheus.CounterOpts,
	topic string) pubsub.ConcurrentMessageSource {

	counter := prometheus.NewCounterVec(counterOpts, []string{"status", "topic"})
	if err := prometheus.Register(counter); err != nil {
		are, ok := err.(prometheus.AlreadyRegisteredError)
		if !ok {
			panic(err)
		}
		counter = are.ExistingCollector.(*prometheus.CounterVec)
	}

	for _, status := range []string{"error", "success"} {
		counter.WithLabelValues(status, topic).Add(0)
	}

	return &ConcurrentMessageSource{impl: source, counter: counter, topic: topic}
}
// ConsumeMessages delegates to the wrapped source with the handler replaced
// by an instrumented one, so every processed message is counted.
func (ims *ConcurrentMessageSource) ConsumeMessages(
	ctx context.Context, handler pubsub.ConsumerMessageHandler, onError pubsub.ConsumerErrorHandler) error {
	return ims.impl.ConsumeMessages(ctx, newMsgHandler(handler, ims.counter, ims.topic), onError)
}
// ConsumeMessagesConcurrently delegates to the wrapped source with the
// handler replaced by an instrumented one, so every processed message is
// counted.
func (ims *ConcurrentMessageSource) ConsumeMessagesConcurrently(ctx context.Context, handler pubsub.ConsumerMessageHandler, onError pubsub.ConsumerErrorHandler) error {
	return ims.impl.ConsumeMessagesConcurrently(ctx, newMsgHandler(handler, ims.counter, ims.topic), onError)
}
// Status returns the status of this source, or an error if the status could not be determined.
// It delegates directly to the wrapped source; no instrumentation is applied.
func (ims *ConcurrentMessageSource) Status() (*pubsub.Status, error) {
	return ims.impl.Status()
}
|
package main
import (
"fmt"
"os"
)
// Build metadata printed by the -v/--version flag. These remain empty unless
// set at build time (presumably via -ldflags — confirm in the build script).
var (
	version   string
	buildTime string
)
// app bundles the runtime configuration and the parsed command-line options
// for the wallpaper tool.
type app struct {
	// config holds values loaded by newConfig.
	config struct {
		ConfigPath string // location of the configuration file
		Token      string // API token (presumably for the Unsplash API — confirm)
	}
	// opts are the go-flags-style command-line options (see struct tags).
	opts struct {
		Version     bool   `short:"v" long:"version" description:"Displays version and build info"`
		Credit      bool   `short:"c" long:"credit" description:"Displays the current wallpaper author and link"`
		Force       bool   `short:"f" long:"force" description:"Forces a wallpaper refresh even when in the same time span"`
		Time        string `short:"t" long:"time" description:"Specify a particular time of day" choice:"morning" choice:"noon" choice:"afternoon" choice:"evening" choice:"night"`
		Search      string `short:"s" long:"search" description:"Additional text query to be added while searching"`
		Quality     int    `short:"q" long:"quality" description:"The downloaded image quality" default:"75"`
		Extension   string `short:"e" long:"ext" description:"The downloaded image extension" default:"jpg"`
		ScreenWidth string `short:"w" long:"width" description:"The downloaded image width" default:"1920"`
		Endpoint    string `short:"a" long:"api" description:"The API endpoint" default:"https://api.unsplash.com/photos/random"`
		DBUSEnv     string `short:"d" long:"dbus" description:"Set this variable to the value of $DBUS_SESSION_BUS_ADDRESS; only needed when running through cronjob" `
	}
}
// main parses the command line, then handles exactly one action per run:
// print version info, show wallpaper credits, or (when forced or due)
// refresh the wallpaper. Any failure exits with status 1.
func main() {
	a := &app{}
	if err := a.parseOpts(); err != nil {
		os.Exit(1)
	}

	if a.opts.Version {
		fmt.Printf("Version:\t%s\n", version)
		fmt.Printf("Build time:\t%s\n", buildTime)
		os.Exit(0)
	}

	if err := a.newConfig(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	if a.opts.Credit {
		if err := a.showCredits(); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		os.Exit(0)
	}

	if a.opts.Force || a.shouldRefresh() {
		if err := a.setWallpaper(); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
	}
	os.Exit(0)
}
|
package omsi
import (
"fmt"
"testing"
)
// Test_New verifies that New initialises the backing map.
func Test_New(t *testing.T) {
	if New().Map == nil {
		t.Error("map failed to initialize after calling New.")
	}
}
// Test_Set verifies that Set stores the value under the given key.
func Test_Set(t *testing.T) {
	om := New()
	om.Set("cat", "funny")
	if om.Map["cat"].value != "funny" {
		t.Error("Set failed to set value")
	}
}
// Test_Get verifies that Get reports existence and returns the stored value.
func Test_Get(t *testing.T) {
	om := New()
	om.Set("cat", "funny")

	value, exists := om.Get("cat")
	if !exists {
		t.Error("Get failed to retrieve value")
	}
	if value != "funny" {
		t.Error("Get failed to get correct value")
	}
}
// Test_Delete verifies that Delete removes the entry and re-links the
// doubly linked list around the removed node.
// Fixes: message typos ("Deleted failed" -> "Delete failed", "doubley" ->
// "doubly") and replaces fmt.Sprintf+t.Error with idiomatic t.Errorf.
func Test_Delete(t *testing.T) {
	om := New()
	om.Set("cat", "funny")
	om.Set("dog", "silly")
	om.Set("parrot", "green")
	om.Delete("dog")

	dog, exists := om.Get("dog")
	if exists {
		t.Error("Delete failed to remove value")
	}
	if dog != nil {
		t.Errorf("Delete failed to remove value; expected nil, got %v", dog)
	}
	if om.startLink.next != om.endLink {
		t.Error("Delete failed to associate doubly linked list correctly")
	}
	if om.endLink.previous != om.startLink {
		t.Error("Delete failed to associate doubly linked list correctly")
	}
}
// Test_Pop verifies that Pop yields the most recently inserted key/value.
// Fixes: the value-mismatch message incorrectly said "correct key"; it now
// says "correct value". Also uses idiomatic t.Errorf.
func Test_Pop(t *testing.T) {
	om := New()
	om.Set("cat", "funny")
	om.Set("dog", "silly")
	om.Set("parrot", "green")

	key, value, err := om.Pop()
	if err != nil {
		t.Errorf("Pop returned error: %s", err.Error())
	}
	if key != "parrot" {
		t.Errorf("Pop failed to yield correct key; expected 'parrot', got: %s", key)
	}
	if value != "green" {
		t.Errorf("Pop failed to yield correct value; expected 'green', got: %s", value)
	}
}
// Test_Keys verifies that Keys returns every key in insertion order.
func Test_Keys(t *testing.T) {
	om := New()
	om.Set("cat", "funny")
	om.Set("dog", "silly")
	om.Set("parrot", "green")

	expected := []string{"cat", "dog", "parrot"}
	keys := om.Keys()
	if len(keys) != len(expected) {
		t.Error(fmt.Sprintf("Keys returned incorrect number of elements; expected %d got: %d", len(expected), len(keys)))
	}
	for i, key := range keys {
		if key != expected[i] {
			t.Error("Keys returned out of order indexed slice of keys")
		}
	}
}
// Test_Values verifies that Values returns every value in insertion order.
func Test_Values(t *testing.T) {
	om := New()
	om.Set("cat", "funny")
	om.Set("dog", "silly")
	om.Set("parrot", "green")

	expected := []string{"funny", "silly", "green"}
	values := om.Values()
	if len(values) != len(expected) {
		t.Error(fmt.Sprintf("Values returned incorrect number of elements; expected %d got: %d", len(expected), len(values)))
	}
	for i, value := range values {
		if value != expected[i] {
			t.Error("Values returned out of order indexed slice of values")
		}
	}
}
|
package model
import (
"errors"
)
// Room manages one game (the board) and its players.
type Room struct {
	Id             int64        `json:"id" xorm:"pk autoincr int(11)"`
	PlayerId       int64        `json:"player_id" xorm:"int(11)"` // room owner
	PlayerStatus   PlayerStatus `json:"player_status" xorm:"json"`
	TimeToPlayerId int64        `json:"timeto_player_id" xorm:"int(11)"` // whose turn it is to move
	WinPlayerId    int64        `json:"win_player_id" xorm:"int(11)"`    // the winning player
	Status         RoomStatus   `json:"status" xorm:"tinyint(1)"`        // game state
	TablePieces    TablePieces  `json:"table_pieces" xorm:"json"`        // pieces currently on the board, both sides
}
// RoomStatus enumerates the lifecycle states of a Room.
type RoomStatus int64

const (
	WaitPeopleStatus RoomStatus = 1 // waiting for players to join
	WaitReadStatus   RoomStatus = 2 // waiting for players to ready up
	PlayingStatus    RoomStatus = 3 // game in progress
	EndStatus        RoomStatus = 4 // game finished
)
// PlayerStatus lists the players in a room. The first player is the blue
// side, the second the red side.
type PlayerStatus []*PlayerStatusOne

// PlayerStatusOne records one player's seat and readiness within a room.
type PlayerStatusOne struct {
	PlayerId int64  `json:"player_id"`
	Ready    bool   `json:"ready"`
	Camp     string `json:"camp"` // "p1" or "p2"; the first player to enter the room is p1
}
// IsP1 reports whether this player occupies the first (blue) seat.
func (po PlayerStatusOne) IsP1() bool {
	switch po.Camp {
	case "p1", "blue":
		return true
	}
	return false
}
// IsAllReady reports whether the room holds exactly two players and every
// one of them has marked themselves ready.
func (ps PlayerStatus) IsAllReady() bool {
	if len(ps) != 2 {
		return false
	}
	allReady := true
	for _, p := range ps {
		allReady = allReady && p.Ready
	}
	return allReady
}
// IsFull reports whether the room already has its two players.
func (p PlayerStatus) IsFull() bool {
	return len(p) == 2
}
// Join adds playerId to the room in a not-ready state and returns the seat.
// If the player is already present the existing seat is returned; when the
// room is full an error is returned.
//
// BUG FIX: the camps were inverted — the first joiner used to receive "p2"
// and the second "p1", contradicting the documented convention that the
// first player entering the room is p1 (and the IsP1/Next logic built on it).
func (ps *PlayerStatus) Join(playerId int64) (s *PlayerStatusOne, err error) {
	s, exist := ps.Get(playerId)
	if exist {
		return
	}
	if ps.IsFull() {
		err = errors.New("房间人数已满")
		return
	}
	// First joiner (empty room) is p1; the second joiner is p2.
	camp := "p1"
	if len(*ps) == 1 {
		camp = "p2"
	}
	s = &PlayerStatusOne{
		PlayerId: playerId,
		Ready:    false,
		Camp:     camp,
	}
	*ps = append(*ps, s)
	return
}
// Leave removes playerId from the room; removing an absent player is a no-op.
func (ps *PlayerStatus) Leave(playerId int64) (err error) {
	remaining := PlayerStatus{}
	for _, p := range *ps {
		if p.PlayerId != playerId {
			remaining = append(remaining, p)
		}
	}
	*ps = remaining
	return
}
// Get returns the seat of playerId and whether that player is in the room.
func (ps *PlayerStatus) Get(playerId int64) (*PlayerStatusOne, bool) {
	for i := range *ps {
		if (*ps)[i].PlayerId == playerId {
			return (*ps)[i], true
		}
	}
	return nil, false
}
// GetP1 returns the player seated as camp "p1", if any.
func (ps *PlayerStatus) GetP1() (*PlayerStatusOne, bool) {
	return ps.GetByCamp("p1")
}

// GetP2 returns the player seated as camp "p2", if any.
func (ps *PlayerStatus) GetP2() (*PlayerStatusOne, bool) {
	return ps.GetByCamp("p2")
}
// GetByCamp returns the first player whose camp equals camp, if any.
func (ps *PlayerStatus) GetByCamp(camp string) (*PlayerStatusOne, bool) {
	for _, p := range *ps {
		if p.Camp != camp {
			continue
		}
		return p, true
	}
	return nil, false
}
// Next returns the opponent of playerId, i.e. the player due to move next.
// It fails when playerId is not in the room.
func (ps *PlayerStatus) Next(playerId int64) (po *PlayerStatusOne, err error) {
	current, exist := ps.Get(playerId)
	if !exist {
		return nil, errors.New("not found playerId")
	}
	if current.IsP1() {
		po, _ = ps.GetP2()
		return
	}
	po, _ = ps.GetP1()
	return
}
// Ready marks playerId as ready to start; unknown players are ignored.
func (ps *PlayerStatus) Ready(playerId int64) (err error) {
	one, exist := ps.Get(playerId)
	if exist {
		one.Ready = true
	}
	return
}
|
package contextio
import (
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"github.com/garyburd/go-oauth/oauth"
)
const (
	// GetUsersEndPoint lists the account's users.
	GetUsersEndPoint = "https://api.context.io/lite/users"
	// GetAttachmentsEndPoint fetches one attachment; the %s placeholders are
	// (in order) user id, email account, folder, message id, attachment id.
	GetAttachmentsEndPoint = "https://api.context.io/lite/users/%s/email_accounts/%s/folders/%s/messages/%s/attachments/%s"
)
// User is a Context.IO Lite user together with their connected accounts.
type User struct {
	Created        int            `json:"created"`
	EmailAccounts  []EmailAccount `json:"email_accounts"`
	EmailAddresses []string       `json:"email_addresses"`
	FirstName      string         `json:"first_name"`
	ID             string         `json:"id"`
	LastName       string         `json:"last_name"`
	ResourceURL    string         `json:"resource_url"`
}

// EmailAccount describes one mailbox connection belonging to a User.
type EmailAccount struct {
	AuthenticationType string `json:"authentication_type"`
	Label              string `json:"label"`
	Port               int    `json:"port"`
	ResourceURL        string `json:"resource_url"`
	Server             string `json:"server"`
	Status             string `json:"status"`
	Type               string `json:"type"`
	UseSsl             bool   `json:"use_ssl"`
	Username           string `json:"username"`
}

// Message is a single email message as returned by the Lite API.
//
// NOTE(review): PersonInfo is keyed by two hardcoded sender addresses, which
// looks like it was generated from one sample response — it will not decode
// person info for other senders. Confirm before relying on it.
type Message struct {
	Addresses      EmailAddresses `json:"addresses"`
	Attachments    []Attachment   `json:"attachments"`
	Bodies         []Body         `json:"bodies"`
	EmailMessageID string         `json:"email_message_id"`
	Folders        []string       `json:"folders"`
	InReplyTo      interface{}    `json:"in_reply_to"`
	ListHeaders    struct {
		List_Unsubscribe string `json:"list-unsubscribe"`
	} `json:"list_headers"`
	MessageID  string `json:"message_id"`
	PersonInfo struct {
		Bbcnews_Email_Bbc_Com struct {
			Thumbnail string `json:"thumbnail"`
		} `json:"bbcnews@email.bbc.com"`
		Dmlreturnpath_Gmail_Com struct {
			Thumbnail string `json:"thumbnail"`
		} `json:"dmlreturnpath@gmail.com"`
	} `json:"person_info"`
	ReceivedHeaders []string      `json:"received_headers"`
	References      []interface{} `json:"references"`
	ResourceURL     string        `json:"resource_url"`
	SentAt          float64       `json:"sent_at"`
	Subject         string        `json:"subject"`
}

// Attachment describes one attachment entry of a Message.
type Attachment struct {
	AttachmentID       float64 `json:"attachment_id"`
	BodySection        string  `json:"body_section"`
	ContentDisposition string  `json:"content_disposition"`
	FileName           string  `json:"file_name"`
	Size               float64 `json:"size"`
	Type               string  `json:"type"`
}

// Body describes one body part (e.g. text or HTML) of a Message.
type Body struct {
	BodySection string  `json:"body_section"`
	Size        float64 `json:"size"`
	Type        string  `json:"type"`
}

// EmailAddresses groups the address headers of a Message.
type EmailAddresses struct {
	Bcc     []EmailAddress `json:"bcc"`
	Cc      []EmailAddress `json:"cc"`
	From    []EmailAddress `json:"from"`
	ReplyTo []EmailAddress `json:"reply_to"`
	Sender  []EmailAddress `json:"sender"`
	To      []EmailAddress `json:"to"`
}

// EmailAddress is a single name/address pair.
type EmailAddress struct {
	Email string `json:"email"`
	Name  string `json:"name"`
}

// ContextIO is an authenticated Context.IO API client.
type ContextIO struct {
	key    string        // OAuth consumer key
	secret string        // OAuth consumer secret
	client *oauth.Client // signs outgoing requests
}
// NewContextIO builds a ContextIO client that signs requests with the given
// API key and secret.
func NewContextIO(key, secret string) *ContextIO {
	return &ContextIO{
		key:    key,
		secret: secret,
		client: &oauth.Client{
			Credentials: oauth.Credentials{Token: key, Secret: secret},
		},
	}
}
const (
	// apiHost is the Context.IO API host, used both as the Host header and
	// the request URL host in Do.
	apiHost = `api.context.io`
)
// Do performs an authenticated request against the Context.IO API and
// returns the raw *http.Response; the caller must close response.Body.
//
// The request is assembled by hand (not via http.NewRequest) because q may
// already contain encoded data that must be kept opaque in the URL.
//
// Fixes: removed leftover debug fmt.Print* output, removed a dead error
// check that ran before err could ever be set, and the signing error is now
// wrapped with context.
//
// NOTE(review): the body parameter is accepted but never attached to the
// request — confirm whether that is intentional.
func (c *ContextIO) Do(method, q string, params url.Values, body io.Reader) (response *http.Response, err error) {
	req := &http.Request{
		Method: method,
		Host:   apiHost, // takes precedence over Request.URL.Host
		URL: &url.URL{
			Host:     apiHost,
			Scheme:   "https",
			Opaque:   q,
			RawQuery: params.Encode(),
		},
		Header: http.Header{
			"User-Agent": {"GoContextIO Simple library"},
		},
	}

	// Sign the request with the client's OAuth credentials.
	err = c.client.SetAuthorizationHeader(req.Header, nil, req.Method, req.URL, nil)
	if err != nil {
		return nil, fmt.Errorf("setting authorization header: %v", err)
	}
	return http.DefaultClient.Do(req)
}
// DoJson performs the request via Do and returns the whole response body as
// raw JSON bytes.
//
// BUG FIX: the Do error was previously ignored and response.Body was
// deferred unconditionally, causing a nil-pointer panic whenever the request
// failed. The error is now checked before touching the response.
func (c *ContextIO) DoJson(method, u string, params url.Values, body io.Reader) (j []byte, err error) {
	response, err := c.Do(method, u, params, body)
	if err != nil {
		return nil, err
	}
	defer response.Body.Close()
	return ioutil.ReadAll(response.Body)
}
//func (c *ContextIO) GetUsers(params map[string]string) (users []User, err error) {
// if err != nil {
// return
// }
// defer r.Body.Close()
//
// d := json.NewDecoder(r.Body)
// err = d.Decode(&users)
// if err != nil {
// return
// }
// return
//}
//
//func (c *ContextIO) GetAttachment() (f io.Reader, err error) {
// return
//}
|
package controllers
import (
"errors"
"fmt"
"io/ioutil"
"net"
"reflect"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/cloudnativelabs/kube-router/app/options"
"github.com/cloudnativelabs/kube-router/app/watchers"
"github.com/cloudnativelabs/kube-router/utils"
"github.com/coreos/go-iptables/iptables"
"github.com/golang/glog"
"github.com/mqliang/libipvs"
"github.com/vishvananda/netlink"
"k8s.io/client-go/kubernetes"
)
const (
	KUBE_DUMMY_IF      = "kube-dummy-if"  // dummy link carrying cluster IPs on this node
	IFACE_NOT_FOUND    = "Link not found" // netlink error text matched when a link is missing
	IFACE_HAS_ADDR     = "file exists"    // netlink error text matched when an address already exists
	IPVS_SERVER_EXISTS = "file exists"    // libipvs error text matched when a destination already exists
)

var (
	// h is the shared libipvs handle, initialised in
	// NewNetworkServicesController and used by all IPVS helpers below.
	h libipvs.IPVSHandle
)
// Network services controller enables local node as network service proxy through IPVS/LVS.
// Support only Kuberntes network services of type NodePort, ClusterIP. For each service a
// IPVS service is created and for each service endpoint a server is added to the IPVS service.
// As services and endpoints are updated, network service controller gets the updates from
// the kubernetes api server and syncs the ipvs configuration to reflect state of services
// and endpoints
type NetworkServicesController struct {
	nodeIP        net.IP                // IP of the node this controller runs on
	nodeHostName  string                // kubernetes node name
	syncPeriod    time.Duration         // interval between periodic full syncs
	mu            sync.Mutex            // serialises sync and the update handlers
	serviceMap    serviceInfoMap        // desired services, rebuilt from the watcher cache
	endpointsMap  endpointsInfoMap      // desired endpoints, rebuilt from the watcher cache
	podCidr       string                // pod CIDR of this node (set only when RunRouter is on)
	masqueradeAll bool                  // masquerade all IPVS traffic, not just traffic leaving the pod CIDR
	client        *kubernetes.Clientset // kubernetes API client
}
// internal representation of kubernetes service
type serviceInfo struct {
	clusterIP       net.IP // parsed Spec.ClusterIP
	port            int    // service port
	protocol        string // lowercased protocol string ("tcp" or "udp")
	nodePort        int    // 0 when the service exposes no NodePort
	sessionAffinity bool   // true when SessionAffinity == "ClientIP"
}

// map of all services, with unique service id(namespace name, service name, port) as key
type serviceInfoMap map[string]*serviceInfo

// internal representation of endpoints
type endpointsInfo struct {
	ip   string // endpoint address
	port int    // endpoint port
}

// map of all endpoints, with unique service id(namespace name, service name, port) as key
type endpointsInfoMap map[string][]endpointsInfo
// Run drives the reconciliation loop: it performs one-time iptables/sysctl
// setup, then re-syncs the IPVS configuration every syncPeriod until stopCh
// is closed. Setup failures panic.
func (nsc *NetworkServicesController) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) {
	t := time.NewTicker(nsc.syncPeriod)
	defer t.Stop()
	defer wg.Done()

	glog.Infof("Starting network services controller")

	// enable masquerade rule
	err := ensureMasqueradeIptablesRule(nsc.masqueradeAll, nsc.podCidr)
	if err != nil {
		// NOTE(review): the "%s" in this panic message is never substituted —
		// the error text is concatenated, not formatted.
		panic("Failed to do add masqurade rule in POSTROUTING chain of nat table due to: %s" + err.Error())
	}

	// enable ipvs connection tracking
	err = ensureIpvsConntrack()
	if err != nil {
		panic("Failed to do sysctl net.ipv4.vs.conntrack=1 due to: %s" + err.Error())
	}

	// loop forever unitl notified to stop on stopCh
	for {
		select {
		case <-stopCh:
			glog.Infof("Shutting down network services controller")
			return
		default:
		}

		if watchers.PodWatcher.HasSynced() && watchers.NetworkPolicyWatcher.HasSynced() {
			glog.Infof("Performing periodic syn of the ipvs services and server to reflect desired state of kubernetes services and endpoints")
			nsc.sync()
		} else {
			// NOTE(review): this branch busy-spins until the watchers have
			// synced — there is no sleep before re-checking. Confirm whether
			// a backoff was intended.
			continue
		}

		select {
		case <-stopCh:
			glog.Infof("Shutting down network services controller")
			return
		case <-t.C:
		}
	}
}
// sync rebuilds the desired service/endpoints maps from the watcher caches
// and reconciles the IPVS configuration, serialised by the controller mutex.
func (nsc *NetworkServicesController) sync() {
	nsc.mu.Lock()
	defer nsc.mu.Unlock()
	nsc.serviceMap = buildServicesInfo()
	nsc.endpointsMap = buildEndpointsInfo()
	nsc.syncIpvsServices(nsc.serviceMap, nsc.endpointsMap)
}
// OnEndpointsUpdate handles an endpoints change delivered by the watch API.
// IPVS is re-synced only when the endpoints content actually changed.
func (nsc *NetworkServicesController) OnEndpointsUpdate(endpointsUpdate *watchers.EndpointsUpdate) {
	nsc.mu.Lock()
	defer nsc.mu.Unlock()

	glog.Infof("Received endpoints update from watch API")
	if !(watchers.ServiceWatcher.HasSynced() && watchers.EndpointsWatcher.HasSynced()) {
		glog.Infof("Skipping ipvs server sync as local cache is not synced yet")
		// BUG FIX: execution previously fell through and synced anyway,
		// contradicting the log message above.
		return
	}

	// build new endpoints map to reflect the change
	newEndpointsMap := buildEndpointsInfo()
	if len(newEndpointsMap) != len(nsc.endpointsMap) || !reflect.DeepEqual(newEndpointsMap, nsc.endpointsMap) {
		nsc.endpointsMap = newEndpointsMap
		nsc.syncIpvsServices(nsc.serviceMap, nsc.endpointsMap)
	} else {
		glog.Infof("Skipping ipvs server sync on endpoints update because nothing changed")
	}
}
// OnServiceUpdate handles a service change delivered by the watch API.
// IPVS is re-synced only when the services content actually changed.
func (nsc *NetworkServicesController) OnServiceUpdate(serviceUpdate *watchers.ServiceUpdate) {
	nsc.mu.Lock()
	defer nsc.mu.Unlock()

	glog.Infof("Received service update from watch API")
	if !(watchers.ServiceWatcher.HasSynced() && watchers.EndpointsWatcher.HasSynced()) {
		glog.Infof("Skipping ipvs server sync as local cache is not synced yet")
		// BUG FIX: execution previously fell through and synced anyway,
		// contradicting the log message above.
		return
	}

	// build new services map to reflect the change
	newServiceMap := buildServicesInfo()
	if len(newServiceMap) != len(nsc.serviceMap) || !reflect.DeepEqual(newServiceMap, nsc.serviceMap) {
		nsc.serviceMap = newServiceMap
		nsc.syncIpvsServices(nsc.serviceMap, nsc.endpointsMap)
	} else {
		glog.Infof("Skipping ipvs server sync on service update because nothing changed")
	}
}
// syncIpvsServices reconciles the IPVS services and destinations with the
// desired state derived from the kubernetes service/endpoints caches.
//
// For each service it (1) assigns the cluster IP to the dummy interface so
// pods on the node can route to it, (2) ensures an IPVS service for the
// cluster IP and — when a NodePort is set — for the node IP, and (3) ensures
// an IPVS destination per endpoint. Any IPVS service or destination not in
// the desired map is then removed.
func (nsc *NetworkServicesController) syncIpvsServices(serviceInfoMap serviceInfoMap, endpointsInfoMap endpointsInfoMap) {
	start := time.Now()
	defer func() {
		glog.Infof("sync ipvs servers took %v", time.Since(start))
	}()

	dummyVipInterface := getKubeDummyInterface()

	// map of active services and service endpoints
	activeServiceEndpointMap := make(map[string][]string)

	for k, svc := range serviceInfoMap {
		var protocol uint16
		if svc.protocol == "tcp" {
			protocol = syscall.IPPROTO_TCP
		} else {
			protocol = syscall.IPPROTO_UDP
		}

		// assign cluster IP of the service to the dummy interface so that it is
		// routable from the pods on the node
		vip := &netlink.Addr{IPNet: &net.IPNet{IP: svc.clusterIP, Mask: net.IPv4Mask(255, 255, 255, 255)}, Scope: syscall.RT_SCOPE_LINK}
		err := netlink.AddrAdd(dummyVipInterface, vip)
		if err != nil && err.Error() != IFACE_HAS_ADDR {
			glog.Errorf("Failed to assign cluster ip to dummy interface %s", err)
			continue
		}

		// create IPVS service for the service to be exposed through the cluster ip
		ipvs_cluster_vip_svc, err := ipvsAddService(svc.clusterIP, protocol, uint16(svc.port), svc.sessionAffinity)
		if err != nil {
			// BUG FIX: the format string had no %s verb for the error.
			glog.Errorf("Failed to create ipvs service for cluster ip: %s", err.Error())
			continue
		}
		clusterServiceId := generateIpPortId(svc.clusterIP.String(), svc.protocol, strconv.Itoa(svc.port))
		activeServiceEndpointMap[clusterServiceId] = make([]string, 0)

		// create IPVS service for the service to be exposed through the nodeport
		var ipvs_nodeport_svc *libipvs.Service
		var nodeServiceId string
		if svc.nodePort != 0 {
			ipvs_nodeport_svc, err = ipvsAddService(nsc.nodeIP, protocol, uint16(svc.nodePort), svc.sessionAffinity)
			if err != nil {
				glog.Errorf("Failed to create ipvs service for node port")
				continue
			}
			nodeServiceId = generateIpPortId(nsc.nodeIP.String(), svc.protocol, strconv.Itoa(svc.nodePort))
			activeServiceEndpointMap[nodeServiceId] = make([]string, 0)
		}

		// add IPVS remote server to the IPVS service
		for _, endpoint := range endpointsInfoMap[k] {
			dst := libipvs.Destination{
				Address:       net.ParseIP(endpoint.ip),
				AddressFamily: syscall.AF_INET,
				Port:          uint16(endpoint.port),
				Weight:        1,
			}
			if err := ipvsAddServer(ipvs_cluster_vip_svc, &dst); err != nil {
				glog.Errorf("%s", err.Error())
			}
			activeServiceEndpointMap[clusterServiceId] = append(activeServiceEndpointMap[clusterServiceId], endpoint.ip)

			if svc.nodePort != 0 {
				if err := ipvsAddServer(ipvs_nodeport_svc, &dst); err != nil {
					glog.Errorf("%s", err.Error())
				}
				// BUG FIX: this previously appended to the cluster entry
				// (activeServiceEndpointMap[clusterServiceId]), polluting the
				// node-port entry with the cluster VIP's accumulated
				// endpoints and breaking stale-destination cleanup below.
				activeServiceEndpointMap[nodeServiceId] = append(activeServiceEndpointMap[nodeServiceId], endpoint.ip)
			}
		}
	}

	// cleanup stale ipvs service and servers
	glog.Infof("Cleaning up if any, old ipvs service and servers which are no longer needed")
	ipvsSvcs, err := h.ListServices()
	if err != nil {
		panic(err)
	}
	for _, ipvsSvc := range ipvsSvcs {
		key := generateIpPortId(ipvsSvc.Address.String(), ipvsSvc.Protocol.String(), strconv.Itoa(int(ipvsSvc.Port)))
		endpoints, ok := activeServiceEndpointMap[key]
		if !ok {
			glog.Infof("Found a IPVS service %s:%s:%s which is no longer needed so cleaning up", ipvsSvc.Address.String(), ipvsSvc.Protocol.String(), strconv.Itoa(int(ipvsSvc.Port)))
			if err := h.DelService(ipvsSvc); err != nil {
				// BUG FIX: the format string had no %s verb for the error.
				glog.Errorf("Failed to delete stale IPVS service: %s", err.Error())
			}
			continue
		}
		dsts, err := h.ListDestinations(ipvsSvc)
		if err != nil {
			glog.Errorf("Failed to get list of servers from ipvs service")
		}
		for _, dst := range dsts {
			validEp := false
			for _, ep := range endpoints {
				if ep == dst.Address.String() {
					validEp = true
					break
				}
			}
			if !validEp {
				glog.Infof("Found a IPVS service %s:%s:%s, destination %s which is no longer needed so cleaning up",
					ipvsSvc.Address.String(), ipvsSvc.Protocol.String(), strconv.Itoa(int(ipvsSvc.Port)), dst.Address.String())
				if err := h.DelDestination(ipvsSvc, dst); err != nil {
					glog.Errorf("Failed to delete server from ipvs service")
				}
			}
		}
	}
	glog.Infof("IPVS servers and services are synced to desired state!!")
}
// buildServicesInfo converts the service watcher cache into a
// serviceInfoMap, skipping headless services (no cluster IP) and service
// types (LoadBalancer, ExternalName) that this proxy does not handle.
func buildServicesInfo() serviceInfoMap {
	serviceMap := make(serviceInfoMap)
	for _, svc := range watchers.ServiceWatcher.List() {
		if svc.Spec.ClusterIP == "None" || svc.Spec.ClusterIP == "" {
			glog.Infof("Skipping service name:%s namespace:%s as there is no cluster IP", svc.Name, svc.Namespace)
			continue
		}
		if svc.Spec.Type == "LoadBalancer" || svc.Spec.Type == "ExternalName" {
			glog.Infof("Skipping service name:%s namespace:%s due to service Type=%s", svc.Name, svc.Namespace, svc.Spec.Type)
			continue
		}
		for _, port := range svc.Spec.Ports {
			svcId := generateServiceId(svc.Namespace, svc.Name, port.Name)
			serviceMap[svcId] = &serviceInfo{
				clusterIP:       net.ParseIP(svc.Spec.ClusterIP),
				port:            int(port.Port),
				protocol:        strings.ToLower(string(port.Protocol)),
				nodePort:        int(port.NodePort),
				sessionAffinity: svc.Spec.SessionAffinity == "ClientIP",
			}
		}
	}
	return serviceMap
}
// buildEndpointsInfo converts the endpoints watcher cache into an
// endpointsInfoMap keyed by the same service id used in buildServicesInfo.
func buildEndpointsInfo() endpointsInfoMap {
	endpointsMap := make(endpointsInfoMap)
	for _, ep := range watchers.EndpointsWatcher.List() {
		for _, subset := range ep.Subsets {
			for _, port := range subset.Ports {
				endpoints := make([]endpointsInfo, 0, len(subset.Addresses))
				for _, addr := range subset.Addresses {
					endpoints = append(endpoints, endpointsInfo{ip: addr.IP, port: int(port.Port)})
				}
				endpointsMap[generateServiceId(ep.Namespace, ep.Name, port.Name)] = endpoints
			}
		}
	}
	return endpointsMap
}
// ensureMasqueradeIptablesRule installs the POSTROUTING masquerade rules
// IPVS NAT relies on: reverse-path traffic must go through the director, so
// the source IP of outbound IPVS traffic is rewritten to the node IP. One
// rule covers all IPVS traffic when masqueradeAll is set; a second excludes
// traffic sourced from the pod CIDR when the CIDR is known.
func ensureMasqueradeIptablesRule(masqueradeAll bool, podCidr string) error {
	iptablesCmdHandler, err := iptables.New()
	if err != nil {
		return errors.New("Failed to initialize iptables executor" + err.Error())
	}

	if masqueradeAll {
		args := []string{"-m", "ipvs", "--ipvs", "--vdir", "ORIGINAL", "--vmethod", "MASQ", "-m", "comment", "--comment", "", "-j", "MASQUERADE"}
		if err := iptablesCmdHandler.AppendUnique("nat", "POSTROUTING", args...); err != nil {
			return errors.New("Failed to run iptables command" + err.Error())
		}
	}

	if len(podCidr) > 0 {
		args := []string{"-m", "ipvs", "--ipvs", "--vdir", "ORIGINAL", "--vmethod", "MASQ", "-m", "comment", "--comment", "",
			"!", "-s", podCidr, "-j", "MASQUERADE"}
		if err := iptablesCmdHandler.AppendUnique("nat", "POSTROUTING", args...); err != nil {
			return errors.New("Failed to run iptables command" + err.Error())
		}
	}

	glog.Infof("Successfully added iptables masqurade rule")
	return nil
}
// ensureIpvsConntrack enables IPVS connection tracking by writing "1" to the
// net.ipv4.vs.conntrack sysctl file.
func ensureIpvsConntrack() error {
	const conntrackSysctl = "/proc/sys/net/ipv4/vs/conntrack"
	return ioutil.WriteFile(conntrackSysctl, []byte("1"), 0640)
}
// deleteMasqueradeIptablesRule removes the first rule in the nat POSTROUTING
// chain that mentions both "ipvs" and "MASQUERADE" (the rule added by
// ensureMasqueradeIptablesRule).
func deleteMasqueradeIptablesRule() error {
	iptablesCmdHandler, err := iptables.New()
	if err != nil {
		return errors.New("Failed to initialize iptables executor" + err.Error())
	}
	postRoutingChainRules, err := iptablesCmdHandler.List("nat", "POSTROUTING")
	if err != nil {
		return errors.New("Failed to list iptable rules in POSTROUTING chain in nat table" + err.Error())
	}
	for i, rule := range postRoutingChainRules {
		if strings.Contains(rule, "ipvs") && strings.Contains(rule, "MASQUERADE") {
			// NOTE(review): iptables rule numbers are 1-based while i is the
			// 0-based index into List's output; this lines up only if the
			// first list entry is the chain policy line — confirm.
			err = iptablesCmdHandler.Delete("nat", "POSTROUTING", strconv.Itoa(i))
			if err != nil {
				return errors.New("Failed to run iptables command" + err.Error())
			}
			break
		}
	}
	return nil
}
// ipvsAddService returns the already-registered IPVS service matching
// vip/protocol/port if one exists, otherwise creates a new round-robin
// service. When persistent is set, session persistence is enabled with a
// hardcoded 180-minute timeout. Panics if the IPVS service list cannot be
// read.
func ipvsAddService(vip net.IP, protocol, port uint16, persistent bool) (*libipvs.Service, error) {
	svcs, err := h.ListServices()
	if err != nil {
		panic(err)
	}
	// Reuse an existing matching service rather than failing on a duplicate.
	for _, svc := range svcs {
		if strings.Compare(vip.String(), svc.Address.String()) == 0 &&
			libipvs.Protocol(protocol) == svc.Protocol && port == svc.Port {
			glog.Infof("ipvs service %s:%s:%s already exists so returning", vip.String(),
				libipvs.Protocol(protocol), strconv.Itoa(int(port)))
			return svc, nil
		}
	}
	svc := libipvs.Service{
		Address:       vip,
		AddressFamily: syscall.AF_INET,
		Protocol:      libipvs.Protocol(protocol),
		Port:          port,
		SchedName:     libipvs.RoundRobin,
	}
	if persistent {
		// set bit to enable service persistence
		svc.Flags.Flags |= (1 << 24)
		svc.Flags.Mask |= 0xFFFFFFFF
		// TODO: once service manifest supports timeout time remove hardcoding
		svc.Timeout = 180 * 60
	}
	if err := h.NewService(&svc); err != nil {
		return nil, fmt.Errorf("Failed to create service: %s:%s:%s", vip.String(), libipvs.Protocol(protocol), strconv.Itoa(int(port)))
	}
	glog.Infof("Successfully added service: %s:%s:%s", vip.String(), libipvs.Protocol(protocol), strconv.Itoa(int(port)))
	return &svc, nil
}
// ipvsAddServer registers dest as a destination of service. A destination
// that already exists is treated as success; any other failure is returned.
func ipvsAddServer(service *libipvs.Service, dest *libipvs.Destination) error {
	err := h.NewDestination(service, dest)
	switch {
	case err == nil:
		glog.Infof("Successfully added destination %s:%s to the service %s:%s:%s", dest.Address,
			strconv.Itoa(int(dest.Port)), service.Address, service.Protocol, strconv.Itoa(int(service.Port)))
	case strings.Contains(err.Error(), IPVS_SERVER_EXISTS):
		glog.Infof("ipvs destination %s:%s already exists in the ipvs service %s:%s:%s so not adding destination", dest.Address,
			strconv.Itoa(int(dest.Port)), service.Address, service.Protocol, strconv.Itoa(int(service.Port)))
	default:
		return fmt.Errorf("Failed to add ipvs destination %s:%s to the ipvs service %s:%s:%s due to : %s", dest.Address,
			strconv.Itoa(int(dest.Port)), service.Address, service.Protocol, strconv.Itoa(int(service.Port)), err.Error())
	}
	return nil
}
// generateServiceId builds the unique identifier for a load-balanced
// service: namespace, service name and port name, dash-separated.
func generateServiceId(namespace, svcName, port string) string {
	return strings.Join([]string{namespace, svcName, port}, "-")
}
// generateIpPortId builds the unique identifier for an IPVS service: ip,
// protocol and port, dash-separated. (The previous comment was copy-pasted
// from generateServiceId and wrongly described namespace/name/portname.)
func generateIpPortId(ip, protocol, port string) string {
	return ip + "-" + protocol + "-" + port
}
// getKubeDummyInterface returns the dummy link used to carry cluster IPs,
// creating it and bringing it up when it does not exist yet. Creation
// failures panic, matching the original behavior.
func getKubeDummyInterface() netlink.Link {
	dummyVipInterface, err := netlink.LinkByName(KUBE_DUMMY_IF)
	if err != nil && err.Error() == IFACE_NOT_FOUND {
		glog.Infof("Could not find dummy interface: " + KUBE_DUMMY_IF + " to assign cluster ip's, so creating one")
		err = netlink.LinkAdd(&netlink.Dummy{LinkAttrs: netlink.LinkAttrs{Name: KUBE_DUMMY_IF}})
		if err != nil {
			panic("Failed to add dummy interface: " + err.Error())
		}
		dummyVipInterface, err = netlink.LinkByName(KUBE_DUMMY_IF)
		// BUG FIX: this lookup error was previously discarded (immediately
		// overwritten by LinkSetUp's return value), so a failed lookup could
		// hand a nil link to LinkSetUp.
		if err != nil {
			panic("Failed to get dummy interface: " + err.Error())
		}
		err = netlink.LinkSetUp(dummyVipInterface)
		if err != nil {
			panic("Failed to bring dummy interface up: " + err.Error())
		}
	}
	return dummyVipInterface
}
// Cleanup permanently removes all configuration applied by this controller:
// the IPVS tables (flushed), the masquerade iptables rule, and the dummy
// interface carrying cluster IPs.
//
// Fixes: the glog.Errorf calls lacked %s verbs for their error arguments,
// and the IFACE_NOT_FOUND check was inverted — "does not exist" was logged
// for every error EXCEPT link-not-found.
func (nsc *NetworkServicesController) Cleanup() {
	// cleanup ipvs rules by flush
	glog.Infof("Cleaning up IPVS configuration permanently")
	if err := h.Flush(); err != nil {
		glog.Errorf("Failed to cleanup ipvs rules: %s", err.Error())
		return
	}

	// cleanup iptable masqurade rule
	if err := deleteMasqueradeIptablesRule(); err != nil {
		glog.Errorf("Failed to cleanup iptable masquerade rule due to: %s", err.Error())
		return
	}

	// delete dummy interface used to assign cluster IP's
	dummyVipInterface, err := netlink.LinkByName(KUBE_DUMMY_IF)
	if err != nil {
		if err.Error() == IFACE_NOT_FOUND {
			glog.Infof("Dummy interface: " + KUBE_DUMMY_IF + " does not exist")
		}
	} else {
		if err = netlink.LinkDel(dummyVipInterface); err != nil {
			glog.Errorf("Could not delete dummy interface: "+KUBE_DUMMY_IF+": %s", err.Error())
			return
		}
	}
	glog.Infof("Successfully cleaned the ipvs configuration done by kube-router")
}
// NewNetworkServicesController wires up a NetworkServicesController: it
// initialises the shared libipvs handle, copies settings from config,
// resolves the node object and its IP, and registers the controller with the
// endpoints and service watchers.
//
// NOTE(review): failures to create the IPVS handle or to resolve the node
// panic instead of using the error return value — confirm whether that is
// intended for this startup-only path.
func NewNetworkServicesController(clientset *kubernetes.Clientset, config *options.KubeRouterConfig) (*NetworkServicesController, error) {
	handle, err := libipvs.New()
	if err != nil {
		panic(err)
	}
	h = handle
	nsc := NetworkServicesController{}
	nsc.syncPeriod = config.IpvsSyncPeriod
	nsc.serviceMap = make(serviceInfoMap)
	nsc.endpointsMap = make(endpointsInfoMap)
	nsc.client = clientset
	nsc.masqueradeAll = false
	if config.MasqueradeAll {
		nsc.masqueradeAll = true
	}
	// The pod CIDR is only needed (and only resolvable) when the router
	// component also runs on this node.
	if config.RunRouter {
		cidr, err := utils.GetPodCidrFromNodeSpec(nsc.client, config.HostnameOverride)
		if err != nil {
			return nil, fmt.Errorf("Failed to get pod CIDR details from Node.spec: %s", err.Error())
		}
		nsc.podCidr = cidr
	}
	node, err := utils.GetNodeObject(clientset, config.HostnameOverride)
	if err != nil {
		panic(err.Error())
	}
	nsc.nodeHostName = node.Name
	nodeIP, err := getNodeIP(node)
	if err != nil {
		panic(err.Error())
	}
	nsc.nodeIP = nodeIP
	watchers.EndpointsWatcher.RegisterHandler(&nsc)
	watchers.ServiceWatcher.RegisterHandler(&nsc)
	return &nsc, nil
}
|
package qdn
import (
"bytes"
"errors"
"reflect"
"strconv"
"strings"
)
// Format returns the raw byte data in a more readable state for use in a
// text editor: '<' opens an indented block, ',' breaks the line, and '>'
// closes the current block.
// Keep in mind that this uses considerable amounts of system resources,
// so it is not advisable for plain network transmissions.
func Format(r []byte) ([]byte, error) {
	var formatted []byte
	var tabC int // current nesting depth = number of tabs to indent
	const tab byte = byte('\t')
	const nl byte = byte('\n')
	for i := 0; i < len(r); i++ {
		// Raw qdn data contains no tabs or newlines, so finding one means
		// the input was formatted already.
		if r[i] == nl || r[i] == tab {
			return r, errors.New("Bytes were already formatted")
		}
		if r[i] == byte('<') {
			// Open a block: newline + current indent before '<', then one
			// level deeper for the block body.
			formatted = append(formatted, nl)
			formatted = append(formatted, createTabS(tabC)...)
			formatted = append(formatted, r[i], nl)
			tabC++
			formatted = append(formatted, createTabS(tabC)...)
			continue
		}
		if r[i] == byte('>') {
			// Close a block: drop the last indentation byte and dedent.
			// NOTE(review): only a single trailing tab is removed here even
			// when the nesting depth is >1 — confirm with nested input.
			formatted = append(formatted[:len(formatted)-1], r[i])
			tabC--
			continue
		}
		if r[i] == byte(',') {
			// Field separator: break the line and re-indent.
			formatted = append(formatted, r[i], nl)
			formatted = append(formatted, createTabS(tabC)...)
			continue
		}
		formatted = append(formatted, r[i])
	}
	return formatted, nil
}
// createTabS returns a slice of tabC tab characters used as indentation;
// it returns nil for non-positive counts.
func createTabS(tabC int) []byte {
	if tabC <= 0 {
		// Guard keeps the original nil result (bytes.Repeat panics on a
		// negative count).
		return nil
	}
	// Standard library over a hand-rolled append loop.
	return bytes.Repeat([]byte{'\t'}, tabC)
}
// Unmarshal fills a given interface with the corresponding qdn byte data.
// stru must be a non-nil pointer to a struct whose type name appears in data;
// fields are consumed in declaration order as "name=value," pairs, with
// nested structs recursing into their own "<...>" block.
func Unmarshal(stru interface{}, data []byte) error {
	if err := unmarshalInitErrors(stru, data); err != nil {
		return err
	}
	count := reflect.TypeOf(stru).Elem().NumField()
	if count < 1 {
		return errors.New("qdn.Unmarshal error: The struct does not contain any fields")
	}
	s := reflect.ValueOf(stru).Elem()
	var (
		at  int // read cursor into data
		err error
	)
	for i := 0; i < count; i++ {
		if reflect.TypeOf(stru).Elem().Field(i).Type.Kind() == reflect.Struct {
			// Nested struct: locate the '>' that closes the sub-block and
			// recurse into the enclosed byte range.
			// NOTE(review): c counts every remaining '<' and then picks the
			// (c-1)-th '>', which appears to assume the nested struct is the
			// final block — confirm with multiple sibling structs.
			c := bytes.Count(data[at+1:], []byte{byte('<')})
			err = Unmarshal(s.Field(i).Addr().Interface(), data[at+1:2+at+allIndizes(data[at+1:], byte('>'))[c-1]])
			at += allIndizes(data[at+1:], byte('>'))[c-1] + 2
			if err != nil {
				return err
			}
			continue
		}
		// Plain field: parse the next "name=value," pair and advance at.
		at, err = strToVal(s.Field(i), reflect.TypeOf(stru).Elem().Field(i).Type, data, at)
		if err != nil {
			return err
		}
	}
	return nil
}
// Marshal turns the given struct into a raw byte array of the form
// "Name<field=value,...>", recursing into nested struct fields.
func Marshal(stru interface{}) ([]byte, error) {
	if reflect.TypeOf(stru).Kind() != reflect.Struct {
		return []byte{}, errors.New("qdn.Marshal error: Provided parameter is no struct")
	}
	count := reflect.TypeOf(stru).NumField()
	if count < 1 {
		return []byte{}, errors.New("qdn.Marshal error: The struct does not contain any fields")
	}
	r := make([]byte, 0)
	// Opening token: "Name<".
	r = append(r, setupRaw(reflect.TypeOf(stru))...)
	for i := 0; i < count; i++ {
		if reflect.TypeOf(stru).Field(i).Type.Kind() == reflect.Struct {
			// Nested struct: marshal recursively, then terminate with ','.
			raw, err := Marshal(reflect.ValueOf(stru).Field(i).Interface())
			if err != nil {
				return raw, err
			}
			r = append(r, raw...)
			r = append(r, byte(','))
			continue
		}
		// Plain field: "name=value,".
		r = append(r, fieldToRaw(reflect.ValueOf(stru).Field(i), reflect.TypeOf(stru).Field(i))...)
	}
	return append(r, byte('>')), nil
}
// allIndizes reports the index of every occurrence of b within d,
// in ascending order; it returns nil when b does not occur.
func allIndizes(d []byte, b byte) []int {
	var positions []int
	for idx := 0; idx < len(d); idx++ {
		if d[idx] == b {
			positions = append(positions, idx)
		}
	}
	return positions
}
// strToVal turns a string(of a raw slice) into the values for the struct
func strToVal(val reflect.Value, typ reflect.Type, data []byte, at int) (int, error) {
i := at + bytes.IndexRune(data[at:], '=')
at = i + bytes.IndexRune(data[i:], ',')
s := string(data[i+1 : at])
switch typ.Kind() {
case reflect.String:
val.SetString(s)
case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int8:
n, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return -1, errors.New("Conversion error: " + err.Error())
}
val.SetInt(n)
case reflect.Float32:
n, err := strconv.ParseFloat(s, 32)
if err != nil {
return -1, errors.New("Conversion error: " + err.Error())
}
val.SetFloat(n)
case reflect.Float64:
n, err := strconv.ParseFloat(s, 64)
if err != nil {
return -1, errors.New("Conversion error: " + err.Error())
}
val.SetFloat(n)
case reflect.Bool:
n, err := strconv.ParseBool(s)
if err != nil {
return -1, errors.New("Conversion error: " + err.Error())
}
val.SetBool(n)
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint8:
n, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return -1, errors.New("Conversion error: " + err.Error())
}
val.SetUint(n)
case reflect.Complex128, reflect.Complex64:
r, err := strconv.ParseFloat(s[:strings.Index(s, ";")], 64)
if err != nil {
return -1, errors.New("Conversion error: " + err.Error())
}
im, err2 := strconv.ParseFloat(s[strings.Index(s, ";")+1:], 64)
if err2 != nil {
return -1, errors.New("Conversion error: " + err.Error())
}
val.SetComplex(complex(r, im))
}
return at, nil
}
// fieldToRaw renders one struct field as the raw pair "name=value,".
func fieldToRaw(val reflect.Value, typ reflect.StructField) []byte {
	out := make([]byte, 0, len(typ.Name)+2)
	out = append(out, typ.Name...)
	out = append(out, '=')
	out = append(out, valToString(val, typ.Type)...)
	return append(out, ',')
}
// valToString turns the actual value to a string
func valToString(val reflect.Value, typ reflect.Type) string {
switch typ.Kind() {
case reflect.String:
return val.String()
case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int8:
return strconv.FormatInt(val.Int(), 10)
case reflect.Float32:
return strconv.FormatFloat(val.Float(), byte('f'), -1, 32)
case reflect.Float64:
return strconv.FormatFloat(val.Float(), byte('f'), -1, 64)
case reflect.Bool:
return strconv.FormatBool(val.Bool())
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint8:
return strconv.FormatUint(val.Uint(), 10)
case reflect.Complex128, reflect.Complex64:
return strconv.FormatFloat(real(val.Complex()), byte('f'), -1, 64) + ";" +
strconv.FormatFloat(imag(val.Complex()), byte('f'), -1, 64)
default:
return "type not found"
}
}
// unmarshalInitErrors validates the arguments of Unmarshal up front: stru
// must be a non-nil pointer, and data must contain the name of the struct
// it claims to represent.
func unmarshalInitErrors(stru interface{}, data []byte) error {
	if reflect.TypeOf(stru).Kind() != reflect.Ptr {
		return errors.New("qdn.Unmarshal error: Provided parameter is no pointer")
	}
	if reflect.ValueOf(stru).IsNil() {
		return errors.New("qdn.Unmarshal error: Pointer is nil")
	}
	// Bug fix: bytes.ContainsAny matched if ANY single character of the
	// struct name appeared in data, accepting unrelated payloads;
	// bytes.Contains requires the full name as a substring.
	if !bytes.Contains(data, []byte(reflect.TypeOf(stru).Elem().Name())) {
		return errors.New("qdn.Unmarshal error: Missmatch: data does not represent the struct")
	}
	return nil
}
// initially sets up a struct raw slice
func setupRaw(struName reflect.Type) []byte {
return append([]byte(struName.Name()), byte('<'))
}
|
/*
* Copyright 2017 StreamSets Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package delay
import (
"github.com/streamsets/datacollector-edge/api"
"github.com/streamsets/datacollector-edge/api/validation"
"github.com/streamsets/datacollector-edge/container/common"
"github.com/streamsets/datacollector-edge/stages/stagelibrary"
"time"
)
const (
	// LIBRARY and STAGE_NAME identify this stage inside pipeline definitions.
	LIBRARY    = "streamsets-datacollector-basic-lib"
	STAGE_NAME = "com_streamsets_pipeline_stage_processor_delay_DelayProcessor"
	// VERSION is the stage's configuration version.
	VERSION = 1
)

// DelayProcessor forwards records unchanged after pausing for the
// configured delay.
type DelayProcessor struct {
	*common.BaseStage
	// Delay is the pause applied per batch, in milliseconds (from config).
	Delay float64 `ConfigDef:"type=NUMBER,required=true"`
}
// init registers the DelayProcessor constructor with the stage library so
// pipelines can instantiate it by LIBRARY/STAGE_NAME.
func init() {
	stagelibrary.SetCreator(LIBRARY, STAGE_NAME, func() api.Stage {
		return &DelayProcessor{BaseStage: &common.BaseStage{}}
	})
}
// Init delegates stage initialisation to the embedded BaseStage.
func (d *DelayProcessor) Init(stageContext api.StageContext) []validation.Issue {
	return d.BaseStage.Init(stageContext)
}
// Process sleeps for the configured delay and then forwards every record in
// the batch unchanged.
func (d *DelayProcessor) Process(batch api.Batch, batchMaker api.BatchMaker) error {
	// Multiply before converting so fractional milliseconds survive:
	// time.Duration(d.Delay) * time.Millisecond truncated Delay to a whole
	// millisecond count first (e.g. 2.5 slept only 2ms).
	time.Sleep(time.Duration(d.Delay * float64(time.Millisecond)))
	for _, record := range batch.GetRecords() {
		batchMaker.AddRecord(record)
	}
	return nil
}
|
// problem 9.9
package chapter9
// consumePreorder rebuilds the subtree whose preorder serialisation starts
// at vs[st] ("null" marks an absent child) and returns the subtree root
// together with the number of tokens consumed.
// NOTE(review): vs[st] is indexed without a bounds check, so input shorter
// than a complete serialisation will panic — confirm inputs are trusted.
func consumePreorder(vs []string, st int) (*TreeNode, int) {
	if vs[st] == "null" {
		return nil, 1
	} else {
		elem := &TreeNode{Value: vs[st]}
		// Left subtree starts immediately after this node's token.
		left, lc := consumePreorder(vs, st+1)
		elem.Left = left
		// Right subtree starts after the left subtree's lc tokens.
		right, rc := consumePreorder(vs, st+1+lc)
		elem.Right = right
		return elem, lc + rc + 1
	}
}
// ReconstructPreorderWithNull rebuilds a binary tree from a preorder
// serialisation in which "null" marks missing children (problem 9.9).
func ReconstructPreorderWithNull(vs []string) *TreeNode {
	root, _ := consumePreorder(vs, 0)
	return root
}
|
package turnstile
import (
"bytes"
"encoding/json"
"io"
"io/ioutil"
"log"
"net/http"
"sync"
"time"
"github.com/caddyserver/caddy"
"github.com/caddyserver/caddy/caddyhttp/httpserver"
)
// Turnstile is a Caddy middleware which records incoming traffic to a
// downstream Telegram bot.
type Turnstile struct {
	collector Collector          // sink that receives extracted events
	next      httpserver.Handler // next handler in the middleware chain
	wg        *sync.WaitGroup    // tracks in-flight collector goroutines
}
// init registers this middleware with Caddy under the "turnstile" directive
// for HTTP server types.
func init() {
	caddy.RegisterPlugin("turnstile", caddy.Plugin{
		ServerType: "http",
		Action:     setup,
	})
}
// setup parses the "turnstile <collector> ..." directive, builds the named
// collector, and installs the middleware into the site's handler chain.
func setup(c *caddy.Controller) error {
	_ = c.Next() // skip directive name
	// The first argument selects the collector implementation.
	var collectorName string
	if ok := c.NextArg(); ok {
		collectorName = c.Val()
	} else {
		return c.ArgErr()
	}
	var collector Collector
	var err error
	// Look the factory up in the package registry; the factory consumes any
	// remaining directive arguments from the dispenser.
	if collectorFactory, ok := collectors[collectorName]; ok {
		collector, err = collectorFactory(&c.Dispenser)
		if err != nil {
			return err
		}
	} else {
		return c.Errf(`turnstile: no such collector "%s"`, collectorName)
	}
	cfg := httpserver.GetConfig(c)
	mid := func(next httpserver.Handler) httpserver.Handler {
		return New(collector, next)
	}
	cfg.AddMiddleware(mid)
	return nil
}
// ServeHTTP records Telegram updates POSTed to this site and always
// delegates the request to the next handler. Non-POST requests and bodies
// that fail to decode pass straight through.
func (h Turnstile) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
	if r.Method != http.MethodPost {
		return h.next.ServeHTTP(w, r)
	}
	// Tee what the decoder reads into buf so downstream handlers can
	// re-read the body.
	var buf bytes.Buffer
	rdr := io.TeeReader(r.Body, &buf)
	var update Update
	err := json.NewDecoder(rdr).Decode(&update)
	// restore the response body
	// note: this will only contain whatever was read by the call to Decode
	r.Body = ioutil.NopCloser(&buf)
	if err != nil {
		log.Printf("[WARNING] turnstile: error decoding json: %s", err)
		return h.next.ServeHTTP(w, r)
	}
	// NOTE(review): this closes the restored NopCloser (a no-op); the
	// original request body is never closed here — confirm the server's
	// own cleanup is relied upon.
	r.Body.Close()
	h.wg.Add(1)
	// Collect asynchronously so the request path never blocks on the
	// collector.
	go func(t time.Time, u Update) {
		defer h.wg.Done()
		if event := ExtractEvent(t, u); event != nil {
			err := h.collector.Collect(*event)
			if err != nil {
				log.Printf("[ERROR] turnstile: error collecting event: %s", err)
			}
		}
	}(time.Now(), update)
	return h.next.ServeHTTP(w, r)
}
// New builds a Turnstile middleware that feeds extracted events to c and
// forwards every request to next.
func New(c Collector, next httpserver.Handler) Turnstile {
	t := Turnstile{wg: new(sync.WaitGroup)}
	t.collector = c
	t.next = next
	return t
}
|
package flags
// Uint64 is a 64-bit flag set manipulated with bitwise operations.
type Uint64 uint64

// Add returns f with every flag in b set.
func (f Uint64) Add(b ...Uint64) Uint64 {
	for _, flag := range b {
		f |= flag
	}
	return f
}

// Remove returns f with every flag in b cleared.
//
// Bug fix: the previous implementation used XOR (f ^ b[i]), which toggles
// bits — "removing" a flag that was not set incorrectly set it. AND-NOT
// clears the requested bits unconditionally.
func (f Uint64) Remove(b ...Uint64) Uint64 {
	for _, flag := range b {
		f &^= flag
	}
	return f
}

// Intersect returns the flags common to f and the union of b.
func (f Uint64) Intersect(b ...Uint64) Uint64 {
	s := Uint64(0).Add(b...)
	return f & s
}

// IsAny reports whether f shares at least one flag with b.
func (f Uint64) IsAny(b ...Uint64) bool {
	return f.Intersect(b...) > 0
}

// IsAll reports whether every flag of f is contained in the union of b.
// NOTE(review): this checks f ⊆ union(b), not "f contains all of b" —
// confirm the intended direction with callers before changing.
func (f Uint64) IsAll(b ...Uint64) bool {
	return f.Intersect(b...) == f
}
|
package store
import (
"fmt"
"time"
)
// GetResult carries the outcome of a single Get: the value found (empty
// when the key is absent) and any transport error.
type GetResult struct {
	Err   error
	Value string
}

// Store is the minimal key/value contract every backend must satisfy.
type Store interface {
	// Get gets a value by key
	// If the key is not found in the store, the Err is nil and Value is empty
	Get(key string) GetResult
	// MultiGet is a batch version of Get
	MultiGet(keys []string) map[string] /*the-key*/ GetResult
	// Set sets a value with key where the key exists or not
	Set(key, value string) error
	// Delete deletes a key/value pair from the store
	Delete(key string) error
}

// Config contains the options for a storage client
type Config struct {
	Timeout  time.Duration
	Addr     string // The address of the backend store with "host:port" format
	Password string // The authority password if necessarily
}

// The backend store names usable with NewStore.
const (
	MEMCACHED string = "memcached"
	REDIS     string = "redis"
)

// StoreCreator is a function to create a new instance of Store
type StoreCreator func(Config) (Store, error)
// creators is the registry of store factories keyed by backend name.
var creators = make(map[string]StoreCreator)

// Register makes a store creator available by name. Like database/sql's
// driver registration, it panics on a nil creator or a duplicate name.
func Register(name string, creator StoreCreator) {
	if creator == nil {
		panic("cache: Register adapter is nil")
	}
	if _, taken := creators[name]; taken {
		panic("cache: Register called twice for adapter " + name)
	}
	creators[name] = creator
}
// NewStore creates a new store driver by store name and configurations.
// It fails when no creator was registered under name.
func NewStore(name string, c Config) (Store, error) {
	creator, registered := creators[name]
	if !registered {
		return nil, fmt.Errorf("unknown store name [%v] (forgot to import?)", name)
	}
	store, err := creator(c)
	if err != nil {
		return nil, err
	}
	return store, nil
}
|
package model
import (
"github.com/graphql-go/graphql"
"go.mongodb.org/mongo-driver/bson/primitive"
)
// Position represents a F1 Position
type Position struct {
ID primitive.ObjectID `json:"id" bson:"_id"`
DriverID string `json:"-" bson:"driverId"`
Driver Driver `json:"driver" bson:"-"`
Number int `json:"number" bson:"number"`
}
// PositionType is the GraphQL object exposed for Position, built once at
// package initialisation.
var PositionType *graphql.Object

func init() {
	PositionType = graphql.NewObject(graphql.ObjectConfig{
		Name:        "Position",
		Description: "A GP position for a single Driver",
		Fields: graphql.Fields{
			"id": &graphql.Field{
				Name:        "id",
				Type:        graphql.String,
				Description: "Position's id",
			},
			"driver": &graphql.Field{
				Name:        "driver",
				Type:        DriverType,
				Description: "The driver who got this position",
			},
			"number": &graphql.Field{
				Name:        "number",
				Type:        graphql.Int,
				Description: "The position itself",
			},
		},
	})
}
|
///////////////////////////////////////////////////////
// 리플렉션 사용하기
///////////////////////////////////////////////////////
//리플렉션은 실행 시점(Runtime, 런타임)에 인터페이스나 구조체 등의 타입 정보를 얻어내거나 결정하는 기능입니다.
/*
//간단하게 변수와 구조체의 타입을 표시해보겠습니다.
package main
import (
"fmt"
"reflect"
)
type Data struct { // 구조체 정의
a, b int
}
func main() {
var num int = 1
fmt.Println(reflect.TypeOf(num)) // int : reflect.TypeOf로 자료형 이름 출력
var s string = "Hello, world!"
fmt.Println(reflect.TypeOf(s)) // string : reflect.TypeOf로 자로형 이름 출력
var f float32 = 1.3
fmt.Println(reflect.TypeOf(f)) // float32: reflect.TypeOf로 자료형 이름 출력
var data Data = Data{1, 2}
fmt.Println(reflect.TypeOf(data)) // main.Data: reflect.TypeOf로 구조체 이름 출력
//int, string, float32 형 변수의 자료형이 출력됩니다.
//마찬가지로 구조체도 타입을 알아낼 수 있는데 Data 구조체는 main 패키지 안에 속해있기 때문에 main.Data로 나옵니다.
}
*/
/*
//리플렉션으로 변수의 타입뿐만 아니라 값에 대한 상세한 정보도 얻어올 수 있습니다.
package main
import (
"fmt"
"reflect"
)
func main() {
var f float64 = 1.3
t := reflect.TypeOf(f) // f의 타입 정보를 t에 저장
v := reflect.ValueOf(f) // f의 값 정보를 v에 저장
fmt.Println(t.Name()) // float64: 자로형 이름 출력
fmt.Println(t.Size()) // 8: 자료형 크기 출력
fmt.Println(t.Kind() == reflect.Float64) // true: 자로형 종류를 알아내어 reflect.Float64와 비교
fmt.Println(t.Kind() == reflect.Int64) // false: 자료형 종류를 알아내어 reflect.Int64와 비교
//reflect.ValueOf 함수로 float64 변수의 값 정보 reflect.Value를 얻어오면 타입 정보, 타입 종류, 변수에 저장된 값을 알 수 있습니다.
fmt.Println(v.Type()) // float64: 값이 담긴 변수의 자료형 이름 출력
fmt.Println(v.Kind() == reflect.Float64) // true: 값이 담긴 변수의 자료형 종류를 알아내어 reflect.Float64와 비교
fmt.Println(v.Kind() == reflect.Int64) // false: 값이 담긴 변수의 자료형 종류를 알아내어 reflect.Int64와 비교
//변수가 float64라면 v.Float(), int라면 v.Int(), string이라면 v.String()과 같이 각 타입에 맞는 함수를 사용하면 변수에 저장된 값을 가져올 수 있습니다.
fmt.Println(v.Float()) // 1.3: 값을 실수형으로 출력
}
*/
///////////////////////////////////////////////////////////////////////
// 구조체 태그 가져오기
/*
//다음과 같이 리플렉션으로 구조체의 태그도 가져올 수 있습니다.
package main
import (
"fmt"
"reflect"
)
type Person struct {
//구조체 필드의 태그는 `태그명:"내용"` 형식으로 지정합니다.
//태그는 문자열 형태이며 문자열 안에 " " (따옴표)가 포함되므로 ` ` (백쿼트)로 감싸줍니다.
//여러 개를 지정할 때는 공백으로 구분해줍니다.
	name string `tag1:"이름" tag2:"Name"` // struct tag (tags must be wrapped in backquotes, not single quotes)
	age  int    `tag1:"나이" tag2:"Age"`  // struct tag
}
func main() {
	p := Person{} // composite literal — Person() is not valid Go
name, ok := reflect.TypeOf(p).FieldByName("name")
fmt.Println(ok, name.Tag.Get("tag1"), name.Tag.Get("tag2")) // true 이름 Name
age, ok := reflect.TypeOf(p).FieldByName("age")
fmt.Println(ok, age.Tag.Get("tag1"), age.Tag.Get("tag2")) // true 나이 Age
}
*/
////////////////////////////////////////////////////////////////////
// 포인터와 인터페이스의 값 가져오기
//다음은 일반 포인터와 인터페이스의 값을 가져오는 방법입니다.
package main
import (
"fmt"
"reflect"
)
// main demonstrates reading values through pointers and empty interfaces
// with the reflect package.
func main() {
	var a *int = new(int)
	*a = 1
	fmt.Println(reflect.TypeOf(a))  // *int
	fmt.Println(reflect.ValueOf(a)) // <*int Value>
	// Calling Int directly on the pointer's Value is a runtime error; Elem
	// must be used first to reach the value stored at the pointer's target.
	//fmt.Println(reflect.ValueOf(a).Int()) // runtime error
	fmt.Println(reflect.ValueOf(a).Elem())       // 1: <int Value>
	fmt.Println(reflect.ValueOf(a).Elem().Int()) // 1
	fmt.Println()
	var b interface{}
	b = 1
	// Assigning 1 to the empty interface b makes both its dynamic type and
	// its value int.
	fmt.Println(reflect.TypeOf(b))        // int
	fmt.Println(reflect.ValueOf(b))       // 1: <int Value>
	fmt.Println(reflect.ValueOf(b).Int()) // 1
	// Elem on a non-pointer, non-interface Value is a runtime error.
	//fmt.Println(reflect.ValueOf(b).Elem()) // runtime error
}
|
package zfs
import (
"testing"
)
// TestDestroySnapshot exercises DestroySnapshot against the pool named by
// testPool.
// NOTE(review): the error is only logged, so this test can never fail —
// confirm whether a missing snapshot should call t.Fatal instead.
func TestDestroySnapshot(t *testing.T) {
	err := DestroySnapshot(*testPool + "/tank2/tank1@zfs-auto-snap_daily-2019-06-05-1707")
	if err != nil {
		t.Log(err)
	}
}
|
package order
// CreateOrderReq is the JSON request payload for creating an order.
type CreateOrderReq struct {
	Quantity    int     `json:"quantity"`    // number of units ordered
	ProductId   int64   `json:"productId"`   // product being ordered
	UserId      int64   `json:"userId"`      // purchasing user
	Fare        float64 `json:"fare"`        // shipping fare
	DiscountAmt float64 `json:"discountAmt"` // discount applied to the total
}
|
package osinserver
import (
"github.com/RangelReale/osin"
mgostore "github.com/nguyenxuantuong/osin-mongo-storage"
"github.com/byrnedo/apibase/db/mongo/defaultmongo"
)
// Server is the shared OAuth2 server instance, initialised at import time
// against the default Mongo connection.
var Server *osin.Server

func init() {
	config := osin.NewServerConfig()
	sstorage := mgostore.NewOAuthStorage(defaultmongo.Conn(), "oauth_osin")
	// MOVE THIS AND MAKE DYNAMIC
	// SECURITY NOTE(review): a client with a hard-coded id/secret and a
	// localhost redirect is registered at startup — this must not ship to
	// production; load client credentials from configuration instead.
	if _, err := sstorage.GetClient("test"); err != nil {
		sstorage.SetClient("test", &osin.DefaultClient{
			Id:          "test",
			Secret:      "superSecret!",
			RedirectUri: "http://localhost:14001/appauth",
		})
	}
	Server = osin.NewServer(config, sstorage)
}
|
package web_service
import (
"2021/yunsongcailu/yunsong_server/web/web_dao"
"2021/yunsongcailu/yunsong_server/web/web_model"
)
// WebsiteServer exposes read access to the site-wide configuration record.
type WebsiteServer interface {
	// GetWebsiteInfo returns the website's stored metadata.
	GetWebsiteInfo() (websiteInfo web_model.WebsiteModel, err error)
}

// websiteServer is the default WebsiteServer backed by the website DAO.
type websiteServer struct{}

// NewWebsiteServer returns the default WebsiteServer implementation.
func NewWebsiteServer() WebsiteServer {
	return &websiteServer{}
}

// wd is the shared data-access object used by all websiteServer instances.
var wd = web_dao.NewWebsiteDao()

// GetWebsiteInfo delegates directly to the DAO query.
func (ws *websiteServer) GetWebsiteInfo() (websiteInfo web_model.WebsiteModel, err error) {
	return wd.QueryWebsiteInfo()
} |
package main
import (
"fmt"
"strconv"
)
// part1 plays the recipe game until at least add+10 scores exist, then
// returns the ten scores that follow the first add recipes as a digit
// string (Advent of Code 2018, day 14, part one).
func part1(add int) string {
	board := []int{3, 7}
	a, b := 0, 1
	for len(board) < add+10 {
		sum := board[a] + board[b]
		if sum >= 10 {
			// Two-digit sums (max 18) contribute a leading 1.
			board = append(board, sum/10)
		}
		board = append(board, sum%10)
		a = (a + 1 + board[a]) % len(board)
		b = (b + 1 + board[b]) % len(board)
	}
	digits := ""
	for _, d := range board[add : add+10] {
		digits += strconv.Itoa(d)
	}
	return digits
}
// slicesEqual reports whether a and b have the same length and contents.
func slicesEqual(a, b []int) bool {
	if len(a) != len(b) {
		return false
	}
	for i, v := range a {
		if v != b[i] {
			return false
		}
	}
	return true
}
// part2 plays the recipe game until the decimal digit sequence of input
// appears on the scoreboard, returning how many recipes precede it
// (Advent of Code 2018, day 14, part two).
func part2(input int) int {
	scoreboard := []int{3, 7}
	elf1, elf2 := 0, 1
	// Decompose input into its decimal digits, most significant first.
	var seq []int
	for input > 0 {
		seq = append([]int{input % 10}, seq...)
		input /= 10
	}
	for {
		newScores := scoreboard[elf1] + scoreboard[elf2]
		if newScores > 9 {
			scoreboard = append(scoreboard, 1)
		}
		scoreboard = append(scoreboard, newScores%10)
		elf1 = (elf1 + scoreboard[elf1] + 1) % len(scoreboard)
		elf2 = (elf2 + scoreboard[elf2] + 1) % len(scoreboard)
		if len(scoreboard) > len(seq) {
			// The pattern can end at the final digit...
			if slicesEqual(scoreboard[len(scoreboard)-len(seq):], seq) {
				return len(scoreboard) - len(seq)
			}
			// ...or one position earlier when this round appended two
			// digits at once.
			if newScores > 9 {
				//fmt.Println(len(scoreboard), len(seq))
				if slicesEqual(scoreboard[len(scoreboard)-len(seq)-1:len(scoreboard)-1], seq) {
					return len(scoreboard) - len(seq) - 1
				}
			}
		}
	}
	// Unreachable: the loop above only exits via return.
	return 0
}
// INPUT is the puzzle input: the recipe count for part one and the digit
// sequence searched for in part two.
const INPUT = 793061

func main() {
	fmt.Println(part1(INPUT))
	fmt.Println(part2(INPUT))
}
|
/*
Write a program that takes in a string and spells that word out using the NATO Phonetic Alphabet.
The mapping is as follows:
'A' -> 'Alfa'
'B' -> 'Bravo'
'C' -> 'Charlie'
'D' -> 'Delta'
'E' -> 'Echo'
'F' -> 'Foxtrot'
'G' -> 'Golf'
'H' -> 'Hotel'
'I' -> 'India'
'J' -> 'Juliett'
'K' -> 'Kilo'
'L' -> 'Lima'
'M' -> 'Mike'
'N' -> 'November'
'O' -> 'Oscar'
'P' -> 'Papa'
'Q' -> 'Quebec'
'R' -> 'Romeo'
'S' -> 'Sierra'
'T' -> 'Tango'
'U' -> 'Uniform'
'V' -> 'Victor'
'W' -> 'Whiskey'
'X' -> 'Xray'
'Y' -> 'Yankee'
'Z' -> 'Zulu'
Example:
'Hello World' -> ['Hotel', 'Echo', 'Lima', 'Lima', 'Oscar', 'Whiskey', 'Oscar', 'Romeo', 'Lima', 'Delta']
The input can be any string, but will always be comprised of only letters and spaces. Case is irrelevant in the output, but the input may contain letters in uppercase, lowercase, or both. Spaces should be ignored in the output.
You can output in any reasonable format, but it must be a delimited set of NATO callsigns.
*/
package main
import (
"fmt"
"reflect"
"unicode"
)
// main runs the example from the puzzle statement as an inline test.
func main() {
	test("Hello World", []string{"Hotel", "Echo", "Lima", "Lima", "Oscar", "Whiskey", "Oscar", "Romeo", "Lima", "Delta"})
}
// assert panics when x is false; used by the inline tests in main.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// test spells s with nato, prints the result, and asserts it equals r.
func test(s string, r []string) {
	p := nato(s)
	fmt.Println(p)
	assert(reflect.DeepEqual(p, r))
}
// natoAlphabet maps each uppercase ASCII letter to its NATO callsign.
// Hoisted to package scope so nato does not rebuild the map on every call.
var natoAlphabet = map[rune]string{
	'A': "Alfa", 'B': "Bravo", 'C': "Charlie", 'D': "Delta",
	'E': "Echo", 'F': "Foxtrot", 'G': "Golf", 'H': "Hotel",
	'I': "India", 'J': "Juliett", 'K': "Kilo", 'L': "Lima",
	'M': "Mike", 'N': "November", 'O': "Oscar", 'P': "Papa",
	'Q': "Quebec", 'R': "Romeo", 'S': "Sierra", 'T': "Tango",
	'U': "Uniform", 'V': "Victor", 'W': "Whiskey", 'X': "Xray",
	'Y': "Yankee", 'Z': "Zulu",
}

// nato spells s using the NATO phonetic alphabet, case-insensitively.
// Characters with no callsign (spaces, punctuation) are skipped; the result
// is nil when no letters are present.
func nato(s string) []string {
	var spelled []string
	for _, r := range s {
		if word, ok := natoAlphabet[unicode.ToUpper(r)]; ok {
			spelled = append(spelled, word)
		}
	}
	return spelled
}
|
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package util
import (
"crypto/tls"
"fmt"
"net/http"
"time"
"github.com/vespa-engine/vespa/client/go/build"
)
// HTTPClient is a minimal HTTP abstraction used by the CLI so requests can
// be mocked in tests.
type HTTPClient interface {
	// Do executes request with the given timeout.
	// (Result renamed from "error", which shadowed the predeclared type
	// identifier; interface result names are documentation only, so no
	// implementer breaks.)
	Do(request *http.Request, timeout time.Duration) (response *http.Response, err error)
	// UseCertificate configures the client certificates used for TLS.
	UseCertificate(certificate []tls.Certificate)
}
// defaultHTTPClient is the production HTTPClient backed by net/http.
type defaultHTTPClient struct {
	client *http.Client
}
// Do executes request with the given timeout, tagging it with the Vespa CLI
// User-Agent header. (Results renamed from "response, error"; naming a
// result "error" shadowed the predeclared type identifier.)
func (c *defaultHTTPClient) Do(request *http.Request, timeout time.Duration) (*http.Response, error) {
	// NOTE(review): mutating the shared client's Timeout is not safe if Do
	// is called concurrently with different timeouts — confirm callers are
	// sequential or switch to per-request contexts.
	if c.client.Timeout != timeout { // Set wanted timeout
		c.client.Timeout = timeout
	}
	if request.Header == nil {
		request.Header = make(http.Header)
	}
	request.Header.Set("User-Agent", fmt.Sprintf("Vespa CLI/%s", build.Version))
	return c.client.Do(request)
}
// UseCertificate installs the given client certificates on a fresh TLS
// transport. Note that any previously configured transport is replaced
// wholesale.
func (c *defaultHTTPClient) UseCertificate(certificates []tls.Certificate) {
	c.client.Transport = &http.Transport{TLSClientConfig: &tls.Config{
		Certificates: certificates,
	}}
}
// CreateClient returns an HTTPClient backed by net/http with the given
// default timeout.
func CreateClient(timeout time.Duration) HTTPClient {
	inner := &http.Client{Timeout: timeout}
	return &defaultHTTPClient{client: inner}
}
|
package entity
import "time"
// Company is the GORM model for a tenant company / branch record.
type Company struct {
	ID                 int
	Name               string `gorm:"not null"`
	BranchName         string `gorm:"not null"`
	PassWord           string
	Address            string
	Phone              string
	Describe           string `gorm:"type:text"`
	ThirdTradeNoPrefix string // prefix applied to third-party trade numbers
	AppId              string `gorm:"type:varchar(64);unique;not null"` // unique application identifier
	DevelopKey         string `gorm:"not null"`
	DefaultSubNumber   string `gorm:"not null"`
	CreateTime         time.Time `gorm:"type:datetime;not null"`
	UpdateTime         time.Time `gorm:"type:datetime;not null"`
}
|
package main
import "strings"
// isRegroup reports whether s1 and s2 are rearrangements of one another
// (equal character multisets). Inputs longer than 5000 runes, or of
// different lengths, are rejected immediately.
func isRegroup(s1, s2 string) bool {
	n1 := len([]rune(s1))
	n2 := len([]rune(s2))
	if n1 != n2 || n1 > 5000 || n2 > 5000 {
		return false
	}
	// With equal lengths, matching the per-rune counts of s1 against s2 is
	// sufficient: s2 cannot hide extra characters without a count mismatch.
	for _, ch := range s1 {
		if strings.Count(s1, string(ch)) != strings.Count(s2, string(ch)) {
			return false
		}
	}
	return true
}
|
package application
import (
"github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
"k8s.io/apimachinery/pkg/runtime"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/pkg/appfile/config"
"github.com/oam-dev/kubevela/pkg/dsl/process"
"github.com/oam-dev/kubevela/pkg/oam"
)
const (
// OAMApplicationLabel is application's metadata label
OAMApplicationLabel = "application.oam.dev"
)
// GenerateApplicationConfiguration converts an appFile to applicationConfig & Components.
// Each workload becomes one Component plus one matching entry in the
// ApplicationConfiguration; both are labelled with OAMApplicationLabel.
func (p *Parser) GenerateApplicationConfiguration(app *Appfile, ns string) (*v1alpha2.ApplicationConfiguration,
	[]*v1alpha2.Component, error) {
	appconfig := &v1alpha2.ApplicationConfiguration{}
	appconfig.SetGroupVersionKind(v1alpha2.ApplicationConfigurationGroupVersionKind)
	appconfig.Name = app.Name
	appconfig.Namespace = ns
	appconfig.Spec.Components = []v1alpha2.ApplicationConfigurationComponent{}
	if appconfig.Labels == nil {
		appconfig.Labels = map[string]string{}
	}
	appconfig.Labels[OAMApplicationLabel] = app.Name
	var components []*v1alpha2.Component
	for _, wl := range app.Workloads {
		// Fresh template-evaluation context per workload.
		pCtx := process.NewContext(wl.Name)
		userConfig := wl.GetUserConfigName()
		if userConfig != "" {
			// Expose user-supplied config data (read from the generated
			// ConfigMap) to the template context.
			cg := config.Configmap{Client: p.client}
			// TODO(wonderflow): envName should not be namespace when we have serverside env
			var envName = ns
			data, err := cg.GetConfigData(config.GenConfigMapName(app.Name, wl.Name, userConfig), envName)
			if err != nil {
				return nil, nil, err
			}
			pCtx.SetConfigs(data)
		}
		// Evaluate the workload template, then each trait template, into pCtx.
		if err := wl.EvalContext(pCtx); err != nil {
			return nil, nil, err
		}
		for _, tr := range wl.Traits {
			if err := tr.EvalContext(pCtx); err != nil {
				return nil, nil, err
			}
		}
		comp, acComp, err := evalWorkloadWithContext(pCtx, wl)
		if err != nil {
			return nil, nil, err
		}
		comp.Name = wl.Name
		acComp.ComponentName = comp.Name
		// Attach the scope references declared on the workload.
		for _, sc := range wl.Scopes {
			acComp.Scopes = append(acComp.Scopes, v1alpha2.ComponentScope{ScopeReference: v1alpha1.TypedReference{
				APIVersion: sc.GVK.GroupVersion().String(),
				Kind:       sc.GVK.Kind,
				Name:       sc.Name,
			}})
		}
		comp.Namespace = ns
		if comp.Labels == nil {
			comp.Labels = map[string]string{}
		}
		comp.Labels[OAMApplicationLabel] = app.Name
		comp.SetGroupVersionKind(v1alpha2.ComponentGroupVersionKind)
		components = append(components, comp)
		appconfig.Spec.Components = append(appconfig.Spec.Components, *acComp)
	}
	return appconfig, components, nil
}
// evalWorkloadWithContext evaluate the workload's template to generate component and ACComponent.
// The context's primary output becomes the Component's workload object;
// every auxiliary output becomes a trait on the AC component.
func evalWorkloadWithContext(pCtx process.Context, wl *Workload) (*v1alpha2.Component, *v1alpha2.ApplicationConfigurationComponent, error) {
	base, assists := pCtx.Output()
	componentWorkload, err := base.Unstructured()
	if err != nil {
		return nil, nil, err
	}
	// Label the rendered workload with its type for later lookup.
	workloadType := wl.Type
	labels := componentWorkload.GetLabels()
	if labels == nil {
		labels = map[string]string{oam.WorkloadTypeLabel: workloadType}
	} else {
		labels[oam.WorkloadTypeLabel] = workloadType
	}
	componentWorkload.SetLabels(labels)
	component := &v1alpha2.Component{}
	component.Spec.Workload.Object = componentWorkload
	acComponent := &v1alpha2.ApplicationConfigurationComponent{}
	acComponent.Traits = []v1alpha2.ComponentTrait{}
	for _, assist := range assists {
		tr, err := assist.Ins.Unstructured()
		if err != nil {
			return nil, nil, err
		}
		// NOTE(review): SetLabels replaces any labels the rendered trait
		// already carried with just the trait-type label — confirm intended.
		tr.SetLabels(map[string]string{oam.TraitTypeLabel: assist.Type})
		acComponent.Traits = append(acComponent.Traits, v1alpha2.ComponentTrait{
			Trait: runtime.RawExtension{
				Object: tr,
			},
		})
	}
	return component, acComponent, nil
}
|
//go:generate mockgen -destination=./mock/timer_mock.go github.com/nomkhonwaan/myblog/pkg/log Timer
package log
import "time"
// Timer is a compatible interface for retrieving current system date-time.
type Timer interface {
	// Now returns the current system date-time.
	Now() time.Time
}

// DefaultTimer implements Timer interface which returns current system
// date-time from `time.Now()` function.
// NOTE(review): Now overwrites the receiver on every call, so the stored
// value is only the last observed time — confirm this mutation is wanted
// rather than a stateless implementation.
type DefaultTimer time.Time

func (timer *DefaultTimer) Now() time.Time {
	*timer = DefaultTimer(time.Now())
	return time.Time(*timer)
}

// NewDefaultTimer returns new `DefaultTimer` pointer.
func NewDefaultTimer() *DefaultTimer {
	return new(DefaultTimer)
}
|
package dushengchen
// SliceStack is a LIFO stack of ints backed by a slice. Slots vacated by
// Pop are kept and reused by later pushes to avoid reallocation.
type SliceStack struct {
	top int   // index of the current top element; -1 when empty
	raw []int // backing storage; len may exceed top+1 after pops
}

// NewSliceStack returns an empty stack preallocated to hold at least
// capacity elements; non-positive capacities fall back to a default of 10.
// (Parameter renamed from `cap`, which shadowed the builtin function.)
func NewSliceStack(capacity int) *SliceStack {
	if capacity <= 0 {
		capacity = 10
	}
	return &SliceStack{top: -1, raw: make([]int, 0, capacity)}
}

// Push places element on top of the stack, reusing a previously popped slot
// when one is available.
func (s *SliceStack) Push(element int) {
	s.top++
	if s.top < len(s.raw) {
		s.raw[s.top] = element
	} else {
		s.raw = append(s.raw, element)
	}
}

// GetTop returns the top element without removing it; ok is false when the
// stack is empty.
func (s *SliceStack) GetTop() (element int, ok bool) {
	if s.top < 0 {
		return 0, false
	}
	return s.raw[s.top], true
}

// Pop removes and returns the top element; ok is false when the stack is
// empty.
func (s *SliceStack) Pop() (element int, ok bool) {
	if s.top < 0 {
		return 0, false
	}
	element = s.raw[s.top]
	s.top--
	ok = true
	return
}
|
package main
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"path"
"runtime"
"testing"
"github.com/mrled/caryatid/internal/util"
"github.com/mrled/caryatid/pkg/caryatid"
)
const integrationTestDirName = "integration_tests"
var (
_, thisfile, _, runtimeCallerOk = runtime.Caller(0)
thisdir, _ = path.Split(thisfile)
integrationTestDir = path.Join(thisdir, integrationTestDirName)
)
// TestMain creates the integration-test scratch directory, runs the suite,
// and removes the directory afterwards unless the -k flag is given.
func TestMain(m *testing.M) {
	var (
		err      error
		keepFlag = flag.Bool("k", false, fmt.Sprintf("Keep the %v directory after running integration tests", integrationTestDir))
	)
	// Have to check this here because we can't put logic outside of a function
	if !runtimeCallerOk {
		panic("Failed to detect thisdir using runtime.Caller()")
	}
	fmt.Printf("Detected running the test directory as '%v'\n", thisdir)
	err = os.MkdirAll(integrationTestDir, 0777)
	if err != nil {
		panic(fmt.Sprintf("Error trying to create test directory: %v\n", err))
	}
	testRv := m.Run()
	// os.Exit() doesn't respect defer, so we can't have defered the call to os.RemoveAll() at creation time
	if *keepFlag {
		// Typo fix in the user-facing message: "integraion" -> "integration".
		fmt.Printf("Will not remove integration test dir after tests complete\n%v\n", integrationTestDir)
	} else {
		os.RemoveAll(integrationTestDir)
	}
	os.Exit(testRv)
}
// TestShowAction writes a known catalog to disk and verifies that
// showAction renders it in the expected string form.
func TestShowAction(t *testing.T) {
	var (
		err         error
		result      string
		boxName     = "TestShowActionBox"
		boxDesc     = "TestShowActionBox Description"
		catalogPath = path.Join(integrationTestDir, fmt.Sprintf("%v.json", boxName))
		catalogUri  = fmt.Sprintf("file://%v", catalogPath)
	)
	catalog := caryatid.Catalog{
		boxName,
		boxDesc,
		[]caryatid.Version{
			caryatid.Version{
				"1.5.3",
				[]caryatid.Provider{
					caryatid.Provider{
						"test-provider",
						"test:///asdf/asdfqwer/something.box",
						"FakeChecksum",
						"0xDECAFBAD",
					},
				},
			},
		},
	}
	// showAction prints the catalog with %v, hence this exact literal shape
	// (including the trailing newline).
	expectedCatalogString := `{TestShowActionBox TestShowActionBox Description [{1.5.3 [{test-provider test:///asdf/asdfqwer/something.box FakeChecksum 0xDECAFBAD}]}]}
`
	jsonCatalog, err := json.MarshalIndent(catalog, "", " ")
	if err != nil {
		t.Fatalf("Error trying to marshal catalog: %v\n", err)
	}
	err = ioutil.WriteFile(catalogPath, jsonCatalog, 0666)
	if err != nil {
		t.Fatalf("Error trying to write catalog: %v\n", err)
	}
	result, err = showAction(catalogUri)
	if err != nil {
		t.Fatalf("showAction() error: %v\n", err)
	}
	if result != expectedCatalogString {
		t.Fatalf("showAction() result was\n%v\nBut we expected it to be\n%v\nSad times :(", result, expectedCatalogString)
	}
}
// TestCreateTestBoxAction checks that a test box file can be created in the
// integration-test directory without error.
func TestCreateTestBoxAction(t *testing.T) {
	var (
		err     error
		boxPath = path.Join(integrationTestDir, "TestCreateTestBoxAction.box")
	)
	err = createTestBoxAction(boxPath, "TestProvider")
	if err != nil {
		t.Fatalf("createTestBoxAction() failed with error: %v\n", err)
	}
}
// TestAddAction adds a box to an empty catalog and then a second version to
// the now non-empty catalog, checking name/description/provider/version
// round-trip each time.
func TestAddAction(t *testing.T) {
	// ExpectedMatch pairs a label with an actual (In) and expected (Out) value.
	type ExpectedMatch struct {
		Name string
		In   string
		Out  string
	}
	var (
		err             error
		catalogBytes    []byte
		catalog         caryatid.Catalog
		expectedMatches []ExpectedMatch
		boxPath         = path.Join(integrationTestDir, "incoming-TestAddAction.box")
		boxProvider     = "TestAddActionProvider"
		boxName         = "TestAddActionBox"
		boxDesc         = "TestAddActionBox is a test box"
		boxVersion      = "1.6.3"
		boxVersion2     = "2.0.1"
		catalogPath     = path.Join(integrationTestDir, fmt.Sprintf("%v.json", boxName))
		catalogUri      = fmt.Sprintf("file://%v", catalogPath)
	)
	if err = caryatid.CreateTestBoxFile(boxPath, boxProvider, true); err != nil {
		t.Fatalf("TestAddAction(): Error trying to create test box file: %v\n", err)
	}
	// Test adding to an empty catalog
	err = addAction(boxPath, boxName, boxDesc, boxVersion, catalogUri)
	if err != nil {
		t.Fatalf("addAction() failed with error: %v\n", err)
	}
	catalogBytes, err = ioutil.ReadFile(catalogPath)
	if err != nil {
		t.Fatalf("Could not read catalog we just created at '%v'\n", catalogPath)
	}
	if err = json.Unmarshal(catalogBytes, &catalog); err != nil {
		t.Fatalf("Error trying to marshal the catalog: %v\n", err)
	}
	expectedMatches = []ExpectedMatch{
		ExpectedMatch{"catalog name", catalog.Name, boxName},
		ExpectedMatch{"catalog description", catalog.Description, boxDesc},
		ExpectedMatch{"box provider", catalog.Versions[0].Providers[0].Name, boxProvider},
		ExpectedMatch{"box version", catalog.Versions[0].Version, boxVersion},
	}
	for _, match := range expectedMatches {
		if match.In != match.Out {
			t.Fatalf("Expected %v to match, but the expected value was %v while the actual value was %v", match.Name, match.In, match.Out)
		}
	}
	// Test adding another box to the same, now non-empty, catalog
	err = addAction(boxPath, boxName, boxDesc, boxVersion2, catalogUri)
	if err != nil {
		t.Fatalf("addAction() failed with error: %v\n", err)
	}
	catalogBytes, err = ioutil.ReadFile(catalogPath)
	if err != nil {
		t.Fatalf("Could not read catalog we just created at '%v'\n", catalogPath)
	}
	if err = json.Unmarshal(catalogBytes, &catalog); err != nil {
		t.Fatalf("Error trying to marshal the catalog: %v\n", err)
	}
	// The second add must land at Versions[1] while keeping the metadata.
	expectedMatches = []ExpectedMatch{
		ExpectedMatch{"catalog name", catalog.Name, boxName},
		ExpectedMatch{"catalog description", catalog.Description, boxDesc},
		ExpectedMatch{"box provider", catalog.Versions[1].Providers[0].Name, boxProvider},
		ExpectedMatch{"box version", catalog.Versions[1].Version, boxVersion2},
	}
	for _, match := range expectedMatches {
		if match.In != match.Out {
			t.Fatalf("Expected %v to match, but the expected value was %v while the actual value was %v", match.Name, match.In, match.Out)
		}
	}
}
// TestQueryAction seeds a single catalog with boxes from two providers across
// many versions, then checks queryAction against combinations of version and
// provider filters (empty filter = match everything).
func TestQueryAction(t *testing.T) {
	var (
		err          error
		result       caryatid.Catalog
		boxProvider1 = "StrongSapling"
		boxProvider2 = "FeebleFungus"
		boxPath1     = path.Join(integrationTestDir, "incoming-TestQueryActionBox-1.box")
		boxPath2     = path.Join(integrationTestDir, "incoming-TestQueryActionBox-2.box")
		boxVersions1 = []string{"0.3.5", "0.3.5-BETA", "1.0.0", "1.0.0-PRE", "1.4.5", "1.2.3", "1.2.4"}
		boxVersions2 = []string{"0.3.4", "0.3.5-BETA", "1.0.1", "2.0.0", "2.10.0", "2.11.1", "1.2.3"}
		boxName      = "TestQueryActionBox"
		boxDesc      = "TestQueryActionBox is a test box"
		catalogUri   = fmt.Sprintf("file://%v/%v.json", integrationTestDir, boxName)
		digestType   = "TestQueryActionDigestType"
		digest       = "0xB00B1E5"
	)
	// Set up manager
	manager, err := getManager(catalogUri)
	if err != nil {
		// Failing to get a manager must fail the test; the original code
		// silently returned here, letting the test pass vacuously.
		log.Printf("Error getting a BackendManager: %v", err)
		t.FailNow()
	}
	// Create the *input* boxes - that is, boxes that would come from packer-post-processor-vagrant
	if err = caryatid.CreateTestBoxFile(boxPath1, boxProvider1, true); err != nil {
		t.Fatalf("TestQueryAction(): Error trying to create test box file: %v\n", err)
	}
	if err = caryatid.CreateTestBoxFile(boxPath2, boxProvider2, true); err != nil {
		t.Fatalf("TestQueryAction(): Error trying to create test box file: %v\n", err)
	}
	// Now copy those boxes multiple times to the Catalog,
	// as if they were different versions each time
	for _, version := range boxVersions1 {
		if err = manager.AddBox(boxPath1, boxName, boxDesc, version, boxProvider1, digestType, digest); err != nil {
			t.Fatalf("Error adding box metadata to catalog: %v\n", err)
		}
	}
	for _, version := range boxVersions2 {
		if err = manager.AddBox(boxPath2, boxName, boxDesc, version, boxProvider2, digestType, digest); err != nil {
			t.Fatalf("Error adding box metadata to catalog: %v\n", err)
		}
	}
	// TestCase pairs a version/provider query with the catalog we expect
	// queryAction to return for it.
	type TestCase struct {
		VersionQuery   string
		ProviderQuery  string
		ExpectedResult caryatid.Catalog
	}
	testCases := []TestCase{
		TestCase{ // Expect all items in catalog
			"", "",
			caryatid.Catalog{boxName, boxDesc, []caryatid.Version{
				caryatid.Version{"0.3.5", []caryatid.Provider{
					caryatid.Provider{boxProvider1, "FAKEURI", digestType, digest},
				}},
				caryatid.Version{"0.3.5-BETA", []caryatid.Provider{
					caryatid.Provider{boxProvider1, "FAKEURI", digestType, digest},
					caryatid.Provider{boxProvider2, "FAKEURI", digestType, digest},
				}},
				caryatid.Version{"1.0.0", []caryatid.Provider{
					caryatid.Provider{boxProvider1, "FAKEURI", digestType, digest},
				}},
				caryatid.Version{"1.0.0-PRE", []caryatid.Provider{
					caryatid.Provider{boxProvider1, "FAKEURI", digestType, digest},
				}},
				caryatid.Version{"1.4.5", []caryatid.Provider{
					caryatid.Provider{boxProvider1, "FAKEURI", digestType, digest},
				}},
				caryatid.Version{"1.2.3", []caryatid.Provider{
					caryatid.Provider{boxProvider1, "FAKEURI", digestType, digest},
					caryatid.Provider{boxProvider2, "FAKEURI", digestType, digest},
				}},
				caryatid.Version{"1.2.4", []caryatid.Provider{
					caryatid.Provider{boxProvider1, "FAKEURI", digestType, digest},
				}},
				caryatid.Version{"0.3.4", []caryatid.Provider{
					caryatid.Provider{boxProvider2, "FAKEURI", digestType, digest},
				}},
				caryatid.Version{"1.0.1", []caryatid.Provider{
					caryatid.Provider{boxProvider2, "FAKEURI", digestType, digest},
				}},
				caryatid.Version{"2.0.0", []caryatid.Provider{
					caryatid.Provider{boxProvider2, "FAKEURI", digestType, digest},
				}},
				caryatid.Version{"2.10.0", []caryatid.Provider{
					caryatid.Provider{boxProvider2, "FAKEURI", digestType, digest},
				}},
				caryatid.Version{"2.11.1", []caryatid.Provider{
					caryatid.Provider{boxProvider2, "FAKEURI", digestType, digest},
				}},
			}},
		},
		TestCase{
			"", "rongSap",
			caryatid.Catalog{boxName, boxDesc, []caryatid.Version{
				caryatid.Version{"0.3.5", []caryatid.Provider{
					caryatid.Provider{boxProvider1, "FAKEURI", digestType, digest},
				}},
				caryatid.Version{"0.3.5-BETA", []caryatid.Provider{
					caryatid.Provider{boxProvider1, "FAKEURI", digestType, digest},
				}},
				caryatid.Version{"1.0.0", []caryatid.Provider{
					caryatid.Provider{boxProvider1, "FAKEURI", digestType, digest},
				}},
				caryatid.Version{"1.0.0-PRE", []caryatid.Provider{
					caryatid.Provider{boxProvider1, "FAKEURI", digestType, digest},
				}},
				caryatid.Version{"1.4.5", []caryatid.Provider{
					caryatid.Provider{boxProvider1, "FAKEURI", digestType, digest},
				}},
				caryatid.Version{"1.2.3", []caryatid.Provider{
					caryatid.Provider{boxProvider1, "FAKEURI", digestType, digest},
				}},
				caryatid.Version{"1.2.4", []caryatid.Provider{
					caryatid.Provider{boxProvider1, "FAKEURI", digestType, digest},
				}},
			}},
		},
		TestCase{
			"<1", "",
			caryatid.Catalog{boxName, boxDesc, []caryatid.Version{
				caryatid.Version{"0.3.5", []caryatid.Provider{
					caryatid.Provider{boxProvider1, "FAKEURI", digestType, digest},
				}},
				caryatid.Version{"0.3.5-BETA", []caryatid.Provider{
					caryatid.Provider{boxProvider1, "FAKEURI", digestType, digest},
					caryatid.Provider{boxProvider2, "FAKEURI", digestType, digest},
				}},
				caryatid.Version{"0.3.4", []caryatid.Provider{
					caryatid.Provider{boxProvider2, "FAKEURI", digestType, digest},
				}},
			}},
		},
		TestCase{
			"<1", ".*rongSap.*",
			caryatid.Catalog{boxName, boxDesc, []caryatid.Version{
				caryatid.Version{"0.3.5", []caryatid.Provider{
					caryatid.Provider{boxProvider1, "FAKEURI", digestType, digest},
				}},
				caryatid.Version{"0.3.5-BETA", []caryatid.Provider{
					caryatid.Provider{boxProvider1, "FAKEURI", digestType, digest},
				}},
			}},
		},
	}
	// Provider URLs depend on the test directory, so compare fuzzily.
	fuzzyEqualsParams := caryatid.CatalogFuzzyEqualsParams{SkipProviderUrl: true}
	for _, tc := range testCases {
		result, err = queryAction(catalogUri, tc.VersionQuery, tc.ProviderQuery)
		if err != nil {
			t.Fatalf("queryAction(*, *, '%v', '%v') returned an unexpected error: %v\n", tc.VersionQuery, tc.ProviderQuery, err)
		} else if !result.FuzzyEquals(&tc.ExpectedResult, fuzzyEqualsParams) {
			t.Fatalf(
				"queryAction(*, *, '%v', '%v') returned result:\n%v\nBut we expected:\n%v\n",
				tc.VersionQuery, tc.ProviderQuery, result.DisplayString(), tc.ExpectedResult.DisplayString())
		}
	}
}
// TestDeleteAction builds a fresh catalog per test case (each in its own
// directory), runs deleteAction with a version/provider query, and verifies
// both the surviving catalog contents and which box files remain on disk.
func TestDeleteAction(t *testing.T) {
	var (
		err          error
		result       caryatid.Catalog
		boxProvider1 = "StrongSapling"
		boxProvider2 = "FeebleFungus"
		boxPath1     = path.Join(integrationTestDir, "incoming-TestDeleteActionBox-1.box")
		boxPath2     = path.Join(integrationTestDir, "incoming-TestDeleteActionBox-2.box")
		boxVersions1 = []string{"0.3.5", "0.3.5-BETA", "1.0.0"}
		boxVersions2 = []string{"0.3.4", "0.3.5-BETA", "1.0.1"}
		boxName      = "TestDeleteActionBox"
		boxDesc      = "this is a test box"
		digestType   = "TDABDType"
		digest       = "0xB00B1E5"
	)
	// Create the *input* boxes - that is, boxes that would come from packer-post-processor-vagrant
	if err = caryatid.CreateTestBoxFile(boxPath1, boxProvider1, true); err != nil {
		t.Fatalf("TestDeleteAction(): Error trying to create test box file: %v\n", err)
	}
	if err = caryatid.CreateTestBoxFile(boxPath2, boxProvider2, true); err != nil {
		t.Fatalf("TestDeleteAction(): Error trying to create test box file: %v\n", err)
	}
	// ExpectedFile records whether a named box file should survive deletion.
	type ExpectedFile struct {
		Name   string
		Exists bool
	}
	type TestCase struct {
		VersionQuery   string
		ProviderQuery  string
		ExpectedResult caryatid.Catalog
		ExpectedFiles  []ExpectedFile
	}
	// NOTE: boxVersions1/boxVersions2 were declared above; the original code
	// redundantly re-assigned the same values here.
	testCases := []TestCase{
		TestCase{
			"", "",
			caryatid.Catalog{Name: boxName, Description: boxDesc, Versions: []caryatid.Version{}},
			[]ExpectedFile{
				ExpectedFile{Name: fmt.Sprintf("%v_%v_%v.box", boxName, "0.3.5", boxProvider1), Exists: false},
				ExpectedFile{Name: fmt.Sprintf("%v_%v_%v.box", boxName, "0.3.5-BETA", boxProvider1), Exists: false},
				ExpectedFile{Name: fmt.Sprintf("%v_%v_%v.box", boxName, "1.0.0", boxProvider1), Exists: false},
				ExpectedFile{Name: fmt.Sprintf("%v_%v_%v.box", boxName, "0.3.4", boxProvider2), Exists: false},
				ExpectedFile{Name: fmt.Sprintf("%v_%v_%v.box", boxName, "0.3.5-BETA", boxProvider2), Exists: false},
				ExpectedFile{Name: fmt.Sprintf("%v_%v_%v.box", boxName, "1.0.1", boxProvider2), Exists: false},
			},
		},
		TestCase{
			"", "rongSap",
			caryatid.Catalog{Name: boxName, Description: boxDesc, Versions: []caryatid.Version{
				caryatid.Version{Version: "0.3.5-BETA", Providers: []caryatid.Provider{
					caryatid.Provider{Name: boxProvider2, Url: "FAKEURI", ChecksumType: digestType, Checksum: digest},
				}},
				caryatid.Version{Version: "0.3.4", Providers: []caryatid.Provider{
					caryatid.Provider{Name: boxProvider2, Url: "FAKEURI", ChecksumType: digestType, Checksum: digest},
				}},
				caryatid.Version{Version: "1.0.1", Providers: []caryatid.Provider{
					caryatid.Provider{Name: boxProvider2, Url: "FAKEURI", ChecksumType: digestType, Checksum: digest},
				}},
			}},
			[]ExpectedFile{
				ExpectedFile{Name: fmt.Sprintf("%v_%v_%v.box", boxName, "0.3.5", boxProvider1), Exists: false},
				ExpectedFile{Name: fmt.Sprintf("%v_%v_%v.box", boxName, "0.3.5-BETA", boxProvider1), Exists: false},
				ExpectedFile{Name: fmt.Sprintf("%v_%v_%v.box", boxName, "1.0.0", boxProvider1), Exists: false},
				ExpectedFile{Name: fmt.Sprintf("%v_%v_%v.box", boxName, "0.3.4", boxProvider2), Exists: true},
				ExpectedFile{Name: fmt.Sprintf("%v_%v_%v.box", boxName, "0.3.5-BETA", boxProvider2), Exists: true},
				ExpectedFile{Name: fmt.Sprintf("%v_%v_%v.box", boxName, "1.0.1", boxProvider2), Exists: true},
			},
		},
		TestCase{
			"<1", "",
			caryatid.Catalog{Name: boxName, Description: boxDesc, Versions: []caryatid.Version{
				caryatid.Version{Version: "1.0.0", Providers: []caryatid.Provider{
					caryatid.Provider{Name: boxProvider1, Url: "FAKEURI", ChecksumType: digestType, Checksum: digest},
				}},
				caryatid.Version{Version: "1.0.1", Providers: []caryatid.Provider{
					caryatid.Provider{Name: boxProvider2, Url: "FAKEURI", ChecksumType: digestType, Checksum: digest},
				}},
			}},
			[]ExpectedFile{
				ExpectedFile{Name: fmt.Sprintf("%v_%v_%v.box", boxName, "0.3.5", boxProvider1), Exists: false},
				ExpectedFile{Name: fmt.Sprintf("%v_%v_%v.box", boxName, "0.3.5-BETA", boxProvider1), Exists: false},
				ExpectedFile{Name: fmt.Sprintf("%v_%v_%v.box", boxName, "1.0.0", boxProvider1), Exists: true},
				ExpectedFile{Name: fmt.Sprintf("%v_%v_%v.box", boxName, "0.3.4", boxProvider2), Exists: false},
				ExpectedFile{Name: fmt.Sprintf("%v_%v_%v.box", boxName, "0.3.5-BETA", boxProvider2), Exists: false},
				ExpectedFile{Name: fmt.Sprintf("%v_%v_%v.box", boxName, "1.0.1", boxProvider2), Exists: true},
			},
		},
		TestCase{
			"<1", ".*rongSap.*",
			caryatid.Catalog{Name: boxName, Description: boxDesc, Versions: []caryatid.Version{
				caryatid.Version{Version: "0.3.5-BETA", Providers: []caryatid.Provider{
					caryatid.Provider{Name: boxProvider2, Url: "FAKEURI", ChecksumType: digestType, Checksum: digest},
				}},
				caryatid.Version{Version: "1.0.0", Providers: []caryatid.Provider{
					caryatid.Provider{Name: boxProvider1, Url: "FAKEURI", ChecksumType: digestType, Checksum: digest},
				}},
				caryatid.Version{Version: "0.3.4", Providers: []caryatid.Provider{
					caryatid.Provider{Name: boxProvider2, Url: "FAKEURI", ChecksumType: digestType, Checksum: digest},
				}},
				caryatid.Version{Version: "1.0.1", Providers: []caryatid.Provider{
					caryatid.Provider{Name: boxProvider2, Url: "FAKEURI", ChecksumType: digestType, Checksum: digest},
				}},
			}},
			[]ExpectedFile{
				ExpectedFile{Name: fmt.Sprintf("%v_%v_%v.box", boxName, "0.3.5", boxProvider1), Exists: false},
				ExpectedFile{Name: fmt.Sprintf("%v_%v_%v.box", boxName, "0.3.5-BETA", boxProvider1), Exists: false},
				ExpectedFile{Name: fmt.Sprintf("%v_%v_%v.box", boxName, "1.0.0", boxProvider1), Exists: true},
				ExpectedFile{Name: fmt.Sprintf("%v_%v_%v.box", boxName, "0.3.4", boxProvider2), Exists: true},
				ExpectedFile{Name: fmt.Sprintf("%v_%v_%v.box", boxName, "0.3.5-BETA", boxProvider2), Exists: true},
				ExpectedFile{Name: fmt.Sprintf("%v_%v_%v.box", boxName, "1.0.1", boxProvider2), Exists: true},
			},
		},
	}
	for idx, tc := range testCases {
		catalogRootPath := fmt.Sprintf("%v/%v_%v", integrationTestDir, boxName, idx)
		if err = os.MkdirAll(catalogRootPath, 0700); err != nil {
			t.Fatalf("Error creating catalogRootPath: %v\n", err)
		}
		catalogUri := fmt.Sprintf("file://%v/%v.json", catalogRootPath, boxName)
		// Set up manager. Do this separately each time so we can use a different catalogUri
		manager, err := getManager(catalogUri)
		if err != nil {
			t.Fatalf("Error getting a BackendManager: %v\n", err)
		}
		// Now copy those boxes multiple times to the Catalog,
		// as if they were different versions each time
		for _, version := range boxVersions1 {
			if err = manager.AddBox(boxPath1, boxName, boxDesc, version, boxProvider1, digestType, digest); err != nil {
				t.Fatalf("Error adding box metadata to catalog: %v\n", err)
			}
		}
		for _, version := range boxVersions2 {
			if err = manager.AddBox(boxPath2, boxName, boxDesc, version, boxProvider2, digestType, digest); err != nil {
				t.Fatalf("Error adding box metadata to catalog: %v\n", err)
			}
		}
		if err = deleteAction(catalogUri, tc.VersionQuery, tc.ProviderQuery); err != nil {
			t.Fatalf("deleteAction(*, *, '%v', '%v') returned an unexpected error: %v\n", tc.VersionQuery, tc.ProviderQuery, err)
		}
		fuzzyEqualsParams := caryatid.CatalogFuzzyEqualsParams{SkipProviderUrl: true, LogMismatch: true}
		if result, err = queryAction(catalogUri, "", ""); err != nil {
			t.Fatalf("queryAction(*, *, '%v', '%v') returned an unexpected error: %v\n", tc.VersionQuery, tc.ProviderQuery, err)
		} else if !result.FuzzyEquals(&tc.ExpectedResult, fuzzyEqualsParams) {
			t.Fatalf(
				"queryAction(*, *, '%v', '%v') returned result:\n%v\nBut we expected:\n%v\n",
				tc.VersionQuery, tc.ProviderQuery, result.DisplayString(), tc.ExpectedResult.DisplayString())
		}
		for _, ef := range tc.ExpectedFiles {
			existStr := "exist"
			if !ef.Exists {
				existStr = "not exist"
			}
			efPath := path.Join(catalogRootPath, boxName, ef.Name)
			if util.PathExists(efPath) != ef.Exists {
				t.Fatalf("Expected path '%v' to %v, but found the reverse\n", efPath, existStr)
			}
		}
	}
}
|
package graph
import (
"bufio"
"errors"
"os"
"strconv"
"strings"
)
func LoadGraphFromFile(graphtype, filename string) interface{} {
// ioutil.ReadFile() O_RDONLY
path := "G:\\Code\\goAlgorithms\\src\\algorithms\\graph\\" + filename
file, err := os.OpenFile(path, os.O_RDWR|os.O_APPEND, 0666)
if err != nil {
panic(err)
}
defer file.Close()
var line []byte
reader := bufio.NewReader(file)
line, err = reader.ReadBytes('\n')
s := strings.Split(string(line), " ")
vertexs, _ := strconv.Atoi(s[0])
edges, _ := strconv.Atoi(strings.Replace(s[1], "\r\n", "", -1))
var graph1 *AdjacencyMatrix
var graph2 *AdjacencyList
if graphtype == "DenseGraph" {
graph1 = NewDenseGraph(vertexs, false)
for i := 0; i < edges; i++ {
line, err = reader.ReadBytes('\n')
s := strings.Split(string(line), " ")
v1, _ := strconv.Atoi(s[0])
v2, _ := strconv.Atoi(strings.Replace(s[1], "\r\n", "", -1))
graph1.AddEdge(v1, v2)
}
} else if graphtype == "SparseGraph" {
graph2 = NewSparseGraph(vertexs, false)
for i := 0; i < edges; i++ {
line, err = reader.ReadBytes('\n')
s := strings.Split(string(line), " ")
v1, _ := strconv.Atoi(s[0])
v2, _ := strconv.Atoi(strings.Replace(s[1], "\r\n", "", -1))
graph2.AddEdge(v1, v2)
}
} else {
return errors.New("wrong graph type!")
}
if graphtype == "DenseGraph" {
return graph1
}
return graph2
}
|
/*
You have three stacks of cylinders where each cylinder has the same diameter, but they may vary in height. You can change the height of a stack by removing and discarding its topmost cylinder any number of times.
Find the maximum possible height of the stacks such that all of the stacks are exactly the same height. This means you must remove zero or more cylinders from the top of zero or more of the three stacks until they are all the same height, then return the height.
Example
h1 = [1, 2, 1, 1]
h2 = [1, 1, 2]
h3 = [1, 1]
There are 4, 3 and 2 cylinders in the three stacks, with their heights in the three arrays. Remove the top 2 cylinders from h1 (heights = [1, 2]) and from h2 (heights = [1, 1]) so that the three stacks all are 2 units tall. Return 2 as the answer.
Note: An empty stack is still a stack.
Function Description
Complete the equalStacks function in the editor below.
equalStacks has the following parameters:
int h1[n1]: the first array of heights
int h2[n2]: the second array of heights
int h3[n3]: the third array of heights
Returns
int: the height of the stacks when they are equalized
Input Format
The first line contains three space-separated integers, n1, n2, and n3, the numbers of cylinders in stacks 1, 2, and 3. The subsequent lines describe the respective heights of each cylinder in a stack from top to bottom:
The second line contains n1 space-separated integers, the cylinder heights in stack 1. The first element is the top cylinder of the stack.
The third line contains n2 space-separated integers, the cylinder heights in stack 2. The first element is the top cylinder of the stack.
The fourth line contains n3 space-separated integers, the cylinder heights in stack 3. The first element is the top cylinder of the stack.
Constraints
0 < n1, n2, n3 <= 10^5
0 < height of any cylinder <= 100
Sample Input
STDIN Function
----- --------
5 3 4 h1[] size n1 = 5, h2[] size n2 = 3, h3[] size n3 = 4
3 2 1 1 1 h1 = [3, 2, 1, 1, 1]
4 3 2 h2 = [4, 3, 2]
1 1 4 1 h3 = [1, 1, 4, 1]
Sample Output
5
*/
package main
// main runs sanity checks for equalstacks against known cases from the
// problem statement.
func main() {
	cases := []struct {
		h1, h2, h3 []int
		want       int
	}{
		{[]int{1, 2, 1, 1}, []int{1, 1, 2}, []int{1, 1}, 2},
		{[]int{3, 2, 1, 1, 1}, []int{4, 3, 2}, []int{1, 1, 4, 1}, 5},
		{[]int{3}, []int{2}, []int{1}, 0},
	}
	for _, c := range cases {
		assert(equalstacks(c.h1, c.h2, c.h3) == c.want)
	}
}
// assert panics when its condition is false; used as a minimal test helper.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// equalstacks returns the greatest common height the three stacks can share
// after repeatedly discarding the top cylinder of whichever stack is tallest.
// The first element of each slice is the top of that stack. -1 is returned
// only in the degenerate case where the tallest stack has no cylinders left
// while heights still differ (impossible for strictly positive heights).
func equalstacks(h1, h2, h3 []int) int {
	stacks := [3][]int{h1, h2, h3}
	totals := [3]int{sum(h1), sum(h2), sum(h3)}
	for !(totals[0] == totals[1] && totals[1] == totals[2]) {
		// Choose the tallest stack, preferring index 2, then 1, then 0 on
		// ties — this mirrors the original switch's case order.
		pick := 0
		if totals[2] >= totals[1] && totals[2] >= totals[0] {
			pick = 2
		} else if totals[1] >= totals[2] && totals[1] >= totals[0] {
			pick = 1
		}
		top := stacks[pick]
		if len(top) == 0 {
			return -1
		}
		totals[pick] -= top[0]
		stacks[pick] = top[1:]
	}
	return totals[0]
}
// sum returns the total of all elements of a; an empty or nil slice sums to 0.
func sum(a []int) (total int) {
	for i := range a {
		total += a[i]
	}
	return
}
|
// +build appengine

// Package tokbox: App Engine-specific HTTP client selection. The blank line
// above the package clause is required — without it the "+build" line is
// parsed as a package comment and the build constraint is silently ignored.
package tokbox

import (
	"net/http"

	"golang.org/x/net/context"
	"google.golang.org/appengine/urlfetch"
)

// client returns the HTTP client to use for the given context: the App
// Engine urlfetch client when a context is available, or a zero-value
// net/http client when ctx is nil.
func client(ctx context.Context) *http.Client {
	if ctx == nil {
		return &http.Client{}
	}
	return urlfetch.Client(ctx)
}
|
package requests
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"github.com/iamtraining/go-github-issue-tool/editor"
"github.com/iamtraining/go-github-issue-tool/entity"
)
// URL templates for the GitHub issues REST API: repoUser lists/creates the
// issues of an owner/repo pair; issueNum addresses a single issue by number.
const (
	repoUser = "https://api.github.com/repos/%s/%s/issues"
	issueNum = repoUser + "/%s"
)
// sendRequest marshals issue (which may be nil) as JSON, sends it to url with
// the given HTTP method and OAuth token, and decodes the JSON response back
// into issue, returning the decoded value. Any failing step yields a wrapped
// error and a nil issue.
func sendRequest(oauth, method, url string, issue *entity.Issue) (*entity.Issue, error) {
	marshal, err := json.Marshal(issue)
	if err != nil {
		return nil, fmt.Errorf("failure while: %w", err)
	}
	req, err := http.NewRequest(method, url, bytes.NewBuffer(marshal))
	// Check the error before touching req: on failure req is nil, and the
	// original ordering (Header.Add first) would panic.
	if err != nil {
		return nil, fmt.Errorf("creating request failure: %w", err)
	}
	req.Header.Add("Accept", "application/vnd.github.v3+json")
	req.Header.Add("Authorization", fmt.Sprintf("token %s", oauth))
	client := http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("error while sending request %w", err)
	}
	defer resp.Body.Close()
	if err = json.NewDecoder(resp.Body).Decode(&issue); err != nil {
		return nil, fmt.Errorf("decode failure: %w", err)
	}
	return issue, nil
}
// Create opens the interactive editor to compose a new issue and POSTs it to
// the user/repo issues endpoint, printing the created issue or the error.
func Create(oauth, user, repo string) {
	issue, err := editor.CreateIssue()
	if err != nil {
		fmt.Println(err)
		return // nothing to send without an issue
	}
	result, err := sendRequest(oauth, "POST", fmt.Sprintf(repoUser, user, repo), issue)
	if err != nil {
		fmt.Println(err)
		return // result is nil on error; calling String() would panic
	}
	fmt.Println(result.String())
}
// Read fetches a single issue by number from the user/repo and prints it.
func Read(oauth, user, repo, number string) {
	result, err := sendRequest(oauth, "GET", fmt.Sprintf(issueNum, user, repo, number), nil)
	if err != nil {
		fmt.Println(err)
		return // result is nil on error; calling String() would panic
	}
	fmt.Println(result.String())
}
func Update(oauth, user, repo, number string) {
result, err := sendRequest(oauth, "GET", fmt.Sprintf(issueNum, user, repo, number), nil)
if err != nil {
fmt.Println(err)
}
issue, err := editor.EditIssue(result)
result, err = sendRequest(oauth, "PATCH", fmt.Sprintf(issueNum, user, repo, number), issue)
if err != nil {
fmt.Println(err)
}
fmt.Println(result.String())
}
// UpdateState PATCHes an issue's state to "open" or "closed" and prints the
// updated issue. Any other state is ignored.
func UpdateState(oauth, user, repo, number, state string) {
	switch state {
	case "closed", "open":
		issue := entity.Issue{
			State: state,
		}
		result, err := sendRequest(oauth, "PATCH", fmt.Sprintf(issueNum, user, repo, number), &issue)
		if err != nil {
			fmt.Println(err)
			return // result is nil on error; calling String() would panic
		}
		fmt.Println(result.String())
	default:
		return
	}
}
|
package main
// isSamePosition reports whether head and tail occupy identical coordinates.
func isSamePosition(head *Coordinates, tail *Coordinates) bool {
	if head.X != tail.X {
		return false
	}
	return head.Y == tail.Y
}
// isSameRow reports whether head and tail share the same Y coordinate.
func isSameRow(head *Coordinates, tail *Coordinates) bool {
	return tail.Y == head.Y
}
// isRightEdge reports whether head sits at (or beyond) the grid's maximum X.
func isRightEdge(grid *[][]int, head *Coordinates) bool {
	maxX := len(*grid) - 1
	return head.X >= maxX
}
// isLeftEdge reports whether head sits on the grid's minimum X column.
// The grid argument is unused but kept for signature symmetry with the
// other edge checks.
func isLeftEdge(grid *[][]int, head *Coordinates) bool {
	return 0 == head.X
}
// isTopEdge reports whether head sits at (or beyond) the grid's maximum Y —
// in this coordinate system "top" is the highest Y, not Y == 0.
func isTopEdge(grid *[][]int, head *Coordinates) bool {
	maxY := len(*grid) - 1
	return head.Y >= maxY
}
// isBottomEdge reports whether head sits on row Y == 0. The grid argument is
// unused but kept for signature symmetry with the other edge checks.
func isBottomEdge(grid *[][]int, head *Coordinates) bool {
	return 0 == head.Y
}
// isAdjacent reports whether head and tail are within one step of each other
// on both axes (including the same cell and diagonal neighbours).
func isAdjacent(head *Coordinates, tail *Coordinates) bool {
	dx, dy := head.X-tail.X, head.Y-tail.Y
	return isLessThanEqualOne(dx) && isLessThanEqualOne(dy)
}
// isLessThanEqualOne reports whether value lies in [-1, 1], i.e. its absolute
// value is at most one.
func isLessThanEqualOne(value int) bool {
	return -1 <= value && value <= 1
}
// isAdjacentVertical reports whether head and tail are within one step of
// each other on the Y axis.
func isAdjacentVertical(head *Coordinates, tail *Coordinates) bool {
	dy := head.Y - tail.Y
	return isLessThanEqualOne(dy)
}
// isAdjacentHorizontal reports whether head and tail are within one step of
// each other on the X axis.
func isAdjacentHorizontal(head *Coordinates, tail *Coordinates) bool {
	// Bug fix: the X difference must compare X to X; the original compared
	// head.X against tail.Y (contrast isAdjacentVertical, which uses Y - Y).
	return isLessThanEqualOne(head.X - tail.X)
}
// isAdjacentAngular reports whether head and tail are adjacent but neither
// horizontally nor vertically so — i.e. diagonal adjacency only.
func isAdjacentAngular(head *Coordinates, tail *Coordinates) bool {
	if !isAdjacent(head, tail) {
		return false
	}
	if isAdjacentHorizontal(head, tail) {
		return false
	}
	return !isAdjacentVertical(head, tail)
}
|
package main
import (
"demo/grpc_test/cmd/server/service"
"demo/grpc_test/proto/chat"
"demo/grpc_test/proto/helloworld"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
"log"
"net"
)
// port is the TCP listen address for the gRPC server (all interfaces).
const (
	port = ":50051"
)
// main listens on port, registers the chat, greeter and math gRPC services
// plus server reflection, and blocks serving requests until failure.
func main() {
	lis, err := net.Listen("tcp", port)
	if err != nil {
		log.Panic(err)
	}
	server := grpc.NewServer()
	chat.RegisterChatServer(server, &service.ChatStreamer{})
	helloworld.RegisterGreeterServer(server, &service.GreeterServer{})
	helloworld.RegisterMathServiceServer(server, &service.MathServer{})
	// Reflection lets tools such as grpcurl discover the services.
	reflection.Register(server)
	if err = server.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}
|
package 字符串
// firstUniqChar returns the first character of s that occurs exactly once,
// or ' ' when no such character exists. The result is truncated to a byte.
func firstUniqChar(s string) byte {
	storage := NewCharStorage()
	first := storage.GetFirstUniqueChar(s)
	return byte(first)
}
// --------- CharStorage ---------

// CharStorage counts character occurrences in a sample string in order to
// find the first character that appears exactly once.
type CharStorage struct {
	// charAppearingTimes maps each rune of the sample to its occurrence count.
	charAppearingTimes map[rune]int
	// returnCharWhenUniqueCharDoesExist is the fallback rune returned when
	// the sample contains NO unique character (the field name reads as the
	// opposite of what the code does).
	returnCharWhenUniqueCharDoesExist rune
	// sample is the string currently under inspection.
	sample string
}
// NewCharStorage builds a CharStorage whose fallback character is a space.
func NewCharStorage() *CharStorage {
	storage := CharStorage{
		returnCharWhenUniqueCharDoesExist: ' ',
	}
	return &storage
}
// GetFirstUniqueChar records s as the sample, tallies its characters, and
// returns the first one that occurs exactly once (or the fallback rune when
// none does).
func (sv *CharStorage) GetFirstUniqueChar(s string) rune {
	sv.sample = s
	sv.formCharAppearingTimes()
	return sv.findOutFirstUniqueChar()
}
// formCharAppearingTimes rebuilds the rune-occurrence map from the current
// sample, replacing any counts from a previous sample.
func (sv *CharStorage) formCharAppearingTimes() {
	counts := make(map[rune]int)
	for _, r := range sv.sample {
		counts[r]++
	}
	sv.charAppearingTimes = counts
}
// findOutFirstUniqueChar walks the sample in order and returns the first
// rune whose occurrence count is exactly one, falling back to the configured
// rune when none qualifies.
func (sv *CharStorage) findOutFirstUniqueChar() rune {
	for _, candidate := range sv.sample {
		if !sv.isCharUnique(candidate) {
			continue
		}
		return candidate
	}
	return sv.returnCharWhenUniqueCharDoesExist
}
// isCharUnique reports whether char occurs exactly once in the sample,
// according to the previously built occurrence map.
func (sv *CharStorage) isCharUnique(char rune) bool {
	return sv.charAppearingTimes[char] == 1
}
/*
Problem link: https://leetcode-cn.com/problemset/all/
Summary (translated from the original Chinese notes):
1. Personally I feel this code did not really need refactoring, since the
   original was already very short... after refactoring, responsibilities are
   clearer, but the code grew roughly four-fold.
2. The refactoring did make the code more reusable.
3. When the functionality is this small, the case for refactoring is weak.
   (For example, naming-wise, CharStorage is not quite accurate; a better name
   would be StringHandler, with the relationship being that a StringHandler
   contains a CharStorage.)
*/
|
// Copyright 2021 PingCAP, Inc. Licensed under Apache-2.0.
package export
import (
"fmt"
"testing"
"github.com/pingcap/tidb/br/pkg/version"
"github.com/stretchr/testify/require"
)
// TestRepeatableRead checks needRepeatableRead for every relevant combination
// of server type and consistency mode.
func TestRepeatableRead(t *testing.T) {
	cases := []struct {
		serverTp             version.ServerType
		consistency          string
		expectRepeatableRead bool
	}{
		{version.ServerTypeUnknown, ConsistencyTypeNone, true},
		{version.ServerTypeMySQL, ConsistencyTypeFlush, true},
		{version.ServerTypeMariaDB, ConsistencyTypeLock, true},
		{version.ServerTypeTiDB, ConsistencyTypeNone, true},
		{version.ServerTypeTiDB, ConsistencyTypeSnapshot, false},
		{version.ServerTypeTiDB, ConsistencyTypeLock, true},
	}
	for tag, c := range cases {
		comment := fmt.Sprintf("test case number: %d", tag)
		rr := needRepeatableRead(c.serverTp, c.consistency)
		require.True(t, rr == c.expectRepeatableRead, comment)
	}
}
// TestInfiniteChan pushes 10000 integers through infiniteChan and verifies
// they are delivered in order on the output side.
func TestInfiniteChan(t *testing.T) {
	const count = 10000
	in, out := infiniteChan[int]()
	go func() {
		for v := 0; v < count; v++ {
			in <- v
		}
	}()
	for want := 0; want < count; want++ {
		got := <-out
		require.Equal(t, want, got)
	}
	close(in)
}
|
// We often want to execute Go code at some point in the
// future, or repeatedly at some interval. Go's built-in
// _timer_ and _ticker_ features make both of these tasks
// easy. We'll look first at timers and then
// at [tickers](tickers).
package main
import "time"
import "fmt"
// main prints "SPAM" five times a second, and every time the (re-armed)
// three-second timer fires it prints a notice. The single timer is reused
// via Reset rather than allocated per interval. Runs forever.
func main() {
	armed := false
	timer := time.NewTimer(3 * time.Second)
	for {
		if !armed {
			timer.Reset(3 * time.Second)
			armed = true
		}
		// Non-blocking check: the default case keeps the loop spinning even
		// while the timer has not fired yet.
		select {
		case <-timer.C:
			fmt.Println("we waited 3 seconds")
			armed = false
		default:
		}
		fmt.Println("SPAM")
		time.Sleep(200 * time.Millisecond)
	}
}
|
package aggregate
import (
"time"
"github.com/XiaoMi/pegasus-go-client/idl/admin"
"github.com/XiaoMi/pegasus-go-client/idl/base"
)
// PartitionStats is a set of metrics retrieved from this partition.
type PartitionStats struct {
	// Gpid identifies the partition (app id + partition index).
	Gpid base.Gpid
	// Address of the replica node where this partition locates.
	Addr string
	// perfCounter's name -> the value.
	Stats map[string]float64
}
// TableStats has the aggregated metrics for this table.
type TableStats struct {
	TableName string
	AppID     int
	// Partitions maps partition index -> that partition's stats.
	Partitions map[int]*PartitionStats
	// the time when the stats was generated
	Timestamp time.Time
	// The aggregated value of table metrics.
	// perfCounter's name -> the value.
	Stats map[string]float64
}
// ClusterStats is the aggregated metrics for all the TableStats in this cluster.
// For example, 3 tables with "write_qps" [25, 70, 100] are summed up to
// `Stats: {"write_qps" : 195}`.
type ClusterStats struct {
	// Timestamp is when the cluster-wide aggregation was produced.
	Timestamp time.Time
	// perfCounter's name -> the summed value across tables.
	Stats map[string]float64
}
// newTableStats builds a TableStats for the given table, pre-creating one
// empty PartitionStats per partition and stamping the current time.
func newTableStats(info *admin.AppInfo) *TableStats {
	partitions := make(map[int]*PartitionStats)
	for i := 0; i < int(info.PartitionCount); i++ {
		partitions[i] = &PartitionStats{
			Gpid:  base.Gpid{Appid: int32(info.AppID), PartitionIndex: int32(i)},
			Stats: make(map[string]float64),
		}
	}
	return &TableStats{
		TableName:  info.AppName,
		AppID:      int(info.AppID),
		Partitions: partitions,
		Stats:      make(map[string]float64),
		Timestamp:  time.Now(),
	}
}
// aggregate refreshes Timestamp and sums every partition-level counter into
// the table-level Stats map (accumulating onto existing entries).
func (tb *TableStats) aggregate() {
	tb.Timestamp = time.Now()
	for _, partition := range tb.Partitions {
		for counter, v := range partition.Stats {
			tb.Stats[counter] += v
		}
	}
}
// aggregateCustomStats sums the values of the given element keys present in
// *stats and stores the total under resultName. Keys absent from the map
// contribute nothing to the sum.
func aggregateCustomStats(elements []string, stats *map[string]float64, resultName string) {
	m := *stats
	var total float64
	for _, key := range elements {
		// A missing key reads as 0, which is equivalent to the original
		// explicit found-check.
		total += m[key]
	}
	m[resultName] = total
}
// extendStats extends the stat map with read_qps/read_bytes/write_qps/
// write_bytes, each derived by summing the per-operation "_qps"/"_bytes"
// counters of the read and write operation sets.
func extendStats(stats *map[string]float64) {
	reads := []string{"get", "multi_get", "scan"}
	writes := []string{
		"put",
		"remove",
		"multi_put",
		"multi_remove",
		"check_and_set",
		"check_and_mutate",
	}
	// suffixed returns each operation name with the given suffix appended.
	suffixed := func(ops []string, suffix string) []string {
		keys := make([]string, 0, len(ops))
		for _, op := range ops {
			keys = append(keys, op+suffix)
		}
		return keys
	}
	aggregateCustomStats(suffixed(reads, "_qps"), stats, "read_qps")
	aggregateCustomStats(suffixed(reads, "_bytes"), stats, "read_bytes")
	aggregateCustomStats(suffixed(writes, "_qps"), stats, "write_qps")
	aggregateCustomStats(suffixed(writes, "_bytes"), stats, "write_bytes")
}
|
package conn
import (
"errors"
"fmt"
"github.com/ClarityServices/skynet2"
"github.com/ClarityServices/skynet2/log"
"github.com/ClarityServices/skynet2/rpc/bsonrpc"
"labix.org/v2/mgo/bson"
"net"
"net/rpc"
"time"
)
// TODO: Abstract out BSON logic into an interface that can be proviced for Encoding/Decoding data to a supplied interface
// this would allow developers to swap out the RPC logic, maybe implement our own ClientCodec/ServerCodec that have an additional WriteHandshake/ReadHandshake methods on each of them.
// bson for example we could create a custom type that is composed of our methods and the normal rpc codec
// Sentinel errors returned by connection setup and request dispatch.
// NOTE(review): Go convention would name these ErrHandshakeFailed etc., but
// they are exported and renaming would break callers.
var (
	HandshakeFailed     = errors.New("Handshake Failed")
	ServiceUnregistered = errors.New("Service is unregistered")
	ConnectionClosed    = errors.New("Connection is closed")
)
// serviceError wraps a plain message so it can be returned as an error from
// RPC calls.
type serviceError struct {
	msg string
}

// Error implements the error interface by returning the wrapped message.
func (se serviceError) Error() string {
	return se.msg
}
/*
Connection is the client-side contract for talking to a skynet service:
lifecycle control (Close/IsClosed), addressing, idle-timeout tuning, and
request dispatch with an optional per-request timeout.
*/
type Connection interface {
	SetIdleTimeout(timeout time.Duration)
	Addr() string
	Close()
	IsClosed() bool
	Send(ri *skynet.RequestInfo, fn string, in interface{}, out interface{}) (err error)
	SendTimeout(ri *skynet.RequestInfo, fn string, in interface{}, out interface{}, timeout time.Duration) (err error)
}
/*
Conn
Implementation of Connection backed by a net.Conn and a BSON-RPC client.
*/
type Conn struct {
	addr        string        // remote address, cached from conn.RemoteAddr()
	conn        net.Conn      // underlying transport
	clientID    string        // ID assigned by the service during the handshake
	serviceName string        // service this connection is bound to
	rpcClient   *rpc.Client   // set only after a successful handshake
	closed      bool          // set by Close(); checked before each send
	idleTimeout time.Duration // deadline applied between requests; 0 = none
}
/*
NewConnection establishes a new connection to the skynet service at addr,
dialing over the given network with the supplied timeout.
*/
func NewConnection(serviceName, network, addr string, timeout time.Duration) (conn Connection, err error) {
	var c net.Conn
	if c, err = net.DialTimeout(network, addr, timeout); err != nil {
		return
	}
	return NewConnectionFromNetConn(serviceName, c)
}
/*
client.NewConn() Establishes new connection to skynet service with existing net.Conn
This is beneficial if you want to communicate over a pipe.

NOTE(review): on handshake failure the partially-initialised *Conn is still
returned alongside the error; callers must check err before using conn.
*/
func NewConnectionFromNetConn(serviceName string, c net.Conn) (conn Connection, err error) {
	cn := &Conn{conn: c}
	cn.addr = c.RemoteAddr().String()
	cn.serviceName = serviceName
	err = cn.performHandshake()
	return cn, err
}
/*
Conn.Close() marks the connection closed and shuts down the transport.
*/
func (c *Conn) Close() {
	c.closed = true
	// rpcClient is only assigned at the very end of performHandshake, so a
	// connection whose handshake failed has a nil rpcClient; the original
	// unconditional call would panic. Close the raw transport in that case
	// so it is not leaked.
	if c.rpcClient != nil {
		c.rpcClient.Close()
	} else if c.conn != nil {
		c.conn.Close()
	}
}
/*
Conn.SetIdleTimeout() amount of time that can pass between requests before
the connection's deadline expires; the new deadline is applied immediately.
*/
func (c *Conn) SetIdleTimeout(timeout time.Duration) {
	c.idleTimeout = timeout
	c.setDeadline(timeout)
}
/*
Conn.IsClosed() reports whether Close() has been called on this connection.
*/
func (c Conn) IsClosed() bool {
	return c.closed
}
/*
Conn.Addr() returns the remote network address this connection dialed.
*/
func (c Conn) Addr() string {
	return c.addr
}
/*
Conn.Send() sends an RPC request to the service with no per-request timeout
(a zero timeout clears the deadline; see SendTimeout).
*/
func (c *Conn) Send(ri *skynet.RequestInfo, fn string, in interface{}, out interface{}) (err error) {
	return c.SendTimeout(ri, fn, in, out, 0)
}
/*
Conn.SendTimeout() acts like Send but bounds the request with the given
timeout (0 means no deadline). The in value is BSON-marshalled, wrapped in a
ServiceRPCInWrite envelope and forwarded to the service; the reply's nested
BSON document is unmarshalled into out. Transport-level failures close the
connection; all failures are reported as serviceError values.
*/
func (c *Conn) SendTimeout(ri *skynet.RequestInfo, fn string, in interface{}, out interface{}, timeout time.Duration) (err error) {
	if c.IsClosed() {
		return ConnectionClosed
	}
	sin := skynet.ServiceRPCInWrite{
		RequestInfo: ri,
		Method:      fn,
		ClientID:    c.clientID,
	}
	var b []byte
	b, err = bson.Marshal(in)
	if err != nil {
		return serviceError{fmt.Sprintf("Error calling bson.Marshal: %v", err)}
	}
	// 0x00 is the generic BSON binary subtype.
	sin.In = bson.Binary{
		0x00,
		b,
	}
	sout := skynet.ServiceRPCOutRead{}
	// Set timeout for this request, then set it back to idle timeout
	c.setDeadline(timeout)
	defer c.setDeadline(c.idleTimeout)
	log.Println(log.TRACE, fmt.Sprintf("Sending Method call %s with ClientID %s to: %s", sin.Method, sin.ClientID, c.addr))
	err = c.rpcClient.Call(c.serviceName+".Forward", sin, &sout)
	if err != nil {
		// Transport-level RPC failure: the connection is no longer usable.
		c.Close()
		err = serviceError{err.Error()}
		return
	}
	if sout.ErrString != "" {
		// Application-level error reported by the remote handler.
		err = serviceError{sout.ErrString}
		return
	}
	err = bson.Unmarshal(sout.Out, out)
	if err != nil {
		log.Println(log.ERROR, "Error unmarshalling nested document")
		err = serviceError{err.Error()}
	}
	return
}
// setDeadline applies an absolute deadline of now+timeout to the
// underlying net.Conn; a zero timeout clears the deadline entirely.
func (c *Conn) setDeadline(timeout time.Duration) {
	if timeout == 0 {
		c.conn.SetDeadline(time.Time{})
		return
	}
	c.conn.SetDeadline(time.Now().Add(timeout))
}
// performHandshake validates the server-side ServiceHandshake and answers
// with an empty ClientHandshake. On success the bson RPC client and the
// server-assigned client ID are installed on the connection.
func (c *Conn) performHandshake() (err error) {
	var sh skynet.ServiceHandshake
	decoder := bsonrpc.NewDecoder(c.conn)
	err = decoder.Decode(&sh)
	if err != nil {
		log.Println(log.ERROR, "Failed to decode ServiceHandshake", err)
		c.conn.Close()
		return HandshakeFailed
	}
	// NOTE(review): unlike the decode-failure path above, a service-name
	// mismatch (and the unregistered case below) leaves c.conn open —
	// confirm whether it should be closed here too.
	if sh.Name != c.serviceName {
		log.Println(log.ERROR, "Attempted to send request to incorrect service: "+sh.Name)
		return HandshakeFailed
	}
	ch := skynet.ClientHandshake{}
	encoder := bsonrpc.NewEncoder(c.conn)
	err = encoder.Encode(ch)
	if err != nil {
		log.Println(log.ERROR, "Failed to encode ClientHandshake", err)
		c.conn.Close()
		return HandshakeFailed
	}
	if !sh.Registered {
		log.Println(log.ERROR, "Attempted to send request to unregistered service")
		return ServiceUnregistered
	}
	c.rpcClient = bsonrpc.NewClient(c.conn)
	c.clientID = sh.ClientID
	return
}
|
package main
import (
"fmt"
"time"
)
// main demonstrates the time package: constructing instants, reading
// their components, comparing them and doing duration arithmetic.
func main() {
	now := time.Now()
	fmt.Println(now)
	// A fixed instant built from explicit components.
	then := time.Date(2009, 11, 17, 20, 34, 58, 651387237, time.UTC)
	fmt.Println(then)
	// Individual components of the instant.
	fmt.Println(then.Year())
	fmt.Println(then.Month())
	fmt.Println(then.Day())
	fmt.Println(then.Hour())
	fmt.Println(then.Minute())
	fmt.Println(then.Second())
	fmt.Println(then.Nanosecond())
	fmt.Println(then.Unix())
	fmt.Println(then.UnixNano())
	fmt.Println(then.Location())
	fmt.Println(then.Weekday())
	// Comparisons against the current time.
	fmt.Println(then.Before(now))
	fmt.Println(then.After(now))
	fmt.Println(then.Equal(now))
	// Durations and duration arithmetic.
	diff := now.Sub(then)
	fmt.Println(diff)
	fmt.Println(diff.Hours())
	fmt.Println(diff.Minutes())
	fmt.Println(diff.Seconds())
	fmt.Println(diff.Nanoseconds())
	fmt.Println(then.Add(diff))
	fmt.Println(then.Add(-diff))
}
|
package main
import (
"strconv"
"strings"
)
// Action is a game action that can be triggered in various ways.
type Action interface {
	Start()
}

// ChainAction runs a sequence of actions one after another.
type ChainAction struct {
	chain []Action
}

// Start triggers every action in the chain, in order.
func (act *ChainAction) Start() {
	for i := range act.chain {
		act.chain[i].Start()
	}
}
// NullAction is a no-op Action, useful as a placeholder branch.
type NullAction struct {
}

// Start does nothing.
func (act *NullAction) Start() {}
// TextAction displays text attributed to a source (e.g. a speaker name).
type TextAction struct {
	source, text string
}

// Start the text action: print the text through the game UI.
func (act *TextAction) Start() {
	UI.PrintTalk(act.source, act.text)
}
// DescribedAction pairs an Action with the menu text shown for it.
type DescribedAction struct {
	Description string
	Subject Action
}

// SelectionAction presents a numbered menu of described actions and runs
// the one the player picks.
type SelectionAction struct {
	choices []DescribedAction
}

// Start keeps re-printing the menu until a valid index is entered, then
// starts the chosen action.
func (act *SelectionAction) Start() {
	choice := -1
	for choice < 0 || choice >= len(act.choices) {
		for idx, option := range act.choices {
			UI.PrintTalk("", strconv.Itoa(idx)+": "+option.Description)
		}
		choice = UI.AskNumber()
	}
	act.choices[choice].Subject.Start()
}
// ReceiveItemAction puts the item identified by itemId into the player's
// inventory and announces it.
type ReceiveItemAction struct {
	itemId int
}

// Start looks the item up, adds it to the inventory and prints a message.
func (act *ReceiveItemAction) Start() {
	item := GetItem(act.itemId)
	Inventory.Add(item)
	UI.Print("You received a " + item.Name() + ".")
}
// ItemSwitchAction branches to one of two actions depending on whether
// the player currently holds the item identified by itemId.
type ItemSwitchAction struct {
	itemId int
	onTrue Action
	onFalse Action
}

// Start runs onTrue when the item is held, onFalse otherwise.
func (act *ItemSwitchAction) Start() {
	next := act.onFalse
	if HasItem(act.itemId) {
		next = act.onTrue
	}
	next.Start()
}
// EquipItemAction offers to equip an item into an equipment slot.
type EquipItemAction struct {
	itemId int
	slot EquipmentSlot
}

// Start asks the player for confirmation and equips the item on "y" or
// "yes" (case-insensitive); any other answer leaves things unchanged.
func (act *EquipItemAction) Start() {
	item := GetItem(act.itemId)
	UI.Print("Do you want to equip '" + item.Name() + "'? [y/n]")
	choice := strings.ToLower(UI.AskLine())
	if choice == "y" || choice == "yes" {
		Inventory.Equip(item, act.slot)
		UI.Print("You equipped the item.")
	}
}
|
package listen
// CmdHander is implemented by components that parse and react to textual
// commands. NOTE(review): the name looks like a typo for "CmdHandler",
// but renaming it would break every implementer — confirm before changing.
type CmdHander interface {
	// CmdParseMsg returns a message related to command parsing —
	// presumably usage/help text; confirm against implementers.
	CmdParseMsg() string
	// CmdParse consumes one command string; presumably the bool reports
	// whether the command was handled — confirm against implementers.
	CmdParse(cmd string) bool
	// Start begins the handler's processing loop.
	Start()
}
|
package model
// Datum is a generic tagged payload: T carries a numeric type/kind code
// and D the associated values. Semantics are inferred from the JSON tags
// only — confirm against producers of this message.
type Datum struct {
	T int `json:"t"`
	D []interface{} `json:"d"`
}
|
package notification
import (
"encoding/json"
"net/http"
)
// User identifies a notification recipient by NetId and Area. Field
// semantics are inferred from the names — confirm against callers.
type User struct {
	NetId string
	Area string
}
type Response struct {
Status string `json:status`
Data interface{} `json:data`
}
// write marshals res to JSON and sends it with the given HTTP status
// code, allowing any origin via CORS. On marshal failure a bare 500 is
// returned (note: without the CORS header or a body).
func write(code int, res Response, w http.ResponseWriter) {
	b, err := json.Marshal(res)
	if err != nil {
		w.WriteHeader(500)
		return
	}
	// Headers must be added before WriteHeader commits the response.
	w.Header().Add("Access-Control-Allow-Origin", "*")
	w.WriteHeader(code)
	w.Write(b)
}
|
package main
import (
"fmt"
"io/ioutil"
"log"
validator "gopkg.in/go-playground/validator.v9"
yaml "gopkg.in/yaml.v2"
)
// Use a single instance of Validate, it will cache struct info.
var validate *validator.Validate

// main loads publiccode.yml, decodes it strictly into a PublicCode
// struct, registers the project's custom validation rules and reports
// whether the file is valid.
func main() {
	var pc PublicCode
	// Read data from file.
	data, err := ioutil.ReadFile("publiccode.yml")
	if err != nil {
		fmt.Println(err)
		return
	}
	// Unmarshal the yaml into Publiccode struct. UnmarshalStrict rejects
	// unknown keys instead of silently dropping them.
	err = yaml.UnmarshalStrict(data, &pc)
	if err != nil {
		log.Fatalf("error: %v", err)
	}
	// Generate a new validator and add new validation rules.
	validate = validator.New()
	err = validate.RegisterValidation("spdx", spdxValidator)
	checkErr(err)
	err = validate.RegisterValidation("maintenance", maintenanceValidator)
	checkErr(err)
	err = validate.RegisterValidation("date", dateValidator)
	checkErr(err)
	err = validate.RegisterValidation("isnotemptyss", isNotEmptySameStruct)
	checkErr(err)
	// Apply validate rules on PublicCode struct.
	err = validate.Struct(pc)
	if err != nil {
		log.Printf("error: %v", err)
	} else {
		log.Println("publiccode.yml is valid!")
	}
}
// checkErr for better readability: it aborts the program via log.Fatal
// when err is non-nil and is a no-op otherwise. Gometalinter checks
// every validate.RegisterValidation err, so each call site funnels here.
func checkErr(err error) {
	if err == nil {
		return
	}
	log.Fatal("ERROR:", err)
}
|
package consensus
import (
"fmt"
"log"
"net"
"net/http"
"net/rpc"
"reflect"
"sync"
"time"
)
// Peer is the network endpoint (ip:port) of one cluster member.
type Peer struct {
	ip string
	port string
}

// NewPeer returns a Peer for the given ip and port.
func NewPeer(ip string, port string) *Peer {
	return &Peer{
		ip:   ip,
		port: port,
	}
}
// State is the role a Raft server currently plays in the cluster.
type State int

// Server roles. iota assigns 0, 1, 2 in declaration order, matching the
// original constants; typing them as State (instead of leaving untyped
// ints with a redundant `= iota` on every line) documents their use and
// lets the compiler catch accidental mixing with plain ints.
const (
	FOLLOWER State = iota
	CANDIDATE
	LEADER
)
// TxType tags the kind of a Transaction; no kinds are defined yet.
type TxType int

const ()

// Transaction is a single client command to be replicated.
type Transaction struct {
	txtype TxType
}

// Log is one replicated log entry: a transaction plus the term in which
// it was created.
type Log struct {
	tx Transaction
	term int
}

// AppendEntryArgs is the argument of the AppendEntries RPC.
// NOTE(review): net/rpc's gob codec only transmits exported fields, so
// these lowercase fields will arrive as zero values on the remote side —
// confirm and export them (Term, LeaderId, Entries).
type AppendEntryArgs struct {
	term int
	leaderId int
	entries []Log
}

// AppendEntryReply carries the receiver's current term back to the leader.
// NOTE(review): same unexported-field caveat as AppendEntryArgs.
type AppendEntryReply struct {
	term int
}

// RequestVoteArgs is the argument of the RequestVote RPC.
// NOTE(review): same unexported-field caveat as AppendEntryArgs.
type RequestVoteArgs struct {
	term int // term in which this election happens
	candidateId int // who is asking for vote
}

// RequestVoteReply reports whether the vote was granted and the voter's
// term. NOTE(review): same unexported-field caveat as AppendEntryArgs.
type RequestVoteReply struct {
	term int
	voteGranted bool
}
// RaftServer is one participant in the consensus cluster. Mutable
// role/term state is intended to be guarded by mux; the channels let the
// RPC handlers and timers signal the main Run loop.
type RaftServer struct {
	mux sync.Mutex
	peers []*Peer
	me int // index of this server in peers
	state State
	votedFor int // candidate voted for in the current term (-1 after NewRaftServer)
	leaderId int
	currentTerm int
	status int
	//add channels
	timeout chan int // election-timer expirations
	newterm chan int // a higher term was observed
	leader chan int // leadership-change signal
}
// RequestVote is the handler for the RequestVote RPC: a candidate asks
// this server for its vote. A request carrying a newer term immediately
// demotes this server to follower and grants the vote; otherwise the
// answer depends on the current role.
//
// NOTE(review): state is read and written here without taking server.mux,
// racing with Run and the mode goroutines — confirm locking.
// NOTE(review): NewRaftServer initializes votedFor to -1, but the
// not-yet-voted check below compares against 0 — a fresh follower would
// never grant a same-term vote. Confirm which sentinel is intended.
func (server *RaftServer) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) error {
	if args.term > server.currentTerm {
		// Newer term: adopt it, become follower, grant the vote.
		server.state = FOLLOWER
		server.currentTerm = args.term
		server.votedFor = args.candidateId
		reply.term = server.currentTerm
		reply.voteGranted = true
	} else {
		switch server.state {
		case FOLLOWER:
			//receives a vote request
			if args.term < server.currentTerm {
				reply.voteGranted = false
			} else {
				// Same term: grant only if we have not voted yet or we
				// already voted for this candidate.
				if server.votedFor == 0 || server.votedFor == args.candidateId {
					server.votedFor = args.candidateId
					reply.voteGranted = true
				} else {
					reply.voteGranted = false
				}
				//server.currentTerm = args.term
			}
			reply.term = server.currentTerm
			break
		// NOTE(review): the empty CANDIDATE case does NOT fall through in
		// Go — a candidate currently returns without setting the reply.
		// The LEADER body below was probably meant to be shared.
		case CANDIDATE:
		case LEADER:
			//candidate's votedFor must be itself
			reply.term = server.currentTerm
			reply.voteGranted = false
			//assert(server.votedFor == server.me)
			if server.votedFor != server.me {
			} else {
			}
			break
		default:
			//panic()
			break
		}
	}
	return nil
}
// AppendEntries is the handler for the AppendEntries/heartbeat RPC. A
// term at least as new as ours makes this server a follower of the
// sender; a stale term is rejected by just reporting our current term.
// NOTE(review): mutates server state without holding server.mux.
func (server *RaftServer) AppendEntries(args *AppendEntryArgs, reply *AppendEntryReply) error {
	if args.term < server.currentTerm {
		reply.term = server.currentTerm
	} else {
		server.currentTerm = args.term
		server.leaderId = args.leaderId
		server.state = FOLLOWER
		reply.term = server.currentTerm
	}
	return nil
}
// NewRaftServer returns a RaftServer that starts life as a follower in
// term 0, with no vote cast and no known leader (-1 sentinels). Init
// must still be called before Run to create channels and start the RPC
// listener.
func NewRaftServer(peers []*Peer, me int) *RaftServer {
	return &RaftServer{
		peers:       peers,
		me:          me,
		state:       FOLLOWER,
		votedFor:    -1,
		currentTerm: 0,
		leaderId:    -1,
	}
}
// Init creates the signalling channels, registers this server with
// net/rpc over HTTP and starts serving on this peer's port in the
// background. It must be called before Run. A failure to listen is fatal.
func (server *RaftServer) Init() {
	n := len(server.peers)
	server.timeout = make(chan int)
	server.leader = make(chan int)
	// Buffered so every peer goroutine can report a newer term without blocking.
	server.newterm = make(chan int, n)
	rpc.Register(server)
	rpc.HandleHTTP()
	l, e := net.Listen("tcp", ":"+server.peers[server.me].port)
	if e != nil {
		log.Fatal("listen error:", e)
	}
	go http.Serve(l, nil)
}
// Run is the server's main state loop: it reacts to election timeouts,
// to winning/losing leadership and to observing a newer term, and owns
// the resulting role transitions.
// NOTE(review): server.candidate and server.follower are not defined in
// this file — presumably candidateMode/followerMode were meant; confirm.
func (server *RaftServer) Run() {
	for {
		select {
		case <-server.timeout:
			server.mux.Lock()
			switch server.state {
			case FOLLOWER:
				//start an election
				server.state = CANDIDATE
				go server.candidate()
				break
			case CANDIDATE:
				//start a new term
				break
			case LEADER:
				//ignore
				break
			}
			server.mux.Unlock()
		case <-server.leader:
			go server.follower()
			//switch to follower
		case <-server.newterm:
			//switch to follower
		}
	}
}
func (server *RaftServer) leaderMode() {
n := len(server.peers)
cmds := make([]chan int, n)
for i := 0; i < n; i++ {
if i == server.me {
continue
}
addr := server.peers[i].ip + ":" + server.peers[i].port
go func(cmd chan int) {
for {
var reply AppendEntryReply
var appendCall *rpc.Call
var ch chan int
client, err := rpc.DialHTTP("tcp", addr)
if err == nil {
server.mux.Lock()
args := &AppendEntryArgs{term: server.currentTerm,
leaderId: server.me}
server.mux.Unlock()
appendCall = client.Go("RaftServer.AppendEntries", args, &reply, nil)
} else {
//wait some time
continue
}
select {
case <-cmd:
return
case heartbeat := <-appendCall.Done:
if heartbeat.Error == nil {
term := heartbeat.Reply.(AppendEntryReply).term
if term > server.currentTerm {
//signal new term
}
}
//time.Sleep(rand.Int63())
}
}
}(cmds[i])
}
cases := make([])
}
// followerMode is the follower's background behaviour; followers are
// purely reactive (they only answer RPCs), so there is nothing to do.
func (server *RaftServer) followerMode() {
	//do nothing
}
// candidateMode runs one election round: it asks every other peer for a
// vote in parallel and tallies the replies with reflect.Select.
//
// Fixes over the previous version:
//   - replys contained only nil channels (the slice was made, the
//     channels were not), so every goroutine's send blocked forever;
//     each channel is now created (buffered so a late reply cannot leak
//     the goroutine).
//   - reflect.Select included server.me's nil channel, which never
//     becomes ready but still counted toward `remaining`, so the tally
//     loop could never terminate; only real channels are selected now,
//     with an `order` table mapping case index back to peer index (the
//     old `channels` map was keyed inconsistently with the case order).
//   - Each goroutine sends exactly one reply and exits without closing
//     its channel; a received case is therefore disabled and counted so
//     the loop ends once every peer has answered.
//   - Call.Reply holds the *RequestVoteReply passed to Go; the value
//     assertion `.(RequestVoteReply)` would panic, and FieldByName must
//     be applied to the dereferenced struct (value.Elem()).
func (server *RaftServer) candidateMode() {
	//request for vote
	n := len(server.peers)
	replys := make([]chan *RequestVoteReply, n)
	for i := 0; i < n; i++ {
		if i == server.me {
			continue
		}
		replys[i] = make(chan *RequestVoteReply, 1)
		addr := server.peers[i].ip + ":" + server.peers[i].port
		go func(addr string, ch chan *RequestVoteReply) {
			for {
				var cli *rpc.Client
				var reply RequestVoteReply
				// Keep dialling until the peer is reachable.
				for {
					client, err := rpc.DialHTTP("tcp", addr)
					if err == nil {
						cli = client
						break
					}
					time.Sleep(30 * time.Millisecond)
				}
				server.mux.Lock()
				args := &RequestVoteArgs{candidateId: server.me,
					term: server.currentTerm}
				server.mux.Unlock()
				voteCall := cli.Go("RaftServer.RequestVote", args, &reply, nil)
				replyCall := <-voteCall.Done
				if replyCall.Error == nil {
					ch <- replyCall.Reply.(*RequestVoteReply)
					break
				}
			}
		}(addr, replys[i])
	}
	// Build select cases only for peers that actually have channels and
	// remember which peer each case belongs to.
	cases := make([]reflect.SelectCase, 0, n-1)
	order := make([]int, 0, n-1)
	for i, ch := range replys {
		if ch == nil {
			continue
		}
		order = append(order, i)
		cases = append(cases, reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(ch)})
	}
	remaining := len(cases)
	votes := 0
	for remaining > 0 {
		chosen, value, ok := reflect.Select(cases)
		if !ok {
			// The chosen channel has been closed, so zero out the channel to disable the case
			cases[chosen].Chan = reflect.ValueOf(nil)
			remaining--
			continue
		}
		// Each peer answers exactly once; disable its case after the reply.
		cases[chosen].Chan = reflect.ValueOf(nil)
		remaining--
		// value is a *RequestVoteReply; dereference before reading fields.
		rv := value.Elem()
		term, voteGranted := int(rv.FieldByName("term").Int()),
			rv.FieldByName("voteGranted").Bool()
		fmt.Printf("Vote from #%d: (%d, %t)\n", order[chosen], term, voteGranted)
		if term > server.currentTerm {
			//signal newterm to main routine
		} else if voteGranted {
			votes++
		}
		if votes > n/2 {
			//signal leader
		}
	}
}
// timer drives the election timeout: every 250ms it signals the Run
// loop. NOTE(review): the interval is fixed; Raft normally randomizes
// election timeouts to avoid split votes — confirm intended behaviour.
func (server *RaftServer) timer() {
	for {
		time.Sleep(250 * time.Millisecond)
		server.timeout <- 1
	}
}
/*
type Raft struct {
}
func Make(peers []*Peer, me int) *Raft {
}
func (raft *Raft) Start(command interface{}) {
}
func (raft *Raft) GetState() (term int, isLeader bool) {
}
*/
|
package utils
const AppName = "Gnemes"
|
package node
import (
"github.com/sherifabdlnaby/prism/app/component"
"github.com/sherifabdlnaby/prism/pkg/job"
"github.com/sherifabdlnaby/prism/pkg/response"
)
//output Wraps an output core together with its pipeline Node so the
//node machinery can forward jobs to the output plugin.
type output struct {
	output *component.Output
	*Node
}
//process forwards the job to the output plugin and awaits its result.
//It first acquires a slot from the node's resource limiter (failing the
//job with NoAck if acquisition fails, e.g. on context cancellation),
//relays the plugin's response back to the original caller, then
//releases the slot.
func (n *output) process(j job.Job) {
	err := n.resource.Acquire(j.Context)
	if err != nil {
		j.ResponseChan <- response.NoAck(err)
		return
	}
	// Private response channel so the resource is released only after
	// the plugin has answered.
	responseChan := make(chan response.Response)
	n.output.JobChan <- job.Job{
		Payload: j.Payload,
		Data: j.Data,
		ResponseChan: responseChan,
		Context: j.Context,
	}
	j.ResponseChan <- <-responseChan
	n.resource.Release()
}
//processStream handles streamed jobs; outputs treat them exactly like
//regular jobs.
func (n *output) processStream(j job.Job) {
	n.process(j)
}
|
package main
import "fmt"
type myFuncType func(int, int) int // user-defined function type
type myInt int // user-defined type: although its underlying type is int, Go treats myInt and int as distinct types, so values of one cannot be assigned to the other without an explicit conversion
// main demonstrates a named type (myInt) and passing a function value
// through a named function type (myFuncType).
func main() {
	var num myInt = 666
	fmt.Printf("res=%v\n", num)
	sum := myFun(getSum, 100, 120)
	fmt.Printf("res=%v\n", sum)
}
// getSum returns the sum of its two arguments.
func getSum(num1 int, num2 int) int {
	sum := num1 + num2
	return sum
}
// myFun applies the given binary int function to num1 and num2 and
// returns its result (a minimal higher-order-function example).
func myFun(function myFuncType, num1 int, num2 int) int {
	return function(num1, num2)
}
|
package bosh
import (
"errors"
"log"
"time"
"github.com/skriptble/nine/element"
"github.com/skriptble/nine/stream"
)
// ErrSessionClosed is the error returned when a session has been closed and a
// call to Element is made.
var ErrSessionClosed = errors.New("Session is closed")

// Session is a single BOSH session. It buffers inbound elements from HTTP
// requests in RID order, batches outbound elements into responses, and
// tracks request-acknowledgement state. Three goroutines (process,
// response, buffer — started by NewSession) do the actual work.
type Session struct {
	processor chan *Request // inbound requests to be ordered and processed
	restart chan struct{} // signals a stream restart to Element
	elements chan element.Element // ordered elements handed out by Element
	responder chan element.Element // outbound elements awaiting a response
	expired bool // set when the inactivity timer closed the session
	exit chan struct{} // closed when the session shuts down
	// current is the current RID being processed
	current int
	// ack is the highest RID that has been processed
	ack int
	sid string
	wait time.Duration
	inactivity time.Duration
}
// NewSession creates a new session and returns it. sid is the session
// id, rid the first request id expected, hold the number of requests
// that may be queued for responses, and wait/inactivity the session's
// timing parameters. Three goroutines are started to order incoming
// requests, batch responses and buffer elements; all of them stop when
// the session is closed.
func NewSession(sid string, rid, hold int, wait, inactivity time.Duration) *Session {
	s := &Session{
		sid:        sid,
		current:    rid,
		wait:       wait,
		inactivity: inactivity,
		processor:  make(chan *Request),
		elements:   make(chan element.Element),
		responder:  make(chan element.Element),
		restart:    make(chan struct{}, 1),
		exit:       make(chan struct{}),
	}
	held := make(chan *Request, hold)
	incoming := make(chan element.Element)
	go s.process(held, incoming)
	go s.response(held)
	go s.buffer(incoming)
	return s
}
// Write handles writing elements to the underlying requests. This method does
// not implement io.Writer because only well formed XML elements can be written
// into the body of a BOSH request or response. It blocks until the response
// goroutine accepts the element, or fails fast once the session is closed.
func (s *Session) Write(el element.Element) (err error) {
	select {
	case <-s.exit:
		err = stream.ErrStreamClosed
	case s.responder <- el:
	}
	return
}
// UnregisterRequest returns a function that can be called to remove the given
// request from the registered requests for this session. This method is mainly
// used as the timeout variable for a request so that a request that has timed
// out is not used.
// NOTE(review): it currently just returns s.Ack, which reports the highest
// processed RID rather than unregistering anything — confirm the intent.
func (s *Session) UnregisterRequest() func() int {
	return s.Ack
}
// Close implements io.Closer. Closing the exit channel stops the three
// session goroutines and makes Write/Element fail with ErrStreamClosed.
// TODO: This has a race condition, put a lock around it. (Two concurrent
// callers can both pass the default case and then close(s.exit) twice.)
func (s *Session) Close() error {
	select {
	case <-s.exit:
		return errors.New("Already closed")
	default:
	}
	close(s.exit)
	return nil
}
// Element returns the next element from the session, blocking until one
// is buffered, a stream restart is requested, or the session is closed.
func (s *Session) Element() (el element.Element, err error) {
	select {
	case <-s.exit:
		// TODO: This should return something like io.EOF or ErrStreamClosed so
		// the stream can properly handle it.
		err = stream.ErrStreamClosed
	case el = <-s.elements:
	case <-s.restart:
		err = stream.ErrRequireRestart
	}
	return
}
// Process hands a request to the session's processing goroutine and
// always reports success.
//
// TODO: Handle processing of repeated requests
// TODO: Handle overactivity as described in
// http://xmpp.org/extensions/xep-0124.html#overactive
func (s *Session) Process(r *Request) error {
	s.processor <- r
	return nil
}
// process handles processing elements from requests and adding requests
// to a queue of available requests for writers to use. If the queue is
// full the oldest request is closed and the received request is added in
// its place. This method ensures that requests' elements are buffered in
// RID order to meet the requirement of in-order processing. The session
// expires (and closes) after `inactivity` without an incoming request.
// NOTE(review): entries are never deleted from the requests map, so it
// grows for the lifetime of the session — confirm whether processed
// requests should be removed.
func (s *Session) process(queue chan *Request, buffer chan<- element.Element) {
	var requests map[int]*Request = make(map[int]*Request)
	var current int = s.current
	for {
		select {
		case <-s.exit:
			return
		case <-time.After(s.inactivity):
			log.Println("session expiring")
			s.expired = true
			s.Close()
			return
		case r := <-s.processor:
			// Handle history request
			if r.RID() < current {
			}
			requests[r.RID()] = r
			log.Println("processing request")
			// Make the request available to the response writer; when the
			// queue is full, evict (close) the oldest request first.
			select {
			case queue <- r:
			case old := <-queue:
				log.Println("Removing old requests ", old.RID())
				old.Close()
				queue <- r
			default:
				r.Close()
			}
			if r.body.Restart == true {
				log.Println("Seneding restart")
				s.restart <- struct{}{}
			}
			log.Println("Reqeusts queued")
			// Drain every request that is next in RID order, pushing its
			// elements to the buffer goroutine and advancing the ack.
			for r, ok := requests[current]; ok; r, ok = requests[current] {
				for _, el := range r.Elements() {
					log.Println("Buffered element")
					buffer <- el
				}
				s.ack = r.RID()
				log.Println("Increasing ack")
				current++
			}
		}
	}
}
// buffer handles receiving elements from the process goroutine and keeps
// one ready for a call to Element(). This is necessary because process
// cannot be blocked waiting for a call to Element, but we only want to
// send an element down the elements channel when we have one ready.
func (s *Session) buffer(buffer <-chan element.Element) {
	var elements []element.Element // overflow queue behind `current`
	var current element.Element    // the element currently offered to Element()
	var pending bool               // whether `current` holds a real element
	for {
		if pending {
			select {
			case <-s.exit:
				return
			case el := <-buffer:
				elements = append(elements, el)
			case s.elements <- current:
				// Delivered: promote the next queued element, if any.
				if len(elements) > 0 {
					current, elements = elements[0], elements[1:]
					pending = true
				} else {
					pending = false
				}
			}
		} else {
			select {
			case <-s.exit:
				return
			// The only way to get here is if there are no elements in the
			// slice, therefore we can assign directly to current and set
			// pending to true.
			case el := <-buffer:
				current = el
				pending = true
			}
		}
	}
}
// response batches outbound elements and writes them to an available
// request. After the first element arrives, further elements are
// collected under an exponentially decaying timeout (50ms, 25ms, ...) so
// a burst is flushed as a single response within a hard time bound.
func (s *Session) response(queue <-chan *Request) {
	var response []element.Element = make([]element.Element, 0, 10)
	var timeout time.Duration
	for {
		select {
		case <-s.exit:
			return
		case el := <-s.responder:
			response = append(response, el)
			timeout = 50 * time.Millisecond
			// Exponentially decay the timeout for flushing. This allows to have
			// a hard limit based on time.
		loop:
			for {
				select {
				case <-time.After(timeout):
					break loop
				case el := <-s.responder:
					response = append(response, el)
					timeout = timeout / 2
				}
			}
			for {
				// Get a request
				r := <-queue
				// Write the response to the request; a request that was
				// closed in the meantime is skipped and the next one used.
				err := r.Write(response...)
				if err == ErrRequestClosed {
					continue
				}
				break
			}
			// Create a history entry for the response
			response = make([]element.Element, 0, 10)
		}
	}
}
// Ack returns the highest RID the session has processed.
func (s *Session) Ack() int {
	return s.ack
}

// SID returns the session ID of this session.
func (s *Session) SID() string {
	return s.sid
}

// Expired reports whether the session was shut down by the inactivity timer.
func (s *Session) Expired() bool {
	return s.expired
}

// Wait returns the session's configured wait duration.
func (s *Session) Wait() time.Duration {
	return s.wait
}
|
package 二叉树
// leafSimilar reports whether the two binary trees have the same
// left-to-right leaf value sequence (LeetCode "Leaf-Similar Trees").
func leafSimilar(root1 *TreeNode, root2 *TreeNode) bool {
	return areArraysSame(getLeafSequence(root1), getLeafSequence(root2))
}
// getLeafSequence returns the values of the tree's leaves in
// left-to-right order; a nil tree yields an empty slice.
func getLeafSequence(root *TreeNode) []int {
	if root == nil {
		return []int{}
	}
	// A node with no children is a leaf.
	if root.Left == nil && root.Right == nil {
		return []int{root.Val}
	}
	// Internal node: concatenate the leaf sequences of both subtrees.
	leafSequence := make([]int, 0)
	leafSequence = append(leafSequence, getLeafSequence(root.Left)...)
	leafSequence = append(leafSequence, getLeafSequence(root.Right)...)
	return leafSequence
}
// areArraysSame reports whether the two int slices have identical length
// and elements in the same order.
func areArraysSame(arr1, arr2 []int) bool {
	if len(arr1) != len(arr2) {
		return false
	}
	for i, v := range arr1 {
		if v != arr2[i] {
			return false
		}
	}
	return true
}
/*
题目链接: https://leetcode-cn.com/problems/leaf-similar-trees/submissions/
*/
|
package models
import (
"fmt"
"time"
"github.com/jinzhu/gorm"
)
// ===== BEGIN of all query sets

// ===== BEGIN of query set ProjectQuerySet

// ProjectQuerySet is an queryset type for Project
type ProjectQuerySet struct {
	db *gorm.DB
}

// NewProjectQuerySet constructs new ProjectQuerySet scoped to the Project model
func NewProjectQuerySet(db *gorm.DB) ProjectQuerySet {
	return ProjectQuerySet{
		db: db.Model(&Project{}),
	}
}

// w wraps a chained *gorm.DB back into a ProjectQuerySet (internal helper
// used by every generated filter/order method below).
func (qs ProjectQuerySet) w(db *gorm.DB) ProjectQuerySet {
	return NewProjectQuerySet(db)
}
// All is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) All(ret *[]Project) error {
return qs.db.Find(ret).Error
}
// Count is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) Count() (int, error) {
var count int
err := qs.db.Count(&count).Error
return count, err
}
// Create is an autogenerated method
// nolint: dupl
func (o *Project) Create(db *gorm.DB) error {
return db.Create(o).Error
}
// CreatedAtEq is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) CreatedAtEq(createdAt time.Time) ProjectQuerySet {
return qs.w(qs.db.Where("created_at = ?", createdAt))
}
// CreatedAtGt is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) CreatedAtGt(createdAt time.Time) ProjectQuerySet {
return qs.w(qs.db.Where("created_at > ?", createdAt))
}
// CreatedAtGte is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) CreatedAtGte(createdAt time.Time) ProjectQuerySet {
return qs.w(qs.db.Where("created_at >= ?", createdAt))
}
// CreatedAtLt is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) CreatedAtLt(createdAt time.Time) ProjectQuerySet {
return qs.w(qs.db.Where("created_at < ?", createdAt))
}
// CreatedAtLte is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) CreatedAtLte(createdAt time.Time) ProjectQuerySet {
return qs.w(qs.db.Where("created_at <= ?", createdAt))
}
// CreatedAtNe is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) CreatedAtNe(createdAt time.Time) ProjectQuerySet {
return qs.w(qs.db.Where("created_at != ?", createdAt))
}
// Delete is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) Delete() error {
return qs.db.Delete(Project{}).Error
}
// Delete is an autogenerated method
// nolint: dupl
func (o *Project) Delete(db *gorm.DB) error {
return db.Delete(o).Error
}
// DescriptionEq is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) DescriptionEq(description string) ProjectQuerySet {
return qs.w(qs.db.Where("description = ?", description))
}
// DescriptionIn is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) DescriptionIn(description string, descriptionRest ...string) ProjectQuerySet {
iArgs := []interface{}{description}
for _, arg := range descriptionRest {
iArgs = append(iArgs, arg)
}
return qs.w(qs.db.Where("description IN (?)", iArgs))
}
// DescriptionNe is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) DescriptionNe(description string) ProjectQuerySet {
return qs.w(qs.db.Where("description != ?", description))
}
// DescriptionNotIn is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) DescriptionNotIn(description string, descriptionRest ...string) ProjectQuerySet {
iArgs := []interface{}{description}
for _, arg := range descriptionRest {
iArgs = append(iArgs, arg)
}
return qs.w(qs.db.Where("description NOT IN (?)", iArgs))
}
// GetUpdater is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) GetUpdater() ProjectUpdater {
return NewProjectUpdater(qs.db)
}
// IDEq is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) IDEq(ID uint) ProjectQuerySet {
return qs.w(qs.db.Where("id = ?", ID))
}
// IDGt is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) IDGt(ID uint) ProjectQuerySet {
return qs.w(qs.db.Where("id > ?", ID))
}
// IDGte is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) IDGte(ID uint) ProjectQuerySet {
return qs.w(qs.db.Where("id >= ?", ID))
}
// IDIn is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) IDIn(ID uint, IDRest ...uint) ProjectQuerySet {
iArgs := []interface{}{ID}
for _, arg := range IDRest {
iArgs = append(iArgs, arg)
}
return qs.w(qs.db.Where("id IN (?)", iArgs))
}
// IDLt is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) IDLt(ID uint) ProjectQuerySet {
return qs.w(qs.db.Where("id < ?", ID))
}
// IDLte is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) IDLte(ID uint) ProjectQuerySet {
return qs.w(qs.db.Where("id <= ?", ID))
}
// IDNe is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) IDNe(ID uint) ProjectQuerySet {
return qs.w(qs.db.Where("id != ?", ID))
}
// IDNotIn is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) IDNotIn(ID uint, IDRest ...uint) ProjectQuerySet {
iArgs := []interface{}{ID}
for _, arg := range IDRest {
iArgs = append(iArgs, arg)
}
return qs.w(qs.db.Where("id NOT IN (?)", iArgs))
}
// Limit is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) Limit(limit int) ProjectQuerySet {
return qs.w(qs.db.Limit(limit))
}
// NameEq is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) NameEq(name string) ProjectQuerySet {
return qs.w(qs.db.Where("name = ?", name))
}
// NameIn is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) NameIn(name string, nameRest ...string) ProjectQuerySet {
iArgs := []interface{}{name}
for _, arg := range nameRest {
iArgs = append(iArgs, arg)
}
return qs.w(qs.db.Where("name IN (?)", iArgs))
}
// NameNe is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) NameNe(name string) ProjectQuerySet {
return qs.w(qs.db.Where("name != ?", name))
}
// NameNotIn is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) NameNotIn(name string, nameRest ...string) ProjectQuerySet {
iArgs := []interface{}{name}
for _, arg := range nameRest {
iArgs = append(iArgs, arg)
}
return qs.w(qs.db.Where("name NOT IN (?)", iArgs))
}
// One is used to retrieve one result. It returns gorm.ErrRecordNotFound
// if nothing was fetched
func (qs ProjectQuerySet) One(ret *Project) error {
return qs.db.First(ret).Error
}
// OrderAscByCreatedAt is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) OrderAscByCreatedAt() ProjectQuerySet {
return qs.w(qs.db.Order("created_at ASC"))
}
// OrderAscByID is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) OrderAscByID() ProjectQuerySet {
return qs.w(qs.db.Order("id ASC"))
}
// OrderAscByUpdatedAt is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) OrderAscByUpdatedAt() ProjectQuerySet {
return qs.w(qs.db.Order("updated_at ASC"))
}
// OrderDescByCreatedAt is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) OrderDescByCreatedAt() ProjectQuerySet {
return qs.w(qs.db.Order("created_at DESC"))
}
// OrderDescByID is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) OrderDescByID() ProjectQuerySet {
return qs.w(qs.db.Order("id DESC"))
}
// OrderDescByUpdatedAt is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) OrderDescByUpdatedAt() ProjectQuerySet {
return qs.w(qs.db.Order("updated_at DESC"))
}
// SetCreatedAt is an autogenerated method
// nolint: dupl
func (u ProjectUpdater) SetCreatedAt(createdAt time.Time) ProjectUpdater {
u.fields[string(ProjectDBSchema.CreatedAt)] = createdAt
return u
}
// SetDescription is an autogenerated method
// nolint: dupl
func (u ProjectUpdater) SetDescription(description string) ProjectUpdater {
u.fields[string(ProjectDBSchema.Description)] = description
return u
}
// SetID is an autogenerated method
// nolint: dupl
func (u ProjectUpdater) SetID(ID uint) ProjectUpdater {
u.fields[string(ProjectDBSchema.ID)] = ID
return u
}
// SetName is an autogenerated method
// nolint: dupl
func (u ProjectUpdater) SetName(name string) ProjectUpdater {
u.fields[string(ProjectDBSchema.Name)] = name
return u
}
// SetStatus is an autogenerated method
// nolint: dupl
func (u ProjectUpdater) SetStatus(status string) ProjectUpdater {
u.fields[string(ProjectDBSchema.Status)] = status
return u
}
// SetUpdatedAt is an autogenerated method
// nolint: dupl
func (u ProjectUpdater) SetUpdatedAt(updatedAt time.Time) ProjectUpdater {
u.fields[string(ProjectDBSchema.UpdatedAt)] = updatedAt
return u
}
// StatusEq is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) StatusEq(status string) ProjectQuerySet {
return qs.w(qs.db.Where("status = ?", status))
}
// StatusIn is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) StatusIn(status string, statusRest ...string) ProjectQuerySet {
iArgs := []interface{}{status}
for _, arg := range statusRest {
iArgs = append(iArgs, arg)
}
return qs.w(qs.db.Where("status IN (?)", iArgs))
}
// StatusNe is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) StatusNe(status string) ProjectQuerySet {
return qs.w(qs.db.Where("status != ?", status))
}
// StatusNotIn is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) StatusNotIn(status string, statusRest ...string) ProjectQuerySet {
iArgs := []interface{}{status}
for _, arg := range statusRest {
iArgs = append(iArgs, arg)
}
return qs.w(qs.db.Where("status NOT IN (?)", iArgs))
}
// Update is an autogenerated method
// nolint: dupl
func (u ProjectUpdater) Update() error {
return u.db.Updates(u.fields).Error
}
// UpdateNum is an autogenerated method
// nolint: dupl
func (u ProjectUpdater) UpdateNum() (int64, error) {
db := u.db.Updates(u.fields)
return db.RowsAffected, db.Error
}
// UpdatedAtEq is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) UpdatedAtEq(updatedAt time.Time) ProjectQuerySet {
return qs.w(qs.db.Where("updated_at = ?", updatedAt))
}
// UpdatedAtGt is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) UpdatedAtGt(updatedAt time.Time) ProjectQuerySet {
return qs.w(qs.db.Where("updated_at > ?", updatedAt))
}
// UpdatedAtGte is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) UpdatedAtGte(updatedAt time.Time) ProjectQuerySet {
return qs.w(qs.db.Where("updated_at >= ?", updatedAt))
}
// UpdatedAtLt is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) UpdatedAtLt(updatedAt time.Time) ProjectQuerySet {
return qs.w(qs.db.Where("updated_at < ?", updatedAt))
}
// UpdatedAtLte is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) UpdatedAtLte(updatedAt time.Time) ProjectQuerySet {
return qs.w(qs.db.Where("updated_at <= ?", updatedAt))
}
// UpdatedAtNe is an autogenerated method
// nolint: dupl
func (qs ProjectQuerySet) UpdatedAtNe(updatedAt time.Time) ProjectQuerySet {
return qs.w(qs.db.Where("updated_at != ?", updatedAt))
}
// ===== END of query set ProjectQuerySet
// ===== BEGIN of Project modifiers
type projectDBSchemaField string
func (f projectDBSchemaField) String() string {
return string(f)
}
// ProjectDBSchema stores db field names of Project
var ProjectDBSchema = struct {
ID projectDBSchemaField
CreatedAt projectDBSchemaField
UpdatedAt projectDBSchemaField
Name projectDBSchemaField
Description projectDBSchemaField
Status projectDBSchemaField
}{
ID: projectDBSchemaField("id"),
CreatedAt: projectDBSchemaField("created_at"),
UpdatedAt: projectDBSchemaField("updated_at"),
Name: projectDBSchemaField("name"),
Description: projectDBSchemaField("description"),
Status: projectDBSchemaField("status"),
}
// Update updates Project fields by primary key. Only the listed schema
// fields are written; their values are taken from the receiver at call
// time. gorm.ErrRecordNotFound is returned unwrapped so callers can
// detect it; any other error is wrapped with context.
func (o *Project) Update(db *gorm.DB, fields ...projectDBSchemaField) error {
	// Snapshot of the receiver's current values, keyed by column name.
	dbNameToFieldName := map[string]interface{}{
		"id": o.ID,
		"created_at": o.CreatedAt,
		"updated_at": o.UpdatedAt,
		"name": o.Name,
		"description": o.Description,
		"status": o.Status,
	}
	u := map[string]interface{}{}
	for _, f := range fields {
		fs := f.String()
		u[fs] = dbNameToFieldName[fs]
	}
	if err := db.Model(o).Updates(u).Error; err != nil {
		if err == gorm.ErrRecordNotFound {
			return err
		}
		return fmt.Errorf("can't update Project %v fields %v: %s",
			o, fields, err)
	}
	return nil
}
// ProjectUpdater is an Project updates manager: fields accumulates the
// column/value pairs applied by Update/UpdateNum.
type ProjectUpdater struct {
	fields map[string]interface{}
	db *gorm.DB
}

// NewProjectUpdater creates new Project updater scoped to the Project model
func NewProjectUpdater(db *gorm.DB) ProjectUpdater {
	return ProjectUpdater{
		fields: map[string]interface{}{},
		db: db.Model(&Project{}),
	}
}
// ===== END of Project modifiers
// ===== END of all query sets
|
/*
init 函数可用于执行初始化任务,也可用于在执行开始之前验证程序的正确性。
一个包的初始化顺序如下:
包级别的变量首先被初始化
接着 init 函数被调用。一个包可以有多个 init 函数(在一个或多个文件中),它们的调用顺序为编译器解析它们的顺序。
如果一个包导入了另一个包,被导入的包先初始化。
尽管一个包可能被包含多次,但是它只被初始化一次。
下面让我们对我们的程序做一些修改来理解 init 函数。
首先在 rectprops.go 中添加一个 init 函数:
*/
// ---------------
/*
使用空指示符
在 Go 中只导入包却不在代码中使用它是非法的。如果你这么做了,编译器会报错。
这样做的原因是为了避免引入过多未使用的包而导致编译时间的显著增加。
将 geometry.go 中的代码替换为如下代码:
*/
// 示例 eg
//geometry.go
// package main
// import (
// "geometry/rectangle" //importing custom package
// )
// func main() {
// }
/*
上面的程序将会报错:geometry.go:6: imported and not used: "geometry/rectangle"
但是在开发过程中,导入包却不立即使用它是很常见的。可以用空指示符(_)来处理这种情况。
下面的代码可以避免抛出上面的错误:
*/
package main
import (
"test/rectangle"
)
var _ = rectangle.Area //error silencer: keeps the unused import compiling during development

// main is intentionally empty; this program only demonstrates importing a
// package without using it (see the commentary above).
func main() {

}
/*
var _ = rectangle.Area 这一行屏蔽了错误。
我们应该跟踪这些“错误消音器”(error silencer),在开发结束时,我们应该去掉这些“错误消音器”,
并且如果没有使用相应的包,这些包也应该被一并移除。
因此,建议在 import 语句之后的包级别中写“错误消音器”。
有时我们导入一个包只是为了确保该包初始化的发生,而我们不需要使用包中的任何函数或变量。
例如,我们也许需要确保 rectangle 包的 init 函数被调用而不打算在代码中的任何地方使用这个包。
空指示符仍然可以处理这种情况,像下面的代码一样:
*/
/*
有init初始化函数时,在包上引用_ 上面的写法和下面的写法都可以
*/
// package main
// import (
// _ "test/rectangle"
// )
// func main() {
// }
|
package example
import (
"context"
"fmt"
"github.com/opentracing/opentracing-go"
"log"
"net/http"
"net/url"
"sourcegraph.com/sourcegraph/appdash"
appdashtracer "sourcegraph.com/sourcegraph/appdash/opentracing"
"sourcegraph.com/sourcegraph/appdash/traceapp"
"testing"
"time"
)
var tapp *traceapp.App
var ctx context.Context
var rootSpan opentracing.Span
// initOpentracing wires up an in-memory Appdash trace store and its embedded
// web UI (package var tapp, later served on :8700), installs an Appdash-backed
// tracer as the global opentracing tracer, and opens the root span / context
// (package vars rootSpan, ctx) used by the test cases below.
func initOpentracing() {
	var err error
	// Create a recent in-memory store, evicting data after 20s.
	//
	// The store defines where information about traces (i.e. spans and
	// annotations) will be stored during the lifetime of the application. This
	// application uses a MemoryStore store wrapped by a RecentStore with an
	// eviction time of 20s (i.e. all data after 20s is deleted from memory).
	memStore := appdash.NewMemoryStore()
	store := &appdash.RecentStore{
		MinEvictAge: 20 * time.Second,
		DeleteStore: memStore,
	}
	// Start the Appdash web UI on port 8700.
	//
	// This is the actual Appdash web UI -- usable as a Go package itself, We
	// embed it directly into our application such that visiting the web server
	// on HTTP port 8700 will bring us to the web UI, displaying information
	// about this specific web-server (another alternative would be to connect
	// to a centralized Appdash collection server).
	//
	// FIX: this local used to be named "url", shadowing the imported net/url
	// package for the rest of the function.
	uiURL, err := url.Parse("http://localhost:8700")
	if err != nil {
		log.Fatal(err)
	}
	tapp, err = traceapp.New(nil, uiURL)
	if err != nil {
		log.Fatal(err)
	}
	tapp.Store = store
	tapp.Queryer = memStore
	// We will use a local collector (as we are running the Appdash web UI
	// embedded within our app).
	//
	// A collector is responsible for collecting the information about traces
	// (i.e. spans and annotations) and placing them into a store. In this app
	// we use a local collector (we could also use a remote collector, sending
	// the information to a remote Appdash collection server).
	collector := appdash.NewLocalCollector(store)
	// Here we use the local collector to create a new opentracing.Tracer
	tracer := appdashtracer.NewTracer(collector)
	opentracing.InitGlobalTracer(tracer) // must be set, or spans go to the noop tracer
	rootCtx := context.Background()
	s, sctx := opentracing.StartSpanFromContextWithTracer(rootCtx, tracer, "testQuery")
	if MysqlUri == "" || MysqlUri == "*" {
		panic("no database url define in MysqlConfig.go , you must set the mysql link!")
	}
	fmt.Println(s, sctx)
	ctx = sctx
	rootSpan = s
}
// Test_queryTracing runs a traced SELECT through the example mapper, then
// serves the Appdash UI on :8700 so the trace can be inspected.
// NOTE(review): log.Fatal(http.ListenAndServe(...)) blocks forever, so this
// "test" never finishes on its own — it is a runnable demo, not a unit test.
func Test_queryTracing(t *testing.T) {
	initOpentracing()
	// use the mapper
	result, err := exampleActivityMapper.SelectTemplete(ctx, "hello")
	rootSpan.Finish()
	fmt.Println("result=", result, "error=", err)
	log.Println("Appdash web UI running on HTTP :8700")
	log.Fatal(http.ListenAndServe(":8700", tapp))
}
// Test_updateTracing runs a traced UPDATE through the example mapper, then
// serves the Appdash UI on :8700 (blocks forever — demo, not a unit test).
func Test_updateTracing(t *testing.T) {
	initOpentracing()
	// use the mapper; nil session means the mapper manages its own session
	result, err := exampleActivityMapper.UpdateById(ctx, nil, Activity{Id: "1", Name: "testName"})
	rootSpan.Finish()
	fmt.Println("result=", result, "error=", err)
	log.Println("Appdash web UI running on HTTP :8700")
	log.Fatal(http.ListenAndServe(":8700", tapp))
}
// Test_txTracing is an example of a local (single-session) transaction:
// two updates in one explicit session, committed together.
func Test_txTracing(t *testing.T) {
	initOpentracing()
	var session, err = exampleActivityMapper.NewSession(ctx)
	if err != nil {
		t.Fatal(err)
	}
	err = session.Begin() // start the transaction
	if err != nil {
		t.Fatal(err)
	}
	var activityBean = Activity{
		Id:   "170",
		Name: "rs168-8",
	}
	// NOTE(review): ctx is nil here, so these updates presumably attach to the
	// explicit session rather than the traced context — confirm against mapper docs.
	var updateNum, e = exampleActivityMapper.UpdateById(nil, &session, activityBean) // a non-nil session reuses it; otherwise a new session is created
	fmt.Println("updateNum=", updateNum)
	if e != nil {
		panic(e)
	}
	activityBean.Id = "171"
	activityBean.Name = "test-123"
	updateNum, e = exampleActivityMapper.UpdateById(nil, &session, activityBean) // a non-nil session reuses it; otherwise a new session is created
	if e != nil {
		panic(e)
	}
	// NOTE(review): Commit/Close errors are ignored — fine for a demo.
	session.Commit() // commit the transaction
	session.Close()  // close the session
	rootSpan.Finish()
	log.Fatal(http.ListenAndServe(":8700", tapp))
}
// Test_insideTx demonstrates a nested transaction: an inner Begin inside an
// already-open session, followed by Rollback of the inner work and Commit of
// the outer.
func Test_insideTx(t *testing.T) {
	initOpentracing()
	var session, err = exampleActivityMapper.NewSession(ctx)
	if err != nil {
		t.Fatal(err)
	}
	err = session.Begin() // start the outer transaction
	if err != nil {
		t.Fatal(err)
	}
	var activityBean = Activity{
		Id:         "170",
		Name:       "rs168-8",
		DeleteFlag: 1,
	}
	var updateNum, e = exampleActivityMapper.UpdateById(nil, &session, activityBean) // a non-nil session reuses it; otherwise a new session is created
	fmt.Println("updateNum=", updateNum)
	if e != nil {
		panic(e)
	}
	// Begin again on the same session -> nested (inner) transaction.
	err = session.Begin()
	if err != nil {
		t.Fatal(err)
	}
	activityBean.Id = "170"
	activityBean.Name = "test-123456"
	updateNum, e = exampleActivityMapper.UpdateById(nil, &session, activityBean) // a non-nil session reuses it; otherwise a new session is created
	if e != nil {
		panic(e)
	}
	activityBean.Id = "170"
	activityBean.Name = "test-123456789"
	updateNum, e = exampleActivityMapper.UpdateById(nil, &session, activityBean) // a non-nil session reuses it; otherwise a new session is created
	if e != nil {
		panic(e)
	}
	updateNum, e = exampleActivityMapper.UpdateTemplete(&session, activityBean) // a non-nil session reuses it; otherwise a new session is created
	if e != nil {
		panic(e)
	}
	// Roll back the inner transaction, then commit the outer one.
	session.Rollback()
	session.Commit()
	session.Close() // close the session
	rootSpan.Finish()
	log.Fatal(http.ListenAndServe(":8700", tapp))
}
|
package x0
import (
"encoding/json"
"fmt"
"github.com/x0tf/x0go/schema"
)
// ElementsClient is used to execute API requests directed to the element-related scopes.
// It is a thin wrapper around the shared httpClient; all methods are plain
// request/decode helpers with no local state.
type ElementsClient struct {
	http *httpClient
}
// GetList requests a paginated list of existent elements.
// It is shorthand for GetListInNamespace with an empty namespace ID
// (i.e. elements across all namespaces).
func (client *ElementsClient) GetList(limit, skip int) (*schema.Elements, error) {
	return client.GetListInNamespace("", limit, skip)
}
// GetListInNamespace requests a paginated list of existent elements in a
// specific namespace. A non-positive limit defaults to 10 and a negative
// skip is clamped to 0.
func (client *ElementsClient) GetListInNamespace(namespaceID string, limit, skip int) (*schema.Elements, error) {
	if limit <= 0 {
		limit = 10
	}
	if skip < 0 {
		skip = 0
	}
	endpoint := fmt.Sprintf("%s/%s?limit=%d&skip=%d", endpointElements, namespaceID, limit, skip)
	response, err := client.http.execute("GET", endpoint, nil)
	if err != nil {
		return nil, err
	}
	var elements schema.Elements
	if err := json.Unmarshal(response, &elements); err != nil {
		return nil, err
	}
	return &elements, nil
}
// CreatePaste creates a new paste element inside the given namespace and
// returns the element the API answered with.
func (client *ElementsClient) CreatePaste(namespaceID string, data *schema.CreatePasteElementData) (*schema.Element, error) {
	endpoint := fmt.Sprintf("%s/%s/%s", endpointElements, namespaceID, endpointPartElementsPaste)
	response, err := client.http.execute("POST", endpoint, data)
	if err != nil {
		return nil, err
	}
	var element schema.Element
	if err := json.Unmarshal(response, &element); err != nil {
		return nil, err
	}
	return &element, nil
}
// CreateRedirect creates a new redirect element inside the given namespace.
func (client *ElementsClient) CreateRedirect(namespaceID string, data *schema.CreateRedirectElementData) (*schema.Element, error) {
	// BUG FIX: this previously posted to endpointPartElementsPaste — a
	// copy-paste slip from CreatePaste, so redirects were created as pastes.
	// NOTE(review): assumes an endpointPartElementsRedirect constant exists
	// alongside endpointPartElementsPaste — confirm in the endpoints file.
	response, err := client.http.execute("POST", fmt.Sprintf("%s/%s/%s", endpointElements, namespaceID, endpointPartElementsRedirect), data)
	if err != nil {
		return nil, err
	}
	element := new(schema.Element)
	if err := json.Unmarshal(response, element); err != nil {
		return nil, err
	}
	return element, nil
}
// Patch patches an existing element identified by namespace ID and key,
// returning the updated element the API answered with.
func (client *ElementsClient) Patch(namespaceID, key string, data *schema.PatchElementData) (*schema.Element, error) {
	response, err := client.http.execute("PATCH", fmt.Sprintf("%s/%s/%s", endpointElements, namespaceID, key), data)
	if err != nil {
		return nil, err
	}
	element := new(schema.Element)
	if err := json.Unmarshal(response, element); err != nil {
		return nil, err
	}
	return element, nil
}
// Delete deletes an existing element identified by namespace ID and key.
// The response body is discarded; only the transport error (if any) matters.
func (client *ElementsClient) Delete(namespaceID, key string) error {
	endpoint := fmt.Sprintf("%s/%s/%s", endpointElements, namespaceID, key)
	if _, err := client.http.execute("DELETE", endpoint, nil); err != nil {
		return err
	}
	return nil
}
|
package auth
import (
"net/http"
"github.com/gorilla/mux"
"gitlab.com/NagByte/Palette/db/wrapper"
"gitlab.com/NagByte/Palette/service/common"
"gitlab.com/NagByte/Palette/service/smsVerification"
)
// Auth is the contract the auth service exposes: device registration,
// sign-up/sign-in, identity lookup, uniqueness checks, and the two
// middleware wrappers route handlers are composed with.
type Auth interface {
	TouchDevice(map[string]interface{}) (string, bool, string, error)
	Signup(string, string, string, string) error
	SignDeviceIn(string, string, string) error
	WhoAmI(string) string
	IsUniquePhoneNumber(string) bool
	IsUniqueUsername(string) bool
	DeviceTokenNeededMiddleware(handlerFunc) handlerFunc
	AuthenticationNeededMiddleware(handlerFunc) handlerFunc
}

// authService implements the auth HTTP endpoints on top of the database
// wrapper and the SMS verifier; handler is the fully wrapped mux router.
type authService struct {
	baseURI  string
	handler  http.Handler
	db       wrapper.Database
	verifier smsVerification.SMSVerification
}
// New builds the auth service: it wires the verifier and database in,
// registers every /auth route on a fresh mux router, and wraps the router
// so all responses carry a JSON content type.
func New(verifier smsVerification.SMSVerification, db wrapper.Database) *authService {
	service := &authService{
		baseURI:  "/auth",
		db:       db,
		verifier: verifier,
	}
	router := mux.NewRouter()
	base := service.baseURI
	router.HandleFunc(base+"/signUp/", service.DeviceTokenNeededMiddleware(service.signUpHandler)).
		Methods("POST")
	router.HandleFunc(base+"/touchDevice/", service.touchDeviceHandler).
		Methods("POST")
	router.HandleFunc(base+"/signIn/", service.DeviceTokenNeededMiddleware(service.signInHandler)).
		Methods("POST")
	router.HandleFunc(base+"/signOut/", service.AuthenticationNeededMiddleware(service.signDeviceOutHandler)).
		Methods("POST")
	router.HandleFunc(base+"/whoAmI/", service.DeviceTokenNeededMiddleware(service.whoAmIHandler)).
		Methods("GET")
	service.handler = common.JSONContentTypeHandler{Handler: router}
	return service
}
// ServeHTTP delegates to the wrapped router so authService satisfies http.Handler.
func (as *authService) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	as.handler.ServeHTTP(w, r)
}

// URI returns the base path ("/auth") this service is mounted under.
func (as *authService) URI() string {
	return as.baseURI
}
|
package main
import (
	"bufio"
	"fmt"
	"os"
	"strings"
)
// space describes one run of writable cells ('-') on the crossword board.
type space struct {
	r, c      int  // row and column of the first cell of the run
	length    int  // number of cells in the run
	direction int  // 0 = horizontal; vertical scanning is not implemented yet
	occupied  bool // true while a word is currently placed in this slot
}
func main() {
s := bufio.NewScanner(os.Stdint)
board := make([]string, 10)
for i := 0; i < 10; i++ {
s.Scan()
board[i] = s.Text()
}
s.Scan()
words := strings.Split(s.Text(), ";")
}
// getSpaces scans the board and indexes every horizontal run of two or more
// writable cells ('-') by its length, so the solver can look up candidate
// slots for a word in O(1).
//
// FIXES vs the original sketch: the function now actually returns the
// declared map (it previously built a []*space and had no return statement),
// records the run's starting column (was index+last) and an inclusive length
// (was last-index, off by one).
// NOTE(review): like the original, this assumes the cells between the first
// and last '-' of a row are all writable — confirm against the board format.
func getSpaces(board []string, words int) map[int][]*space {
	spaces := make(map[int][]*space)
	for i, v := range board {
		if index := strings.Index(v, "-"); index != -1 {
			last := strings.LastIndex(v, "-")
			if last != index {
				length := last - index + 1
				spaces[length] = append(spaces[length], &space{r: i, c: index, length: length, direction: 0})
			} else {
				// A lone '-' starts a vertical run: we would need to walk
				// rows i+1.. at this column to measure it (not implemented,
				// as in the original).
			}
		}
	}
	return spaces
}
// check recursively tries to place every remaining word into an unoccupied
// slot of matching length, printing the board once all words are placed.
// FIXES vs the original: the undefined identifiers "word" and "OK" are
// replaced with next / ok, and the slot is released again after the
// recursive call so sibling branches can backtrack into it.
func check(words, board []string, spaces map[int][]*space) {
	if len(words) == 0 {
		fmt.Println(board)
		return
	}
	// get next word
	next := words[0]
	remaining := words[1:]
	// get the slots whose length matches the next word
	possibleSlots := spaces[len(next)]
	for _, s := range possibleSlots {
		if !s.occupied {
			if b, ok := canFit(next, s, board); ok {
				s.occupied = true
				check(remaining, b, spaces)
				s.occupied = false // backtrack: free the slot for other branches
			}
		}
	}
}
// canFit reports whether word fits in slot s on board and, when it does,
// returns the board with the word written in. Still a stub, as in the
// original — FIX: the second result type was "OK", which is not a type;
// it is now bool, and the stub returns false rather than nil.
func canFit(word string, s *space, board []string) ([]string, bool) {
	return nil, false
}
|
package middleware
import (
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/pomerium/pomerium/internal/urlutil"
)
// TestSetHeaders verifies that the SetHeaders middleware has written every
// configured security header onto the ResponseWriter by the time the inner
// handler runs.
func TestSetHeaders(t *testing.T) {
	tests := []struct {
		name            string
		securityHeaders map[string]string
	}{
		{"one option", map[string]string{"X-Frame-Options": "DENY"}},
		{"two options", map[string]string{"X-Frame-Options": "DENY", "A": "B"}},
	}
	req, err := http.NewRequest(http.MethodGet, "/", nil)
	if err != nil {
		t.Fatal(err)
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// The assertions live inside the wrapped handler: the headers
			// must already be present on w when the chain reaches it.
			testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				for k, want := range tt.securityHeaders {
					if got := w.Header().Get(k); want != got {
						t.Errorf("want %s got %q", want, got)
					}
				}
			})
			rr := httptest.NewRecorder()
			handler := SetHeaders(tt.securityHeaders)(testHandler)
			handler.ServeHTTP(rr, req)
		})
	}
}
// TestValidateSignature checks that the ValidateSignature middleware accepts
// a request whose URL was signed with the matching secret and rejects a
// mismatched one with 400 and a JSON error body.
func TestValidateSignature(t *testing.T) {
	t.Parallel()
	fn := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// NOTE(review): WriteHeader after Fprint is a no-op — the first
		// write already sent an implicit 200.
		fmt.Fprint(w, http.StatusText(http.StatusOK))
		w.WriteHeader(http.StatusOK)
	})
	tests := []struct {
		name       string
		secretA    []byte // secret the middleware validates with
		secretB    []byte // secret the URL is signed with
		wantStatus int
		wantBody   string
	}{
		{"good", []byte("secret"), []byte("secret"), http.StatusOK, http.StatusText(http.StatusOK)},
		{"secret mistmatch", []byte("secret"), []byte("hunter42"), http.StatusBadRequest, "{\"Status\":400}\n"},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			signedURL := urlutil.NewSignedURL(tt.secretB, &url.URL{Scheme: "https", Host: "pomerium.io"})
			r := httptest.NewRequest(http.MethodGet, signedURL.String(), nil)
			r.Header.Set("Accept", "application/json")
			w := httptest.NewRecorder()
			got := ValidateSignature(tt.secretA)(fn)
			got.ServeHTTP(w, r)
			if status := w.Code; status != tt.wantStatus {
				t.Errorf("ValidateSignature() error = %v, wantErr %v\n%v", w.Result().StatusCode, tt.wantStatus, w.Body.String())
			}
			body := w.Body.String()
			if diff := cmp.Diff(body, tt.wantBody); diff != "" {
				t.Errorf("ValidateSignature() %s", diff)
				t.Errorf("%s", signedURL)
			}
		})
	}
}
// TestRequireBasicAuth exercises the RequireBasicAuth middleware across
// matching, mismatching, and absent credentials, asserting only the status.
func TestRequireBasicAuth(t *testing.T) {
	t.Parallel()
	fn := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, http.StatusText(http.StatusOK))
		w.WriteHeader(http.StatusOK)
	})
	tests := []struct {
		name       string
		givenUser  string // credentials put on the request (empty pair = none)
		givenPass  string
		wantUser   string // credentials the middleware requires
		wantPass   string
		wantStatus int
	}{
		{"good", "foo", "bar", "foo", "bar", 200},
		{"bad pass", "foo", "bar", "foo", "buzz", 401},
		{"bad user", "foo", "bar", "buzz", "bar", 401},
		{"empty", "", "", "", "", 401}, // don't add auth
		{"empty user", "", "bar", "", "bar", 200},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			req, err := http.NewRequest(http.MethodGet, "/", nil)
			if err != nil {
				t.Fatal(err)
			}
			if tt.givenUser != "" || tt.givenPass != "" {
				req.SetBasicAuth(tt.givenUser, tt.givenPass)
			}
			rr := httptest.NewRecorder()
			handler := RequireBasicAuth(tt.wantUser, tt.wantPass)(fn)
			handler.ServeHTTP(rr, req)
			if status := rr.Code; status != tt.wantStatus {
				t.Errorf("RequireBasicAuth() error = %v, wantErr %v\n%v", rr.Result().StatusCode, tt.wantStatus, rr.Body.String())
			}
		})
	}
}
|
package extract
import (
"code.sajari.com/docconv"
"log"
)
func ExtractTextFromPdf(path string)string{
res, err := docconv.ConvertPath(path)
if err != nil {
log.Println(err)
return ""
}
return res.Body
} |
package mcservice
import (
"errors"
"bytes"
"encoding/json"
"log"
"net/http"
"strconv"
"sync"
)
// JSONRequest is an incoming JSON-RPC call: method name, positional
// parameters, and the caller-chosen request ID echoed back in the response.
type JSONRequest struct {
	Method string        `json:"method"`
	Params []interface{} `json:"params"`
	ID     interface{}   `json:"id"`
}

// JSONResponse is a JSON-RPC reply: exactly one of Result or Error is
// meaningful, plus the ID copied from the request.
type JSONResponse struct {
	Result interface{} `json:"result"`
	Error  interface{} `json:"error"`
	ID     interface{} `json:"id"`
}
// apiMap maps JSON-RPC method names (or the "*" wildcard) to their handlers.
var apiMap sync.Map

// key is an unexported context-key type so our keys cannot collide with
// other packages'.
type key int

const userKey key = 0
const reqKey key = 1
const rspKey key = 2
const errKey key = 3

// apiFunc is the signature every registered JSON-RPC handler implements.
type apiFunc func(*JSONRequest) (*JSONResponse, error)
// writeResponse JSON-encodes resp onto s. An encoding failure answers 500;
// a write failure is only logged, since the status has already been sent.
func writeResponse(resp *JSONResponse, s http.ResponseWriter) {
	rbuf, err := json.Marshal(resp)
	if err != nil {
		s.WriteHeader(500)
		log.Printf("could not encode response: %s", err)
		return
	}
	// Headers must be set before the first Write.
	s.Header().Set("Content-Type", "application/json")
	// FIX: folded the write-error check into an if and dropped the
	// redundant bare return at the end of the function.
	if _, err = s.Write(rbuf); err != nil {
		log.Printf("could not write response: %s", err)
	}
}
// registerAPI maps a JSON-RPC method name to its handler.
func (s *MCService) registerAPI(method string, api apiFunc) {
	apiMap.Store(method, api)
}

// registerWildCardAPI registers the fallback handler used when no exact
// method name matches (stored under the "*" key).
func (s *MCService) registerWildCardAPI(api apiFunc) {
	apiMap.Store("*", api)
}
//go:generate go run gen.go
// initialize validates the service configuration and then registers every
// generated API handler (see gen.go / registerAllAPI).
func (s *MCService) initialize() error {
	err := s.checkConfig()
	if err != nil {
		return err
	}
	s.registerAllAPI()
	return nil
}
// platformAPI forwards req to the local blockchain RPC endpoint using HTTP
// basic auth and decodes the JSON-RPC reply. A non-200 status is reported
// as errInternal.
func (s *MCService) platformAPI(req *JSONRequest) (*JSONResponse, error) {
	body, err := json.Marshal(req)
	if err != nil {
		return nil, err
	}
	target := "http://localhost:" + strconv.Itoa(s.cfg.RPCPort) + "/"
	r, err := http.NewRequest("POST", target, bytes.NewBuffer(body))
	if err != nil {
		return nil, err
	}
	r.SetBasicAuth(s.cfg.RPCUser, s.cfg.RPCPassword)
	r.Header.Set("Content-Type", "application/json")
	rsp, err := http.DefaultClient.Do(r)
	if err != nil {
		return nil, err
	}
	defer rsp.Body.Close()
	if rsp.StatusCode != 200 {
		return nil, errInternal
	}
	var resp JSONResponse
	if err := json.NewDecoder(rsp.Body).Decode(&resp); err != nil {
		return nil, err
	}
	return &resp, nil
}
// checkAuth validates HTTP basic auth on r against the configured service
// credentials and returns the status to answer with: 200 on success, 401
// when credentials are missing, 403 when they do not match.
func (s *MCService) checkAuth(r *http.Request) int {
	username, password, ok := r.BasicAuth()
	if !ok { // FIX: was the non-idiomatic "ok != true"
		return 401
	}
	if username != s.cfg.UserName || password != s.cfg.PassWord {
		return 403
	}
	return 200
}
// passThru forwards the request unchanged to the platform RPC endpoint.
func (s *MCService) passThru(req *JSONRequest) (*JSONResponse, error) {
	return s.platformAPI(req)
}
var errInvalidConfig = errors.New("invalid config")
func (s *MCService) checkConfig() error {
if s.cfg.UserName == "" {
log.Printf("username not configured")
return errInvalidConfig
}
if s.cfg.PassWord == "" {
log.Printf("password not configured")
return errInvalidConfig
}
if s.cfg.ChainName == "" {
log.Printf("chain name not configured")
return errInvalidConfig
}
if s.cfg.RPCUser == "" {
log.Printf("RPC username not configured")
return errInvalidConfig
}
if s.cfg.RPCPassword == "" {
log.Printf("RPC passwrod not configured")
return errInvalidConfig
}
if s.cfg.RPCPort == 0 {
log.Printf("RPC port not configured")
return errInvalidConfig
}
if s.cfg.NativeEntity == "" {
log.Printf("native entity not configured")
return errInvalidConfig
}
return nil
} |
package main
import (
"bytes"
"flag"
"fmt"
"net"
"os"
"time"
"github.com/nkbai/goice/stun"
"github.com/nkbai/goice/turn"
"github.com/nkbai/goice/utils"
"github.com/nkbai/log"
)
// Command-line flags for the TURN demo client.
var (
	server = flag.String("server",
		"193.112.248.133:3478", // FIX: was wrapped in a no-op fmt.Sprintf
		"turn server address",
	)
	peer = flag.String("peer",
		"182.254.155.208:3333", //test echo server
		"peer address", // FIX: was "peer addres"
	)
	username = flag.String("username", "smartraiden", "username")
	password = flag.String("password", "smartraiden", "password")
)
const (
	// udp is the network name passed to the net dial/listen helpers.
	udp = "udp"
)

// init routes all trace-level-and-above log output to stderr.
func init() {
	log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, utils.MyStreamHandler(os.Stderr)))
}

// isErr reports whether m is a STUN error-class response.
func isErr(m *stun.Message) bool {
	return m.Type.Class == stun.ClassErrorResponse
}
// do builds req from attrs, sends it over c, reads the server's reply into
// res (with a 20s deadline), and logs the round-trip time. It returns the
// first error encountered (build, write, or read).
func do(req, res *stun.Message, c *net.UDPConn, attrs ...stun.Setter) error {
	start := time.Now()
	if err := req.Build(attrs...); err != nil {
		log.Error(fmt.Sprintf("failed to build %s", err))
		return err
	}
	if _, err := req.WriteTo(c); err != nil {
		log.Error(fmt.Sprintf("failed to write %s m:%s", err, req))
		return err
	}
	//log.Info(fmt.Sprintf("sent message m:%s", req))
	// Make sure the response buffer has room for a typical STUN message.
	if cap(res.Raw) < 800 {
		res.Raw = make([]byte, 0, 1024)
	}
	// NOTE(review): SetReadDeadline's error is deliberately ignored here.
	c.SetReadDeadline(time.Now().Add(time.Second * 20))
	_, err := res.ReadFrom(c)
	if err != nil {
		log.Error(fmt.Sprintf("failed to read err:%s message:%s", err, req))
	}
	log.Info(fmt.Sprintf("got message m:%s, rtt:%s", res, time.Since(start)))
	return err
}
// main is a TURN walkthrough: in "peer" mode it runs a UDP echo server; in
// client mode it allocates a relay on the TURN server, creates a permission
// for the peer, exchanges data via Send/Data indications and via a bound
// channel, then releases the allocation.
// FIXES: two log-message typos ("resonve" -> "resolve", "missmatch" ->
// "mismatch"); behavior is otherwise unchanged.
func main() {
	flag.Parse()
	var (
		req = new(stun.Message)
		res = new(stun.Message)
	)
	if flag.Arg(0) == "peer" {
		_, port, err := net.SplitHostPort(*peer)
		log.Info(fmt.Sprintf("running in peer mode"))
		if err != nil {
			log.Crit(fmt.Sprintf("failed to find port in peer address %s", err))
		}
		laddr, err := net.ResolveUDPAddr(udp, ":"+port)
		if err != nil {
			log.Crit(fmt.Sprintf("failed to resolve UDP addr %s", err))
		}
		c, err := net.ListenUDP(udp, laddr)
		if err != nil {
			log.Crit(fmt.Sprintf("failed to listen %s", err))
		}
		log.Info(fmt.Sprintf("listening as echo server laddr:%s", c.LocalAddr()))
		for {
			// Starting echo server.
			buf := make([]byte, 1024)
			n, addr, err := c.ReadFromUDP(buf)
			if err != nil {
				log.Crit(fmt.Sprintf("failed to read %s", err))
			}
			log.Info(fmt.Sprintf("got message body:%s raddr:%s", string(buf[:]), addr))
			// Echoing back.
			if _, err := c.WriteToUDP(buf[:n], addr); err != nil {
				log.Crit(fmt.Sprintf("failed to write back %s", err))
			}
			log.Info(fmt.Sprintf("echoed back raddr:%s ", addr))
		}
	}
	if len(*password) == 0 {
		fmt.Fprintln(os.Stderr, "No password set, auth is required.")
		flag.Usage()
		os.Exit(2)
	}
	// Resolving to TURN server.
	raddr, err := net.ResolveUDPAddr(udp, *server)
	if err != nil {
		log.Crit(fmt.Sprintf("failed to resolve TURN server %s", err))
	}
	c, err := net.DialUDP(udp, nil, raddr)
	if err != nil {
		log.Crit(fmt.Sprintf("failed to dial to TURN server %s", err))
	}
	log.Info(fmt.Sprintf("dial server laddr:%s raddr:%s", c.LocalAddr(), c.RemoteAddr()))
	// Crafting allocation request. The first attempt is unauthenticated and
	// is expected to fail with 401, yielding the realm and nonce.
	if err = do(req, res, c,
		stun.TransactionIDSetter,
		turn.AllocateRequest,
		turn.RequestedTransportUDP,
	); err != nil {
		log.Crit(fmt.Sprintf("do failed %s", err))
	}
	var (
		code  stun.ErrorCodeAttribute
		nonce stun.Nonce
		realm stun.Realm
	)
	if res.Type.Class != stun.ClassErrorResponse {
		log.Crit(fmt.Sprintf("expected error class, got " + res.Type.Class.String()))
	}
	if err = code.GetFrom(res); err != nil {
		log.Crit(fmt.Sprintf("failed to get error code from message %s", err))
	}
	if code.Code != stun.CodeUnauthorised {
		log.Crit(fmt.Sprintf("unexpected code of error err:%s", code))
	}
	if err = nonce.GetFrom(res); err != nil {
		log.Crit(fmt.Sprintf("failed to nonce from message %s", err))
	}
	if err = realm.GetFrom(res); err != nil {
		log.Crit(fmt.Sprintf("failed to get realm from message %s", err))
	}
	realmStr := realm.String()
	nonceStr := nonce.String()
	log.Info(fmt.Sprintf("got credentials nonce:%s,realm:%s", nonce, realm))
	var (
		credentials = stun.NewLongTermIntegrity(*username, realm.String(), *password)
	)
	log.Info(fmt.Sprintf("using integrity i:%s", credentials))
	// Constructing allocate request with integrity
	req = new(stun.Message)
	if err = do(req, res, c, stun.TransactionIDSetter, turn.AllocateRequest,
		turn.RequestedTransportUDP, realm,
		stun.NewUsername(*username), nonce, credentials,
	); err != nil {
		log.Crit(fmt.Sprintf("failed to do request %s", err))
	}
	if isErr(res) {
		code.GetFrom(res)
		log.Crit(fmt.Sprintf("got error response %s ", code))
	}
	// Decoding relayed and mapped address.
	var (
		reladdr turn.RelayedAddress
		maddr   stun.XORMappedAddress
	)
	if err = reladdr.GetFrom(res); err != nil {
		log.Crit(fmt.Sprintf("failed to get relayed address %s", err))
	}
	log.Info(fmt.Sprintf("relayed address addr:%s", reladdr))
	if err = maddr.GetFrom(res); err != nil && err != stun.ErrAttributeNotFound {
		log.Crit(fmt.Sprintf("failed to decode relayed address %s", err))
	} else {
		log.Info(fmt.Sprintf("mapped address %s", maddr))
	}
	//test sendindication
	//if err := do(req, res, c, stun.TransactionIDSetter, turn.SendIndication); err != nil {
	//	log.Crit(fmt.Sprintf("failed to sendindication %s", err))
	//}
	// Creating permission request.
	echoAddr, err := net.ResolveUDPAddr(udp, *peer)
	if err != nil {
		log.Crit(fmt.Sprintf("failed to resolve addr %s", err))
	}
	peerAddr := turn.PeerAddress{
		IP:   echoAddr.IP,
		Port: echoAddr.Port,
	}
	log.Info(fmt.Sprintf("peer address addr:%s", peerAddr))
	if err := do(req, res, c, stun.TransactionIDSetter,
		turn.CreatePermissionRequest,
		peerAddr,
		stun.Realm(realmStr),
		stun.Nonce(nonceStr),
		stun.Username(*username),
		credentials,
	); err != nil {
		log.Crit(fmt.Sprintf("failed to do request %s", err))
	}
	if isErr(res) {
		code.GetFrom(res)
		log.Crit(fmt.Sprintf("failed to allocate %s ", code))
	}
	if err := credentials.Check(res); err != nil {
		log.Error(fmt.Sprintf("failed to check integrity %s", err))
	}
	var (
		sentData = turn.Data("Hello world!")
	)
	// Allocation succeed.
	// Sending data to echo server.
	// can be as resetTo(type, attrs)?
	if err := do(req, res, c, stun.TransactionIDSetter,
		turn.SendIndication,
		sentData,
		peerAddr,
		stun.Fingerprint,
	); err != nil {
		log.Crit(fmt.Sprintf("failed to build %s", err))
	}
	log.Info(fmt.Sprintf("sent data %s", string(sentData)))
	if isErr(res) {
		code.GetFrom(res)
		log.Crit(fmt.Sprintf("got error response %s", code))
	}
	var data turn.Data
	if err := data.GetFrom(res); err != nil {
		log.Crit(fmt.Sprintf("failed to get DATA attribute %s", err))
	}
	log.Info(fmt.Sprintf("got data v:%s", string(data)))
	// NOTE(review): this local shadows the package-level "peer" flag from
	// here to the end of the function.
	var peer turn.PeerAddress
	if err := peer.GetFrom(res); err != nil {
		log.Crit(fmt.Sprintf("failed to get peer addr %s", err))
	}
	log.Info(fmt.Sprintf("peer is :%s", peer.String()))
	if bytes.Equal(data, sentData) {
		log.Info("OK")
	} else {
		log.Info("DATA mismatch")
	}
	if true {
		//for channel data
		var (
			sentData = turn.Data("Hello world, channel!")
		)
		// Allocation succeed.
		// Sending data to echo server.
		// can be as resetTo(type, attrs)?
		if err := do(req, res, c, stun.TransactionIDSetter,
			turn.ChannelBindRequest,
			turn.ChannelNumber(0x4000),
			peerAddr,
			stun.Username(*username),
			stun.Realm(realmStr),
			stun.Nonce(nonceStr),
			credentials,
		); err != nil {
			log.Crit(fmt.Sprintf("failed to build %s", err))
		}
		log.Info(fmt.Sprintf("sent data %s", string(sentData)))
		if isErr(res) {
			code.GetFrom(res)
			log.Crit(fmt.Sprintf("got error response %s", code))
		}
		log.Info(fmt.Sprintf("channel bind success"))
		var channelNumber uint16 = 0x4000
		//send data on channel data
		cdata := &turn.ChannelData{
			ChannelNumber: channelNumber,
			Data:          []byte("hello,data from channel"),
		}
		if err := do(req, res, c, turn.ChannelDataRequest, cdata); err != nil {
			log.Crit(fmt.Sprintf("failed to build %s", err))
		}
		if isErr(res) {
			code.GetFrom(res)
			log.Crit(fmt.Sprintf("got error response %s", code))
		}
		var cdata2 = &turn.ChannelData{}
		if err := cdata2.GetFrom(res); err != nil {
			log.Crit(fmt.Sprintf("failed to get channel data %s", err))
		}
		if cdata2.ChannelNumber != channelNumber {
			log.Crit(fmt.Sprintf("channel number not equal expect=%d,got %d", channelNumber, cdata2.ChannelNumber))
		}
		if !bytes.Equal(cdata2.Data, cdata.Data) {
			log.Crit("data not equal")
		}
		log.Info(fmt.Sprintf("received channel data :%s", string(cdata2.Data)))
	}
	//return
	// De-allocating: a Refresh with zero lifetime releases the allocation.
	if err := do(req, res, c, stun.TransactionIDSetter,
		turn.RefreshRequest,
		stun.Realm(realmStr),
		stun.Username(*username),
		stun.Nonce(nonceStr),
		turn.ZeroLifetime,
		credentials,
	); err != nil {
		log.Crit(fmt.Sprintf("failed to do %s", err))
	}
	if isErr(res) {
		code.GetFrom(res)
		log.Crit(fmt.Sprintf("got error response %s", code))
	}
	log.Info("closing")
}
|
/*
* @Author: geoferry
* @Date: 2016-09-01 14:49:00
* @Last Modified by: geoferry
* @Last Modified time: 2016-09-02 09:15:35
*/
package main
import (
"bufio"
"fmt"
log "github.com/Sirupsen/logrus"
"io"
"os"
"strconv"
"strings"
)
// main converts the province/city list in 1.txt into a JS-style object
// literal mapping "city name" -> "PPCCC" code, printed to stdout. Lines
// beginning with '#' start a new province; every other non-empty line is a
// city within the current province.
func main() {
	file, err := os.OpenFile("1.txt", os.O_RDONLY, 0666)
	if err != nil {
		log.Error(err)
		return
	}
	defer file.Close()
	countProvince := 0
	countCity := 0
	prefix := "{"
	mainStr := ""
	suffix := "};"
	fmt.Print(prefix)
	rd := bufio.NewReader(file)
	for {
		line, err := rd.ReadString('\n')
		if err != nil {
			// End of input: emit the buffered entry minus its trailing comma.
			// FIX: the original used strings.Replace(mainStr, ",", "", -1),
			// which strips EVERY comma, and paired it with a redundant
			// "|| io.EOF == err" in the condition.
			fmt.Print(strings.TrimSuffix(mainStr, ","))
			break
		}
		// Entries are printed one iteration late so the final one can have
		// its trailing comma removed above.
		fmt.Print(mainStr)
		mainStr = ""
		if strings.HasPrefix(line, "#") {
			countProvince++
			countCity = 0
			fmt.Println()
		}
		if len(line) > 1 && !strings.HasPrefix(line, "#") {
			line = strings.Replace(line, "\n", "", -1)
			countCity++
			mainStr += `'` + strings.TrimSpace(line) + `' : '` + getCode(countProvince, countCity) + `',`
		}
	}
	fmt.Println(suffix)
	// fmt.Println("Total Province: ", countProvince)
}
// getProvinceCode zero-pads a province index to two digits.
func getProvinceCode(x int) string {
	code := strconv.Itoa(x)
	if x < 10 {
		code = "0" + code
	}
	return code
}
// getCityCode zero-pads a city index to three digits.
func getCityCode(x int) string {
	switch {
	case x < 10:
		return "00" + strconv.Itoa(x)
	case x < 100:
		return "0" + strconv.Itoa(x)
	default:
		return strconv.Itoa(x)
	}
}
// getCode concatenates the two-digit province code and three-digit city code.
func getCode(px, cx int) string {
	province := getProvinceCode(px)
	city := getCityCode(cx)
	return province + city
}
|
/**********************************************************\
| |
| hprose |
| |
| Official WebSite: http://www.hprose.com/ |
| http://www.hprose.org/ |
| |
\**********************************************************/
/**********************************************************\
* *
* hprose/jsonrpc_service_filter.go *
* *
* jsonrpc service filter for Go. *
* *
* LastModified: Oct 15, 2014 *
* Author: Ma Bingyao <andot@hprose.com> *
* *
\**********************************************************/
package hprose
import (
"bytes"
"encoding/json"
)
// JSONRPCServiceFilter translates JSON-RPC request/response payloads to and
// from the native hprose wire format.
type JSONRPCServiceFilter struct{}

// InputFilter detects a JSON-RPC request (payload starting with '{'),
// stashes its id and protocol version in the context, and rewrites the
// payload into an hprose call (TagCall + method + optional params + TagEnd).
// Any decode/encode failure falls through and returns the data unchanged.
func (filter JSONRPCServiceFilter) InputFilter(data []byte, context Context) []byte {
	if len(data) > 0 && data[0] == '{' {
		context.SetString("format", "jsonrpc")
		var request map[string]interface{}
		if err := json.Unmarshal(data, &request); err != nil {
			return data
		}
		// Remember the request id so OutputFilter can echo it back.
		if id, ok := request["id"]; ok {
			context.SetInterface("id", id)
		} else {
			context.SetInterface("id", nil)
		}
		// Version field differs between JSON-RPC 1.1 ("version") and 2.0 ("jsonrpc").
		if version, ok := request["version"].(string); ok {
			context.SetString("version", version)
		} else if jsonrpc, ok := request["jsonrpc"].(string); ok {
			context.SetString("version", jsonrpc)
		} else {
			context.SetString("version", "1.0")
		}
		buf := new(bytes.Buffer)
		writer := NewWriter(buf, true)
		if method, ok := request["method"].(string); ok && method != "" {
			if err := buf.WriteByte(TagCall); err != nil {
				return data
			}
			if err := writer.WriteString(method); err != nil {
				return data
			}
			if params, ok := request["params"].([]interface{}); ok && params != nil && len(params) > 0 {
				if err := writer.Serialize(params); err != nil {
					return data
				}
			}
		}
		buf.WriteByte(TagEnd)
		data = buf.Bytes()
	}
	return data
}
// OutputFilter converts an hprose reply back into a JSON-RPC response when
// InputFilter marked this exchange as jsonrpc. It walks the hprose tags:
// TagResult fills "result", TagError fills "error", anything else stops the
// scan; the accumulated response object is then JSON-encoded.
func (filter JSONRPCServiceFilter) OutputFilter(data []byte, context Context) []byte {
	if format, ok := context.GetString("format"); ok && format == "jsonrpc" {
		response := make(map[string]interface{})
		if version, ok := context.GetString("version"); ok && version != "2.0" {
			if version == "1.1" {
				response["version"] = "1.1"
			}
			response["result"] = nil
			response["error"] = nil
		} else {
			response["jsonrpc"] = "2.0"
		}
		response["id"], _ = context.GetInterface("id")
		if len(data) == 0 {
			data, _ = json.Marshal(response)
			return data
		}
		istream := NewBytesReader(data)
		reader := NewReader(istream, false)
		for tag, err := istream.ReadByte(); err == nil && tag != TagEnd; tag, err = istream.ReadByte() {
			switch tag {
			case TagResult:
				reader.Reset()
				var result interface{}
				reader.Unserialize(&result)
				// NOTE(review): err here is the ReadByte error (nil inside the
				// loop body); this branch looks like it was meant to check an
				// Unserialize error — confirm against the Reader API.
				if err != nil {
					e := make(map[string]interface{})
					e["code"] = -1
					e["message"] = err.Error()
					response["error"] = e
				} else {
					response["result"] = result
				}
			case TagError:
				reader.Reset()
				e := make(map[string]interface{})
				e["code"] = -1
				if message, err := reader.ReadString(); err == nil {
					e["message"] = message
				} else {
					e["message"] = err.Error()
				}
				// BUG FIX: the error object was built but never attached to
				// the response, so JSON-RPC clients never saw server errors.
				response["error"] = e
			default:
				data, _ = json.Marshal(response)
				return data
			}
		}
		data, _ = json.Marshal(response)
		return data
	}
	return data
}
|
//+build wireinject
package main
import (
"go_restful/user"
"github.com/google/wire"
"github.com/jinzhu/gorm"
)
// InitUserApi is a wire injector (see the wireinject build tag): the wire
// tool generates the real implementation from the provider set below. The
// zero-value return is a placeholder and is never executed at runtime.
func InitUserApi(db *gorm.DB) user.UserApi {
	wire.Build(user.ProvideUserRepository, user.ProvideUserService, user.ProvideUserAPI)
	return user.UserApi{}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.